From 8f120bb64c18b60081dd15c564202dbe8564d924 Mon Sep 17 00:00:00 2001 From: "agent-platform-auto-pr[bot]" <153269286+agent-platform-auto-pr[bot]@users.noreply.github.com> Date: Thu, 21 Nov 2024 10:54:03 +0000 Subject: [PATCH 01/13] [test-infra-definitions][automated] Bump test-infra-definitions to bef3516a51ab32f7dd392883dbbdc4303b21be48 (#31299) Co-authored-by: agent-platform-auto-pr[bot] <153269286+agent-platform-auto-pr[bot]@users.noreply.github.com> --- .gitlab/common/test_infra_version.yml | 2 +- test/new-e2e/go.mod | 2 +- test/new-e2e/go.sum | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.gitlab/common/test_infra_version.yml b/.gitlab/common/test_infra_version.yml index b2461c4666a84..35f159053f232 100644 --- a/.gitlab/common/test_infra_version.yml +++ b/.gitlab/common/test_infra_version.yml @@ -4,4 +4,4 @@ variables: # and check the job creating the image to make sure you have the right SHA prefix TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX: "" # Make sure to update test-infra-definitions version in go.mod as well - TEST_INFRA_DEFINITIONS_BUILDIMAGES: 7cd5e8a62570 + TEST_INFRA_DEFINITIONS_BUILDIMAGES: bef3516a51ab diff --git a/test/new-e2e/go.mod b/test/new-e2e/go.mod index 1f490aec794ba..aacfe3d41d09c 100644 --- a/test/new-e2e/go.mod +++ b/test/new-e2e/go.mod @@ -60,7 +60,7 @@ require ( // `TEST_INFRA_DEFINITIONS_BUILDIMAGES` matches the commit sha in the module version // Example: github.com/DataDog/test-infra-definitions v0.0.0-YYYYMMDDHHmmSS-0123456789AB // => TEST_INFRA_DEFINITIONS_BUILDIMAGES: 0123456789AB - github.com/DataDog/test-infra-definitions v0.0.0-20241115164330-7cd5e8a62570 + github.com/DataDog/test-infra-definitions v0.0.0-20241121090639-bef3516a51ab github.com/aws/aws-sdk-go-v2 v1.32.2 github.com/aws/aws-sdk-go-v2/config v1.27.40 github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.2 diff --git a/test/new-e2e/go.sum b/test/new-e2e/go.sum index 5ac39b6fdf4bd..674e8ad0c8d88 100644 --- a/test/new-e2e/go.sum +++ b/test/new-e2e/go.sum @@ -16,8 +16,8 @@ github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a h1:m9REhmyaWD5YJ0P53ygRHxKKo+KM+nw+zz0hEdKztMo= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/test-infra-definitions v0.0.0-20241115164330-7cd5e8a62570 h1:vVkrzQIPIhgxZP+GMd+9UhILnZTj1Uf4wZlxhcDGysA= -github.com/DataDog/test-infra-definitions v0.0.0-20241115164330-7cd5e8a62570/go.mod h1:l0n0FQYdWWQxbI5a2EkuynRQIteUQcYOaOhdxD9TvJs= +github.com/DataDog/test-infra-definitions v0.0.0-20241121090639-bef3516a51ab h1:abQ5giOKHmI2oC1ADgF/z3yZJhkMIiC6t+D/BDnW2+w= +github.com/DataDog/test-infra-definitions v0.0.0-20241121090639-bef3516a51ab/go.mod h1:l0n0FQYdWWQxbI5a2EkuynRQIteUQcYOaOhdxD9TvJs= github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f h1:5Vuo4niPKFkfwW55jV4vY0ih3VQ9RaQqeqY67fvRn8A= From c27a53e09e974ebe72f2c14b157dc715e43f0d22 Mon Sep 17 00:00:00 2001 From: Yoann Ghigoff Date: Thu, 21 Nov 2024 12:06:18 +0100 Subject: [PATCH 02/13] [CWS] Remove `connect.server.*` secl field aliases (#31218) --- .../linux_expressions.md | 23 ++--- docs/cloud-workload-security/secl_linux.json | 35 ------- 
pkg/security/secl/model/accessors_unix.go | 98 ------------------- .../secl/model/field_accessors_unix.go | 32 ------ .../secl/model/field_handlers_unix.go | 1 - pkg/security/secl/model/model_unix.go | 5 +- pkg/security/tests/connect_test.go | 4 +- 7 files changed, 11 insertions(+), 187 deletions(-) diff --git a/docs/cloud-workload-security/linux_expressions.md b/docs/cloud-workload-security/linux_expressions.md index fbc72d894b298..f7974b2aeb8bd 100644 --- a/docs/cloud-workload-security/linux_expressions.md +++ b/docs/cloud-workload-security/linux_expressions.md @@ -524,10 +524,6 @@ A connect was executed | [`connect.addr.is_public`](#common-ipportcontext-is_public-doc) | Whether the IP address belongs to a public network | | [`connect.addr.port`](#common-ipportcontext-port-doc) | Port number | | [`connect.retval`](#common-syscallevent-retval-doc) | Return value of the syscall | -| [`connect.server.addr.family`](#connect-server-addr-family-doc) | Server address family | -| [`connect.server.addr.ip`](#common-ipportcontext-ip-doc) | IP address | -| [`connect.server.addr.is_public`](#common-ipportcontext-is_public-doc) | Whether the IP address belongs to a public network | -| [`connect.server.addr.port`](#common-ipportcontext-port-doc) | Port number | ### Event `dns` @@ -2144,8 +2140,8 @@ Type: IP/CIDR Definition: IP address -`*.ip` has 7 possible prefixes: -`bind.addr` `connect.addr` `connect.server.addr` `network.destination` `network.source` `packet.destination` `packet.source` +`*.ip` has 6 possible prefixes: +`bind.addr` `connect.addr` `network.destination` `network.source` `packet.destination` `packet.source` ### `*.is_exec` {#common-process-is_exec-doc} @@ -2171,8 +2167,8 @@ Type: bool Definition: Whether the IP address belongs to a public network -`*.is_public` has 7 possible prefixes: -`bind.addr` `connect.addr` `connect.server.addr` `network.destination` `network.source` `packet.destination` `packet.source` +`*.is_public` has 6 possible prefixes: +`bind.addr` `connect.addr` `network.destination` `network.source` `packet.destination` `packet.source` ### `*.is_thread` {#common-process-is_thread-doc} @@ -2368,8 +2364,8 @@ Type: int Definition: Port number -`*.port` has 7 possible prefixes: -`bind.addr` `connect.addr` `connect.server.addr` `network.destination` `network.source` `packet.destination` `packet.source` +`*.port` has 6 possible prefixes: +`bind.addr` `connect.addr` `network.destination` `network.source` `packet.destination` `packet.source` ### `*.ppid` {#common-process-ppid-doc} @@ -2672,13 +2668,6 @@ Definition: Address family -### `connect.server.addr.family` {#connect-server-addr-family-doc} -Type: int - -Definition: Server address family - - - ### `container.created_at` {#container-created_at-doc} Type: int diff --git a/docs/cloud-workload-security/secl_linux.json b/docs/cloud-workload-security/secl_linux.json index c6deb90ff4e13..c567cdef19e65 100644 --- a/docs/cloud-workload-security/secl_linux.json +++ b/docs/cloud-workload-security/secl_linux.json @@ -1839,26 +1839,6 @@ "name": "connect.retval", "definition": "Return value of the syscall", "property_doc_link": "common-syscallevent-retval-doc" - }, - { - "name": "connect.server.addr.family", - "definition": "Server address family", - "property_doc_link": "connect-server-addr-family-doc" - }, - { - "name": "connect.server.addr.ip", - "definition": "IP address", - "property_doc_link": "common-ipportcontext-ip-doc" - }, - { - "name": "connect.server.addr.is_public", - "definition": "Whether the IP address belongs to a 
public network", - "property_doc_link": "common-ipportcontext-is_public-doc" - }, - { - "name": "connect.server.addr.port", - "definition": "Port number", - "property_doc_link": "common-ipportcontext-port-doc" } ] }, @@ -8370,7 +8350,6 @@ "prefixes": [ "bind.addr", "connect.addr", - "connect.server.addr", "network.destination", "network.source", "packet.destination", @@ -8432,7 +8411,6 @@ "prefixes": [ "bind.addr", "connect.addr", - "connect.server.addr", "network.destination", "network.source", "packet.destination", @@ -9128,7 +9106,6 @@ "prefixes": [ "bind.addr", "connect.addr", - "connect.server.addr", "network.destination", "network.source", "packet.destination", @@ -9738,18 +9715,6 @@ "constants_link": "", "examples": [] }, - { - "name": "connect.server.addr.family", - "link": "connect-server-addr-family-doc", - "type": "int", - "definition": "Server address family", - "prefixes": [ - "connect" - ], - "constants": "", - "constants_link": "", - "examples": [] - }, { "name": "container.created_at", "link": "container-created_at-doc", diff --git a/pkg/security/secl/model/accessors_unix.go b/pkg/security/secl/model/accessors_unix.go index 417bd66d33417..41f4d62195e0b 100644 --- a/pkg/security/secl/model/accessors_unix.go +++ b/pkg/security/secl/model/accessors_unix.go @@ -999,42 +999,6 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval Field: field, Weight: eval.FunctionWeight, }, nil - case "connect.server.addr.family": - return &eval.IntEvaluator{ - EvalFnc: func(ctx *eval.Context) int { - ev := ctx.Event.(*Event) - return int(ev.Connect.AddrFamily) - }, - Field: field, - Weight: eval.FunctionWeight, - }, nil - case "connect.server.addr.ip": - return &eval.CIDREvaluator{ - EvalFnc: func(ctx *eval.Context) net.IPNet { - ev := ctx.Event.(*Event) - return ev.Connect.Addr.IPNet - }, - Field: field, - Weight: eval.FunctionWeight, - }, nil - case "connect.server.addr.is_public": - return &eval.BoolEvaluator{ - EvalFnc: func(ctx *eval.Context) bool { - ev := ctx.Event.(*Event) - return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Connect.Addr) - }, - Field: field, - Weight: eval.HandlerWeight, - }, nil - case "connect.server.addr.port": - return &eval.IntEvaluator{ - EvalFnc: func(ctx *eval.Context) int { - ev := ctx.Event.(*Event) - return int(ev.Connect.Addr.Port) - }, - Field: field, - Weight: eval.FunctionWeight, - }, nil case "container.created_at": return &eval.IntEvaluator{ EvalFnc: func(ctx *eval.Context) int { @@ -20172,10 +20136,6 @@ func (ev *Event) GetFields() []eval.Field { "connect.addr.is_public", "connect.addr.port", "connect.retval", - "connect.server.addr.family", - "connect.server.addr.ip", - "connect.server.addr.is_public", - "connect.server.addr.port", "container.created_at", "container.id", "container.runtime", @@ -21692,14 +21652,6 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { return int(ev.Connect.Addr.Port), nil case "connect.retval": return int(ev.Connect.SyscallEvent.Retval), nil - case "connect.server.addr.family": - return int(ev.Connect.AddrFamily), nil - case "connect.server.addr.ip": - return ev.Connect.Addr.IPNet, nil - case "connect.server.addr.is_public": - return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Connect.Addr), nil - case "connect.server.addr.port": - return int(ev.Connect.Addr.Port), nil case "container.created_at": return int(ev.FieldHandlers.ResolveContainerCreatedAt(ev, ev.BaseEvent.ContainerContext)), nil case "container.id": @@ -28394,14 +28346,6 @@ func (ev *Event) 
GetFieldEventType(field eval.Field) (eval.EventType, error) { return "connect", nil case "connect.retval": return "connect", nil - case "connect.server.addr.family": - return "connect", nil - case "connect.server.addr.ip": - return "connect", nil - case "connect.server.addr.is_public": - return "connect", nil - case "connect.server.addr.port": - return "connect", nil case "container.created_at": return "", nil case "container.id": @@ -31223,14 +31167,6 @@ func (ev *Event) GetFieldType(field eval.Field) (reflect.Kind, error) { return reflect.Int, nil case "connect.retval": return reflect.Int, nil - case "connect.server.addr.family": - return reflect.Int, nil - case "connect.server.addr.ip": - return reflect.Struct, nil - case "connect.server.addr.is_public": - return reflect.Bool, nil - case "connect.server.addr.port": - return reflect.Int, nil case "container.created_at": return reflect.Int, nil case "container.id": @@ -34566,40 +34502,6 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } ev.Connect.SyscallEvent.Retval = int64(rv) return nil - case "connect.server.addr.family": - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "Connect.AddrFamily"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Connect.AddrFamily"} - } - ev.Connect.AddrFamily = uint16(rv) - return nil - case "connect.server.addr.ip": - rv, ok := value.(net.IPNet) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "Connect.Addr.IPNet"} - } - ev.Connect.Addr.IPNet = rv - return nil - case "connect.server.addr.is_public": - rv, ok := value.(bool) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "Connect.Addr.IsPublic"} - } - ev.Connect.Addr.IsPublic = rv - return nil - case "connect.server.addr.port": - rv, ok := value.(int) - if !ok { - return &eval.ErrValueTypeMismatch{Field: "Connect.Addr.Port"} - } - if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Connect.Addr.Port"} - } - ev.Connect.Addr.Port = uint16(rv) - return nil case "container.created_at": if ev.BaseEvent.ContainerContext == nil { ev.BaseEvent.ContainerContext = &ContainerContext{} diff --git a/pkg/security/secl/model/field_accessors_unix.go b/pkg/security/secl/model/field_accessors_unix.go index 9d7d7dea7488a..2f17ea6bc00f4 100644 --- a/pkg/security/secl/model/field_accessors_unix.go +++ b/pkg/security/secl/model/field_accessors_unix.go @@ -946,38 +946,6 @@ func (ev *Event) GetConnectRetval() int64 { return ev.Connect.SyscallEvent.Retval } -// GetConnectServerAddrFamily returns the value of the field, resolving if necessary -func (ev *Event) GetConnectServerAddrFamily() uint16 { - if ev.GetEventType().String() != "connect" { - return uint16(0) - } - return ev.Connect.AddrFamily -} - -// GetConnectServerAddrIp returns the value of the field, resolving if necessary -func (ev *Event) GetConnectServerAddrIp() net.IPNet { - if ev.GetEventType().String() != "connect" { - return net.IPNet{} - } - return ev.Connect.Addr.IPNet -} - -// GetConnectServerAddrIsPublic returns the value of the field, resolving if necessary -func (ev *Event) GetConnectServerAddrIsPublic() bool { - if ev.GetEventType().String() != "connect" { - return false - } - return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Connect.Addr) -} - -// GetConnectServerAddrPort returns the value of the field, resolving if necessary -func (ev *Event) GetConnectServerAddrPort() uint16 { - if ev.GetEventType().String() != "connect" { - return uint16(0) - } - return ev.Connect.Addr.Port -} 
- // GetContainerCreatedAt returns the value of the field, resolving if necessary func (ev *Event) GetContainerCreatedAt() int { if ev.BaseEvent.ContainerContext == nil { diff --git a/pkg/security/secl/model/field_handlers_unix.go b/pkg/security/secl/model/field_handlers_unix.go index 1140ebbef5def..4910242decc1e 100644 --- a/pkg/security/secl/model/field_handlers_unix.go +++ b/pkg/security/secl/model/field_handlers_unix.go @@ -299,7 +299,6 @@ func (ev *Event) resolveFields(forADs bool) { } case "connect": _ = ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Connect.Addr) - _ = ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Connect.Addr) case "dns": case "exec": if ev.Exec.Process.IsNotKworker() { diff --git a/pkg/security/secl/model/model_unix.go b/pkg/security/secl/model/model_unix.go index 9f20419c54447..6f9e3f88144b3 100644 --- a/pkg/security/secl/model/model_unix.go +++ b/pkg/security/secl/model/model_unix.go @@ -645,8 +645,9 @@ type BindEvent struct { // ConnectEvent represents a connect event type ConnectEvent struct { SyscallEvent - Addr IPPortContext `field:"addr;server.addr"` // Connection address - AddrFamily uint16 `field:"addr.family;server.addr.family"` // SECLDoc[addr.family] Definition:`Address family` SECLDoc[server.addr.family] Definition:`Server address family` + + Addr IPPortContext `field:"addr"` // Connection address + AddrFamily uint16 `field:"addr.family"` // SECLDoc[addr.family] Definition:`Address family` } // NetDevice represents a network device diff --git a/pkg/security/tests/connect_test.go b/pkg/security/tests/connect_test.go index d81c2aa3600ad..ff80fc95e3316 100644 --- a/pkg/security/tests/connect_test.go +++ b/pkg/security/tests/connect_test.go @@ -30,11 +30,11 @@ func TestConnectEvent(t *testing.T) { ruleDefs := []*rules.RuleDefinition{ { ID: "test_connect_af_inet", - Expression: `connect.server.addr.family == AF_INET && process.file.name == "syscall_tester"`, + Expression: `connect.addr.family == AF_INET && process.file.name == "syscall_tester"`, }, { ID: "test_connect_af_inet6", - Expression: `connect.server.addr.family == AF_INET6 && process.file.name == "syscall_tester"`, + Expression: `connect.addr.family == AF_INET6 && process.file.name == "syscall_tester"`, }, } From 85675963f915b82d0e6bd10a797e2d24cffb3fea Mon Sep 17 00:00:00 2001 From: Alexandre Menasria <47357713+amenasria@users.noreply.github.com> Date: Thu, 21 Nov 2024 14:13:38 +0100 Subject: [PATCH 03/13] Revert "[test-infra-definitions][automated] Bump test-infra-definitions (#31299)" (#31316) --- .gitlab/common/test_infra_version.yml | 2 +- test/new-e2e/go.mod | 2 +- test/new-e2e/go.sum | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.gitlab/common/test_infra_version.yml b/.gitlab/common/test_infra_version.yml index 35f159053f232..b2461c4666a84 100644 --- a/.gitlab/common/test_infra_version.yml +++ b/.gitlab/common/test_infra_version.yml @@ -4,4 +4,4 @@ variables: # and check the job creating the image to make sure you have the right SHA prefix TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX: "" # Make sure to update test-infra-definitions version in go.mod as well - TEST_INFRA_DEFINITIONS_BUILDIMAGES: bef3516a51ab + TEST_INFRA_DEFINITIONS_BUILDIMAGES: 7cd5e8a62570 diff --git a/test/new-e2e/go.mod b/test/new-e2e/go.mod index aacfe3d41d09c..1f490aec794ba 100644 --- a/test/new-e2e/go.mod +++ b/test/new-e2e/go.mod @@ -60,7 +60,7 @@ require ( // `TEST_INFRA_DEFINITIONS_BUILDIMAGES` matches the commit sha in the module version // Example: github.com/DataDog/test-infra-definitions 
v0.0.0-YYYYMMDDHHmmSS-0123456789AB // => TEST_INFRA_DEFINITIONS_BUILDIMAGES: 0123456789AB - github.com/DataDog/test-infra-definitions v0.0.0-20241121090639-bef3516a51ab + github.com/DataDog/test-infra-definitions v0.0.0-20241115164330-7cd5e8a62570 github.com/aws/aws-sdk-go-v2 v1.32.2 github.com/aws/aws-sdk-go-v2/config v1.27.40 github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.2 diff --git a/test/new-e2e/go.sum b/test/new-e2e/go.sum index 674e8ad0c8d88..5ac39b6fdf4bd 100644 --- a/test/new-e2e/go.sum +++ b/test/new-e2e/go.sum @@ -16,8 +16,8 @@ github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a h1:m9REhmyaWD5YJ0P53ygRHxKKo+KM+nw+zz0hEdKztMo= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/test-infra-definitions v0.0.0-20241121090639-bef3516a51ab h1:abQ5giOKHmI2oC1ADgF/z3yZJhkMIiC6t+D/BDnW2+w= -github.com/DataDog/test-infra-definitions v0.0.0-20241121090639-bef3516a51ab/go.mod h1:l0n0FQYdWWQxbI5a2EkuynRQIteUQcYOaOhdxD9TvJs= +github.com/DataDog/test-infra-definitions v0.0.0-20241115164330-7cd5e8a62570 h1:vVkrzQIPIhgxZP+GMd+9UhILnZTj1Uf4wZlxhcDGysA= +github.com/DataDog/test-infra-definitions v0.0.0-20241115164330-7cd5e8a62570/go.mod h1:l0n0FQYdWWQxbI5a2EkuynRQIteUQcYOaOhdxD9TvJs= github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f h1:5Vuo4niPKFkfwW55jV4vY0ih3VQ9RaQqeqY67fvRn8A= From 634ecaa06f5f91ef7c8ee45a1e4fa1adca69b233 Mon Sep 17 00:00:00 2001 From: Sylvain Baubeau Date: Thu, 21 Nov 2024 14:27:50 +0100 Subject: [PATCH 04/13] [CWS] Fix file.flags JSON schema (#31294) --- pkg/security/secl/schemas/file.schema.json | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/pkg/security/secl/schemas/file.schema.json b/pkg/security/secl/schemas/file.schema.json index fb3407feeb3c7..a5cd3001a3e18 100644 --- a/pkg/security/secl/schemas/file.schema.json +++ b/pkg/security/secl/schemas/file.schema.json @@ -36,11 +36,9 @@ }, "flags": { "type": "array", - "items": [ - { - "type": "string" - } - ] + "items": { + "type": "string" + } }, "mount_path": { "type": "string" From efa45af5c18fc3f6057c68c829f48102f5e81cd8 Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Thu, 21 Nov 2024 15:26:19 +0100 Subject: [PATCH 05/13] [CWS] make sure stress tests are build gated on linux (#31315) --- pkg/security/tests/latency_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/security/tests/latency_test.go b/pkg/security/tests/latency_test.go index 73d0dc661f5af..6685f6f81758a 100644 --- a/pkg/security/tests/latency_test.go +++ b/pkg/security/tests/latency_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-//go:build stresstests +//go:build linux && stresstests // Package tests holds tests related files package tests From a166214977bf7e2ef79cd9df601e70fe8734b758 Mon Sep 17 00:00:00 2001 From: Alexandre Yang Date: Thu, 21 Nov 2024 15:52:25 +0100 Subject: [PATCH 06/13] [ha-agent] Add haagent component (#30910) --- .github/CODEOWNERS | 1 + cmd/agent/subcommands/run/command.go | 2 + comp/README.md | 6 ++ comp/haagent/def/component.go | 25 ++++++++ comp/haagent/fx/fx.go | 23 +++++++ comp/haagent/impl/config.go | 22 +++++++ comp/haagent/impl/haagent.go | 49 +++++++++++++++ comp/haagent/impl/haagent_comp.go | 33 ++++++++++ comp/haagent/impl/haagent_test.go | 62 +++++++++++++++++++ comp/haagent/impl/haagent_testutils_test.go | 37 +++++++++++ comp/haagent/mock/mock.go | 45 ++++++++++++++ pkg/config/setup/config.go | 4 ++ .../add_haagent_comp-060918c70bcadb08.yaml | 11 ++++ 13 files changed, 320 insertions(+) create mode 100644 comp/haagent/def/component.go create mode 100644 comp/haagent/fx/fx.go create mode 100644 comp/haagent/impl/config.go create mode 100644 comp/haagent/impl/haagent.go create mode 100644 comp/haagent/impl/haagent_comp.go create mode 100644 comp/haagent/impl/haagent_test.go create mode 100644 comp/haagent/impl/haagent_testutils_test.go create mode 100644 comp/haagent/mock/mock.go create mode 100644 releasenotes/notes/add_haagent_comp-060918c70bcadb08.yaml diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index d6a7b9e940a14..81eafd8261c7a 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -317,6 +317,7 @@ /comp/trace/etwtracer @DataDog/windows-agent /comp/autoscaling/datadogclient @DataDog/container-integrations /comp/etw @DataDog/windows-agent +/comp/haagent @DataDog/ndm-core /comp/languagedetection/client @DataDog/container-platform /comp/rdnsquerier @DataDog/ndm-integrations /comp/serializer/compression @DataDog/agent-metrics-logs diff --git a/cmd/agent/subcommands/run/command.go b/cmd/agent/subcommands/run/command.go index 986ff25c5349f..e23e94d67a3f7 100644 --- a/cmd/agent/subcommands/run/command.go +++ b/cmd/agent/subcommands/run/command.go @@ -30,6 +30,7 @@ import ( internalsettings "github.com/DataDog/datadog-agent/cmd/agent/subcommands/run/internal/settings" agenttelemetry "github.com/DataDog/datadog-agent/comp/core/agenttelemetry/def" agenttelemetryfx "github.com/DataDog/datadog-agent/comp/core/agenttelemetry/fx" + haagentfx "github.com/DataDog/datadog-agent/comp/haagent/fx" // checks implemented as components @@ -472,6 +473,7 @@ func getSharedFxOption() fx.Option { agenttelemetryfx.Module(), networkpath.Bundle(), remoteagentregistryfx.Module(), + haagentfx.Module(), ) } diff --git a/comp/README.md b/comp/README.md index d6d7902fd7b34..d9c2cf8ab27d4 100644 --- a/comp/README.md +++ b/comp/README.md @@ -597,6 +597,12 @@ Package datadogclient provides a client to query the datadog API Package etw provides an ETW tracing interface +### [comp/haagent](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/haagent) + +*Datadog Team*: ndm-core + +Package haagent handles states for HA Agent feature. 
+ ### [comp/languagedetection/client](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/languagedetection/client) *Datadog Team*: container-platform diff --git a/comp/haagent/def/component.go b/comp/haagent/def/component.go new file mode 100644 index 0000000000000..2472322d9a400 --- /dev/null +++ b/comp/haagent/def/component.go @@ -0,0 +1,25 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +// Package haagent handles states for HA Agent feature. +package haagent + +// team: ndm-core + +// Component is the component type. +type Component interface { + // Enabled returns true if ha_agent.enabled is set to true + Enabled() bool + + // GetGroup returns the value of ha_agent.group + GetGroup() string + + // IsLeader returns true if the current Agent is leader + IsLeader() bool + + // SetLeader takes the leader agent hostname as input, if it matches the current agent hostname, + // the isLeader state is set to true, otherwise false. + SetLeader(leaderAgentHostname string) +} diff --git a/comp/haagent/fx/fx.go b/comp/haagent/fx/fx.go new file mode 100644 index 0000000000000..a60f314250169 --- /dev/null +++ b/comp/haagent/fx/fx.go @@ -0,0 +1,23 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +// Package fx provides the fx module for the haagent component +package fx + +import ( + haagent "github.com/DataDog/datadog-agent/comp/haagent/def" + haagentimpl "github.com/DataDog/datadog-agent/comp/haagent/impl" + "github.com/DataDog/datadog-agent/pkg/util/fxutil" +) + +// Module defines the fx options for this component +func Module() fxutil.Module { + return fxutil.Component( + fxutil.ProvideComponentConstructor( + haagentimpl.NewComponent, + ), + fxutil.ProvideOptional[haagent.Component](), + ) +} diff --git a/comp/haagent/impl/config.go b/comp/haagent/impl/config.go new file mode 100644 index 0000000000000..2417106455a7d --- /dev/null +++ b/comp/haagent/impl/config.go @@ -0,0 +1,22 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +package haagentimpl + +import ( + "github.com/DataDog/datadog-agent/comp/core/config" +) + +type haAgentConfigs struct { + enabled bool + group string +} + +func newHaAgentConfigs(agentConfig config.Component) *haAgentConfigs { + return &haAgentConfigs{ + enabled: agentConfig.GetBool("ha_agent.enabled"), + group: agentConfig.GetString("ha_agent.group"), + } +} diff --git a/comp/haagent/impl/haagent.go b/comp/haagent/impl/haagent.go new file mode 100644 index 0000000000000..7974867596547 --- /dev/null +++ b/comp/haagent/impl/haagent.go @@ -0,0 +1,49 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
+ +package haagentimpl + +import ( + "context" + + log "github.com/DataDog/datadog-agent/comp/core/log/def" + "github.com/DataDog/datadog-agent/pkg/util/hostname" + "go.uber.org/atomic" +) + +type haAgentImpl struct { + log log.Component + haAgentConfigs *haAgentConfigs + isLeader *atomic.Bool +} + +func newHaAgentImpl(log log.Component, haAgentConfigs *haAgentConfigs) *haAgentImpl { + return &haAgentImpl{ + log: log, + haAgentConfigs: haAgentConfigs, + isLeader: atomic.NewBool(false), + } +} + +func (h *haAgentImpl) Enabled() bool { + return h.haAgentConfigs.enabled +} + +func (h *haAgentImpl) GetGroup() string { + return h.haAgentConfigs.group +} + +func (h *haAgentImpl) IsLeader() bool { + return h.isLeader.Load() +} + +func (h *haAgentImpl) SetLeader(leaderAgentHostname string) { + agentHostname, err := hostname.Get(context.TODO()) + if err != nil { + h.log.Warnf("Error getting the hostname: %v", err) + return + } + h.isLeader.Store(agentHostname == leaderAgentHostname) +} diff --git a/comp/haagent/impl/haagent_comp.go b/comp/haagent/impl/haagent_comp.go new file mode 100644 index 0000000000000..24f591741fa0d --- /dev/null +++ b/comp/haagent/impl/haagent_comp.go @@ -0,0 +1,33 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +// Package haagentimpl implements the haagent component interface +package haagentimpl + +import ( + "github.com/DataDog/datadog-agent/comp/core/config" + log "github.com/DataDog/datadog-agent/comp/core/log/def" + haagent "github.com/DataDog/datadog-agent/comp/haagent/def" +) + +// Requires defines the dependencies for the haagent component +type Requires struct { + Logger log.Component + AgentConfig config.Component +} + +// Provides defines the output of the haagent component +type Provides struct { + Comp haagent.Component +} + +// NewComponent creates a new haagent component +func NewComponent(reqs Requires) (Provides, error) { + haAgentConfigs := newHaAgentConfigs(reqs.AgentConfig) + provides := Provides{ + Comp: newHaAgentImpl(reqs.Logger, haAgentConfigs), + } + return provides, nil +} diff --git a/comp/haagent/impl/haagent_test.go b/comp/haagent/impl/haagent_test.go new file mode 100644 index 0000000000000..3be3d2d6341ee --- /dev/null +++ b/comp/haagent/impl/haagent_test.go @@ -0,0 +1,62 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
+ +package haagentimpl + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Enabled(t *testing.T) { + tests := []struct { + name string + configs map[string]interface{} + expectedEnabled bool + }{ + { + name: "enabled", + configs: map[string]interface{}{ + "ha_agent.enabled": true, + }, + expectedEnabled: true, + }, + { + name: "disabled", + configs: map[string]interface{}{ + "ha_agent.enabled": false, + }, + expectedEnabled: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + haAgent := newTestHaAgentComponent(t, tt.configs) + assert.Equal(t, tt.expectedEnabled, haAgent.Enabled()) + }) + } +} + +func Test_GetGroup(t *testing.T) { + agentConfigs := map[string]interface{}{ + "ha_agent.group": "my-group-01", + } + haAgent := newTestHaAgentComponent(t, agentConfigs) + assert.Equal(t, "my-group-01", haAgent.GetGroup()) +} + +func Test_IsLeader_SetLeader(t *testing.T) { + agentConfigs := map[string]interface{}{ + "hostname": "my-agent-hostname", + } + haAgent := newTestHaAgentComponent(t, agentConfigs) + + haAgent.SetLeader("another-agent") + assert.False(t, haAgent.IsLeader()) + + haAgent.SetLeader("my-agent-hostname") + assert.True(t, haAgent.IsLeader()) +} diff --git a/comp/haagent/impl/haagent_testutils_test.go b/comp/haagent/impl/haagent_testutils_test.go new file mode 100644 index 0000000000000..a401d9bdd8a61 --- /dev/null +++ b/comp/haagent/impl/haagent_testutils_test.go @@ -0,0 +1,37 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +package haagentimpl + +import ( + "testing" + + "github.com/DataDog/datadog-agent/comp/core/config" + logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" + haagent "github.com/DataDog/datadog-agent/comp/haagent/def" + "github.com/DataDog/datadog-agent/pkg/util/fxutil" + "github.com/stretchr/testify/require" + "go.uber.org/fx" +) + +func newTestHaAgentComponent(t *testing.T, agentConfigs map[string]interface{}) haagent.Component { + logComponent := logmock.New(t) + agentConfigComponent := fxutil.Test[config.Component](t, fx.Options( + config.MockModule(), + fx.Replace(config.MockParams{Overrides: agentConfigs}), + )) + + requires := Requires{ + Logger: logComponent, + AgentConfig: agentConfigComponent, + } + + provides, err := NewComponent(requires) + require.NoError(t, err) + + comp := provides.Comp + require.NotNil(t, comp) + return comp +} diff --git a/comp/haagent/mock/mock.go b/comp/haagent/mock/mock.go new file mode 100644 index 0000000000000..6ee0c733361f7 --- /dev/null +++ b/comp/haagent/mock/mock.go @@ -0,0 +1,45 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
+ +//go:build test + +// Package mock provides a mock for the haagent component +package mock + +import ( + "testing" + + log "github.com/DataDog/datadog-agent/comp/core/log/def" + haagent "github.com/DataDog/datadog-agent/comp/haagent/def" +) + +type mock struct { + Logger log.Component +} + +func (m *mock) GetGroup() string { + return "mockGroup01" +} + +func (m *mock) Enabled() bool { + return true +} + +func (m *mock) SetLeader(_ string) { +} + +func (m *mock) IsLeader() bool { return false } + +// Provides that defines the output of mocked snmpscan component +type Provides struct { + comp haagent.Component +} + +// Mock returns a mock for haagent component. +func Mock(_ *testing.T) Provides { + return Provides{ + comp: &mock{}, + } +} diff --git a/pkg/config/setup/config.go b/pkg/config/setup/config.go index 9894d1fb2c6ad..91fcef102fb7a 100644 --- a/pkg/config/setup/config.go +++ b/pkg/config/setup/config.go @@ -474,6 +474,10 @@ func InitConfig(config pkgconfigmodel.Setup) { config.BindEnvAndSetDefault("network_path.collector.reverse_dns_enrichment.timeout", 5000) bindEnvAndSetLogsConfigKeys(config, "network_path.forwarder.") + // HA Agent + config.BindEnvAndSetDefault("ha_agent.enabled", false) + config.BindEnv("ha_agent.group") + // Kube ApiServer config.BindEnvAndSetDefault("kubernetes_kubeconfig_path", "") config.BindEnvAndSetDefault("kubernetes_apiserver_ca_path", "") diff --git a/releasenotes/notes/add_haagent_comp-060918c70bcadb08.yaml b/releasenotes/notes/add_haagent_comp-060918c70bcadb08.yaml new file mode 100644 index 0000000000000..14d0ca1570690 --- /dev/null +++ b/releasenotes/notes/add_haagent_comp-060918c70bcadb08.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +features: + - | + [ha-agent] Add haagent component used for HA Agent feature. 
From a0215301ac79e5381bea2f342120f7f3728eced8 Mon Sep 17 00:00:00 2001 From: David Ortiz Date: Thu, 21 Nov 2024 15:59:27 +0100 Subject: [PATCH 07/13] [tagger/mock] Refactor to use fake impl directly and avoid unwanted dependencies (#31297) --- comp/api/api/apiimpl/api_test.go | 9 ++ .../api/apiimpl/internal/agent/agent_test.go | 9 ++ comp/core/tagger/impl/tagger.go | 3 +- comp/core/tagger/impl/tagger_test.go | 11 +- .../core/tagger/{impl => mock}/fake_tagger.go | 39 +++++- comp/core/tagger/mock/mock.go | 123 +----------------- .../agent/agentimpl/agent_linux_test.go | 12 ++ comp/process/agent/agentimpl/agent_test.go | 7 + .../kubernetesapiserver/events_common_test.go | 2 +- pkg/logs/internal/util/adlistener/ad_test.go | 2 +- pkg/logs/schedulers/cca/scheduler_test.go | 2 +- 11 files changed, 82 insertions(+), 137 deletions(-) rename comp/core/tagger/{impl => mock}/fake_tagger.go (82%) diff --git a/comp/api/api/apiimpl/api_test.go b/comp/api/api/apiimpl/api_test.go index f24241bf2f150..d92e8f1e4fa48 100644 --- a/comp/api/api/apiimpl/api_test.go +++ b/comp/api/api/apiimpl/api_test.go @@ -26,11 +26,16 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/autodiscoveryimpl" "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameimpl" + log "github.com/DataDog/datadog-agent/comp/core/log/def" + logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" remoteagentregistry "github.com/DataDog/datadog-agent/comp/core/remoteagentregistry/def" "github.com/DataDog/datadog-agent/comp/core/secrets/secretsimpl" tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" taggermock "github.com/DataDog/datadog-agent/comp/core/tagger/mock" "github.com/DataDog/datadog-agent/comp/core/telemetry" + "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" + workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" "github.com/DataDog/datadog-agent/comp/dogstatsd/pidmap/pidmapimpl" replaymock "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/fx-mock" dogstatsdServer "github.com/DataDog/datadog-agent/comp/dogstatsd/server" @@ -92,6 +97,10 @@ func getTestAPIServer(t *testing.T, params config.MockParams) testdeps { } }), fx.Provide(func() remoteagentregistry.Component { return nil }), + telemetryimpl.MockModule(), + config.MockModule(), + workloadmetafxmock.MockModule(workloadmeta.NewParams()), + fx.Provide(func(t testing.TB) log.Component { return logmock.New(t) }), ) } diff --git a/comp/api/api/apiimpl/internal/agent/agent_test.go b/comp/api/api/apiimpl/internal/agent/agent_test.go index 98823634836ce..b842e456a431d 100644 --- a/comp/api/api/apiimpl/internal/agent/agent_test.go +++ b/comp/api/api/apiimpl/internal/agent/agent_test.go @@ -23,8 +23,13 @@ import ( "github.com/DataDog/datadog-agent/comp/collector/collector" "github.com/DataDog/datadog-agent/comp/core/autodiscovery" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/autodiscoveryimpl" + "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/flare/flareimpl" "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface" + log "github.com/DataDog/datadog-agent/comp/core/log/def" + logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" + "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" + workloadmetafx "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx" 
"github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/comp/core/secrets/secretsimpl" @@ -108,6 +113,10 @@ func getComponentDeps(t *testing.T) handlerdeps { autodiscoveryimpl.MockModule(), ), settingsimpl.MockModule(), + config.MockModule(), + fx.Provide(func(t testing.TB) log.Component { return logmock.New(t) }), + workloadmetafx.Module(workloadmeta.NewParams()), + telemetryimpl.MockModule(), ) } diff --git a/comp/core/tagger/impl/tagger.go b/comp/core/tagger/impl/tagger.go index aa59641d7f894..2397d6052cd7d 100644 --- a/comp/core/tagger/impl/tagger.go +++ b/comp/core/tagger/impl/tagger.go @@ -27,6 +27,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" taggercommon "github.com/DataDog/datadog-agent/comp/core/tagger/common" tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" + taggermock "github.com/DataDog/datadog-agent/comp/core/tagger/mock" "github.com/DataDog/datadog-agent/comp/core/tagger/telemetry" "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/comp/core/tagger/utils" @@ -142,7 +143,7 @@ func NewTaggerClient(params tagger.Params, cfg config.Component, wmeta workloadm var err error telemetryStore := telemetry.NewStore(telemetryComp) if params.UseFakeTagger { - defaultTagger = newFakeTagger() + defaultTagger = taggermock.New().Comp } else { defaultTagger, err = newLocalTagger(cfg, wmeta, telemetryStore) } diff --git a/comp/core/tagger/impl/tagger_test.go b/comp/core/tagger/impl/tagger_test.go index 822fd435ece04..da581f20f3fd0 100644 --- a/comp/core/tagger/impl/tagger_test.go +++ b/comp/core/tagger/impl/tagger_test.go @@ -17,6 +17,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" + "github.com/DataDog/datadog-agent/comp/core/tagger/mock" "github.com/DataDog/datadog-agent/comp/core/tagger/types" noopTelemetry "github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" @@ -67,7 +68,7 @@ func TestEnrichTags(t *testing.T) { tagger, err := NewTaggerClient(params, c, wmeta, logComponent, noopTelemetry.GetCompatComponent()) assert.NoError(t, err) - fakeTagger := tagger.defaultTagger.(*FakeTagger) + fakeTagger := tagger.defaultTagger.(*mock.FakeTagger) containerName, initContainerName, containerID, initContainerID, podUID := "container-name", "init-container-name", "container-id", "init-container-id", "pod-uid" @@ -191,7 +192,7 @@ func TestEnrichTagsOrchestrator(t *testing.T) { tagger, err := NewTaggerClient(params, c, wmeta, logComponent, noopTelemetry.GetCompatComponent()) assert.NoError(t, err) - fakeTagger := tagger.defaultTagger.(*FakeTagger) + fakeTagger := tagger.defaultTagger.(*mock.FakeTagger) fakeTagger.SetTags(types.NewEntityID(types.ContainerID, "bar"), "fooSource", []string{"container-low"}, []string{"container-orch"}, nil, nil) tb := tagset.NewHashingTagsAccumulator() @@ -216,7 +217,7 @@ func TestEnrichTagsOptOut(t *testing.T) { tagger, err := NewTaggerClient(params, c, wmeta, logComponent, noopTelemetry.GetCompatComponent()) assert.NoError(t, err) - fakeTagger := tagger.defaultTagger.(*FakeTagger) + fakeTagger := tagger.defaultTagger.(*mock.FakeTagger) fakeTagger.SetTags(types.NewEntityID(types.ContainerID, "bar"), "fooSource", []string{"container-low"}, []string{"container-orch"}, nil, nil) @@ -312,7 +313,7 @@ func TestAgentTags(t *testing.T) { 
tagger, err := NewTaggerClient(params, c, wmeta, logComponent, noopTelemetry.GetCompatComponent()) assert.NoError(t, err) - fakeTagger := tagger.defaultTagger.(*FakeTagger) + fakeTagger := tagger.defaultTagger.(*mock.FakeTagger) agentContainerID, podUID := "agentContainerID", "podUID" mockMetricsProvider := collectormock.NewMetricsProvider() @@ -351,7 +352,7 @@ func TestGlobalTags(t *testing.T) { tagger, err := NewTaggerClient(params, c, wmeta, logComponent, noopTelemetry.GetCompatComponent()) assert.NoError(t, err) - fakeTagger := tagger.defaultTagger.(*FakeTagger) + fakeTagger := tagger.defaultTagger.(*mock.FakeTagger) fakeTagger.SetTags(types.NewEntityID(types.ContainerID, "bar"), "fooSource", []string{"container-low"}, []string{"container-orch"}, []string{"container-high"}, nil) fakeTagger.SetGlobalTags([]string{"global-low"}, []string{"global-orch"}, []string{"global-high"}, nil) diff --git a/comp/core/tagger/impl/fake_tagger.go b/comp/core/tagger/mock/fake_tagger.go similarity index 82% rename from comp/core/tagger/impl/fake_tagger.go rename to comp/core/tagger/mock/fake_tagger.go index 31a71a1b8bff5..7d453916b86a0 100644 --- a/comp/core/tagger/impl/fake_tagger.go +++ b/comp/core/tagger/mock/fake_tagger.go @@ -3,32 +3,54 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package taggerimpl +package mock import ( "context" + "net/http" "strconv" + api "github.com/DataDog/datadog-agent/comp/api/api/def" taggercommon "github.com/DataDog/datadog-agent/comp/core/tagger/common" tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" "github.com/DataDog/datadog-agent/comp/core/tagger/tagstore" "github.com/DataDog/datadog-agent/comp/core/tagger/telemetry" "github.com/DataDog/datadog-agent/comp/core/tagger/types" - taggertypes "github.com/DataDog/datadog-agent/pkg/tagger/types" "github.com/DataDog/datadog-agent/pkg/tagset" ) +// Mock implements mock-specific methods for the tagger component. 
+type Mock interface { + tagger.Component + + // SetTags allows to set tags in the mock fake tagger + SetTags(entityID types.EntityID, source string, low, orch, high, std []string) + + // SetGlobalTags allows to set tags in store for the global entity + SetGlobalTags(low, orch, high, std []string) +} + // FakeTagger is a fake implementation of the tagger interface type FakeTagger struct { errors map[string]error store *tagstore.TagStore } -func newFakeTagger() *FakeTagger { - return &FakeTagger{ - errors: make(map[string]error), - store: tagstore.NewTagStore(nil), +// Provides is a struct containing the mock and the endpoint +type Provides struct { + Comp Mock + Endpoint api.AgentEndpointProvider +} + +// New instantiates a new fake tagger +func New() Provides { + return Provides{ + Comp: &FakeTagger{ + errors: make(map[string]error), + store: tagstore.NewTagStore(nil), + }, + Endpoint: api.NewAgentEndpointProvider(mockHandleRequest, "/tagger-list", "GET"), } } @@ -170,3 +192,8 @@ func (f *FakeTagger) ChecksCardinality() types.TagCardinality { func (f *FakeTagger) DogstatsdCardinality() types.TagCardinality { return types.LowCardinality } + +// mockHandleRequest is a simple mocked http.Handler function to test the route is registered correctly on the api component +func mockHandleRequest(w http.ResponseWriter, _ *http.Request) { + w.Write([]byte("OK")) +} diff --git a/comp/core/tagger/mock/mock.go b/comp/core/tagger/mock/mock.go index 30fcabd7ef0b8..26b9781c871ec 100644 --- a/comp/core/tagger/mock/mock.go +++ b/comp/core/tagger/mock/mock.go @@ -4,127 +4,20 @@ // Copyright 2016-present Datadog, Inc. //go:build test -// +build test // Package mock contains the implementation of the mock for the tagger component. package mock import ( - "net/http" "testing" - "go.uber.org/fx" - - "github.com/stretchr/testify/assert" - - api "github.com/DataDog/datadog-agent/comp/api/api/def" - "github.com/DataDog/datadog-agent/comp/core/config" - log "github.com/DataDog/datadog-agent/comp/core/log/def" - logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" - "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig/sysprobeconfigimpl" - tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" - taggerimpl "github.com/DataDog/datadog-agent/comp/core/tagger/impl" - "github.com/DataDog/datadog-agent/comp/core/tagger/types" - "github.com/DataDog/datadog-agent/comp/core/telemetry" - noopTelemetry "github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl" - "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - workloadmetafx "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx" - configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) -// Mock implements mock-specific methods for the tagger component. 
-type Mock interface { - tagger.Component - - // SetTags allows to set tags in the mock fake tagger - SetTags(entityID types.EntityID, source string, low, orch, high, std []string) - - // SetGlobalTags allows to set tags in store for the global entity - SetGlobalTags(low, orch, high, std []string) -} - -// mockTaggerClient is a mock of the tagger Component -type mockTaggerClient struct { - *taggerimpl.TaggerWrapper -} - -// mockHandleRequest is a simple mocked http.Handler function to test the route is registered correctly on the api component -func (m *mockTaggerClient) mockHandleRequest(w http.ResponseWriter, _ *http.Request) { - w.Write([]byte("OK")) -} - -// New returns a Mock -func New(t testing.TB) Mock { - c := configmock.New(t) - params := tagger.Params{ - UseFakeTagger: true, - } - logComponent := logmock.New(t) - wmeta := fxutil.Test[workloadmeta.Component](t, - fx.Provide(func() log.Component { return logComponent }), - fx.Provide(func() config.Component { return c }), - workloadmetafx.Module(workloadmeta.NewParams()), - ) - - tagger, err := taggerimpl.NewTaggerClient(params, c, wmeta, logComponent, noopTelemetry.GetCompatComponent()) - - assert.NoError(t, err) - - return &mockTaggerClient{ - tagger, - } -} - -// Provides is a struct containing the mock and the endpoint -type Provides struct { - fx.Out - - Comp Mock - Endpoint api.AgentEndpointProvider -} - -type dependencies struct { - fx.In - - Config config.Component - Log log.Component - WMeta workloadmeta.Component - Telemetry telemetry.Component -} - -// NewMock returns a Provides -func NewMock(deps dependencies) (Provides, error) { - params := tagger.Params{ - UseFakeTagger: true, - } - - tagger, err := taggerimpl.NewTaggerClient(params, deps.Config, deps.WMeta, deps.Log, deps.Telemetry) - if err != nil { - return Provides{}, err - } - - c := &mockTaggerClient{ - tagger, - } - return Provides{ - Comp: c, - Endpoint: api.NewAgentEndpointProvider(c.mockHandleRequest, "/tagger-list", "GET"), - }, nil -} - // Module is a module containing the mock, useful for testing func Module() fxutil.Module { return fxutil.Component( - fx.Provide(NewMock), - fx.Supply(config.Params{}), - fx.Supply(log.Params{}), - fx.Provide(func(t testing.TB) log.Component { return logmock.New(t) }), - config.MockModule(), - sysprobeconfigimpl.MockModule(), - workloadmetafx.Module(workloadmeta.NewParams()), - telemetryimpl.MockModule(), + fxutil.ProvideComponentConstructor(New), ) } @@ -132,17 +25,3 @@ func Module() fxutil.Module { func SetupFakeTagger(t testing.TB) Mock { return fxutil.Test[Mock](t, Module()) } - -// SetTags calls faketagger SetTags which sets the tags for an entity -func (m *mockTaggerClient) SetTags(entity types.EntityID, source string, low, orch, high, std []string) { - if v, ok := m.TaggerWrapper.GetDefaultTagger().(*taggerimpl.FakeTagger); ok { - v.SetTags(entity, source, low, orch, high, std) - } -} - -// SetGlobalTags calls faketagger SetGlobalTags which sets the tags for the global entity -func (m *mockTaggerClient) SetGlobalTags(low, orch, high, std []string) { - if v, ok := m.TaggerWrapper.GetDefaultTagger().(*taggerimpl.FakeTagger); ok { - v.SetGlobalTags(low, orch, high, std) - } -} diff --git a/comp/process/agent/agentimpl/agent_linux_test.go b/comp/process/agent/agentimpl/agent_linux_test.go index bbfcf36a66081..e155fdfb551ba 100644 --- a/comp/process/agent/agentimpl/agent_linux_test.go +++ b/comp/process/agent/agentimpl/agent_linux_test.go @@ -17,6 +17,9 @@ import ( "go.uber.org/fx" configComp 
"github.com/DataDog/datadog-agent/comp/core/config" + log "github.com/DataDog/datadog-agent/comp/core/log/def" + logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" + "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig/sysprobeconfigimpl" taggerMock "github.com/DataDog/datadog-agent/comp/core/tagger/mock" "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" @@ -129,6 +132,9 @@ func TestProcessAgentComponentOnLinux(t *testing.T) { submitterimpl.MockModule(), taggerMock.Module(), statsd.MockModule(), + fx.Provide(func(t testing.TB) log.Component { return logmock.New(t) }), + configComp.MockModule(), + sysprobeconfigimpl.MockModule(), Module(), fx.Replace(configComp.MockParams{Overrides: map[string]interface{}{ @@ -197,6 +203,9 @@ func TestStatusProvider(t *testing.T) { "process_config.run_in_core_agent.enabled": true, }}), processcheckimpl.MockModule(), + fx.Provide(func(t testing.TB) log.Component { return logmock.New(t) }), + configComp.MockModule(), + sysprobeconfigimpl.MockModule(), fx.Provide(func() func(c *checkMocks.Check) { return func(c *checkMocks.Check) { c.On("Init", mock.Anything, mock.Anything, mock.AnythingOfType("bool")).Return(nil).Maybe() @@ -242,6 +251,9 @@ func TestTelemetryCoreAgent(t *testing.T) { "telemetry.enabled": true, }}), processcheckimpl.MockModule(), + fx.Provide(func(t testing.TB) log.Component { return logmock.New(t) }), + configComp.MockModule(), + sysprobeconfigimpl.MockModule(), fx.Provide(func() func(c *checkMocks.Check) { return func(c *checkMocks.Check) { c.On("Init", mock.Anything, mock.Anything, mock.AnythingOfType("bool")).Return(nil).Maybe() diff --git a/comp/process/agent/agentimpl/agent_test.go b/comp/process/agent/agentimpl/agent_test.go index 5f36175977222..c5578328202fc 100644 --- a/comp/process/agent/agentimpl/agent_test.go +++ b/comp/process/agent/agentimpl/agent_test.go @@ -13,6 +13,10 @@ import ( "github.com/stretchr/testify/assert" "go.uber.org/fx" + "github.com/DataDog/datadog-agent/comp/core/config" + log "github.com/DataDog/datadog-agent/comp/core/log/def" + logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" + "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig/sysprobeconfigimpl" taggermock "github.com/DataDog/datadog-agent/comp/core/tagger/mock" "github.com/DataDog/datadog-agent/comp/dogstatsd/statsd" "github.com/DataDog/datadog-agent/comp/process/agent" @@ -66,6 +70,9 @@ func TestProcessAgentComponent(t *testing.T) { taggermock.Module(), statsd.MockModule(), Module(), + fx.Provide(func(t testing.TB) log.Component { return logmock.New(t) }), + config.MockModule(), + sysprobeconfigimpl.MockModule(), } if tc.checksEnabled { diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go index 53fe552d08581..af0efc2196462 100644 --- a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go +++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common_test.go @@ -53,7 +53,7 @@ func TestGetDDAlertType(t *testing.T) { } func Test_getInvolvedObjectTags(t *testing.T) { - taggerInstance := mockTagger.New(t) + taggerInstance := mockTagger.New().Comp taggerInstance.SetTags(types.NewEntityID(types.KubernetesPodUID, "nginx"), "workloadmeta-kubernetes_pod", nil, []string{"additional_pod_tag:nginx"}, nil, nil) taggerInstance.SetTags(types.NewEntityID(types.KubernetesDeployment, "workload-redis/my-deployment-1"), 
"workloadmeta-kubernetes_deployment", nil, []string{"deployment_tag:redis-1"}, nil, nil) taggerInstance.SetTags(types.NewEntityID(types.KubernetesDeployment, "default/my-deployment-2"), "workloadmeta-kubernetes_deployment", nil, []string{"deployment_tag:redis-2"}, nil, nil) diff --git a/pkg/logs/internal/util/adlistener/ad_test.go b/pkg/logs/internal/util/adlistener/ad_test.go index d54417f77de84..58ef03be889f9 100644 --- a/pkg/logs/internal/util/adlistener/ad_test.go +++ b/pkg/logs/internal/util/adlistener/ad_test.go @@ -32,7 +32,7 @@ func TestListenersGetScheduleCalls(t *testing.T) { autodiscoveryimpl.MockModule(), workloadmetafxmock.MockModule(workloadmeta.NewParams()), core.MockBundle(), - fx.Provide(taggermock.NewMock), + taggermock.Module(), ) got1 := make(chan struct{}, 1) diff --git a/pkg/logs/schedulers/cca/scheduler_test.go b/pkg/logs/schedulers/cca/scheduler_test.go index 0ab1cdb18175b..8f12ac4dd9114 100644 --- a/pkg/logs/schedulers/cca/scheduler_test.go +++ b/pkg/logs/schedulers/cca/scheduler_test.go @@ -32,7 +32,7 @@ func setup(t *testing.T) (scheduler *Scheduler, ac autodiscovery.Component, spy autodiscoveryimpl.MockModule(), workloadmetafxmock.MockModule(workloadmeta.NewParams()), core.MockBundle(), - fx.Provide(taggermock.NewMock), + taggermock.Module(), ) scheduler = New(ac).(*Scheduler) spy = &schedulers.MockSourceManager{} From 8228cf4022c789c8b69b609ec7964db800d1fdf2 Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Thu, 21 Nov 2024 16:09:23 +0100 Subject: [PATCH 08/13] [CWS] fix `TestLoaderCompile` build (#31309) --- pkg/network/tracer/connection/dump.go | 1 - pkg/security/ebpf/compile_test.go | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/network/tracer/connection/dump.go b/pkg/network/tracer/connection/dump.go index a9a9b1aff1337..07261bc46f399 100644 --- a/pkg/network/tracer/connection/dump.go +++ b/pkg/network/tracer/connection/dump.go @@ -22,7 +22,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" ) - func dumpMapsHandler(w io.Writer, _ *manager.Manager, mapName string, currentMap *ebpf.Map) { switch mapName { diff --git a/pkg/security/ebpf/compile_test.go b/pkg/security/ebpf/compile_test.go index 77fa4256da806..cd12d6f64223e 100644 --- a/pkg/security/ebpf/compile_test.go +++ b/pkg/security/ebpf/compile_test.go @@ -20,7 +20,7 @@ import ( func TestLoaderCompile(t *testing.T) { ebpftest.TestBuildMode(t, ebpftest.RuntimeCompiled, "", func(t *testing.T) { - _, err := sysconfig.New("") + _, err := sysconfig.New("", "") require.NoError(t, err) cfg, err := config.NewConfig() require.NoError(t, err) From 44782fd103d3e12abcc4c11969fdb2ccb3233cb1 Mon Sep 17 00:00:00 2001 From: Gustavo Caso Date: Thu, 21 Nov 2024 16:35:48 +0100 Subject: [PATCH 09/13] ensure remote tagger do not block when starting (#31279) --- comp/core/tagger/impl-remote/remote.go | 9 --- comp/core/tagger/impl-remote/remote_test.go | 69 +++++++++++++++++++++ 2 files changed, 69 insertions(+), 9 deletions(-) create mode 100644 comp/core/tagger/impl-remote/remote_test.go diff --git a/comp/core/tagger/impl-remote/remote.go b/comp/core/tagger/impl-remote/remote.go index 408d8067fed7b..68e56f3832847 100644 --- a/comp/core/tagger/impl-remote/remote.go +++ b/comp/core/tagger/impl-remote/remote.go @@ -186,15 +186,6 @@ func (t *remoteTagger) Start(ctx context.Context) error { t.client = pb.NewAgentSecureClient(t.conn) - err = t.startTaggerStream(noTimeout) - if err != nil { - // tagger stopped before being connected - if errors.Is(err, errTaggerStreamNotStarted) { - return nil - } 
- return err - } - t.log.Info("remote tagger initialized successfully") go t.run() diff --git a/comp/core/tagger/impl-remote/remote_test.go b/comp/core/tagger/impl-remote/remote_test.go new file mode 100644 index 0000000000000..22a2354b4609e --- /dev/null +++ b/comp/core/tagger/impl-remote/remote_test.go @@ -0,0 +1,69 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package remotetaggerimpl + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/DataDog/datadog-agent/comp/core/config" + logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" + tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" + "github.com/DataDog/datadog-agent/comp/core/tagger/types" + nooptelemetry "github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl" + configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + "github.com/DataDog/datadog-agent/pkg/util/grpc" +) + +func TestStart(t *testing.T) { + grpcServer, authToken, err := grpc.NewMockGrpcSecureServer("5001") + require.NoError(t, err) + defer grpcServer.Stop() + + params := tagger.RemoteParams{ + RemoteFilter: types.NewMatchAllFilter(), + RemoteTarget: func(config.Component) (string, error) { return ":5001", nil }, + RemoteTokenFetcher: func(config.Component) func() (string, error) { + return func() (string, error) { + return authToken, nil + } + }, + } + + cfg := configmock.New(t) + log := logmock.New(t) + telemetry := nooptelemetry.GetCompatComponent() + + remoteTagger, err := NewRemoteTagger(params, cfg, log, telemetry) + require.NoError(t, err) + err = remoteTagger.Start(context.TODO()) + require.NoError(t, err) + remoteTagger.Stop() +} + +func TestStartDoNotBlockIfServerIsNotAvailable(t *testing.T) { + params := tagger.RemoteParams{ + RemoteFilter: types.NewMatchAllFilter(), + RemoteTarget: func(config.Component) (string, error) { return ":5001", nil }, + RemoteTokenFetcher: func(config.Component) func() (string, error) { + return func() (string, error) { + return "something", nil + } + }, + } + + cfg := configmock.New(t) + log := logmock.New(t) + telemetry := nooptelemetry.GetCompatComponent() + + remoteTagger, err := NewRemoteTagger(params, cfg, log, telemetry) + require.NoError(t, err) + err = remoteTagger.Start(context.TODO()) + require.NoError(t, err) + remoteTagger.Stop() +} From 9dbee932c5334c4ccd67318ffbee68d3a7ad3f2f Mon Sep 17 00:00:00 2001 From: Vincent Whitchurch Date: Thu, 21 Nov 2024 16:57:27 +0100 Subject: [PATCH 10/13] usm: Enable event stream by default (#31262) --- pkg/config/setup/system_probe.go | 2 +- pkg/network/config/config_test.go | 12 ++++++------ pkg/network/usm/monitor_tls_test.go | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/config/setup/system_probe.go b/pkg/config/setup/system_probe.go index 464620f3942be..6ca49f348e4b4 100644 --- a/pkg/config/setup/system_probe.go +++ b/pkg/config/setup/system_probe.go @@ -271,7 +271,7 @@ func InitSystemProbeConfig(cfg pkgconfigmodel.Config) { cfg.BindEnv(join(smNS, "enable_quantization")) cfg.BindEnv(join(smNS, "enable_connection_rollup")) cfg.BindEnv(join(smNS, "enable_ring_buffers")) - cfg.BindEnv(join(smNS, "enable_event_stream")) + cfg.BindEnvAndSetDefault(join(smNS, "enable_event_stream"), true) oldHTTPRules := join(netNS, "http_replace_rules") newHTTPRules := join(smNS, 
"http_replace_rules") diff --git a/pkg/network/config/config_test.go b/pkg/network/config/config_test.go index 4873d9b1baeb8..0db2f6743c721 100644 --- a/pkg/network/config/config_test.go +++ b/pkg/network/config/config_test.go @@ -1344,23 +1344,23 @@ func TestUSMEventStream(t *testing.T) { mock.NewSystemProbe(t) cfg := New() - assert.False(t, cfg.EnableUSMEventStream) + assert.True(t, cfg.EnableUSMEventStream) }) t.Run("via yaml", func(t *testing.T) { mockSystemProbe := mock.NewSystemProbe(t) - mockSystemProbe.SetWithoutSource("service_monitoring_config.enable_event_stream", true) + mockSystemProbe.SetWithoutSource("service_monitoring_config.enable_event_stream", false) cfg := New() - assert.True(t, cfg.EnableUSMEventStream) + assert.False(t, cfg.EnableUSMEventStream) }) - t.Run("via deprecated ENV variable", func(t *testing.T) { + t.Run("via ENV variable", func(t *testing.T) { mock.NewSystemProbe(t) - t.Setenv("DD_SERVICE_MONITORING_CONFIG_ENABLE_EVENT_STREAM", "true") + t.Setenv("DD_SERVICE_MONITORING_CONFIG_ENABLE_EVENT_STREAM", "false") cfg := New() - assert.True(t, cfg.EnableUSMEventStream) + assert.False(t, cfg.EnableUSMEventStream) }) } diff --git a/pkg/network/usm/monitor_tls_test.go b/pkg/network/usm/monitor_tls_test.go index d81a484aef350..4a0776f259dbd 100644 --- a/pkg/network/usm/monitor_tls_test.go +++ b/pkg/network/usm/monitor_tls_test.go @@ -859,7 +859,7 @@ func setupUSMTLSMonitor(t *testing.T, cfg *config.Config) *Monitor { usmMonitor, err := NewMonitor(cfg, nil) require.NoError(t, err) require.NoError(t, usmMonitor.Start()) - if cfg.EnableUSMEventStream { + if cfg.EnableUSMEventStream && usmconfig.NeedProcessMonitor(cfg) { eventmonitortestutil.StartEventMonitor(t, procmontestutil.RegisterProcessMonitorEventConsumer) } t.Cleanup(usmMonitor.Stop) From e6f8f7627343bd8ed60e26cb5c4f90da4f972170 Mon Sep 17 00:00:00 2001 From: Toby Lawrence Date: Thu, 21 Nov 2024 12:26:12 -0500 Subject: [PATCH 11/13] chore: disable remote agent registry support by default (#31323) --- comp/api/api/apiimpl/grpc.go | 4 +++ .../impl/remoteagentregistry.go | 5 +++ .../impl/remoteagentregistry_test.go | 33 ++++++++++++++----- pkg/config/setup/config.go | 1 + 4 files changed, 34 insertions(+), 9 deletions(-) diff --git a/comp/api/api/apiimpl/grpc.go b/comp/api/api/apiimpl/grpc.go index 0689b17b3cfcc..0386f022c6927 100644 --- a/comp/api/api/apiimpl/grpc.go +++ b/comp/api/api/apiimpl/grpc.go @@ -187,6 +187,10 @@ func (s *serverSecure) WorkloadmetaStreamEntities(in *pb.WorkloadmetaStreamReque } func (s *serverSecure) RegisterRemoteAgent(_ context.Context, in *pb.RegisterRemoteAgentRequest) (*pb.RegisterRemoteAgentResponse, error) { + if s.remoteAgentRegistry == nil { + return nil, status.Error(codes.Unimplemented, "remote agent registry not enabled") + } + registration := rarproto.ProtobufToRemoteAgentRegistration(in) recommendedRefreshIntervalSecs, err := s.remoteAgentRegistry.RegisterRemoteAgent(registration) if err != nil { diff --git a/comp/core/remoteagentregistry/impl/remoteagentregistry.go b/comp/core/remoteagentregistry/impl/remoteagentregistry.go index 2a8a6e230c924..d5c09563b2afa 100644 --- a/comp/core/remoteagentregistry/impl/remoteagentregistry.go +++ b/comp/core/remoteagentregistry/impl/remoteagentregistry.go @@ -44,6 +44,11 @@ type Provides struct { // NewComponent creates a new remoteagent component func NewComponent(reqs Requires) Provides { + enabled := reqs.Config.GetBool("remote_agent_registry.enabled") + if !enabled { + return Provides{} + } + ra := newRemoteAgent(reqs) return 
Provides{ diff --git a/comp/core/remoteagentregistry/impl/remoteagentregistry_test.go b/comp/core/remoteagentregistry/impl/remoteagentregistry_test.go index 91b198c7a8c9a..d4264e25e4418 100644 --- a/comp/core/remoteagentregistry/impl/remoteagentregistry_test.go +++ b/comp/core/remoteagentregistry/impl/remoteagentregistry_test.go @@ -16,6 +16,7 @@ import ( "strconv" "testing" + "github.com/DataDog/datadog-agent/comp/core/config" helpers "github.com/DataDog/datadog-agent/comp/core/flare/helpers" remoteagent "github.com/DataDog/datadog-agent/comp/core/remoteagentregistry/def" compdef "github.com/DataDog/datadog-agent/comp/def" @@ -33,7 +34,7 @@ import ( ) func TestRemoteAgentCreation(t *testing.T) { - provides, lc := buildComponent(t) + provides, lc, _ := buildComponent(t) assert.NotNil(t, provides.Comp) assert.NotNil(t, provides.FlareProvider) @@ -48,10 +49,10 @@ func TestRemoteAgentCreation(t *testing.T) { func TestRecommendedRefreshInterval(t *testing.T) { expectedRefreshIntervalSecs := uint32(27) - config := configmock.New(t) + + provides, _, config := buildComponent(t) config.SetWithoutSource("remote_agent_registry.recommended_refresh_interval", fmt.Sprintf("%ds", expectedRefreshIntervalSecs)) - provides, _ := buildComponentWithConfig(t, config) component := provides.Comp registrationData := &remoteagent.RegistrationData{ @@ -71,7 +72,7 @@ func TestRecommendedRefreshInterval(t *testing.T) { } func TestGetRegisteredAgents(t *testing.T) { - provides, _ := buildComponent(t) + provides, _, _ := buildComponent(t) component := provides.Comp registrationData := &remoteagent.RegistrationData{ @@ -90,7 +91,7 @@ func TestGetRegisteredAgents(t *testing.T) { } func TestGetRegisteredAgentStatuses(t *testing.T) { - provides, _ := buildComponent(t) + provides, _, _ := buildComponent(t) component := provides.Comp remoteAgentServer := &testRemoteAgentServer{ @@ -120,7 +121,7 @@ func TestGetRegisteredAgentStatuses(t *testing.T) { } func TestFlareProvider(t *testing.T) { - provides, _ := buildComponent(t) + provides, _, _ := buildComponent(t) component := provides.Comp flareProvider := provides.FlareProvider @@ -153,7 +154,7 @@ func TestFlareProvider(t *testing.T) { } func TestStatusProvider(t *testing.T) { - provides, _ := buildComponent(t) + provides, _, _ := buildComponent(t) component := provides.Comp statusProvider := provides.Status @@ -199,8 +200,22 @@ func TestStatusProvider(t *testing.T) { require.Equal(t, "test_value", registeredAgentStatuses[0].MainSection["test_key"]) } -func buildComponent(t *testing.T) (Provides, *compdef.TestLifecycle) { - return buildComponentWithConfig(t, configmock.New(t)) +func TestDisabled(t *testing.T) { + config := configmock.New(t) + + provides, _ := buildComponentWithConfig(t, config) + + require.Nil(t, provides.Comp) + require.Nil(t, provides.FlareProvider.Callback) + require.Nil(t, provides.Status.Provider) +} + +func buildComponent(t *testing.T) (Provides, *compdef.TestLifecycle, config.Component) { + config := configmock.New(t) + config.SetWithoutSource("remote_agent_registry.enabled", true) + + provides, lc := buildComponentWithConfig(t, config) + return provides, lc, config } func buildComponentWithConfig(t *testing.T, config configmodel.Config) (Provides, *compdef.TestLifecycle) { diff --git a/pkg/config/setup/config.go b/pkg/config/setup/config.go index 91fcef102fb7a..d156d6fec9e9e 100644 --- a/pkg/config/setup/config.go +++ b/pkg/config/setup/config.go @@ -1015,6 +1015,7 @@ func InitConfig(config pkgconfigmodel.Setup) { 
config.BindEnvAndSetDefault("reverse_dns_enrichment.rate_limiter.recovery_interval", time.Duration(0)) // Remote agents + config.BindEnvAndSetDefault("remote_agent_registry.enabled", false) config.BindEnvAndSetDefault("remote_agent_registry.idle_timeout", time.Duration(30*time.Second)) config.BindEnvAndSetDefault("remote_agent_registry.query_timeout", time.Duration(3*time.Second)) config.BindEnvAndSetDefault("remote_agent_registry.recommended_refresh_interval", time.Duration(10*time.Second)) From e899ec25ece2e884559a639f61dbd9184a89e2f5 Mon Sep 17 00:00:00 2001 From: Vickenty Fesunov Date: Thu, 21 Nov 2024 18:26:19 +0100 Subject: [PATCH 12/13] AMLII-2115 Add BouncyCastle FIPS provider for JMXFetch (#31094) --- .github/CODEOWNERS | 1 + .github/dependabot.yaml | 8 + Dockerfiles/agent/Dockerfile | 12 +- .../agent/bouncycastle-fips/bc-fips.policy | 5 + .../agent/bouncycastle-fips/java.security | 468 ++++++++++++++++++ Dockerfiles/agent/bouncycastle-fips/pom.xml | 41 ++ Dockerfiles/agent/install-fips.ps1 | 23 + Dockerfiles/agent/windows/amd64/Dockerfile | 5 + 8 files changed, 562 insertions(+), 1 deletion(-) create mode 100644 Dockerfiles/agent/bouncycastle-fips/bc-fips.policy create mode 100644 Dockerfiles/agent/bouncycastle-fips/java.security create mode 100644 Dockerfiles/agent/bouncycastle-fips/pom.xml create mode 100644 Dockerfiles/agent/install-fips.ps1 diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 81eafd8261c7a..78f9b3a110ea6 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -250,6 +250,7 @@ /Dockerfiles/agent/entrypoint.ps1 @DataDog/container-integrations @DataDog/windows-agent /Dockerfiles/agent/windows/ @DataDog/container-integrations @DataDog/windows-agent /Dockerfiles/agent-ot @DataDog/opentelemetry +/Dockerfiles/agent/bouncycastle-fips @DataDog/agent-metrics-logs /docs/ @DataDog/documentation @DataDog/agent-devx-loops /docs/dev/checks/ @DataDog/documentation @DataDog/agent-metrics-logs diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml index 4ab4b963ea26c..fbbec5da82991 100644 --- a/.github/dependabot.yaml +++ b/.github/dependabot.yaml @@ -183,3 +183,11 @@ updates: schedule: interval: weekly open-pull-requests-limit: 100 + - package-ecosystem: maven + directory: Dockerfiles/agent/bouncycastle-fips + labels: + - dependencies + - team/agent-metrics-logs + - changelog/no-changelog + schedule: + interval: weekly diff --git a/Dockerfiles/agent/Dockerfile b/Dockerfiles/agent/Dockerfile index 3e31218c987e8..bd68bf33a83cc 100644 --- a/Dockerfiles/agent/Dockerfile +++ b/Dockerfiles/agent/Dockerfile @@ -51,7 +51,7 @@ WORKDIR /output ENV S6_VERSION="v2.2.0.3" ENV JUST_CONTAINERS_DOWNLOAD_LOCATION=${GENERAL_ARTIFACTS_CACHE_BUCKET_URL:+${GENERAL_ARTIFACTS_CACHE_BUCKET_URL}/s6-overlay} ENV JUST_CONTAINERS_DOWNLOAD_LOCATION=${JUST_CONTAINERS_DOWNLOAD_LOCATION:-https://github.com/just-containers/s6-overlay/releases/download} -RUN apt install --no-install-recommends -y curl ca-certificates +RUN apt install --no-install-recommends -y curl ca-certificates maven RUN S6ARCH=$([ "$TARGETARCH" = "amd64" ] && echo "amd64" || echo "aarch64") && curl -L ${JUST_CONTAINERS_DOWNLOAD_LOCATION}/${S6_VERSION}/s6-overlay-${S6ARCH}.tar.gz -o /output/s6.tgz COPY s6.$TARGETARCH.sha256 /output/s6.$TARGETARCH.sha256 # To calculate S6_SHA256SUM for a specific version, run: @@ -104,6 +104,10 @@ COPY datadog*.yaml etc/datadog-agent/ # Installation information COPY install_info etc/datadog-agent/ +# Download BouncyCastle FIPS provider jar files. 
+COPY bouncycastle-fips/pom.xml /opt/bouncycastle-fips/ +RUN if [ -n "$WITH_JMX" ]; then cd /opt/bouncycastle-fips && mvn dependency:copy-dependencies; else mkdir -p /opt/bouncycastle-fips/target/dependency; fi + ###################################### # Actual docker image construction # ###################################### @@ -209,6 +213,12 @@ RUN mv /etc/s6/init/init-stage3 /etc/s6/init/init-stage3-original COPY init-stage3 /etc/s6/init/init-stage3 COPY init-stage3-host-pid /etc/s6/init/init-stage3-host-pid +# Copy BouncyCastle Java FIPS provider binaries and configuration +COPY --from=extract /opt/bouncycastle-fips/target/dependency/*.jar /opt/bouncycastle-fips/ +COPY bouncycastle-fips/java.security /opt/bouncycastle-fips/ +COPY bouncycastle-fips/bc-fips.policy /opt/bouncycastle-fips/ +RUN if [ -z "$WITH_JMX" ]; then rm -rf /opt/bouncycastle-fips; fi + # Update if optional OTel Agent process should not run RUN if [ ! -f /opt/datadog-agent/embedded/bin/otel-agent ]; then \ rm -rf /etc/services.d/otel ; \ diff --git a/Dockerfiles/agent/bouncycastle-fips/bc-fips.policy b/Dockerfiles/agent/bouncycastle-fips/bc-fips.policy new file mode 100644 index 0000000000000..bb98d7bfcfa58 --- /dev/null +++ b/Dockerfiles/agent/bouncycastle-fips/bc-fips.policy @@ -0,0 +1,5 @@ +grant { +permission java.lang.RuntimePermission "getProtectionDomain"; +permission java.util.PropertyPermission "java.runtime.name", "read"; +permission java.lang.RuntimePermission "accessDeclaredMembers"; +} diff --git a/Dockerfiles/agent/bouncycastle-fips/java.security b/Dockerfiles/agent/bouncycastle-fips/java.security new file mode 100644 index 0000000000000..f0517d7c1cb35 --- /dev/null +++ b/Dockerfiles/agent/bouncycastle-fips/java.security @@ -0,0 +1,468 @@ +# This is a system java.security file modified to use BouncyCastle +# FIPS provider as the main JSSE provider. + +# See BouncyCastle documentation for specific instructions on how to modify this file. +# See the main java.security file provided with the JVM for description of available +# options. + +security.provider.1=org.bouncycastle.jcajce.provider.BouncyCastleFipsProvider +security.provider.2=org.bouncycastle.jsse.provider.BouncyCastleJsseProvider fips:BCFIPS +security.provider.3=sun.security.provider.Sun + +# +# Sun Provider SecureRandom seed source. +# +# Select the primary source of seed data for the "NativePRNG", "SHA1PRNG" +# and "DRBG" SecureRandom implementations in the "Sun" provider. +# (Other SecureRandom implementations might also use this property.) +# +# On Unix-like systems (for example, Solaris/Linux/MacOS), the +# "NativePRNG", "SHA1PRNG" and "DRBG" implementations obtains seed data from +# special device files such as file:/dev/random. +# +# On Windows systems, specifying the URLs "file:/dev/random" or +# "file:/dev/urandom" will enable the native Microsoft CryptoAPI seeding +# mechanism for SHA1PRNG and DRBG. +# +# By default, an attempt is made to use the entropy gathering device +# specified by the "securerandom.source" Security property. If an +# exception occurs while accessing the specified URL: +# +# NativePRNG: +# a default value of /dev/random will be used. If neither +# are available, the implementation will be disabled. +# "file" is the only currently supported protocol type. +# +# SHA1PRNG and DRBG: +# the traditional system/thread activity algorithm will be used. +# +# The entropy gathering device can also be specified with the System +# property "java.security.egd". 
For example: +# +# % java -Djava.security.egd=file:/dev/random MainClass +# +# Specifying this System property will override the +# "securerandom.source" Security property. +# +# In addition, if "file:/dev/random" or "file:/dev/urandom" is +# specified, the "NativePRNG" implementation will be more preferred than +# DRBG and SHA1PRNG in the Sun provider. +# +securerandom.source=file:/dev/random + +# +# A list of known strong SecureRandom implementations. +# +# To help guide applications in selecting a suitable strong +# java.security.SecureRandom implementation, Java distributions should +# indicate a list of known strong implementations using the property. +# +# This is a comma-separated list of algorithm and/or algorithm:provider +# entries. +# +securerandom.strongAlgorithms=NativePRNGBlocking:SUN,DRBG:SUN + +# +# Sun provider DRBG configuration and default instantiation request. +# +# NIST SP 800-90Ar1 lists several DRBG mechanisms. Each can be configured +# with a DRBG algorithm name, and can be instantiated with a security strength, +# prediction resistance support, etc. This property defines the configuration +# and the default instantiation request of "DRBG" SecureRandom implementations +# in the SUN provider. (Other DRBG implementations can also use this property.) +# Applications can request different instantiation parameters like security +# strength, capability, personalization string using one of the +# getInstance(...,SecureRandomParameters,...) methods with a +# DrbgParameters.Instantiation argument, but other settings such as the +# mechanism and DRBG algorithm names are not currently configurable by any API. +# +# Please note that the SUN implementation of DRBG always supports reseeding. +# +# The value of this property is a comma-separated list of all configurable +# aspects. The aspects can appear in any order but the same aspect can only +# appear at most once. Its BNF-style definition is: +# +# Value: +# aspect { "," aspect } +# +# aspect: +# mech_name | algorithm_name | strength | capability | df +# +# // The DRBG mechanism to use. Default "Hash_DRBG" +# mech_name: +# "Hash_DRBG" | "HMAC_DRBG" | "CTR_DRBG" +# +# // The DRBG algorithm name. The "SHA-***" names are for Hash_DRBG and +# // HMAC_DRBG, default "SHA-256". The "AES-***" names are for CTR_DRBG, +# // default "AES-128" when using the limited cryptographic or "AES-256" +# // when using the unlimited. +# algorithm_name: +# "SHA-224" | "SHA-512/224" | "SHA-256" | +# "SHA-512/256" | "SHA-384" | "SHA-512" | +# "AES-128" | "AES-192" | "AES-256" +# +# // Security strength requested. Default "128" +# strength: +# "112" | "128" | "192" | "256" +# +# // Prediction resistance and reseeding request. Default "none" +# // "pr_and_reseed" - Both prediction resistance and reseeding +# // support requested +# // "reseed_only" - Only reseeding support requested +# // "none" - Neither prediction resistance not reseeding +# // support requested +# pr: +# "pr_and_reseed" | "reseed_only" | "none" +# +# // Whether a derivation function should be used. only applicable +# // to CTR_DRBG. Default "use_df" +# df: +# "use_df" | "no_df" +# +# Examples, +# securerandom.drbg.config=Hash_DRBG,SHA-224,112,none +# securerandom.drbg.config=CTR_DRBG,AES-256,192,pr_and_reseed,use_df +# +# The default value is an empty string, which is equivalent to +# securerandom.drbg.config=Hash_DRBG,SHA-256,128,none +# +securerandom.drbg.config= + +# +# Class to instantiate as the javax.security.auth.login.Configuration +# provider. 
+# +login.configuration.provider=sun.security.provider.ConfigFile + +# +# Class to instantiate as the system Policy. This is the name of the class +# that will be used as the Policy object. The system class loader is used to +# locate this class. +# +policy.provider=sun.security.provider.PolicyFile + +# The default is to have a single system-wide policy file, +# and a policy file in the user's home directory. +# +# A second policy file is added via the command-line (location varies between OS) +# that contains permission grants required by BouncyCastle provider. +policy.url.1=file:${java.home}/conf/security/java.policy + +# whether or not we expand properties in the policy file +# if this is set to false, properties (${...}) will not be expanded in policy +# files. +# +policy.expandProperties=true + +# whether or not we allow an extra policy to be passed on the command line +# with -Djava.security.policy=somefile. Comment out this line to disable +# this feature. +# +policy.allowSystemProperty=true + +# whether or not we look into the IdentityScope for trusted Identities +# when encountering a 1.1 signed JAR file. If the identity is found +# and is trusted, we grant it AllPermission. Note: the default policy +# provider (sun.security.provider.PolicyFile) does not support this property. +# +policy.ignoreIdentityScope=false + +# +# Default keystore type. +# +keystore.type=BCFKS + +# +# Controls compatibility mode for JKS and PKCS12 keystore types. +# +# When set to 'true', both JKS and PKCS12 keystore types support loading +# keystore files in either JKS or PKCS12 format. When set to 'false' the +# JKS keystore type supports loading only JKS keystore files and the PKCS12 +# keystore type supports loading only PKCS12 keystore files. +# +keystore.type.compat=true + +# +# List of comma-separated packages that start with or equal this string +# will cause a security exception to be thrown when passed to the +# SecurityManager::checkPackageAccess method unless the corresponding +# RuntimePermission("accessClassInPackage."+package) has been granted. +# +package.access=sun.misc.,\ + sun.reflect. + +# +# List of comma-separated packages that start with or equal this string +# will cause a security exception to be thrown when passed to the +# SecurityManager::checkPackageDefinition method unless the corresponding +# RuntimePermission("defineClassInPackage."+package) has been granted. +# +# By default, none of the class loaders supplied with the JDK call +# checkPackageDefinition. +# +package.definition=sun.misc.,\ + sun.reflect. + +# +# Determines whether this properties file can be appended to +# or overridden on the command line via -Djava.security.properties +# +security.overridePropertiesFile=true + +# +# Determines the default key and trust manager factory algorithms for +# the javax.net.ssl package. +# +ssl.KeyManagerFactory.algorithm=PKIX +ssl.TrustManagerFactory.algorithm=PKIX + +# The Java-level namelookup cache policy for failed lookups: +# +# any negative value: cache forever +# any positive value: the number of seconds to cache negative lookup results +# zero: do not cache +# +# In some Microsoft Windows networking environments that employ +# the WINS name service in addition to DNS, name service lookups +# that fail may take a noticeably long time to return (approx. 5 seconds). +# For this reason the default caching policy is to maintain these +# results for 10 seconds. 
+#
+networkaddress.cache.negative.ttl=10
+
+#
+# Policy for failed Kerberos KDC lookups:
+#
+# When a KDC is unavailable (network error, service failure, etc), it is
+# put inside a secondary list and accessed less often for future requests. The
+# value (case-insensitive) for this policy can be:
+#
+# tryLast
+# KDCs in the secondary list are always tried after those not on the list.
+#
+# tryLess[:max_retries,timeout]
+# KDCs in the secondary list are still tried by their order in the
+# configuration, but with smaller max_retries and timeout values.
+# max_retries and timeout are optional numerical parameters (default 1 and
+# 5000, which means once and 5 seconds). Please note that if any of the
+# values defined here are more than what is defined in krb5.conf, it will be
+# ignored.
+#
+# Whenever a KDC is detected as available, it is removed from the secondary
+# list. The secondary list is reset when krb5.conf is reloaded. You can add
+# refreshKrb5Config=true to a JAAS configuration file so that krb5.conf is
+# reloaded whenever a JAAS authentication is attempted.
+#
+# Example,
+# krb5.kdc.bad.policy = tryLast
+# krb5.kdc.bad.policy = tryLess:2,2000
+#
+krb5.kdc.bad.policy = tryLast
+
+#
+# Kerberos cross-realm referrals (RFC 6806)
+#
+# OpenJDK's Kerberos client supports cross-realm referrals as defined in
+# RFC 6806. This allows to setup more dynamic environments in which clients
+# do not need to know in advance how to reach the realm of a target principal
+# (either a user or service).
+#
+# When a client issues an AS or a TGS request, the "canonicalize" option
+# is set to announce support of this feature. A KDC server may fulfill the
+# request or reply referring the client to a different one. If referred,
+# the client will issue a new request and the cycle repeats.
+#
+# In addition to referrals, the "canonicalize" option allows the KDC server
+# to change the client name in response to an AS request. For security reasons,
+# RFC 6806 (section 11) FAST scheme is enforced.
+#
+# Disable Kerberos cross-realm referrals. Value may be overwritten with a
+# System property (-Dsun.security.krb5.disableReferrals).
+sun.security.krb5.disableReferrals=false
+
+# Maximum number of AS or TGS referrals to avoid infinite loops. Value may
+# be overwritten with a System property (-Dsun.security.krb5.maxReferrals).
+sun.security.krb5.maxReferrals=5
+
+#
+# Cryptographic Jurisdiction Policy defaults
+#
+# Import and export control rules on cryptographic software vary from
+# country to country. By default, Java provides two different sets of
+# cryptographic policy files[1]:
+#
+# unlimited: These policy files contain no restrictions on cryptographic
+# strengths or algorithms
+#
+# limited: These policy files contain more restricted cryptographic
+# strengths
+#
+# The default setting is determined by the value of the "crypto.policy"
+# Security property below. If your country or usage requires the
+# traditional restrictive policy, the "limited" Java cryptographic
+# policy is still available and may be appropriate for your environment.
+#
+# If you have restrictions that do not fit either use case mentioned
+# above, Java provides the capability to customize these policy files.
+# The "crypto.policy" security property points to a subdirectory
+# within <java-home>/conf/security/policy/ which can be customized.
+# Please see the <java-home>/conf/security/policy/README.txt file or consult
+# the Java Security Guide/JCA documentation for more information.
+# +# YOU ARE ADVISED TO CONSULT YOUR EXPORT/IMPORT CONTROL COUNSEL OR ATTORNEY +# TO DETERMINE THE EXACT REQUIREMENTS. +# +# [1] Please note that the JCE for Java SE, including the JCE framework, +# cryptographic policy files, and standard JCE providers provided with +# the Java SE, have been reviewed and approved for export as mass market +# encryption item by the US Bureau of Industry and Security. +# +# Note: This property is currently used by the JDK Reference implementation. +# It is not guaranteed to be examined and used by other implementations. +# +crypto.policy=unlimited + +# +# The policy for the XML Signature secure validation mode. Validation of +# XML Signatures that violate any of these constraints will fail. The +# mode is enforced by default. The mode can be disabled by setting the +# property "org.jcp.xml.dsig.secureValidation" to Boolean.FALSE with the +# javax.xml.crypto.XMLCryptoContext.setProperty() method. +# +# Policy: +# Constraint {"," Constraint } +# Constraint: +# AlgConstraint | MaxTransformsConstraint | MaxReferencesConstraint | +# ReferenceUriSchemeConstraint | KeySizeConstraint | OtherConstraint +# AlgConstraint +# "disallowAlg" Uri +# MaxTransformsConstraint: +# "maxTransforms" Integer +# MaxReferencesConstraint: +# "maxReferences" Integer +# ReferenceUriSchemeConstraint: +# "disallowReferenceUriSchemes" String { String } +# KeySizeConstraint: +# "minKeySize" KeyAlg Integer +# OtherConstraint: +# "noDuplicateIds" | "noRetrievalMethodLoops" +# +# For AlgConstraint, Uri is the algorithm URI String that is not allowed. +# See the XML Signature Recommendation for more information on algorithm +# URI Identifiers. For KeySizeConstraint, KeyAlg is the standard algorithm +# name of the key type (ex: "RSA"). If the MaxTransformsConstraint, +# MaxReferencesConstraint or KeySizeConstraint (for the same key type) is +# specified more than once, only the last entry is enforced. +# +# Note: This property is currently used by the JDK Reference implementation. +# It is not guaranteed to be examined and used by other implementations. +# +jdk.xml.dsig.secureValidationPolicy=\ + disallowAlg http://www.w3.org/TR/1999/REC-xslt-19991116,\ + disallowAlg http://www.w3.org/2001/04/xmldsig-more#rsa-md5,\ + disallowAlg http://www.w3.org/2001/04/xmldsig-more#hmac-md5,\ + disallowAlg http://www.w3.org/2001/04/xmldsig-more#md5,\ + maxTransforms 5,\ + maxReferences 30,\ + disallowReferenceUriSchemes file http https,\ + minKeySize RSA 1024,\ + minKeySize DSA 1024,\ + minKeySize EC 224,\ + noDuplicateIds,\ + noRetrievalMethodLoops + +# +# JCEKS Encrypted Key Serial Filter +# +# This filter, if configured, is used by the JCEKS KeyStore during the +# deserialization of the encrypted Key object stored inside a key entry. +# If not configured or the filter result is UNDECIDED (i.e. none of the patterns +# matches), the filter configured by jdk.serialFilter will be consulted. +# +# If the system property jceks.key.serialFilter is also specified, it supersedes +# the security property value defined here. +# +# The filter pattern uses the same format as jdk.serialFilter. The default +# pattern allows java.lang.Enum, java.security.KeyRep, java.security.KeyRep$Type, +# and javax.crypto.spec.SecretKeySpec and rejects all the others. 
+jceks.key.serialFilter = java.base/java.lang.Enum;java.base/java.security.KeyRep;\ + java.base/java.security.KeyRep$Type;java.base/javax.crypto.spec.SecretKeySpec;!* + +# +# Disabled mechanisms for the Simple Authentication and Security Layer (SASL) +# +# Disabled mechanisms will not be negotiated by both SASL clients and servers. +# These mechanisms will be ignored if they are specified in the "mechanisms" +# argument of "Sasl.createSaslClient" or the "mechanism" argument of +# "Sasl.createSaslServer". +# +# The value of this property is a comma-separated list of SASL mechanisms. +# The mechanisms are case-sensitive. Whitespaces around the commas are ignored. +# +# Note: This property is currently used by the JDK Reference implementation. +# It is not guaranteed to be examined and used by other implementations. +# +# Example: +# jdk.sasl.disabledMechanisms=PLAIN, CRAM-MD5, DIGEST-MD5 +jdk.sasl.disabledMechanisms= + +# +# Policies for distrusting Certificate Authorities (CAs). +# +# This is a comma separated value of one or more case-sensitive strings, each +# of which represents a policy for determining if a CA should be distrusted. +# The supported values are: +# +# SYMANTEC_TLS : Distrust TLS Server certificates anchored by a Symantec +# root CA and issued after April 16, 2019 unless issued by one of the +# following subordinate CAs which have a later distrust date: +# 1. Apple IST CA 2 - G1, SHA-256 fingerprint: +# AC2B922ECFD5E01711772FEA8ED372DE9D1E2245FCE3F57A9CDBEC77296A424B +# Distrust after December 31, 2019. +# 2. Apple IST CA 8 - G1, SHA-256 fingerprint: +# A4FE7C7F15155F3F0AEF7AAA83CF6E06DEB97CA3F909DF920AC1490882D488ED +# Distrust after December 31, 2019. +# +# Leading and trailing whitespace surrounding each value are ignored. +# Unknown values are ignored. If the property is commented out or set to the +# empty String, no policies are enforced. +# +# Note: This property is currently used by the JDK Reference implementation. +# It is not guaranteed to be supported by other SE implementations. Also, this +# property does not override other security properties which can restrict +# certificates such as jdk.tls.disabledAlgorithms or +# jdk.certpath.disabledAlgorithms; those restrictions are still enforced even +# if this property is not enabled. +# +jdk.security.caDistrustPolicies=SYMANTEC_TLS + +# +# FilePermission path canonicalization +# +# This security property dictates how the path argument is processed and stored +# while constructing a FilePermission object. If the value is set to true, the +# path argument is canonicalized and FilePermission methods (such as implies, +# equals, and hashCode) are implemented based on this canonicalized result. +# Otherwise, the path argument is not canonicalized and FilePermission methods are +# implemented based on the original input. See the implementation note of the +# FilePermission class for more details. +# +# If a system property of the same name is also specified, it supersedes the +# security property value defined here. +# +# The default value for this property is false. +# +jdk.io.permissionsUseCanonicalPath=false + +# +# The default Character set name (java.nio.charset.Charset.forName()) +# for converting TLS ALPN values between byte arrays and Strings. +# Prior versions of the JDK may use UTF-8 as the default charset. If +# you experience interoperability issues, setting this property to UTF-8 +# may help. 
+# +# jdk.tls.alpnCharset=UTF-8 +jdk.tls.alpnCharset=ISO_8859_1 + +# Force BouncyCastle to operate in approved-only mode. +org.bouncycastle.fips.approved_only=true diff --git a/Dockerfiles/agent/bouncycastle-fips/pom.xml b/Dockerfiles/agent/bouncycastle-fips/pom.xml new file mode 100644 index 0000000000000..bcda513ee91a6 --- /dev/null +++ b/Dockerfiles/agent/bouncycastle-fips/pom.xml @@ -0,0 +1,41 @@ + + 4.0.0 + + com.datadoghq + datadog-agent-bouncycastle-deps + datadog-agent-bouncycastle-deps + 1.0.0 + + + + + UTF-8 + 1.7 + + + + + org.bouncycastle + bc-fips + 2.0.0 + + + org.bouncycastle + bcpkix-fips + 2.0.7 + + + org.bouncycastle + bctls-fips + 2.0.19 + + + org.bouncycastle + bcutil-fips + 2.0.3 + + + + diff --git a/Dockerfiles/agent/install-fips.ps1 b/Dockerfiles/agent/install-fips.ps1 new file mode 100644 index 0000000000000..70be3b0da44bb --- /dev/null +++ b/Dockerfiles/agent/install-fips.ps1 @@ -0,0 +1,23 @@ +$ErrorActionPreference = 'Stop' + +$maven_sha512 = '8BEAC8D11EF208F1E2A8DF0682B9448A9A363D2AD13CA74AF43705549E72E74C9378823BF689287801CBBFC2F6EA9596201D19CCACFDFB682EE8A2FF4C4418BA' + +if ("$env:WITH_JMX" -ne "false") { + cd \fips-build + Invoke-WebRequest -Outfile maven.zip https://dlcdn.apache.org/maven/maven-3/3.9.9/binaries/apache-maven-3.9.9-bin.zip + if ((Get-FileHash -Algorithm SHA512 maven.zip).Hash -eq $maven_sha512) { + Write-Host "Maven checksum match" + } else { + Write-Error "Checksum mismatch" + } + Expand-Archive -Force -Path maven.zip -DestinationPath . + .\apache-maven-3.9.9\bin\mvn -D maven.repo.local=maven-repo dependency:copy-dependencies + New-Item -Force -ItemType directory -Path 'C:/Program Files/Datadog/BouncyCastle FIPS/' + Move-Item -Force -Path @("target/dependency/*.jar", "java.security", "bc-fips.policy") 'C:/Program Files/Datadog/BouncyCastle FIPS/' + \java\bin\java --module-path 'C:\Program Files\Datadog\BouncyCastle FIPS' org.bouncycastle.util.DumpInfo + if (!$?) { + Write-Error ("BouncyCastle self check failed with exit code: {0}" -f $LASTEXITCODE) + } +} +cd \ +Remove-Item -Force -Recurse \fips-build diff --git a/Dockerfiles/agent/windows/amd64/Dockerfile b/Dockerfiles/agent/windows/amd64/Dockerfile index 0577c8a0103b7..275801062f047 100755 --- a/Dockerfiles/agent/windows/amd64/Dockerfile +++ b/Dockerfiles/agent/windows/amd64/Dockerfile @@ -15,9 +15,14 @@ USER ContainerAdministrator SHELL ["pwsh", "-Command", "$ErrorActionPreference = 'Stop';"] COPY ["Datadog Agent", "C:/Program Files/Datadog/Datadog Agent"] + COPY install.ps1 ./ RUN . ./install.ps1 +COPY bouncycastle-fips /fips-build +COPY install-fips.ps1 ./ +RUN . 
./install-fips.ps1 + EXPOSE 8125/udp 8126/tcp COPY entrypoint.exe C:/entrypoint.exe From c35c4b6ede92d6daceff6022a81e1b2e01dd1a0b Mon Sep 17 00:00:00 2001 From: Paul Cacheux Date: Thu, 21 Nov 2024 19:31:24 +0100 Subject: [PATCH 13/13] [CWS] remove stresstests (#31328) --- LICENSE-3rdparty.csv | 13 - go.mod | 3 +- go.sum | 2 - pkg/security/tests/.gitignore | 2 - pkg/security/tests/activity_dumps_common.go | 2 +- pkg/security/tests/cmdwrapper.go | 2 +- pkg/security/tests/discarders_stress_test.go | 250 ------------- pkg/security/tests/files_generator.go | 2 +- pkg/security/tests/latency_test.go | 146 -------- pkg/security/tests/main_linux.go | 2 +- pkg/security/tests/main_test.go | 2 +- pkg/security/tests/main_windows.go | 2 +- pkg/security/tests/module_stresser.go | 353 ------------------- pkg/security/tests/module_tester.go | 2 +- pkg/security/tests/module_tester_linux.go | 2 +- pkg/security/tests/module_tester_windows.go | 2 +- pkg/security/tests/schemas.go | 2 +- pkg/security/tests/simple_test.go | 2 +- pkg/security/tests/stress_test.go | 284 --------------- pkg/security/tests/syscalls_amd64.go | 2 +- pkg/security/tests/syscalls_arm64.go | 2 +- pkg/security/tests/testopts.go | 2 +- tasks/kmt.py | 3 +- tasks/security_agent.py | 91 ----- 24 files changed, 16 insertions(+), 1159 deletions(-) delete mode 100644 pkg/security/tests/discarders_stress_test.go delete mode 100644 pkg/security/tests/latency_test.go delete mode 100644 pkg/security/tests/module_stresser.go delete mode 100644 pkg/security/tests/stress_test.go diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index f196205ae326f..dfe208d349e1c 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -1176,19 +1176,7 @@ core,github.com/google/gopacket/pcap,BSD-3-Clause,"Copyright (c) 2009-2011 Andre core,github.com/google/gopacket/pcapgo,BSD-3-Clause,"Copyright (c) 2009-2011 Andreas Krennmair. All rights reserved. | Copyright (c) 2012 Google, Inc. All rights reserved." core,github.com/google/licenseclassifier/v2,Apache-2.0,Copyright 2017 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. core,github.com/google/licenseclassifier/v2/assets,Apache-2.0,Copyright 2017 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. -core,github.com/google/pprof/driver,Apache-2.0,Andrew Hunter | Google Inc. | Hyoun Kyu Cho | Martin Spier | Raul Silvera | Taco de Wolff | Tipp Moseley -core,github.com/google/pprof/internal/binutils,Apache-2.0,Andrew Hunter | Google Inc. | Hyoun Kyu Cho | Martin Spier | Raul Silvera | Taco de Wolff | Tipp Moseley -core,github.com/google/pprof/internal/driver,Apache-2.0,Andrew Hunter | Google Inc. | Hyoun Kyu Cho | Martin Spier | Raul Silvera | Taco de Wolff | Tipp Moseley -core,github.com/google/pprof/internal/elfexec,Apache-2.0,Andrew Hunter | Google Inc. | Hyoun Kyu Cho | Martin Spier | Raul Silvera | Taco de Wolff | Tipp Moseley -core,github.com/google/pprof/internal/graph,Apache-2.0,Andrew Hunter | Google Inc. | Hyoun Kyu Cho | Martin Spier | Raul Silvera | Taco de Wolff | Tipp Moseley -core,github.com/google/pprof/internal/measurement,Apache-2.0,Andrew Hunter | Google Inc. | Hyoun Kyu Cho | Martin Spier | Raul Silvera | Taco de Wolff | Tipp Moseley -core,github.com/google/pprof/internal/plugin,Apache-2.0,Andrew Hunter | Google Inc. | Hyoun Kyu Cho | Martin Spier | Raul Silvera | Taco de Wolff | Tipp Moseley -core,github.com/google/pprof/internal/report,Apache-2.0,Andrew Hunter | Google Inc. 
| Hyoun Kyu Cho | Martin Spier | Raul Silvera | Taco de Wolff | Tipp Moseley -core,github.com/google/pprof/internal/symbolizer,Apache-2.0,Andrew Hunter | Google Inc. | Hyoun Kyu Cho | Martin Spier | Raul Silvera | Taco de Wolff | Tipp Moseley -core,github.com/google/pprof/internal/symbolz,Apache-2.0,Andrew Hunter | Google Inc. | Hyoun Kyu Cho | Martin Spier | Raul Silvera | Taco de Wolff | Tipp Moseley -core,github.com/google/pprof/internal/transport,Apache-2.0,Andrew Hunter | Google Inc. | Hyoun Kyu Cho | Martin Spier | Raul Silvera | Taco de Wolff | Tipp Moseley core,github.com/google/pprof/profile,Apache-2.0,Andrew Hunter | Google Inc. | Hyoun Kyu Cho | Martin Spier | Raul Silvera | Taco de Wolff | Tipp Moseley -core,github.com/google/pprof/third_party/svgpan,BSD-3-Clause,Andrew Hunter | Copyright 2009-2017 Andrea Leofreddi . All rights reserved | Google Inc. | Hyoun Kyu Cho | Martin Spier | Raul Silvera | Taco de Wolff | Tipp Moseley core,github.com/google/s2a-go,Apache-2.0,Copyright (c) 2020 Google core,github.com/google/s2a-go/fallback,Apache-2.0,Copyright (c) 2020 Google core,github.com/google/s2a-go/internal/authinfo,Apache-2.0,Copyright (c) 2020 Google @@ -1298,7 +1286,6 @@ core,github.com/hetznercloud/hcloud-go/v2/hcloud/internal/instrumentation,MIT,Co core,github.com/hetznercloud/hcloud-go/v2/hcloud/schema,MIT,Copyright (c) 2018-2020 Hetzner Cloud GmbH core,github.com/huandu/xstrings,MIT,Copyright (c) 2015 Huan Du core,github.com/iancoleman/strcase,MIT,"Copyright (c) 2015 Ian Coleman | Copyright (c) 2018 Ma_124, " -core,github.com/ianlancetaylor/demangle,BSD-3-Clause,Copyright (c) 2015 The Go Authors. All rights reserved core,github.com/imdario/mergo,BSD-3-Clause,Copyright (c) 2012 The Go Authors. All rights reserved | Copyright (c) 2013 Dario Castañé. 
All rights reserved core,github.com/in-toto/in-toto-golang/in_toto,Apache-2.0,Copyright 2018 New York University core,github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common,Apache-2.0,Copyright 2018 New York University diff --git a/go.mod b/go.mod index d0d3b487e40d5..a1f0367beeed8 100644 --- a/go.mod +++ b/go.mod @@ -220,7 +220,7 @@ require ( github.com/google/go-containerregistry v0.20.2 github.com/google/gofuzz v1.2.0 github.com/google/gopacket v1.1.19 - github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 + github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 // indirect github.com/gorilla/mux v1.8.1 github.com/gosnmp/gosnmp v1.38.0 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 @@ -444,7 +444,6 @@ require ( github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/hashicorp/serf v0.10.1 // indirect github.com/huandu/xstrings v1.5.0 // indirect - github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465 // indirect github.com/in-toto/in-toto-golang v0.9.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/itchyny/timefmt-go v0.1.6 // indirect diff --git a/go.sum b/go.sum index 64d9e49a03ed2..2139597acb719 100644 --- a/go.sum +++ b/go.sum @@ -1027,8 +1027,6 @@ github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSAS github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465 h1:KwWnWVWCNtNq/ewIX7HIKnELmEx2nDP42yskD/pi7QE= -github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= diff --git a/pkg/security/tests/.gitignore b/pkg/security/tests/.gitignore index 60bf37628c7b5..5fb1586c9528e 100644 --- a/pkg/security/tests/.gitignore +++ b/pkg/security/tests/.gitignore @@ -1,4 +1,2 @@ testsuite -stresssuite -!latency/bin !syscall_tester/bin diff --git a/pkg/security/tests/activity_dumps_common.go b/pkg/security/tests/activity_dumps_common.go index 0115e6765b265..5640aa6e78041 100644 --- a/pkg/security/tests/activity_dumps_common.go +++ b/pkg/security/tests/activity_dumps_common.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build linux && (functionaltests || stresstests) +//go:build linux && functionaltests // Package tests holds tests related files package tests diff --git a/pkg/security/tests/cmdwrapper.go b/pkg/security/tests/cmdwrapper.go index e2335a32a897b..1d25a6b4a7664 100644 --- a/pkg/security/tests/cmdwrapper.go +++ b/pkg/security/tests/cmdwrapper.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-//go:build functionaltests || stresstests +//go:build functionaltests // Package tests holds tests related files package tests diff --git a/pkg/security/tests/discarders_stress_test.go b/pkg/security/tests/discarders_stress_test.go deleted file mode 100644 index e9eb5f9a28123..0000000000000 --- a/pkg/security/tests/discarders_stress_test.go +++ /dev/null @@ -1,250 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -//go:build stresstests - -// Package tests holds tests related files -package tests - -import ( - "flag" - "fmt" - "os" - "runtime" - "runtime/pprof" - "strings" - "testing" - "time" - - "github.com/DataDog/datadog-agent/pkg/security/metrics" - "github.com/DataDog/datadog-agent/pkg/security/secl/rules" -) - -var ( - nbDiscardersRuns int - testDuration time.Duration - maxTotalFiles int - eventsPerSec int - mountDir bool - mountParentDir bool - remountEvery time.Duration - maxDepth int - memTopFrom string - open bool -) - -type metric struct { - vals []int64 - min int64 - max int64 - avg int64 -} - -func computeMetrics(metrics map[string]*metric) { - for _, metric := range metrics { - metric.min = metric.vals[0] - metric.max = metric.vals[0] - total := metric.vals[0] - for _, val := range metric.vals[1:] { - if val > metric.max { - metric.max = val - } else if val < metric.min { - metric.min = val - } - total += val - } - metric.avg = int64(total / int64(len(metric.vals))) - } -} - -func dumpMetrics(metrics map[string]*metric) { - fmt.Printf("\nRESULT METRICS for %d runs of %v: \n", nbDiscardersRuns, testDuration) - for id, metric := range metrics { - if strings.Contains(id, "action.") { - fmt.Printf("%s: %d (min: %d, max: %d)\n", id, metric.avg, metric.min, metric.max) - } - } - fmt.Printf("---\n") - for id, metric := range metrics { - if strings.Contains(id, "datadog.") { - fmt.Printf("%s: %d (min: %d, max: %d)\n", id, metric.avg, metric.min, metric.max) - } - } - fmt.Printf("---\n") - for id, metric := range metrics { - if strings.Contains(id, "mem.") { - fmt.Printf("%s: %d (min: %d, max: %d)\n", id, metric.avg, metric.min, metric.max) - } - } -} - -func addMetricVal(ms map[string]*metric, key string, val int64) { - m := ms[key] - if m == nil { - m = &metric{} - ms[key] = m - } - m.vals = append(m.vals, val) -} - -func addResultMetrics(res *EstimatedResult, metrics map[string]*metric) { - addMetricVal(metrics, "action.file_creation", res.FileCreation) - addMetricVal(metrics, "action.file_access", res.FileAccess) - addMetricVal(metrics, "action.file_deletion", res.FileDeletion) -} - -func addMemoryMetrics(t *testing.T, test *testModule, metrics map[string]*metric) error { - runtime.GC() - proMemFile, err := os.CreateTemp("/tmp", "stress-mem-") - if err != nil { - t.Error(err) - return err - } - - if err := pprof.WriteHeapProfile(proMemFile); err != nil { - t.Error(err) - return err - } - - topDataMem, err := getTopData(proMemFile.Name(), memTopFrom, 50) - if err != nil { - t.Error(err) - return err - } - - fmt.Printf("\nMemory report:\n%s\n", string(topDataMem)) - return nil -} - -func addModuleMetrics(test *testModule, ms map[string]*metric) { - test.eventMonitor.SendStats() - test.eventMonitor.SendStats() - - fmt.Printf("Metrics:\n") - - key := metrics.MetricDiscarderAdded + ":event_type:open" - val := test.statsdClient.Get(key) - key = metrics.MetricDiscarderAdded + 
":event_type:unlink" - val += test.statsdClient.Get(key) - fmt.Printf(" %s:event_type:* %d\n", metrics.MetricDiscarderAdded, val) - addMetricVal(ms, metrics.MetricDiscarderAdded, val) - - key = metrics.MetricEventDiscarded + ":event_type:open" - val = test.statsdClient.Get(key) - key = metrics.MetricEventDiscarded + ":event_type:unlink" - val += test.statsdClient.Get(key) - fmt.Printf(" %s:event_type:* %d\n", metrics.MetricEventDiscarded, val) - addMetricVal(ms, metrics.MetricEventDiscarded, val) - - key = metrics.MetricPerfBufferEventsWrite + ":event_type:open" - val = test.statsdClient.Get(key) - key = metrics.MetricPerfBufferEventsWrite + ":event_type:unlink" - val += test.statsdClient.Get(key) - fmt.Printf(" %s:event_type:* %d\n", metrics.MetricPerfBufferEventsWrite, val) - addMetricVal(ms, metrics.MetricPerfBufferEventsWrite, val) - - key = metrics.MetricPerfBufferEventsRead + ":event_type:open" - val = test.statsdClient.Get(key) - key = metrics.MetricPerfBufferEventsRead + ":event_type:unlink" - val += test.statsdClient.Get(key) - fmt.Printf(" %s:event_type:* %d\n", metrics.MetricPerfBufferEventsRead, val) - addMetricVal(ms, metrics.MetricPerfBufferEventsRead, val) - - for _, key = range []string{ - metrics.MetricPerfBufferBytesWrite + ":map:events", - metrics.MetricPerfBufferBytesRead + ":map:events", - metrics.MetricDentryResolverHits + ":type:cache", - metrics.MetricDentryResolverMiss + ":type:cache", - } { - val = test.statsdClient.Get(key) - fmt.Printf(" %s: %d\n", key, val) - addMetricVal(ms, key, val) - } -} - -// goal: measure the performance behavior of discarders on load -func runTestDiscarders(t *testing.T, metrics map[string]*metric) { - rules := []*rules.RuleDefinition{ - { - ID: "rule", - Expression: fmt.Sprintf(`open.file.path =~ "{{.Root}}/files_generator_root/%s/no-approver-*"`, noDiscardersDirName), - }, - { - ID: "rule2", - Expression: fmt.Sprintf(`unlink.file.path =~ "{{.Root}}/files_generator_root/%s/no-approver-*"`, noDiscardersDirName), - }, - } - test, err := newTestModule(t, nil, rules, withStaticOpts(testOpts{enableActivityDump: false})) - if err != nil { - t.Fatal(err) - } - defer test.Close() - - rootPath, _, err := test.Path("files_generator_root") - if err != nil { - t.Fatal(err) - } - fileGen, err := NewFileGenerator(rootPath) - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(rootPath) - - err = fileGen.PrepareFileGenerator(FileGeneratorConfig{ - id: "parent_mount", - TestDuration: testDuration, - Debug: false, - MaxTotalFiles: maxTotalFiles, - EventsPerSec: eventsPerSec, - MountDir: mountDir, - MountParentDir: mountParentDir, - RemountEvery: remountEvery, - MaxDepth: maxDepth, - Open: open, - }) - if err != nil { - t.Fatal(err) - } - if err := fileGen.Start(); err != nil { - t.Fatal(err) - } - res, err := fileGen.Wait() - if err != nil { - t.Fatal(err) - } - - fmt.Printf("Test result:\n") - res.Print() - addResultMetrics(res, metrics) - res = nil - - addModuleMetrics(test, metrics) - addMemoryMetrics(t, test, metrics) -} - -// goal: measure the performance behavior of discarders on load -func TestDiscarders(t *testing.T) { - metrics := make(map[string]*metric) - - for i := 0; i < nbDiscardersRuns; i++ { - fmt.Printf("\nRUN: %d\n", i+1) - runTestDiscarders(t, metrics) - } - computeMetrics(metrics) - dumpMetrics(metrics) -} - -func init() { - flag.IntVar(&nbDiscardersRuns, "nb_discarders_runs", 5, "number of tests to run") - flag.DurationVar(&testDuration, "test_duration", time.Second*60*5, "duration of the test") - flag.IntVar(&maxTotalFiles, 
"max_total_files", 10000, "maximum number of files") - flag.IntVar(&eventsPerSec, "events_per_sec", 2000, "max events per sec") - flag.BoolVar(&mountDir, "mount_dir", true, "set to true to have a working directory tmpfs mounted") - flag.BoolVar(&mountParentDir, "mount_parent_dir", false, "set to true to have a parent working directory tmpfs mounted") - flag.DurationVar(&remountEvery, "remount_every", time.Second*60*3, "time between every mount points umount/remount") - flag.IntVar(&maxDepth, "max_depth", 1, "directories max depth") - flag.StringVar(&memTopFrom, "memory top from", "probe", "set to the package to filter for mem stats") - flag.BoolVar(&open, "open", true, "true to enable randomly open events") -} diff --git a/pkg/security/tests/files_generator.go b/pkg/security/tests/files_generator.go index 5fc0a2e448590..809a85b692500 100644 --- a/pkg/security/tests/files_generator.go +++ b/pkg/security/tests/files_generator.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build linux && (functionaltests || stresstests) +//go:build linux && functionaltests // Package tests holds tests related files package tests diff --git a/pkg/security/tests/latency_test.go b/pkg/security/tests/latency_test.go deleted file mode 100644 index 6685f6f81758a..0000000000000 --- a/pkg/security/tests/latency_test.go +++ /dev/null @@ -1,146 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -//go:build linux && stresstests - -// Package tests holds tests related files -package tests - -import ( - "embed" - "flag" - "fmt" - "os" - "os/exec" - "testing" - "unsafe" - - "github.com/DataDog/datadog-agent/pkg/security/secl/rules" -) - -var ( - coreID int - nbRuns int - nbSkips int - host string -) - -//go:embed latency/bin -var benchLatencyhFS embed.FS - -// modified version of testModule.CreateWithOption, to be able to call it without testing module -func CreateWithOptions(tb testing.TB, filename string, user, group, mode int) (string, unsafe.Pointer, error) { - var macros []*rules.MacroDefinition - var rules []*rules.RuleDefinition - - if err := initLogger(); err != nil { - return "", nil, err - } - - st, err := newSimpleTest(tb, macros, rules, "") - if err != nil { - return "", nil, err - } - - testFile, testFilePtr, err := st.Path(filename) - if err != nil { - return testFile, testFilePtr, err - } - - // Create file - f, err := os.OpenFile(testFile, os.O_CREATE, os.FileMode(mode)) - if err != nil { - return "", nil, err - } - f.Close() - - // Chown the file - err = os.Chown(testFile, user, group) - return testFile, testFilePtr, err -} - -// load embedded binary -func loadBenchLatencyBin(tb testing.TB, binary string) (string, error) { - testerBin, err := benchLatencyhFS.ReadFile(fmt.Sprintf("latency/bin/%s", binary)) - if err != nil { - return "", err - } - - perm := 0o700 - binPath, _, _ := CreateWithOptions(tb, binary, -1, -1, perm) - - f, err := os.OpenFile(binPath, os.O_WRONLY|os.O_CREATE, os.FileMode(perm)) - if err != nil { - return "", err - } - - if _, err = f.Write(testerBin); err != nil { - f.Close() - return "", err - } - f.Close() - - return binPath, nil -} - -// bench induced latency for DNS req -func benchLatencyDNS(t *testing.T, rule *rules.RuleDefinition, executable string) { - // do not load 
module if no rule is provided - if rule != nil { - var ruleDefs []*rules.RuleDefinition - ruleDefs = append(ruleDefs, rule) - test, err := newTestModule(t, nil, ruleDefs) - if err != nil { - t.Fatal(err) - } - defer test.Close() - } - - // load bench binary - executable, err := loadBenchLatencyBin(t, executable) - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(executable) - - // exec the bench tool - cmd := exec.Command("taskset", "-c", fmt.Sprint(coreID), - executable, host, fmt.Sprint(nbRuns), fmt.Sprint(nbSkips)) - output, err := cmd.CombinedOutput() - t.Logf("Output:\n%s", output) - if err != nil { - t.Fatal(err) - } -} - -// goal: measure the induced latency when no kprobes/tc are loaded -func TestLatency_DNSNoKprobe(t *testing.T) { - benchLatencyDNS(t, nil, "bench_net_DNS") -} - -// goal: measure the induced latency when kprobes are loaded, but without a matching rule -func TestLatency_DNSNoRule(t *testing.T) { - rule := &rules.RuleDefinition{ - ID: "test_rule", - Expression: fmt.Sprintf(`dns.question.name == "%s.nope"`, host), - } - benchLatencyDNS(t, rule, "bench_net_DNS") -} - -// goal: measure the induced latency when kprobes are loaded, with a matching rule -func TestLatency_DNS(t *testing.T) { - rule := &rules.RuleDefinition{ - ID: "test_rule", - Expression: fmt.Sprintf(`dns.question.name == "%s"`, host), - } - benchLatencyDNS(t, rule, "bench_net_DNS") -} - -func init() { - flag.IntVar(&nbRuns, "nbruns", 100100, "number of runs to perform") - flag.IntVar(&nbSkips, "nbskips", 100, "number of first runs to skip from measurement") - flag.IntVar(&coreID, "coreid", 0, "CPU core ID to pin the bench program") - flag.StringVar(&host, "host", "google.com", "Host to query") -} diff --git a/pkg/security/tests/main_linux.go b/pkg/security/tests/main_linux.go index 47398d60d71e2..1caee6dd31363 100644 --- a/pkg/security/tests/main_linux.go +++ b/pkg/security/tests/main_linux.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build linux && (functionaltests || stresstests) +//go:build linux && functionaltests // Package tests holds tests related files package tests diff --git a/pkg/security/tests/main_test.go b/pkg/security/tests/main_test.go index c9be26e73f733..6ef5e6cdb8a80 100644 --- a/pkg/security/tests/main_test.go +++ b/pkg/security/tests/main_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build functionaltests || stresstests +//go:build functionaltests // Package tests holds tests related files package tests diff --git a/pkg/security/tests/main_windows.go b/pkg/security/tests/main_windows.go index 85b515be3be2d..945126ae6c59c 100644 --- a/pkg/security/tests/main_windows.go +++ b/pkg/security/tests/main_windows.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build windows && (functionaltests || stresstests) +//go:build windows && functionaltests // Package tests holds tests related files package tests diff --git a/pkg/security/tests/module_stresser.go b/pkg/security/tests/module_stresser.go deleted file mode 100644 index 7053f665acdea..0000000000000 --- a/pkg/security/tests/module_stresser.go +++ /dev/null @@ -1,353 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. 
-// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -//go:build stresstests - -// Package tests holds tests related files -package tests - -import ( - "bufio" - "encoding/json" - "fmt" - "os" - "runtime" - "runtime/pprof" - "strings" - "testing" - "text/tabwriter" - "time" - - "github.com/google/pprof/driver" -) - -// StressOpts defines Stresser options -type StressOpts struct { - KeepProfile bool - ReportFile string - DiffBase string - TopFrom string - Duration time.Duration -} - -// StressFlag implements pprof Flag interface -type StressFlag struct { - Path string - Top string - From string -} - -// Bool implements pprof Flag interface -func (s *StressFlag) Bool(name string, def bool, usage string) *bool { - v := def - - switch name { - case "top": - v = true - } - - return &v -} - -// Int implements pprof Flag interface -func (s *StressFlag) Int(name string, def int, usage string) *int { - v := def - return &v -} - -// Float64 implements pprof Flag interface -func (s *StressFlag) Float64(name string, def float64, usage string) *float64 { - v := def - return &v -} - -// String implements pprof Flag interface -func (s *StressFlag) String(name string, def string, usage string) *string { - v := def - - switch name { - case "output": - v = s.Top - case "show_from": - v = s.From - } - - return &v -} - -// StringList implements pprof Flag interface -func (s *StressFlag) StringList(name string, def string, usage string) *[]*string { - v := []*string{&def} - return &v -} - -// ExtraUsage implements pprof Flag interface -func (s *StressFlag) ExtraUsage() string { - return "" -} - -// AddExtraUsage implements pprof Flag interface -func (s *StressFlag) AddExtraUsage(eu string) {} - -// Parse implements pprof Flag interface -func (s *StressFlag) Parse(usage func()) []string { - return []string{s.Path} -} - -// StressReports represents a map of StressReport -type StressReports map[string]*StressReport - -// StressReport defines a Stresser report -type StressReport struct { - Duration time.Duration - Iteration int - BaseIteration int `json:",omitempty"` - Extras map[string]struct { - Value float64 - Unit string - } `json:",omitempty"` - TopCPU []byte `json:"-"` - TopMem []byte `json:"-"` -} - -// AddMetric add custom metrics to the report -func (s *StressReport) AddMetric(name string, value float64, unit string) { - if s.Extras == nil { - s.Extras = map[string]struct { - Value float64 - Unit string - }{} - } - s.Extras[name] = struct { - Value float64 - Unit string - }{ - Value: value, - Unit: unit, - } -} - -// Delta returns the delta between the base and the currrent report in percentage -func (s *StressReport) Delta() float64 { - if s.BaseIteration != 0 { - return float64(s.Iteration-s.BaseIteration) * 100.0 / float64(s.BaseIteration) - } - - return 0 -} - -// Print prints the report in a human readable format -func (s *StressReport) Print(t *testing.T) { - fmt.Printf("----- Stress Report for %s -----\n", t.Name()) - w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', tabwriter.AlignRight) - fmt.Fprintf(w, "%s\t\t%d iterations\t%15.4f ns/iteration", s.Duration, s.Iteration, float64(s.Duration.Nanoseconds())/float64(s.Iteration)) - if s.Extras != nil { - for _, metric := range s.Extras { - fmt.Fprintf(w, "\t%15.4f %s", metric.Value, metric.Unit) - } - } - - if delta := s.Delta(); delta != 0 { - fmt.Fprintf(w, "\t%15.4f %%iterations", delta) - } - - fmt.Fprintln(w) - w.Flush() - - fmt.Println() - fmt.Printf("----- Profiling 
Report CPU for %s -----\n", t.Name()) - fmt.Println(string(s.TopCPU)) - fmt.Println() - - fmt.Println() - fmt.Printf("----- Profiling Report Memory for %s -----\n", t.Name()) - fmt.Println(string(s.TopMem)) - fmt.Println() -} - -// Save writes the report information for delta computation -func (s *StressReport) Save(filename string, name string) error { - var reports StressReports - if err := reports.Load(filename); err != nil { - reports = map[string]*StressReport{ - name: s, - } - } else { - reports[name] = s - } - - fmt.Printf("Writing reports in %s\n", filename) - - j, _ := json.Marshal(reports) - return os.WriteFile(filename, j, 0644) -} - -// Load previous report -func (s *StressReports) Load(filename string) error { - data, err := os.ReadFile(filename) - if err != nil { - return err - } - - return json.Unmarshal(data, s) -} - -func getTopData(filename string, from string, size int) ([]byte, error) { - topFile, err := os.CreateTemp("/tmp", "stress-top-") - if err != nil { - return nil, err - } - defer os.Remove(topFile.Name()) - - flagSet := &StressFlag{Path: filename, Top: topFile.Name(), From: from} - - if err := driver.PProf(&driver.Options{Flagset: flagSet}); err != nil { - return nil, err - } - - file, err := os.Open(topFile.Name()) - if err != nil { - return nil, err - } - - scanner := bufio.NewScanner(file) - scanner.Split(bufio.ScanLines) - - var topLines []string - for scanner.Scan() { - topLines = append(topLines, scanner.Text()) - if len(topLines) > size { - break - } - } - file.Close() - - return []byte(strings.Join(topLines, "\n")), nil -} - -// StressIt starts the stress test -func StressIt(t *testing.T, pre, post, fnc func() error, opts StressOpts) (StressReport, error) { - var report StressReport - - proCPUFile, err := os.CreateTemp("/tmp", "stress-cpu-") - if err != nil { - t.Error(err) - return report, err - } - - if !opts.KeepProfile { - defer os.Remove(proCPUFile.Name()) - } else { - fmt.Printf("Generating CPU profile in %s\n", proCPUFile.Name()) - } - - if pre != nil { - if err := pre(); err != nil { - t.Error(err) - return report, err - } - } - - if err := pprof.StartCPUProfile(proCPUFile); err != nil { - t.Error(err) - return report, err - } - - done := make(chan bool) - var iteration int - - start := time.Now() - - go func() { - time.Sleep(opts.Duration) - done <- true - }() - -LOOP: - for { - select { - case <-done: - break LOOP - default: - err = fnc() - iteration++ - - if err != nil { - break LOOP - } - } - } - - duration := time.Since(start) - - pprof.StopCPUProfile() - proCPUFile.Close() - - runtime.GC() - proMemFile, err := os.CreateTemp("/tmp", "stress-mem-") - if err != nil { - t.Error(err) - return report, err - } - - if !opts.KeepProfile { - defer os.Remove(proMemFile.Name()) - } else { - fmt.Printf("Generating Memory profile in %s\n", proMemFile.Name()) - } - - if err := pprof.WriteHeapProfile(proMemFile); err != nil { - t.Error(err) - return report, err - } - - if post != nil { - if err := post(); err != nil { - t.Error(err) - return report, err - } - } - - topDataCPU, err := getTopData(proCPUFile.Name(), opts.TopFrom, 50) - if err != nil { - t.Error(err) - return report, err - } - - topDataMem, err := getTopData(proMemFile.Name(), opts.TopFrom, 50) - if err != nil { - t.Error(err) - return report, err - } - - report = StressReport{ - Duration: duration, - Iteration: iteration, - TopCPU: topDataCPU, - TopMem: topDataMem, - } - - if opts.DiffBase != "" { - var baseReports StressReports - if err := baseReports.Load(opts.DiffBase); err != nil { - 
t.Log(err) - } else { - baseReport, exists := baseReports[t.Name()] - if exists { - report.BaseIteration = baseReport.Iteration - } - } - } - - // save report for further comparison - if opts.ReportFile != "" { - if err := report.Save(opts.ReportFile, t.Name()); err != nil { - t.Error(err) - return report, err - } - } - - return report, err -} diff --git a/pkg/security/tests/module_tester.go b/pkg/security/tests/module_tester.go index ebfcc3eba0a52..22711615cc70f 100644 --- a/pkg/security/tests/module_tester.go +++ b/pkg/security/tests/module_tester.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build functionaltests || stresstests +//go:build functionaltests // Package tests holds tests related files package tests diff --git a/pkg/security/tests/module_tester_linux.go b/pkg/security/tests/module_tester_linux.go index 1b20ff18cd690..0012e19426ee1 100644 --- a/pkg/security/tests/module_tester_linux.go +++ b/pkg/security/tests/module_tester_linux.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build linux && (functionaltests || stresstests) +//go:build linux && functionaltests // Package tests holds tests related files package tests diff --git a/pkg/security/tests/module_tester_windows.go b/pkg/security/tests/module_tester_windows.go index a52bf144c7db6..0d68f2aed4023 100644 --- a/pkg/security/tests/module_tester_windows.go +++ b/pkg/security/tests/module_tester_windows.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build windows && (functionaltests || stresstests) +//go:build windows && functionaltests // Package tests holds tests related files package tests diff --git a/pkg/security/tests/schemas.go b/pkg/security/tests/schemas.go index a9fcacc601369..3fd8e7c006b77 100644 --- a/pkg/security/tests/schemas.go +++ b/pkg/security/tests/schemas.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build linux && (functionaltests || stresstests) +//go:build linux && functionaltests // Package tests holds tests related files package tests diff --git a/pkg/security/tests/simple_test.go b/pkg/security/tests/simple_test.go index 5ea30457e8dd2..fb7f93e38f8ea 100644 --- a/pkg/security/tests/simple_test.go +++ b/pkg/security/tests/simple_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build functionaltests || stresstests +//go:build functionaltests // Package tests holds tests related files package tests diff --git a/pkg/security/tests/stress_test.go b/pkg/security/tests/stress_test.go deleted file mode 100644 index db6172d6900cb..0000000000000 --- a/pkg/security/tests/stress_test.go +++ /dev/null @@ -1,284 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
- -//go:build stresstests - -// Package tests holds tests related files -package tests - -import ( - "flag" - "fmt" - "os" - "os/exec" - "path" - "testing" - "time" - - sprobe "github.com/DataDog/datadog-agent/pkg/security/probe" - "github.com/DataDog/datadog-agent/pkg/security/secl/model" - "github.com/DataDog/datadog-agent/pkg/security/secl/rules" -) - -var ( - keepProfile bool - reportFile string - diffBase string - duration int -) - -// Stress test of open syscalls -func stressOpen(t *testing.T, rule *rules.RuleDefinition, pathname string, size int) { - var ruleDefs []*rules.RuleDefinition - if rule != nil { - ruleDefs = append(ruleDefs, rule) - } - - test, err := newTestModule(t, nil, ruleDefs) - if err != nil { - t.Fatal(err) - } - defer test.Close() - - p, ok := test.probe.PlatformProbe.(*sprobe.EBPFProbe) - if !ok { - t.Skip("not supported") - } - - testFolder, _, err := test.Path(path.Dir(pathname)) - if err != nil { - t.Fatal(err) - } - - os.MkdirAll(testFolder, os.ModePerm) - - testFile, _, err := test.Path(pathname) - if err != nil { - t.Fatal(err) - } - - eventStreamMonitor := p.GetMonitors().GetEventStreamMonitor() - - eventStreamMonitor.GetAndResetLostCount("events", -1) - eventStreamMonitor.GetKernelLostCount("events", -1, model.MaxKernelEventType) - - fnc := func() error { - f, err := os.Create(testFile) - if err != nil { - return err - } - - if size > 0 { - data := make([]byte, size, size) - if n, err := f.Write(data); err != nil || n != 1024 { - return err - } - } - - return f.Close() - } - - opts := StressOpts{ - Duration: time.Duration(duration) * time.Second, - KeepProfile: keepProfile, - DiffBase: diffBase, - TopFrom: "probe", - ReportFile: reportFile, - } - - events := 0 - test.RegisterRuleEventHandler(func(_ *model.Event, _ *rules.Rule) { - events++ - }) - defer test.RegisterRuleEventHandler(nil) - - report, err := StressIt(t, nil, nil, fnc, opts) - test.RegisterRuleEventHandler(nil) - - if err != nil { - t.Fatal(err) - } - - report.AddMetric("lost", float64(eventStreamMonitor.GetLostCount("events", -1)), "lost") - report.AddMetric("kernel_lost", float64(eventStreamMonitor.GetKernelLostCount("events", -1, model.MaxKernelEventType)), "kernel lost") - report.AddMetric("events", float64(events), "events") - report.AddMetric("events/sec", float64(events)/report.Duration.Seconds(), "event/s") - - report.Print(t) - - if report.Delta() < -2.0 { - t.Error("unexpected performance degradation") - - cmdOutput, _ := exec.Command("pstree").Output() - fmt.Println(string(cmdOutput)) - - cmdOutput, _ = exec.Command("ps", "aux").Output() - fmt.Println(string(cmdOutput)) - } -} - -// goal: measure host abality to handle open syscall without any kprobe, act as a reference -// this benchmark generate syscall but without having kprobe installed - -func TestStress_E2EOpenNoKprobe(t *testing.T) { - stressOpen(t, nil, "folder1/folder2/folder1/folder2/test", 0) -} - -// goal: measure the impact of an event catched and passed from the kernel to the userspace -// this benchmark generate event that passs from the kernel to the userspace -func TestStress_E2EOpenEvent(t *testing.T) { - rule := &rules.RuleDefinition{ - ID: "test_rule", - Expression: `open.file.path == "{{.Root}}/folder1/folder2/test" && open.flags & O_CREAT != 0`, - } - - stressOpen(t, rule, "folder1/folder2/test", 0) -} - -// goal: measure the impact on the kprobe only -// this benchmark generate syscall but without having event generated -func TestStress_E2EOpenNoEvent(t *testing.T) { - rule := &rules.RuleDefinition{ - ID: 
"test_rule", - Expression: `open.file.path == "{{.Root}}/folder1/folder2/test-no-event" && open.flags & O_APPEND != 0`, - } - - stressOpen(t, rule, "folder1/folder2/test", 0) -} - -// goal: measure the impact of an event catched and passed from the kernel to the userspace -// this benchmark generate event that passs from the kernel to the userspace -func TestStress_E2EOpenWrite1KEvent(t *testing.T) { - rule := &rules.RuleDefinition{ - ID: "test_rule", - Expression: `open.file.path == "{{.Root}}/folder1/folder2/test" && open.flags & O_CREAT != 0`, - } - - stressOpen(t, rule, "folder1/folder2/test", 1024) -} - -// goal: measure host abality to handle open syscall without any kprobe, act as a reference -// this benchmark generate syscall but without having kprobe installed - -func TestStress_E2EOpenWrite1KNoKprobe(t *testing.T) { - stressOpen(t, nil, "folder1/folder2/test", 1024) -} - -// goal: measure the impact on the kprobe only -// this benchmark generate syscall but without having event generated -func TestStress_E2EOpenWrite1KNoEvent(t *testing.T) { - rule := &rules.RuleDefinition{ - ID: "test_rule", - Expression: `open.file.path == "{{.Root}}/folder1/folder2/test-no-event" && open.flags & O_APPEND != 0`, - } - - stressOpen(t, rule, "folder1/folder2/test", 1024) -} - -// Stress test of fork/exec syscalls -func stressExec(t *testing.T, rule *rules.RuleDefinition, pathname string, executable string) { - var ruleDefs []*rules.RuleDefinition - if rule != nil { - ruleDefs = append(ruleDefs, rule) - } - - test, err := newTestModule(t, nil, ruleDefs) - if err != nil { - t.Fatal(err) - } - defer test.Close() - - p, ok := test.probe.PlatformProbe.(*sprobe.EBPFProbe) - if !ok { - t.Skip("not supported") - } - - testFolder, _, err := test.Path(path.Dir(pathname)) - if err != nil { - t.Fatal(err) - } - - os.MkdirAll(testFolder, os.ModePerm) - - testFile, _, err := test.Path(pathname) - if err != nil { - t.Fatal(err) - } - - eventStreamMonitor := p.GetMonitors().GetEventStreamMonitor() - eventStreamMonitor.GetAndResetLostCount("events", -1) - eventStreamMonitor.GetKernelLostCount("events", -1, model.MaxKernelEventType) - - fnc := func() error { - cmd := exec.Command(executable, testFile) - _, err := cmd.CombinedOutput() - return err - } - - opts := StressOpts{ - Duration: time.Duration(duration) * time.Second, - KeepProfile: keepProfile, - DiffBase: diffBase, - TopFrom: "probe", - ReportFile: reportFile, - } - - events := 0 - test.RegisterRuleEventHandler(func(_ *model.Event, _ *rules.Rule) { - events++ - }) - defer test.RegisterRuleEventHandler(nil) - - kevents := 0 - test.RegisterProbeEventHandler(func(_ *model.Event) { - kevents++ - }) - defer test.RegisterProbeEventHandler(nil) - - report, err := StressIt(t, nil, nil, fnc, opts) - if err != nil { - t.Fatal(err) - } - - time.Sleep(2 * time.Second) - - report.AddMetric("lost", float64(eventStreamMonitor.GetLostCount("events", -1)), "lost") - report.AddMetric("kernel_lost", float64(eventStreamMonitor.GetKernelLostCount("events", -1, model.MaxKernelEventType)), "kernel lost") - report.AddMetric("events", float64(events), "events") - report.AddMetric("events/sec", float64(events)/report.Duration.Seconds(), "event/s") - report.AddMetric("kevents", float64(kevents), "kevents") - report.AddMetric("kevents/sec", float64(kevents)/report.Duration.Seconds(), "kevent/s") - - report.Print(t) -} - -// goal: measure host abality to handle open syscall without any kprobe, act as a reference -// this benchmark generate syscall but without having kprobe installed 
- -func TestStress_E2EExecNoKprobe(t *testing.T) { - executable := which(t, "touch") - - stressExec(t, nil, "folder1/folder2/folder1/folder2/test", executable) -} - -// goal: measure the impact of an event catched and passed from the kernel to the userspace -// this benchmark generate event that passs from the kernel to the userspace -func TestStress_E2EExecEvent(t *testing.T) { - executable := which(t, "touch") - - rule := &rules.RuleDefinition{ - ID: "test_rule", - Expression: fmt.Sprintf(`open.file.path == "{{.Root}}/folder1/folder2/test-ancestors" && process.file.name == "%s"`, "touch"), - } - - stressExec(t, rule, "folder1/folder2/test-ancestors", executable) -} - -func init() { - flag.BoolVar(&keepProfile, "keep-profile", false, "do not delete profile after run") - flag.StringVar(&reportFile, "report-file", "", "save report of the stress test") - flag.StringVar(&diffBase, "diff-base", "", "source of base stress report for comparison") - flag.IntVar(&duration, "duration", 60, "duration of the run in second") -} diff --git a/pkg/security/tests/syscalls_amd64.go b/pkg/security/tests/syscalls_amd64.go index 6538e1943f6ea..9432105640bc2 100644 --- a/pkg/security/tests/syscalls_amd64.go +++ b/pkg/security/tests/syscalls_amd64.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build linux && ((functionaltests && amd64) || (stresstests && amd64)) +//go:build linux && functionaltests && amd64 // Package tests holds tests related files package tests diff --git a/pkg/security/tests/syscalls_arm64.go b/pkg/security/tests/syscalls_arm64.go index beaeea527f7db..1ea1f0ff45415 100644 --- a/pkg/security/tests/syscalls_arm64.go +++ b/pkg/security/tests/syscalls_arm64.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build (functionaltests && !amd64) || (stresstests && !amd64) +//go:build linux && functionaltests && arm64 package tests diff --git a/pkg/security/tests/testopts.go b/pkg/security/tests/testopts.go index f1a37fc300b57..fe1a31333cc26 100644 --- a/pkg/security/tests/testopts.go +++ b/pkg/security/tests/testopts.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-//go:build functionaltests || stresstests +//go:build functionaltests // Package tests holds tests related files package tests diff --git a/tasks/kmt.py b/tasks/kmt.py index 12b0ac2ba1953..2a70c89788ba8 100644 --- a/tasks/kmt.py +++ b/tasks/kmt.py @@ -51,7 +51,7 @@ from tasks.libs.pipeline.tools import loop_status from tasks.libs.releasing.version import VERSION_RE, check_version from tasks.libs.types.arch import Arch, KMTArchName -from tasks.security_agent import build_functional_tests, build_stress_tests +from tasks.security_agent import build_functional_tests from tasks.system_probe import ( BPF_TAG, EMBEDDED_SHARE_DIR, @@ -694,7 +694,6 @@ def kmt_secagent_prepare( skip_object_files=True, arch=arch, ) - build_stress_tests(ctx, output=f"{kmt_paths.secagent_tests}/pkg/security/stresssuite", skip_linters=True) go_path = "go" go_root = os.getenv("GOROOT") diff --git a/tasks/security_agent.py b/tasks/security_agent.py index ee6f65f2b82bc..6b5f20ac20092 100644 --- a/tasks/security_agent.py +++ b/tasks/security_agent.py @@ -45,7 +45,6 @@ BIN_DIR = os.path.join(".", "bin") BIN_PATH = os.path.join(BIN_DIR, "security-agent", bin_name("security-agent")) CI_PROJECT_DIR = os.environ.get("CI_PROJECT_DIR", ".") -STRESS_TEST_SUITE = "stresssuite" @task(iterable=["build_tags"]) @@ -256,47 +255,6 @@ def ninja_c_syscall_tester_common(nw, file_name, build_dir, flags=None, libs=Non return syscall_tester_exe_file -def ninja_c_latency_common(nw, file_name, build_dir, flags=None, libs=None, static=True): - if flags is None: - flags = [] - if libs is None: - libs = [] - - latency_c_dir = os.path.join("pkg", "security", "tests", "latency", "c") - latency_c_file = os.path.join(latency_c_dir, f"{file_name}.c") - latency_exe_file = os.path.join(build_dir, file_name) - - if static: - flags.append("-static") - - nw.build( - inputs=[latency_c_file], - outputs=[latency_exe_file], - rule="execlang", - variables={"exeflags": flags, "exelibs": libs}, - ) - return latency_exe_file - - -def ninja_latency_tools(ctx, build_dir, static=True): - return ninja_c_latency_common(ctx, "bench_net_DNS", build_dir, libs=["-lpthread"], static=static) - - -@task -def build_embed_latency_tools(ctx, static=True): - check_for_ninja(ctx) - build_dir = os.path.join("pkg", "security", "tests", "latency", "bin") - create_dir_if_needed(build_dir) - - nf_path = os.path.join(ctx.cwd, 'latency-tools.ninja') - with open(nf_path, 'w') as ninja_file: - nw = NinjaWriter(ninja_file, width=120) - ninja_define_exe_compiler(nw) - ninja_latency_tools(nw, build_dir, static=static) - - ctx.run(f"ninja -f {nf_path}") - - def ninja_syscall_x86_tester(ctx, build_dir, static=True, compiler='clang'): return ninja_c_syscall_tester_common( ctx, "syscall_x86_tester", build_dir, flags=["-m32"], static=static, compiler=compiler @@ -431,55 +389,6 @@ def build_functional_tests( ctx.run(cmd.format(**args), env=env) -@task -def build_stress_tests( - ctx, - output=f"pkg/security/tests/{STRESS_TEST_SUITE}", - major_version='7', - bundle_ebpf=True, - skip_linters=False, - kernel_release=None, -): - build_embed_latency_tools(ctx) - build_functional_tests( - ctx, - output=output, - major_version=major_version, - build_tags='stresstests', - bundle_ebpf=bundle_ebpf, - skip_linters=skip_linters, - kernel_release=kernel_release, - ) - - -@task -def stress_tests( - ctx, - verbose=False, - major_version='7', - output=f"pkg/security/tests/{STRESS_TEST_SUITE}", - bundle_ebpf=True, - testflags='', - skip_linters=False, - kernel_release=None, -): - build_stress_tests( - ctx, - 
major_version=major_version, - output=output, - bundle_ebpf=bundle_ebpf, - skip_linters=skip_linters, - kernel_release=kernel_release, - ) - - run_functional_tests( - ctx, - testsuite=output, - verbose=verbose, - testflags=testflags, - ) - - @task def functional_tests( ctx,