Merge pull request #639 from carreter/e2e
Bump go 1.22.2->1.22.5 and add E2E tests
Showing 261 changed files with 55,714 additions and 215 deletions.
@@ -0,0 +1,23 @@
# End-to-end tests for konnectivity-network-proxy running in a kind cluster

These e2e tests deploy the KNP agent and server to a local [kind](https://kind.sigs.k8s.io/)
cluster to verify their functionality.

These can be run automatically using `make e2e-test`.

## Setup in `main_test.go`

Before any of the actual tests are run, the `TestMain()` function
in `main_test.go` performs the following setup steps:

- Spin up a new kind cluster with the node image provided by the `-kind-image` flag.
- Sideload the KNP agent and server images provided with `-agent-image` and `-server-image` into the cluster.
- Deploy the necessary RBAC and service templates for both the KNP agent and server (see `renderAndApplyManifests`).

## The tests

### `static_count_test.go`

These tests deploy the KNP servers and agents to the previously created kind cluster.
After the deployments are up, the tests check that both the agent and server report
the correct number of connections on their metrics endpoints.
@@ -0,0 +1,178 @@
package e2e

import (
	"bytes"
	"context"
	"flag"
	"fmt"
	"log"
	"os"
	"path"
	"testing"
	"text/template"
	"time"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/e2e-framework/klient/wait"
	"sigs.k8s.io/e2e-framework/klient/wait/conditions"
	"sigs.k8s.io/e2e-framework/pkg/env"
	"sigs.k8s.io/e2e-framework/pkg/envconf"
	"sigs.k8s.io/e2e-framework/pkg/envfuncs"
	"sigs.k8s.io/e2e-framework/support/kind"
)

var (
	testenv        env.Environment
	agentImage     = flag.String("agent-image", "", "The proxy agent's docker image.")
	serverImage    = flag.String("server-image", "", "The proxy server's docker image.")
	kindImage      = flag.String("kind-image", "kindest/node", "Image to use for kind nodes.")
	connectionMode = flag.String("mode", "grpc", "Connection mode to use during e2e tests.")
)

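// TestMain validates the required image flags, spins up a kind cluster with the
// requested node image, sideloads the agent and server images, applies the shared
// RBAC and Service manifests, runs the test suite, and finally destroys the cluster.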
func TestMain(m *testing.M) {
	flag.Parse()
	if *agentImage == "" {
		log.Fatalf("must provide agent image with -agent-image")
	}
	if *serverImage == "" {
		log.Fatalf("must provide server image with -server-image")
	}

	scheme.AddToScheme(scheme.Scheme)

	testenv = env.New()
	kindClusterName := "kind-test"
	kindCluster := kind.NewCluster(kindClusterName).WithOpts(kind.WithImage(*kindImage))

	testenv.Setup(
		envfuncs.CreateCluster(kindCluster, kindClusterName),
		envfuncs.LoadImageToCluster(kindClusterName, *agentImage),
		envfuncs.LoadImageToCluster(kindClusterName, *serverImage),
		renderAndApplyManifests,
	)

	testenv.Finish(envfuncs.DestroyCluster(kindClusterName))

	os.Exit(testenv.Run(m))
}

// renderTemplate renders a template from e2e/templates into a kubernetes object.
// Template paths are relative to e2e/templates.
func renderTemplate(file string, params any) (client.Object, *schema.GroupVersionKind, error) {
	b := &bytes.Buffer{}

	tmp, err := template.ParseFiles(path.Join("templates/", file))
	if err != nil {
		return nil, nil, fmt.Errorf("could not parse template %v: %w", file, err)
	}

	err = tmp.Execute(b, params)
	if err != nil {
		return nil, nil, fmt.Errorf("could not execute template %v: %w", file, err)
	}

	decoder := scheme.Codecs.UniversalDeserializer()

	obj, gvk, err := decoder.Decode(b.Bytes(), nil, nil)
	if err != nil {
		return nil, nil, fmt.Errorf("could not decode rendered yaml into kubernetes object: %w", err)
	}

	return obj.(client.Object), gvk, nil
}

// KeyValue is a single key/value pair used as a template parameter.
type KeyValue struct {
	Key   string
	Value string
}

// DeploymentConfig holds the parameters used to render a Deployment template.
type DeploymentConfig struct {
	Replicas int
	Image    string
	Args     []KeyValue
}

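// renderAndApplyManifests renders the agent and server RBAC and Service templates
// and creates the resulting objects in the test cluster. It runs once as part of
// the environment setup in TestMain.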
func renderAndApplyManifests(ctx context.Context, cfg *envconf.Config) (context.Context, error) {
	client := cfg.Client()

	// Render agent RBAC and Service templates.
	agentServiceAccount, _, err := renderTemplate("agent/serviceaccount.yaml", struct{}{})
	if err != nil {
		return ctx, err
	}
	agentClusterRole, _, err := renderTemplate("agent/clusterrole.yaml", struct{}{})
	if err != nil {
		return ctx, err
	}
	agentClusterRoleBinding, _, err := renderTemplate("agent/clusterrolebinding.yaml", struct{}{})
	if err != nil {
		return ctx, err
	}
	agentService, _, err := renderTemplate("agent/service.yaml", struct{}{})
	if err != nil {
		return ctx, err
	}

	// Submit agent RBAC templates to k8s.
	err = client.Resources().Create(ctx, agentServiceAccount)
	if err != nil {
		return ctx, err
	}
	err = client.Resources().Create(ctx, agentClusterRole)
	if err != nil {
		return ctx, err
	}
	err = client.Resources().Create(ctx, agentClusterRoleBinding)
	if err != nil {
		return ctx, err
	}
	err = client.Resources().Create(ctx, agentService)
	if err != nil {
		return ctx, err
	}

	// Render server RBAC and Service templates.
	serverClusterRoleBinding, _, err := renderTemplate("server/clusterrolebinding.yaml", struct{}{})
	if err != nil {
		return ctx, err
	}
	serverService, _, err := renderTemplate("server/service.yaml", struct{}{})
	if err != nil {
		return ctx, err
	}

	// Submit server templates to k8s.
	err = client.Resources().Create(ctx, serverClusterRoleBinding)
	if err != nil {
		return ctx, err
	}
	err = client.Resources().Create(ctx, serverService)
	if err != nil {
		return ctx, err
	}

	return ctx, nil
}

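// deployAndWaitForDeployment returns a test step that creates the given Deployment
// and blocks until it reports as available, polling every 10 seconds for up to a minute.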
func deployAndWaitForDeployment(deployment client.Object) func(context.Context, *testing.T, *envconf.Config) context.Context {
	return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
		client := cfg.Client()
		err := client.Resources().Create(ctx, deployment)
		if err != nil {
			t.Fatalf("could not create Deployment: %v", err)
		}

		err = wait.For(
			conditions.New(client.Resources()).DeploymentAvailable(deployment.GetName(), deployment.GetNamespace()),
			wait.WithTimeout(1*time.Minute),
			wait.WithInterval(10*time.Second),
		)
		if err != nil {
			t.Fatalf("waiting for Deployment failed: %v", err)
		}

		return ctx
	}
}
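
For illustration, a minimal sketch of how `renderTemplate` and `DeploymentConfig` could be combined to render a parametrized Deployment manifest. The template path `agent/deployment.yaml`, the helper name `exampleRenderAgentDeployment`, and the Args entry are assumptions for this sketch, not code from this commit.

// Sketch only: the template path and the Args entry below are placeholders.
func exampleRenderAgentDeployment() (client.Object, error) {
	obj, _, err := renderTemplate("agent/deployment.yaml", DeploymentConfig{
		Replicas: 1,
		Image:    *agentImage,
		Args: []KeyValue{
			// Each KeyValue is presumably expanded by the template into a
			// "--key=value" container argument.
			{Key: "example-flag", Value: "example-value"},
		},
	})
	if err != nil {
		return nil, fmt.Errorf("could not render agent deployment: %w", err)
	}
	return obj, nil
}
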
@@ -0,0 +1,85 @@
package e2e

import (
	"context"
	"fmt"
	"net/http"
	"testing"

	corev1 "k8s.io/api/core/v1"

	"github.com/prometheus/common/expfmt"
	"sigs.k8s.io/e2e-framework/klient/k8s/resources"
	"sigs.k8s.io/e2e-framework/pkg/envconf"
)

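// getMetricsGaugeValue fetches the Prometheus text-format metrics exposed at url
// and returns the value of the first sample of the named gauge as an int.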
func getMetricsGaugeValue(url string, name string) (int, error) {
	resp, err := http.Get(url)
	if err != nil {
		return 0, fmt.Errorf("could not get metrics from url %v: %w", url, err)
	}
	defer resp.Body.Close()

	metricsParser := &expfmt.TextParser{}
	metricsFamilies, err := metricsParser.TextToMetricFamilies(resp.Body)
	if err != nil {
		return 0, fmt.Errorf("could not parse metrics: %w", err)
	}

	metricFamily, exists := metricsFamilies[name]
	if !exists {
		return 0, fmt.Errorf("metric %v does not exist", name)
	}
	value := int(metricFamily.GetMetric()[0].GetGauge().GetValue())
	return value, nil
}

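// assertAgentsAreConnected returns a test step that scrapes every konnectivity-agent
// pod's admin endpoint and checks that each agent reports the expected number of
// open server connections.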
func assertAgentsAreConnected(expectedConnections int, adminPort int) func(context.Context, *testing.T, *envconf.Config) context.Context {
	return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
		client := cfg.Client()

		agentPods := &corev1.PodList{}
		err := client.Resources().List(ctx, agentPods, resources.WithLabelSelector("k8s-app=konnectivity-agent"))
		if err != nil {
			t.Fatalf("couldn't get agent pods (label selector 'k8s-app=konnectivity-agent'): %v", err)
		}

		for _, agentPod := range agentPods.Items {
			numConnections, err := getMetricsGaugeValue(fmt.Sprintf("http://%v:%v/metrics", agentPod.Status.PodIP, adminPort), "konnectivity_network_proxy_agent_open_server_connections")
			if err != nil {
				t.Fatalf("couldn't get agent metric 'konnectivity_network_proxy_agent_open_server_connections' for pod %v: %v", agentPod.Name, err)
			}

			if numConnections != expectedConnections {
				t.Errorf("incorrect number of connected servers (want: %d, got: %d)", expectedConnections, numConnections)
			}
		}

		return ctx
	}
}

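// assertServersAreConnected returns a test step that scrapes every konnectivity-server
// pod's admin endpoint and checks that each server reports the expected number of
// ready backend (agent) connections.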
func assertServersAreConnected(expectedConnections int, adminPort int) func(context.Context, *testing.T, *envconf.Config) context.Context {
	return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
		client := cfg.Client()

		serverPods := &corev1.PodList{}
		err := client.Resources().List(ctx, serverPods, resources.WithLabelSelector("k8s-app=konnectivity-server"))
		if err != nil {
			t.Fatalf("couldn't get server pods (label selector 'k8s-app=konnectivity-server'): %v", err)
		}

		for _, serverPod := range serverPods.Items {
			numConnections, err := getMetricsGaugeValue(fmt.Sprintf("http://%v:%v/metrics", serverPod.Status.PodIP, adminPort), "konnectivity_network_proxy_server_ready_backend_connections")
			if err != nil {
				t.Fatalf("couldn't get server metric 'konnectivity_network_proxy_server_ready_backend_connections' for pod %v: %v", serverPod.Name, err)
			}

			if numConnections != expectedConnections {
				t.Errorf("incorrect number of connected agents (want: %d, got: %d)", expectedConnections, numConnections)
			}
		}

		return ctx
	}
}
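
To round this out, here is a hedged sketch of how `static_count_test.go` might compose the helpers above into an e2e-framework feature. The feature wiring (`features.New`/`Assess`, from `sigs.k8s.io/e2e-framework/pkg/features`) matches the helper signatures shown in this diff, but the test name, template paths, admin ports, and expected connection counts are placeholders; the actual test file is not shown in this excerpt.

// Sketch only: template paths, ports, and counts below are assumed placeholders.
func TestStaticCountSketch(t *testing.T) {
	agentDeployment, _, err := renderTemplate("agent/deployment.yaml", DeploymentConfig{Replicas: 1, Image: *agentImage})
	if err != nil {
		t.Fatalf("could not render agent deployment: %v", err)
	}
	serverDeployment, _, err := renderTemplate("server/deployment.yaml", DeploymentConfig{Replicas: 1, Image: *serverImage})
	if err != nil {
		t.Fatalf("could not render server deployment: %v", err)
	}

	feature := features.New("static count sketch").
		Assess("deploy server", deployAndWaitForDeployment(serverDeployment)).
		Assess("deploy agent", deployAndWaitForDeployment(agentDeployment)).
		Assess("agents see one server", assertAgentsAreConnected(1, 8093 /* agent admin port, assumed */)).
		Assess("servers see one agent", assertServersAreConnected(1, 8095 /* server admin port, assumed */)).
		Feature()

	testenv.Test(t, feature)
}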