integ-tests: use egoscale v3 (#20)
We remove egoscale v2 from the integration tests (at the time, v3 wasn't
available). This will enable testing whether the behavior of storage
class retain policies is correct.
sauterp authored Mar 15, 2024
1 parent f40ebce commit bda9167
Showing 8 changed files with 280 additions and 385 deletions.
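
Note on client construction: the createEgoscaleClient helper called in setup.go below is defined outside the files shown in this diff. A minimal sketch of what such a constructor typically looks like with egoscale v3 (the credentials sub-package and the EXOSCALE_API_KEY/EXOSCALE_API_SECRET variable names are assumptions, not taken from this commit):

```go
package cluster

import (
	"fmt"
	"os"

	exov3 "github.com/exoscale/egoscale/v3"
	"github.com/exoscale/egoscale/v3/credentials"
)

// createEgoscaleClient builds an egoscale v3 client from static API
// credentials. Illustrative sketch only; the commit's actual helper is not
// part of this diff.
func createEgoscaleClient() (*exov3.Client, error) {
	creds := credentials.NewStaticCredentials(
		os.Getenv("EXOSCALE_API_KEY"), // assumed variable names
		os.Getenv("EXOSCALE_API_SECRET"),
	)

	client, err := exov3.NewClient(creds)
	if err != nil {
		return nil, fmt.Errorf("error creating egoscale v3 client: %w", err)
	}

	return client, nil
}
```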
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -7,6 +7,7 @@
 * go.mk: remove submodule and initialize through make #15
 * integ-tests: use IAMv3 API key #13
 * document and minimize IAM rule policy for CSI #19
+* integ-tests: use egoscale v3 #20
 
 ## 0.29.2
 
11 changes: 3 additions & 8 deletions internal/integ/cluster/cluster.go
@@ -2,9 +2,7 @@
 package cluster
 
 import (
-    "context"
-
-    exov2 "github.com/exoscale/egoscale/v2"
+    exov3 "github.com/exoscale/egoscale/v3"
     "github.com/exoscale/exoscale/csi-driver/internal/integ/k8s"
 )
 
@@ -17,13 +15,10 @@ func Get() *Cluster {
 }
 
 type Cluster struct {
-    exoV2Context       context.Context
-    exoV2ContextCancel context.CancelFunc
-
     Name        string
-    ID          string
+    ID          exov3.UUID
     K8s         *k8s.K8S
-    Ego         *exov2.Client
+    Ego         *exov3.Client
     APIKeyName  string
     APIRoleName string
 }
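
With the exoV2Context fields gone from the struct, a context is now passed to every client call instead of being stored on Cluster. A hypothetical caller, using only identifiers that appear in this diff:

```go
package main

import (
	"context"
	"log"

	"github.com/exoscale/exoscale/csi-driver/internal/integ/cluster"
)

func main() {
	// The context is supplied per call rather than kept on the Cluster
	// struct as exoV2Context was with egoscale v2.
	ctx := context.Background()

	c := cluster.Get()
	versions, err := c.Ego.ListSKSClusterVersions(ctx)
	if err != nil {
		log.Fatalf("listing SKS versions: %v", err)
	}

	log.Printf("API returned %d SKS versions", len(versions.SKSClusterVersions))
}
```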
108 changes: 58 additions & 50 deletions internal/integ/cluster/setup.go
@@ -1,85 +1,93 @@
 package cluster
 
 import (
+    "context"
     "flag"
     "fmt"
     "log/slog"
     "os"
 
     "github.com/exoscale/exoscale/csi-driver/internal/integ/flags"
 
-    exov2 "github.com/exoscale/egoscale/v2"
+    exov3 "github.com/exoscale/egoscale/v3"
 )
 
-func (c *Cluster) getLatestSKSVersion() (string, error) {
-    versions, err := c.Ego.ListSKSClusterVersions(c.exoV2Context)
+func (c *Cluster) getLatestSKSVersion(ctx context.Context) (string, error) {
+    versions, err := c.Ego.ListSKSClusterVersions(ctx)
     if err != nil {
         return "", fmt.Errorf("error retrieving SKS versions: %w", err)
     }
 
-    if len(versions) == 0 {
+    if len(versions.SKSClusterVersions) == 0 {
         return "", fmt.Errorf("no SKS version returned by the API")
     }
 
-    return versions[0], nil
+    return versions.SKSClusterVersions[0], nil
 }
 
-func (c *Cluster) provisionSKSCluster(zone string) error {
+func (c *Cluster) getInstanceType(ctx context.Context, family, size string) (*exov3.InstanceType, error) {
+    instanceTypes, err := c.Ego.ListInstanceTypes(ctx)
+    if err != nil {
+        return nil, err
+    }
+
+    for _, instanceType := range instanceTypes.InstanceTypes {
+        if instanceType.Family == exov3.InstanceTypeFamilyStandard && instanceType.Size == exov3.InstanceTypeSizeMedium {
+            return c.Ego.GetInstanceType(ctx, instanceType.ID)
+        }
+    }
+
+    return nil, fmt.Errorf("unable to find instance type %s.%s", family, size)
+}
+
+func (c *Cluster) provisionSKSCluster(ctx context.Context, zone string) error {
     // do nothing if cluster exists
-    _, err := c.getCluster()
+    _, err := c.getCluster(ctx)
     if err == nil {
         return nil
     }
 
-    latestSKSVersion, err := c.getLatestSKSVersion()
+    latestSKSVersion, err := c.getLatestSKSVersion(ctx)
     if err != nil {
         return err
     }
 
     // intance type must be at least standard.medium for block storage volume attachment to work
-    instanceType, err := c.Ego.FindInstanceType(c.exoV2Context, zone, "standard.medium")
+    instanceType, err := c.getInstanceType(ctx, "standard", "medium")
     if err != nil {
         return err
     }
 
-    nodepool := exov2.SKSNodepool{
-        Name:           ptr(c.Name + "-nodepool"),
-        DiskSize:       ptr(int64(20)),
-        Size:           ptr(int64(2)),
-        InstancePrefix: ptr("pool"),
-        InstanceTypeID: instanceType.ID,
-    }
-
-    sksCluster := &exov2.SKSCluster{
-        AddOns: &[]string{
+    // TODO(sauterp) remove once the CCM is no longer necessary for the CSI.
+    op, err := c.Ego.CreateSKSCluster(ctx, exov3.CreateSKSClusterRequest{
+        Addons: []string{
             "exoscale-cloud-controller",
         },
-        CNI:          ptr("calico"),
-        Description:  ptr("This cluster was created to test the exoscale CSI driver in SKS."),
-        Name:         ptr(c.Name),
-        Nodepools: []*exov2.SKSNodepool{
-            ptr(nodepool),
-        },
-        ServiceLevel: ptr("pro"),
-        Version:      ptr(latestSKSVersion),
-        Zone:         ptr(zone),
-    }
-
-    newCluster, err := c.Ego.CreateSKSCluster(c.exoV2Context, zone, sksCluster)
+        Cni:         "calico",
+        Description: "This cluster was created to test the exoscale CSI driver in SKS.",
+        Name:        c.Name,
+        Level:       exov3.CreateSKSClusterRequestLevelPro,
+        Version:     latestSKSVersion,
+    })
+    newClusterID, err := c.awaitID(ctx, op, err)
     if err != nil {
         return err
     }
 
-    c.ID = *newCluster.ID
-    slog.Info("successfully created cluster", "clusterID", c.ID)
+    c.ID = newClusterID
 
-    _, err = c.Ego.CreateSKSNodepool(c.exoV2Context, zone, newCluster, &nodepool)
-    if err != nil {
+    op, err = c.Ego.CreateSKSNodepool(ctx, newClusterID, exov3.CreateSKSNodepoolRequest{
+        Name:           c.Name + "-nodepool",
+        DiskSize:       int64(20),
+        Size:           int64(2),
+        InstancePrefix: "pool",
+        InstanceType:   instanceType,
+    })
+    if err = c.awaitSuccess(ctx, op, err); err != nil {
         // this can error even when the nodepool is successfully created
         // it's probably a bug, so we're not returning the error
         slog.Warn("error creating nodepool", "err", err)
     }
+    slog.Info("successfully created cluster", "clusterID", c.ID)
 
     return nil
 }
@@ -92,27 +100,25 @@ func exitApplication(msg string, err error) {
     os.Exit(1)
 }
 
-func ConfigureCluster(createCluster bool, name, zone string) (*Cluster, error) {
-    v2Client, ctx, ctxCancel, err := createV2ClientAndContext()
+func ConfigureCluster(ctx context.Context, createCluster bool, name, zone string) (*Cluster, error) {
+    client, err := createEgoscaleClient()
     if err != nil {
-        return nil, fmt.Errorf("error creating egoscale v2 client: %w", err)
+        return nil, fmt.Errorf("error creating egoscale v3 client: %w", err)
     }
 
     cluster := &Cluster{
-        Ego:                v2Client,
-        Name:               name,
-        exoV2Context:       ctx,
-        exoV2ContextCancel: ctxCancel,
+        Ego:  client,
+        Name: name,
     }
 
     if createCluster {
-        err = cluster.provisionSKSCluster(zone)
+        err = cluster.provisionSKSCluster(ctx, zone)
         if err != nil {
             return nil, fmt.Errorf("error creating SKS cluster: %w", err)
         }
     }
 
-    id, err := cluster.getClusterID()
+    id, err := cluster.getClusterID(ctx)
     if err != nil {
         return nil, fmt.Errorf("error getting cluster ID: %w", err)
     }
@@ -121,7 +127,7 @@ func ConfigureCluster(createCluster bool, name, zone string) (*Cluster, error) {
     cluster.APIKeyName = apiKeyPrefix + cluster.Name
     cluster.APIRoleName = cluster.APIKeyName + "-role"
 
-    k, err := cluster.getK8sClients()
+    k, err := cluster.getK8sClients(ctx)
     if err != nil {
         return nil, fmt.Errorf("error initializing k8s clients: %w", err)
     }
@@ -132,30 +138,32 @@ func ConfigureCluster(createCluster bool, name, zone string) (*Cluster, error) {
 }
 
 func Setup() error {
+    ctx := context.Background()
+
     if err := flags.ValidateFlags(); err != nil {
         exitApplication("invalid flags", err)
 
         return err
     }
 
     var err error
-    testCluster, err = ConfigureCluster(*flags.CreateCluster, *flags.ClusterName, *flags.Zone)
+    testCluster, err = ConfigureCluster(ctx, *flags.CreateCluster, *flags.ClusterName, *flags.Zone)
     if err != nil {
         return err
     }
 
     calicoControllerName := "calico-kube-controllers"
-    if err := testCluster.awaitDeploymentReadiness(calicoControllerName); err != nil {
+    if err := testCluster.awaitDeploymentReadiness(ctx, calicoControllerName); err != nil {
        slog.Warn("error while awaiting", "deployment", calicoControllerName, "error", err)
     }
 
     calicoNodeName := "calico-node"
-    if err := testCluster.awaitDaemonSetReadiness(calicoNodeName); err != nil {
+    if err := testCluster.awaitDaemonSetReadiness(ctx, calicoNodeName); err != nil {
        slog.Warn("error while awaiting", "DaemonSet", calicoNodeName, "error", err)
     }
 
     if !*flags.DontApplyCSI {
-        if err := testCluster.applyCSI(); err != nil {
+        if err := testCluster.applyCSI(ctx); err != nil {
             return fmt.Errorf("error applying CSI: %w", err)
         }
     }
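
The key difference from the v2 calls above is that mutating egoscale v3 calls return an operation that must be polled. The awaitID and awaitSuccess helpers introduced in teardown.go below wrap that pattern; a condensed, self-contained sketch of what they do for cluster creation, mirroring only the calls visible in this diff:

```go
package cluster

import (
	"context"
	"fmt"

	exov3 "github.com/exoscale/egoscale/v3"
)

// createClusterAndWait shows the egoscale v3 async-operation pattern used by
// the new setup code: the create call returns an *exov3.Operation, Wait polls
// it until OperationStateSuccess, and the new resource ID is read from the
// finished operation's Reference. Sketch only; the real code uses awaitID.
func createClusterAndWait(ctx context.Context, client *exov3.Client, req exov3.CreateSKSClusterRequest) (exov3.UUID, error) {
	op, err := client.CreateSKSCluster(ctx, req)
	if err != nil {
		return "", err
	}

	finishedOp, err := client.Wait(ctx, op, exov3.OperationStateSuccess)
	if err != nil {
		return "", err
	}

	if finishedOp.Reference == nil {
		return "", fmt.Errorf("operation finished without a reference ID")
	}

	return finishedOp.Reference.ID, nil
}
```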
50 changes: 38 additions & 12 deletions internal/integ/cluster/teardown.go
@@ -1,63 +1,89 @@
 package cluster
 
 import (
+    "context"
     "fmt"
     "log/slog"
 
     "github.com/exoscale/exoscale/csi-driver/internal/integ/flags"
+
+    exov3 "github.com/exoscale/egoscale/v3"
 )
 
-func (c *Cluster) tearDownCluster() error {
-    id, err := c.getClusterID()
+func (c *Cluster) tearDownCluster(ctx context.Context) error {
+    id, err := c.getClusterID(ctx)
     if err != nil {
         return fmt.Errorf("error getting cluster ID: %w", err)
     }
 
-    cluster, err := c.Ego.GetSKSCluster(c.exoV2Context, *flags.Zone, id)
+    cluster, err := c.Ego.GetSKSCluster(ctx, id)
     if err != nil {
         return err
     }
 
     if len(cluster.Nodepools) > 0 {
-        if err := c.Ego.DeleteSKSNodepool(c.exoV2Context, *flags.Zone, cluster, cluster.Nodepools[0]); err != nil {
+        op, err := c.Ego.DeleteSKSNodepool(ctx, cluster.ID, cluster.Nodepools[0].ID)
+        if err := c.awaitSuccess(ctx, op, err); err != nil {
             return fmt.Errorf("error deleting nodepool: %w", err)
         }
     }
 
-    return c.Ego.DeleteSKSCluster(c.exoV2Context, *flags.Zone, cluster)
+    op, err := c.Ego.DeleteSKSCluster(ctx, cluster.ID)
+    return c.awaitSuccess(ctx, op, err)
 }
 
+func (c *Cluster) awaitID(ctx context.Context, op *exov3.Operation, err error) (exov3.UUID, error) {
+    if err != nil {
+        return "", err
+    }
+
+    finishedOP, err := c.Ego.Wait(ctx, op, exov3.OperationStateSuccess)
+    if err != nil {
+        return "", err
+    }
+
+    if finishedOP.Reference != nil {
+        return finishedOP.Reference.ID, nil
+    }
+
+    return "", nil
+}
+
+func (c *Cluster) awaitSuccess(ctx context.Context, op *exov3.Operation, err error) error {
+    _, err = c.awaitID(ctx, op, err)
+    return err
+}
+
 func (c *Cluster) TearDown() error {
+    ctx := context.Background()
     if *flags.TearDownCSI {
-        if err := c.tearDownCSI(); err != nil {
+        if err := c.tearDownCSI(ctx); err != nil {
             return err
         }
     }
 
     if *flags.TearDownCluster {
-        if err := c.tearDownCluster(); err != nil {
+        if err := c.tearDownCluster(ctx); err != nil {
             return err
         }
     }
 
-    c.exoV2ContextCancel()
-
     return nil
 }
 
-func (c *Cluster) tearDownCSI() error {
+func (c *Cluster) tearDownCSI(ctx context.Context) error {
     var finalErr error = nil
 
     for _, manifestPath := range allManifests {
-        err := c.K8s.DeleteManifest(c.exoV2Context, manifestDir+manifestPath)
+        err := c.K8s.DeleteManifest(ctx, manifestDir+manifestPath)
         if err != nil {
             slog.Error("failed to delete manifest", "manifest", manifestPath, "err", err)
 
             finalErr = fmt.Errorf("errors while deleting manifests: %w", err)
         }
     }
 
-    err := c.deleteAPIKeyAndRole()
+    err := c.deleteAPIKeyAndRole(ctx)
     if err != nil {
         slog.Error("failed to clean up CSI API key and role", "err", err)
 
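
The awaitID and awaitSuccess helpers above collapse the request error and the operation wait into a single check, which is why each delete call in tearDownCluster fits in two lines. An illustrative standalone use (the deleteNodepool wrapper below is hypothetical, not part of this commit):

```go
package cluster

import (
	"context"
	"fmt"

	exov3 "github.com/exoscale/egoscale/v3"
)

// deleteNodepool is a hypothetical wrapper showing the helper in isolation:
// DeleteSKSNodepool returns an operation, and awaitSuccess both propagates
// the request error and waits for the operation to reach the success state.
func (c *Cluster) deleteNodepool(ctx context.Context, clusterID, nodepoolID exov3.UUID) error {
	op, err := c.Ego.DeleteSKSNodepool(ctx, clusterID, nodepoolID)
	if err := c.awaitSuccess(ctx, op, err); err != nil {
		return fmt.Errorf("error deleting nodepool %s: %w", nodepoolID, err)
	}

	return nil
}
```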
(The remaining 4 of the 8 changed files are not shown above.)
