Create management cluster using controller behind feature flag
mitalipaygude committed Jan 16, 2024
1 parent 82d0a0a commit 2fe5f77
Showing 36 changed files with 1,637 additions and 76 deletions.
17 changes: 17 additions & 0 deletions cmd/eksctl-anywhere/cmd/createcluster.go
@@ -23,6 +23,7 @@ import (
"github.com/aws/eks-anywhere/pkg/validations/createvalidations"
"github.com/aws/eks-anywhere/pkg/workflow/management"
"github.com/aws/eks-anywhere/pkg/workflows"
newManagementWorkflow "github.com/aws/eks-anywhere/pkg/workflows/management"
"github.com/aws/eks-anywhere/pkg/workflows/workload"
)

@@ -269,6 +270,22 @@ func (cc *createClusterOptions) createCluster(cmd *cobra.Command, _ []string) er
)
err = createWorkloadCluster.Run(ctx, clusterSpec, createValidations)

} else if clusterSpec.Cluster.IsSelfManaged() && features.UseControllerViaCLIWorkflow().IsActive() {

logger.Info("Using the new controller-based workflow for management cluster create")

createMgmtCluster := newManagementWorkflow.NewCreate(
deps.Bootstrapper,
deps.Provider,
deps.ClusterManager,
deps.GitOpsFlux,
deps.Writer,
deps.EksdInstaller,
deps.PackageInstaller,
deps.ClusterApplier,
)

err = createMgmtCluster.Run(ctx, clusterSpec, createValidations)

} else {
err = createCluster.Run(ctx, clusterSpec, createValidations, cc.forceClean)
}
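For context, eks-anywhere feature flags are generally gated on environment variables read through the features package. The exact gate behind features.UseControllerViaCLIWorkflow() is not shown in this diff; the snippet below is only a minimal sketch of how such an env-var-backed flag can be modeled, and the variable name EKSA_USE_CONTROLLER_FOR_MGMT_CLUSTER_CREATE is a placeholder, not the flag the CLI actually reads.

// Minimal sketch of an env-var-backed feature flag, similar in shape to the
// gate used by features.UseControllerViaCLIWorkflow(). The env var name is
// hypothetical.
package main

import (
	"fmt"
	"os"
	"strconv"
)

// Feature pairs a human-readable name with the check that decides whether it is active.
type Feature struct {
	Name     string
	IsActive func() bool
}

// envVarFlag returns a check that is true when the named environment variable
// parses as a boolean true value (e.g. "true" or "1").
func envVarFlag(envVar string) func() bool {
	return func() bool {
		active, _ := strconv.ParseBool(os.Getenv(envVar))
		return active
	}
}

// useControllerViaCLIWorkflow mirrors the shape of the real gate.
func useControllerViaCLIWorkflow() Feature {
	return Feature{
		Name:     "Create management cluster via controller from the CLI",
		IsActive: envVarFlag("EKSA_USE_CONTROLLER_FOR_MGMT_CLUSTER_CREATE"),
	}
}

func main() {
	if useControllerViaCLIWorkflow().IsActive() {
		fmt.Println("new controller-based workflow enabled")
	} else {
		fmt.Println("falling back to the legacy create workflow")
	}
}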
56 changes: 56 additions & 0 deletions pkg/clustermanager/cluster_manager.go
@@ -450,6 +450,46 @@ func (c *ClusterManager) CreateWorkloadCluster(ctx context.Context, managementCl
return workloadCluster, nil
}

// GetWorkloadCluster gets workload cluster.
func (c *ClusterManager) GetWorkloadCluster(ctx context.Context, managementCluster *types.Cluster, clusterSpec *cluster.Spec, provider providers.Provider) (*types.Cluster, error) {
clusterName := clusterSpec.Cluster.Name

workloadCluster := &types.Cluster{
Name: clusterName,
}

logger.V(3).Info("Waiting for workload kubeconfig generation", "cluster", clusterName)

// Use a buffer to cache the kubeconfig.
var buf bytes.Buffer

if err := c.getWorkloadClusterKubeconfig(ctx, clusterName, managementCluster, &buf); err != nil {
return nil, fmt.Errorf("waiting for workload kubeconfig: %v", err)
}


rawKubeconfig := buf.Bytes()

// The Docker provider wants to update the kubeconfig to patch the server address before
// we write it to disk. This is to ensure we can communicate with the cluster even when
// hosted inside a Docker Desktop VM.
if err := provider.UpdateKubeConfig(&rawKubeconfig, clusterName); err != nil {
return nil, err
}


kubeconfigFile, err := c.writer.Write(
kubeconfig.FormatWorkloadClusterKubeconfigFilename(clusterName),
rawKubeconfig,
filewriter.PersistentFile,
filewriter.Permission0600,
)
if err != nil {
return nil, fmt.Errorf("writing workload kubeconfig: %v", err)
}

workloadCluster.KubeconfigFile = kubeconfigFile

return workloadCluster, nil
}

func (c *ClusterManager) waitUntilControlPlaneAvailable(
ctx context.Context,
clusterSpec *cluster.Spec,
@@ -1166,6 +1206,22 @@ func (c *ClusterManager) CreateEKSAResources(ctx context.Context, cluster *types
return c.ApplyReleases(ctx, clusterSpec, cluster)
}

// CreateEKSAReleaseBundle applies the EKS-A bundles and release manifests to the cluster.
func (c *ClusterManager) CreateEKSAReleaseBundle(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
if clusterSpec.Cluster.Namespace != "" {
if err := c.clusterClient.CreateNamespaceIfNotPresent(ctx, cluster.KubeconfigFile, clusterSpec.Cluster.Namespace); err != nil {
return err
}

}

clusterSpec.Cluster.AddManagedByCLIAnnotation()

if err := c.ApplyBundles(ctx, clusterSpec, cluster); err != nil {
return err
}
return c.ApplyReleases(ctx, clusterSpec, cluster)

}

func (c *ClusterManager) ApplyBundles(ctx context.Context, clusterSpec *cluster.Spec, cluster *types.Cluster) error {
bundleObj, err := yaml.Marshal(clusterSpec.Bundles)
if err != nil {
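Taken together, the two new ClusterManager methods give a controller-driven create workflow a way to wait for the workload kubeconfig and then seed the new cluster with the EKS-A bundles and release manifests. The sketch below illustrates how a workflow task might sequence them; the task shape and the trimmed interfaces are illustrative stand-ins, not the actual types in pkg/workflows/management.

// Illustrative sketch of how a create-workflow task could sequence the new
// ClusterManager methods. The types below are trimmed stand-ins for the real
// dependencies, not the workflow task added by this commit.
package management

import (
	"context"
	"fmt"
)

// Cluster and Spec stand in for types.Cluster and cluster.Spec.
type Cluster struct{ Name, KubeconfigFile string }
type Spec struct{ ClusterName string }

// ClusterManager captures just the two methods added in this commit,
// with simplified signatures.
type ClusterManager interface {
	GetWorkloadCluster(ctx context.Context, mgmt *Cluster, spec *Spec) (*Cluster, error)
	CreateEKSAReleaseBundle(ctx context.Context, cluster *Cluster, spec *Spec) error
}

// installEksaComponents waits for the new cluster's kubeconfig, then applies
// the EKS-A bundles and release manifests to it.
func installEksaComponents(ctx context.Context, cm ClusterManager, bootstrap *Cluster, spec *Spec) (*Cluster, error) {
	workload, err := cm.GetWorkloadCluster(ctx, bootstrap, spec)
	if err != nil {
		return nil, fmt.Errorf("getting workload cluster %s: %w", spec.ClusterName, err)
	}
	if err := cm.CreateEKSAReleaseBundle(ctx, workload, spec); err != nil {
		return nil, fmt.Errorf("applying EKS-A release bundle to %s: %w", workload.Name, err)
	}
	return workload, nil
}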
25 changes: 25 additions & 0 deletions pkg/clustermanager/cluster_manager_test.go
@@ -345,6 +345,31 @@ func TestClusterManagerCreateWorkloadClusterSuccess(t *testing.T) {
}
}

func TestClusterManagerGetWorkloadClusterSuccess(t *testing.T) {
ctx := context.Background()
clusterName := "cluster-name"
clusterSpec := test.NewClusterSpec(func(s *cluster.Spec) {
s.Cluster.Name = clusterName
s.Cluster.Spec.ControlPlaneConfiguration.Count = 3
s.Cluster.Spec.WorkerNodeGroupConfigurations[0].Count = ptr.Int(3)
})

mgmtCluster := &types.Cluster{
Name: clusterName,
KubeconfigFile: "mgmt-kubeconfig",
}

c, m := newClusterManager(t)
kubeconfig := []byte("content")
m.client.EXPECT().GetWorkloadKubeconfig(ctx, clusterName, mgmtCluster).Return(kubeconfig, nil)
m.provider.EXPECT().UpdateKubeConfig(&kubeconfig, clusterName)
m.writer.EXPECT().Write(clusterName+"-eks-a-cluster.kubeconfig", gomock.Any(), gomock.Not(gomock.Nil()))

if _, err := c.GetWorkloadCluster(ctx, mgmtCluster, clusterSpec, m.provider); err != nil {
t.Errorf("ClusterManager.GetWorkloadCluster() error = %v, wantErr nil", err)
}
}

func TestClusterManagerCreateWorkloadClusterErrorGetKubeconfig(t *testing.T) {
tt := newTest(t)
tt.clusterSpec.Cluster.Name = tt.clusterName
2 changes: 1 addition & 1 deletion pkg/providers/cloudstack/cloudstack.go
@@ -74,7 +74,7 @@ func (p *cloudstackProvider) PreCAPIInstallOnBootstrap(ctx context.Context, clus
return p.UpdateSecrets(ctx, cluster, nil)
}

func (p *cloudstackProvider) PostBootstrapSetup(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error {
func (p *cloudstackProvider) PostCAPIInstallSetup(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error {
return nil
}

20 changes: 20 additions & 0 deletions pkg/providers/cloudstack/cloudstack_test.go
@@ -952,6 +952,26 @@ func TestPreCAPIInstallOnBootstrap(t *testing.T) {
}
}

func TestPostCAPIInstallSetup(t *testing.T) {
mockCtrl := gomock.NewController(t)
ctx := context.Background()
kubectl := mocks.NewMockProviderKubectlClient(mockCtrl)
cluster := &types.Cluster{}
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
clusterConfig := &v1alpha1.Cluster{}
datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename)
validator := givenWildcardValidator(mockCtrl, clusterSpec)
provider := newProviderWithKubectl(t, datacenterConfig, clusterSpec.Cluster, kubectl, validator)

if provider == nil {
t.Fatalf("provider object is nil")
}

if err := provider.PostCAPIInstallSetup(ctx, clusterConfig, cluster); err != nil {
t.Fatalf("provider.PostCAPIInstallSetup() err = %v, want err = nil", err)
}
}

func TestSetupAndValidateSSHAuthorizedKeyEmptyCP(t *testing.T) {
ctx := context.Background()
clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename)
2 changes: 1 addition & 1 deletion pkg/providers/docker/docker.go
@@ -83,7 +83,7 @@ func (p *provider) PreCAPIInstallOnBootstrap(ctx context.Context, cluster *types
return nil
}

func (p *provider) PostBootstrapSetup(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error {
func (p *provider) PostCAPIInstallSetup(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error {
return nil
}

18 changes: 18 additions & 0 deletions pkg/providers/docker/docker_test.go
@@ -1011,3 +1011,21 @@ func TestTemplateBuilder_CertSANs(t *testing.T) {
test.AssertContentToFile(t, string(data), tc.Output)
}
}

func TestPostCAPIInstallSetup(t *testing.T) {
mockCtrl := gomock.NewController(t)
ctx := context.Background()
client := dockerMocks.NewMockProviderClient(mockCtrl)
kubectl := dockerMocks.NewMockProviderKubectlClient(mockCtrl)
provider := docker.NewProvider(&v1alpha1.DockerDatacenterConfig{}, client, kubectl, test.FakeNow)
clusterObj := &types.Cluster{Name: "node"}

if provider == nil {
t.Fatalf("provider object is nil")
}

err := provider.PostCAPIInstallSetup(ctx, &v1alpha1.Cluster{}, clusterObj)
if err != nil {
t.Fatalf("failed to setup PostCAPIInstallSetup: %v", err)
}
}
24 changes: 12 additions & 12 deletions pkg/providers/mocks/providers.go

Generated file; diff not rendered by default.

3 changes: 2 additions & 1 deletion pkg/providers/nutanix/provider.go
@@ -115,7 +115,8 @@ func (p *Provider) BootstrapSetup(ctx context.Context, clusterConfig *v1alpha1.C
return nil
}

func (p *Provider) PostBootstrapSetup(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error {
// PostCAPIInstallSetup defines steps to carry out after the CAPI installation.
func (p *Provider) PostCAPIInstallSetup(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error {
// TODO(nutanix): figure out if we need something else here
return nil
}
2 changes: 1 addition & 1 deletion pkg/providers/nutanix/provider_test.go
@@ -138,7 +138,7 @@ func TestNutanixProviderBootstrapSetup(t *testing.T) {

func TestNutanixProviderPostBootstrapSetup(t *testing.T) {
provider := testDefaultNutanixProvider(t)
err := provider.PostBootstrapSetup(context.Background(), provider.clusterConfig, &types.Cluster{Name: "eksa-unit-test"})
err := provider.PostCAPIInstallSetup(context.Background(), provider.clusterConfig, &types.Cluster{Name: "eksa-unit-test"})
assert.NoError(t, err)
}

2 changes: 1 addition & 1 deletion pkg/providers/provider.go
@@ -19,7 +19,7 @@ type Provider interface {
GenerateCAPISpecForUpgrade(ctx context.Context, bootstrapCluster, workloadCluster *types.Cluster, currrentSpec, newClusterSpec *cluster.Spec) (controlPlaneSpec, workersSpec []byte, err error)
// PreCAPIInstallOnBootstrap is called after the bootstrap cluster is setup but before CAPI resources are installed on it. This allows us to do provider specific configuration on the bootstrap cluster.
PreCAPIInstallOnBootstrap(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error
PostBootstrapSetup(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error
PostCAPIInstallSetup(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error
PostBootstrapDeleteForUpgrade(ctx context.Context, cluster *types.Cluster) error
PostBootstrapSetupUpgrade(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error
// PostWorkloadInit is called after the workload cluster is created and initialized with a CNI. This allows us to do provider specific configuration on the workload cluster.
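The rename from PostBootstrapSetup to PostCAPIInstallSetup makes the hook's intended timing explicit: it runs after CAPI has been installed on the bootstrap cluster, not merely after the bootstrap cluster comes up. Most providers in this diff keep the hook as a no-op, while Tinkerbell uses it to apply its hardware catalogue. The sketch below shows how a hypothetical provider that needs CAPI CRDs to exist before registering resources could implement the hook; the applier interface and types are illustrative, not the real provider client API.

// Hypothetical provider showing where PostCAPIInstallSetup fits: work that
// needs the CAPI CRDs already installed on the bootstrap cluster. The
// applier interface is illustrative, not the real ProviderKubectlClient.
package exampleprovider

import "context"

type Cluster struct{ Name, KubeconfigFile string }
type ClusterConfig struct{ Name string }

// resourceApplier stands in for whatever client the provider uses to apply
// manifests against the bootstrap cluster.
type resourceApplier interface {
	ApplyManifest(ctx context.Context, kubeconfig string, manifest []byte) error
}

type Provider struct {
	applier          resourceApplier
	pendingManifests [][]byte // provider-specific objects gathered earlier, e.g. a hardware inventory
}

// PostCAPIInstallSetup runs after CAPI is installed on the bootstrap cluster,
// so provider CRs that depend on CAPI types can be safely applied here.
func (p *Provider) PostCAPIInstallSetup(ctx context.Context, _ *ClusterConfig, cluster *Cluster) error {
	for _, m := range p.pendingManifests {
		if err := p.applier.ApplyManifest(ctx, cluster.KubeconfigFile, m); err != nil {
			return err
		}
	}
	return nil
}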
3 changes: 2 additions & 1 deletion pkg/providers/snow/snow.go
@@ -164,7 +164,8 @@ func (p *SnowProvider) PreCAPIInstallOnBootstrap(ctx context.Context, cluster *t
return nil
}

func (p *SnowProvider) PostBootstrapSetup(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error {
// PostCAPIInstallSetup defines steps to carry out after the CAPI installation.
func (p *SnowProvider) PostCAPIInstallSetup(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error {
return nil
}

6 changes: 6 additions & 0 deletions pkg/providers/snow/snow_test.go
@@ -599,6 +599,12 @@ func TestSetupAndValidateCreateClusterUnsupportedInstanceTypeError(t *testing.T)
tt.Expect(err).To(MatchError(ContainSubstring("not supported in device [1.2.3.4]")))
}

func TestPostCAPIInstallSetup(t *testing.T) {
tt := newSnowTest(t)
err := tt.provider.PostCAPIInstallSetup(tt.ctx, test.Cluster(), tt.cluster)
tt.Expect(err).To(BeNil())
}

func TestSetupAndValidateCreateClusterInstanceTypeVCPUError(t *testing.T) {
tt := newSnowTest(t)
instanceTypes := []aws.EC2InstanceType{
5 changes: 3 additions & 2 deletions pkg/providers/tinkerbell/create.go
@@ -46,7 +46,8 @@ func (p *Provider) PreCAPIInstallOnBootstrap(ctx context.Context, cluster *types
return nil
}

func (p *Provider) PostBootstrapSetup(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error {
// PostCAPIInstallSetup defines steps to carry out after the CAPI installation.
func (p *Provider) PostCAPIInstallSetup(ctx context.Context, clusterConfig *v1alpha1.Cluster, cluster *types.Cluster) error {
return p.applyHardware(ctx, cluster)
}

@@ -231,7 +232,7 @@ func (p *Provider) readCSVToCatalogue() error {
machineValidator := hardware.NewDefaultMachineValidator()

// Translate all Machine instances from the p.machines source into Kubernetes object types.
// The PostBootstrapSetup() call invoked elsewhere in the program serializes the catalogue
// The PostCAPIInstallSetup() call invoked elsewhere in the program serializes the catalogue
// and submits it to the cluster.
machines, err := hardware.NewNormalizedCSVReaderFromFile(p.hardwareCSVFile, p.BMCOptions)
if err != nil {
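The comment above describes Tinkerbell's two-phase pattern: the hardware CSV is normalized into a machine catalogue up front, and PostCAPIInstallSetup() later serializes that catalogue and submits it to the cluster once CAPI is in place. A stripped-down sketch of the pattern, using stand-in types rather than the real pkg/providers/tinkerbell/hardware API, could look like this:

// Stripped-down sketch of the catalogue pattern described above: parse rows
// into machines eagerly, then submit the whole catalogue in a later phase.
// Types and methods are stand-ins, not the real hardware package API.
package main

import (
	"context"
	"encoding/csv"
	"fmt"
	"strings"
)

type Machine struct{ Hostname, MACAddress string }

type Catalogue struct{ machines []Machine }

func (c *Catalogue) Insert(m Machine) { c.machines = append(c.machines, m) }

// readCSVToCatalogue fills the catalogue from CSV rows of "hostname,mac".
func readCSVToCatalogue(r *csv.Reader, c *Catalogue) error {
	rows, err := r.ReadAll()
	if err != nil {
		return fmt.Errorf("reading hardware csv: %w", err)
	}
	for _, row := range rows {
		if len(row) < 2 {
			return fmt.Errorf("malformed row %v", row)
		}
		c.Insert(Machine{Hostname: row[0], MACAddress: row[1]})
	}
	return nil
}

// applyHardware is where a PostCAPIInstallSetup-style hook would serialize
// the catalogue and submit it; printing stands in for the apply call.
func applyHardware(_ context.Context, c *Catalogue) error {
	for _, m := range c.machines {
		fmt.Printf("applying Hardware object for %s (%s)\n", m.Hostname, m.MACAddress)
	}
	return nil
}

func main() {
	cat := &Catalogue{}
	src := csv.NewReader(strings.NewReader("worker-0,aa:bb:cc:dd:ee:01\nworker-1,aa:bb:cc:dd:ee:02\n"))
	if err := readCSVToCatalogue(src, cat); err != nil {
		panic(err)
	}
	_ = applyHardware(context.Background(), cat)
}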