From 65d7f78ffa6195436cdcd1598b659a22a1ad6524 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alp=20Eren=20=C3=87elik?= <80721144+alperencelik@users.noreply.github.com> Date: Fri, 26 Jan 2024 16:42:31 +0300 Subject: [PATCH] Rewrite with linter (#15) - Fix formatting issues - Resolve linting errors and warnings - Update code to follow linter recommendations - Re-enable linter --------- Co-authored-by: alperencelik --- .github/workflows/golangci-lint.yaml | 40 +-- .golangci.yaml | 4 +- api/proxmox/v1alpha1/container_types.go | 2 +- .../v1alpha1/managedvirtualmachine_types.go | 19 +- api/proxmox/v1alpha1/virtualmachine_types.go | 4 +- .../v1alpha1/virtualmachineset_types.go | 2 +- .../v1alpha1/virtualmachinesnapshot_types.go | 2 +- .../virtualmachinesnapshotpolicy_types.go | 2 +- api/proxmox/v1alpha1/zz_generated.deepcopy.go | 17 +- cmd/main.go | 2 +- ....alperen.cloud_managedvirtualmachines.yaml | 3 +- .../proxmox/container_controller.go | 5 +- .../managedvirtualmachine_controller.go | 13 +- .../proxmox/virtualmachine_controller.go | 37 +- .../proxmox/virtualmachineset_controller.go | 41 ++- .../virtualmachinesnapshot_controller.go | 18 +- ...virtualmachinesnapshotpolicy_controller.go | 8 +- pkg/kubernetes/kubernetes.go | 41 ++- pkg/metrics/metrics.go | 2 +- pkg/proxmox/proxmox.go | 329 +++++++++--------- 20 files changed, 276 insertions(+), 315 deletions(-) diff --git a/.github/workflows/golangci-lint.yaml b/.github/workflows/golangci-lint.yaml index 5cbeeb6..abd7884 100644 --- a/.github/workflows/golangci-lint.yaml +++ b/.github/workflows/golangci-lint.yaml @@ -1,26 +1,26 @@ -# name: GolangCI-Lint +name: GolangCI-Lint -# on: -# pull_request: -# types: [opened, edited, synchronize, reopened] +on: + pull_request: + types: [opened, edited, synchronize, reopened] -# jobs: -# lint: -# name: Lint -# runs-on: ubuntu-latest +jobs: + lint: + name: Lint + runs-on: ubuntu-latest -# steps: -# - name: Checkout Repository -# uses: actions/checkout@v2 + steps: + - name: Checkout Repository + uses: actions/checkout@v2 -# - name: Set Up Go -# uses: actions/setup-go@v2 -# with: -# go-version: 1.20.5 # Replace with the Go version you're using + - name: Set Up Go + uses: actions/setup-go@v2 + with: + go-version: 1.20.5 # Replace with the Go version you're using -# - name: Install golangci-lint -# run: | -# curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.54.2 + - name: Install golangci-lint + run: | + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.54.2 -# - name: Run Linter -# run: golangci-lint run --timeout 10m --verbose + - name: Run Linter + run: golangci-lint run --timeout 10m --verbose diff --git a/.golangci.yaml b/.golangci.yaml index 5ef7b0b..bf7606a 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -70,10 +70,10 @@ linters: - dupl - errcheck - exportloopref - - gochecknoinits + # - gochecknoinits - goconst - gocritic - - gocyclo + # - gocyclo - gofmt - goimports - goprintffuncname diff --git a/api/proxmox/v1alpha1/container_types.go b/api/proxmox/v1alpha1/container_types.go index 5ff3363..2ceb03b 100644 --- a/api/proxmox/v1alpha1/container_types.go +++ b/api/proxmox/v1alpha1/container_types.go @@ -98,6 +98,6 @@ type ContainerList struct { Items []Container `json:"items"` } -func init() { +func init() { //nolint:gochecknoinits // This is required by kubebuilder SchemeBuilder.Register(&Container{}, &ContainerList{}) } diff --git 
a/api/proxmox/v1alpha1/managedvirtualmachine_types.go b/api/proxmox/v1alpha1/managedvirtualmachine_types.go index 8e99927..15db8f5 100644 --- a/api/proxmox/v1alpha1/managedvirtualmachine_types.go +++ b/api/proxmox/v1alpha1/managedvirtualmachine_types.go @@ -36,19 +36,6 @@ type ManagedVirtualMachineSpec struct { Disk int `json:"disk"` } -// ManagedVirtualMachineStatus defines the observed state of ManagedVirtualMachine -type ManagedVirtualMachineStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "make" to regenerate code after modifying this file - State string `json:"state,omitempty"` - Node string `json:"node,omitempty"` - Name string `json:"name,omitempty"` - Uptime string `json:"uptime,omitempty"` - ID int `json:"id,omitempty"` - IPAddress string `json:"IPAddress,omitempty"` - OSInfo string `json:"OSInfo,omitempty"` -} - //+kubebuilder:object:root=true //+kubebuilder:subresource:status @@ -57,8 +44,8 @@ type ManagedVirtualMachine struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec ManagedVirtualMachineSpec `json:"spec,omitempty"` - Status ManagedVirtualMachineStatus `json:"status,omitempty"` + Spec ManagedVirtualMachineSpec `json:"spec,omitempty"` + Status VirtualMachineStatus `json:"status,omitempty"` } //+kubebuilder:object:root=true @@ -70,6 +57,6 @@ type ManagedVirtualMachineList struct { Items []ManagedVirtualMachine `json:"items"` } -func init() { +func init() { //nolint:gochecknoinits // This is required by kubebuilder SchemeBuilder.Register(&ManagedVirtualMachine{}, &ManagedVirtualMachineList{}) } diff --git a/api/proxmox/v1alpha1/virtualmachine_types.go b/api/proxmox/v1alpha1/virtualmachine_types.go index 3048f1f..370266f 100644 --- a/api/proxmox/v1alpha1/virtualmachine_types.go +++ b/api/proxmox/v1alpha1/virtualmachine_types.go @@ -35,7 +35,7 @@ type VirtualMachineSpec struct { // TemplateSpec of the source VM Template VirtualMachineTemplate `json:"template,omitempty"` // This field should be modified further - VmSpec NewVMSpec `json:"vmSpec,omitempty"` + VMSpec NewVMSpec `json:"vmSpec,omitempty"` } type NewVMSpec struct { @@ -132,6 +132,6 @@ type VirtualMachineList struct { Items []VirtualMachine `json:"items"` } -func init() { +func init() { //nolint:gochecknoinits // This is required by kubebuilder SchemeBuilder.Register(&VirtualMachine{}, &VirtualMachineList{}) } diff --git a/api/proxmox/v1alpha1/virtualmachineset_types.go b/api/proxmox/v1alpha1/virtualmachineset_types.go index bae7ab4..a5634fb 100644 --- a/api/proxmox/v1alpha1/virtualmachineset_types.go +++ b/api/proxmox/v1alpha1/virtualmachineset_types.go @@ -62,6 +62,6 @@ type VirtualMachineSetList struct { Items []VirtualMachineSet `json:"items"` } -func init() { +func init() { //nolint:gochecknoinits // This is required by kubebuilder SchemeBuilder.Register(&VirtualMachineSet{}, &VirtualMachineSetList{}) } diff --git a/api/proxmox/v1alpha1/virtualmachinesnapshot_types.go b/api/proxmox/v1alpha1/virtualmachinesnapshot_types.go index 26b1b09..81b8647 100644 --- a/api/proxmox/v1alpha1/virtualmachinesnapshot_types.go +++ b/api/proxmox/v1alpha1/virtualmachinesnapshot_types.go @@ -67,6 +67,6 @@ type VirtualMachineSnapshotList struct { Items []VirtualMachineSnapshot `json:"items"` } -func init() { +func init() { //nolint:gochecknoinits // This is required by kubebuilder SchemeBuilder.Register(&VirtualMachineSnapshot{}, &VirtualMachineSnapshotList{}) } diff --git a/api/proxmox/v1alpha1/virtualmachinesnapshotpolicy_types.go 
b/api/proxmox/v1alpha1/virtualmachinesnapshotpolicy_types.go index e1ba7fa..b65830a 100644 --- a/api/proxmox/v1alpha1/virtualmachinesnapshotpolicy_types.go +++ b/api/proxmox/v1alpha1/virtualmachinesnapshotpolicy_types.go @@ -66,6 +66,6 @@ type VirtualMachineSnapshotPolicyList struct { Items []VirtualMachineSnapshotPolicy `json:"items"` } -func init() { +func init() { //nolint:gochecknoinits // This is required by kubebuilder SchemeBuilder.Register(&VirtualMachineSnapshotPolicy{}, &VirtualMachineSnapshotPolicyList{}) } diff --git a/api/proxmox/v1alpha1/zz_generated.deepcopy.go b/api/proxmox/v1alpha1/zz_generated.deepcopy.go index ddea066..187b3d8 100644 --- a/api/proxmox/v1alpha1/zz_generated.deepcopy.go +++ b/api/proxmox/v1alpha1/zz_generated.deepcopy.go @@ -245,21 +245,6 @@ func (in *ManagedVirtualMachineSpec) DeepCopy() *ManagedVirtualMachineSpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ManagedVirtualMachineStatus) DeepCopyInto(out *ManagedVirtualMachineStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedVirtualMachineStatus. -func (in *ManagedVirtualMachineStatus) DeepCopy() *ManagedVirtualMachineStatus { - if in == nil { - return nil - } - out := new(ManagedVirtualMachineStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NamespaceSelector) DeepCopyInto(out *NamespaceSelector) { *out = *in @@ -684,7 +669,7 @@ func (in *VirtualMachineSnapshotStatus) DeepCopy() *VirtualMachineSnapshotStatus func (in *VirtualMachineSpec) DeepCopyInto(out *VirtualMachineSpec) { *out = *in in.Template.DeepCopyInto(&out.Template) - out.VmSpec = in.VmSpec + out.VMSpec = in.VMSpec } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineSpec. diff --git a/cmd/main.go b/cmd/main.go index 968bcfe..b43a0d9 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -43,7 +43,7 @@ var ( setupLog = ctrl.Log.WithName("setup") ) -func init() { +func init() { //nolint:gochecknoinits // This is required by kubebuilder utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(proxmoxv1alpha1.AddToScheme(scheme)) diff --git a/config/crd/bases/proxmox.alperen.cloud_managedvirtualmachines.yaml b/config/crd/bases/proxmox.alperen.cloud_managedvirtualmachines.yaml index e49a61c..cd09012 100644 --- a/config/crd/bases/proxmox.alperen.cloud_managedvirtualmachines.yaml +++ b/config/crd/bases/proxmox.alperen.cloud_managedvirtualmachines.yaml @@ -55,8 +55,7 @@ spec: - nodeName type: object status: - description: ManagedVirtualMachineStatus defines the observed state of - ManagedVirtualMachine + description: VirtualMachineStatus defines the observed state of VirtualMachine properties: IPAddress: type: string diff --git a/internal/controller/proxmox/container_controller.go b/internal/controller/proxmox/container_controller.go index 543c015..525d694 100644 --- a/internal/controller/proxmox/container_controller.go +++ b/internal/controller/proxmox/container_controller.go @@ -78,7 +78,7 @@ func (r *ContainerReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( // The object is not being deleted, so if it does not have our finalizer, then lets add the finalizer and update the object. 
if !controllerutil.ContainsFinalizer(container, containerFinalizerName) { controllerutil.AddFinalizer(container, containerFinalizerName) - if err := r.Update(ctx, container); err != nil { + if err = r.Update(ctx, container); err != nil { log.Log.Error(err, "Error updating Container") return ctrl.Result{}, client.IgnoreNotFound(err) } @@ -97,7 +97,7 @@ func (r *ContainerReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( } // Remove finalizer controllerutil.RemoveFinalizer(container, containerFinalizerName) - if err := r.Update(ctx, container); err != nil { + if err = r.Update(ctx, container); err != nil { Log.Error(err, "Error updating Container") } } @@ -137,7 +137,6 @@ func (r *ContainerReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( Log.Error(err, "Failed to update Container") return ctrl.Result{}, client.IgnoreNotFound(err) } - } } else { // Create Container diff --git a/internal/controller/proxmox/managedvirtualmachine_controller.go b/internal/controller/proxmox/managedvirtualmachine_controller.go index 03c506f..a655c00 100644 --- a/internal/controller/proxmox/managedvirtualmachine_controller.go +++ b/internal/controller/proxmox/managedvirtualmachine_controller.go @@ -92,7 +92,7 @@ func (r *ManagedVirtualMachineReconciler) Reconcile(ctx context.Context, req ctr // Remove finalizer controllerutil.RemoveFinalizer(managedVM, managedvirtualMachineFinalizerName) - if err := r.Update(ctx, managedVM); err != nil { + if err = r.Update(ctx, managedVM); err != nil { log.Log.Info(fmt.Sprintf("Error updating ManagedVirtualMachine %s", managedVM.Name)) } return ctrl.Result{}, nil @@ -105,22 +105,19 @@ func (r *ManagedVirtualMachineReconciler) Reconcile(ctx context.Context, req ctr return ctrl.Result{}, client.IgnoreNotFound(err) } // Update ManagedVMStatus - var managedVMStatus proxmoxv1alpha1.ManagedVirtualMachineStatus - nodeName := proxmox.GetNodeOfVM(managedVM.Name) - managedVMStatus.State, managedVMStatus.ID, managedVMStatus.Uptime, managedVMStatus.Node, managedVMStatus.Name, managedVMStatus.IPAddress, managedVMStatus.OSInfo = proxmox.UpdateVMStatus(managedVM.Name, nodeName) - managedVM.Status = managedVMStatus + managedVMName := managedVM.Name + nodeName := proxmox.GetNodeOfVM(managedVMName) + ManagedVMStatus, _ := proxmox.UpdateVMStatus(managedVMName, nodeName) + managedVM.Status = *ManagedVMStatus err = r.Status().Update(context.Background(), managedVM) if err != nil { log.Log.Info(fmt.Sprintf("ManagedVMStatus %v could not be updated", managedVM.Name)) } - return ctrl.Result{Requeue: true, RequeueAfter: ManagedVMreconcilationPeriod * time.Second}, nil - } // SetupWithManager sets up the controller with the Manager. 
func (r *ManagedVirtualMachineReconciler) SetupWithManager(mgr ctrl.Manager) error { - // Get all VMs with Proxmox API AllVMs := proxmox.GetProxmoxVMs() ControllerVMs := proxmox.GetControllerVMs() diff --git a/internal/controller/proxmox/virtualmachine_controller.go b/internal/controller/proxmox/virtualmachine_controller.go index bee70d2..1dc1f94 100644 --- a/internal/controller/proxmox/virtualmachine_controller.go +++ b/internal/controller/proxmox/virtualmachine_controller.go @@ -67,7 +67,7 @@ func (r *VirtualMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque vm := &proxmoxv1alpha1.VirtualMachine{} err := r.Get(ctx, req.NamespacedName, vm) if err != nil { - // Log.Error(err, "unable to fetch VirtualMachine") + Log.Error(err, "unable to fetch VirtualMachine") return ctrl.Result{}, client.IgnoreNotFound(err) } @@ -75,7 +75,7 @@ func (r *VirtualMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque if vm.ObjectMeta.DeletionTimestamp.IsZero() { if !controllerutil.ContainsFinalizer(vm, virtualMachineFinalizerName) { controllerutil.AddFinalizer(vm, virtualMachineFinalizerName) - if err := r.Update(ctx, vm); err != nil { + if err = r.Update(ctx, vm); err != nil { log.Log.Error(err, "Error updating VirtualMachine") } } @@ -93,10 +93,9 @@ func (r *VirtualMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque } // Remove finalizer controllerutil.RemoveFinalizer(vm, virtualMachineFinalizerName) - if err := r.Update(ctx, vm); err != nil { + if err = r.Update(ctx, vm); err != nil { fmt.Printf("Error updating VirtualMachine %s", vm.Spec.Name) } - } // Stop reconciliation as the item is being deleted return ctrl.Result{}, client.IgnoreNotFound(err) @@ -131,55 +130,51 @@ func (r *VirtualMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque // If not exists, create the VM Log.Info(fmt.Sprintf("VirtualMachine %s doesn't exist", vmName)) vmType := proxmox.CheckVMType(vm) - if vmType == "template" { + switch vmType { + case "template": kubernetes.CreateVMKubernetesEvent(vm, Clientset, "Creating") proxmox.CreateVMFromTemplate(vm) proxmox.StartVM(vmName, nodeName) kubernetes.CreateVMKubernetesEvent(vm, Clientset, "Created") - // metrics.SetVirtualMachineCPUCores(vmName, vm.Namespace, float64(vm.Spec.Template.Cores)) - // metrics.SetVirtualMachineMemory(vmName, vm.Namespace, float64(vm.Spec.Template.Memory)) - } else if vmType == "scratch" { + case "scratch": kubernetes.CreateVMKubernetesEvent(vm, Clientset, "Creating") proxmox.CreateVMFromScratch(vm) proxmox.StartVM(vmName, nodeName) kubernetes.CreateVMKubernetesEvent(vm, Clientset, "Created") - // metrics.SetVirtualMachineCPUCores(vmName, vm.Namespace, float64(vm.Spec.VmSpec.Cores)) - // metrics.SetVirtualMachineMemory(vmName, vm.Namespace, float64(vm.Spec.VmSpec.Memory)) - } else { + default: Log.Info(fmt.Sprintf("VM %s doesn't have any template or vmSpec defined", vmName)) } } - // If template and created VM has different resources then update the VM with new resources the function itself decides if VM restart needed or not + // If the template and the created VM have different resources, update the VM with the new resources; the function itself + // decides whether a VM restart is needed or not proxmox.UpdateVM(vmName, nodeName, vm) err = r.Update(context.Background(), vm) if err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } // Update the status of VirtualMachine resource - var Status proxmoxv1alpha1.VirtualMachineStatus - Status.State, Status.ID, Status.Uptime, Status.Node, Status.Name, Status.IPAddress, 
Status.OSInfo = proxmox.UpdateVMStatus(vmName, nodeName) - vm.Status = Status + Status, _ := proxmox.UpdateVMStatus(vmName, nodeName) + vm.Status = *Status err = r.Status().Update(ctx, vm) if err != nil { Log.Error(err, "Error updating VirtualMachine status") } return ctrl.Result{Requeue: true, RequeueAfter: VMreconcilationPeriod * time.Second}, client.IgnoreNotFound(err) - } // SetupWithManager sets up the controller with the Manager. func (r *VirtualMachineReconciler) SetupWithManager(mgr ctrl.Manager) error { - - log := log.FromContext(context.Background()) + logger := log.FromContext(context.Background()) version, err := proxmox.GetProxmoxVersion() if err != nil { - log.Error(err, "Error getting Proxmox version") + logger.Error(err, "Error getting Proxmox version") } - log.Info(fmt.Sprintf("Connected to the Proxmox, version is: %s", version)) + logger.Info(fmt.Sprintf("Connected to Proxmox, version: %s", version)) return ctrl.NewControllerManagedBy(mgr). For(&proxmoxv1alpha1.VirtualMachine{}). - WithEventFilter(predicate.GenerationChangedPredicate{}). // --> This was needed for reconcile loop to work properly, otherwise it was reconciling 3-4 times every 10 seconds + // --> This was needed for reconcile loop to work properly, otherwise it was reconciling 3-4 times every 10 seconds + WithEventFilter(predicate.GenerationChangedPredicate{}). WithOptions(controller.Options{MaxConcurrentReconciles: VMmaxConcurrentReconciles}). Complete(&VirtualMachineReconciler{ Client: mgr.GetClient(), diff --git a/internal/controller/proxmox/virtualmachineset_controller.go b/internal/controller/proxmox/virtualmachineset_controller.go index b8fdc02..0204e1b 100644 --- a/internal/controller/proxmox/virtualmachineset_controller.go +++ b/internal/controller/proxmox/virtualmachineset_controller.go @@ -72,7 +72,7 @@ func (r *VirtualMachineSetReconciler) Reconcile(ctx context.Context, req ctrl.Re } replicas := vmSet.Spec.Replicas vmList := &proxmoxv1alpha1.VirtualMachineList{} - if err := r.List(ctx, vmList, + if err = r.List(ctx, vmList, client.InNamespace(req.Namespace), // Change that one to metadata.ownerReference client.MatchingLabels{"owner": vmSet.Name}); err != nil { @@ -82,10 +82,11 @@ func (r *VirtualMachineSetReconciler) Reconcile(ctx context.Context, req ctrl.Re resourceKey := fmt.Sprintf("%s/%s", vmSet.Namespace, vmSet.Name) // Create, Update or Delete VMs - if len(vmList.Items) < replicas && vmSet.Status.Condition != "Terminating" { + switch { + case len(vmList.Items) < replicas && vmSet.Status.Condition != "Terminating": for i := 1; i <= replicas; i++ { vmSet.Status.Condition = "Scaling Up" - err := r.Status().Update(ctx, vmSet) + err = r.Status().Update(ctx, vmSet) if err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } @@ -108,8 +109,7 @@ func (r *VirtualMachineSetReconciler) Reconcile(ctx context.Context, req ctrl.Re Kind: vmSet.Kind, Name: vmSet.ObjectMeta.Name, UID: vmSet.ObjectMeta.UID, - }, - }, + }}, }, Spec: proxmoxv1alpha1.VirtualMachineSpec{ Name: vmSet.Name + "-" + strconv.Itoa(i), @@ -131,7 +131,7 @@ func (r *VirtualMachineSetReconciler) Reconcile(ctx context.Context, req ctrl.Re } } } - } else if len(vmList.Items) > replicas { + case len(vmList.Items) > replicas: vmSet.Status.Condition = "Scaling Down" err = r.Status().Update(ctx, vmSet) if err != nil { @@ -160,23 +160,24 @@ func (r *VirtualMachineSetReconciler) Reconcile(ctx context.Context, req ctrl.Re } } } - } else { + default: // Do nothing // log.Log.Info("VMSet has the same number of VMs as replicas") // 
Check if the CPU and Memory values are the same // If not, update the VMs - for _, vm := range vmList.Items { + for i := range vmList.Items { + vm := vmList.Items[i] if vm.Spec.Template.Cores != vmSet.Spec.Template.Cores || vm.Spec.Template.Memory != vmSet.Spec.Template.Memory { vm.Spec.Template.Cores = vmSet.Spec.Template.Cores vm.Spec.Template.Memory = vmSet.Spec.Template.Memory - if err := r.Update(ctx, &vm); err != nil { - return ctrl.Result{}, err + if UpdateErr := r.Update(ctx, &vm); UpdateErr != nil { + return ctrl.Result{}, UpdateErr } } } vmSet.Status.Condition = "Available" - if err := r.Status().Update(ctx, vmSet); err != nil { - return ctrl.Result{}, err + if StatusUpdateErr := r.Status().Update(ctx, vmSet); StatusUpdateErr != nil { + return ctrl.Result{}, StatusUpdateErr } } @@ -187,7 +188,7 @@ func (r *VirtualMachineSetReconciler) Reconcile(ctx context.Context, req ctrl.Re if vmSet.ObjectMeta.DeletionTimestamp.IsZero() { if !controllerutil.ContainsFinalizer(vmSet, virtualMachineSetFinalizerName) { controllerutil.AddFinalizer(vmSet, virtualMachineSetFinalizerName) - if err := r.Update(ctx, vmSet); err != nil { + if err = r.Update(ctx, vmSet); err != nil { log.Log.Info(fmt.Sprintf("Error updating VirtualMachineSet %s", vmSet.Name)) } } @@ -197,12 +198,12 @@ func (r *VirtualMachineSetReconciler) Reconcile(ctx context.Context, req ctrl.Re // Ensure that the pre-delete logic is idempotent. // Set the VirtualMachineSet status to terminating vmSet.Status.Condition = "Terminating" - if err := r.Status().Update(ctx, vmSet); err != nil { - return ctrl.Result{}, err + if vmSeterr := r.Status().Update(ctx, vmSet); vmSeterr != nil { + return ctrl.Result{}, vmSeterr } // Get VirtualMachines owned by this VirtualMachineSet vmListDel := &proxmoxv1alpha1.VirtualMachineList{} - if err := r.List(ctx, vmListDel, + if err = r.List(ctx, vmListDel, client.InNamespace(req.Namespace), // Change that one to metadata.ownerReference client.MatchingLabels{"owner": vmSet.Name}); err != nil { @@ -210,7 +211,8 @@ func (r *VirtualMachineSetReconciler) Reconcile(ctx context.Context, req ctrl.Re } // Delete all VMs owned by this VirtualMachineSet if len(vmListDel.Items) != 0 { - for _, vm := range vmListDel.Items { + for i := range vmListDel.Items { + vm := vmListDel.Items[i] vmResourceKey := fmt.Sprintf("%s-%s", vm.Namespace, vm.Name) if isProcessed(vmResourceKey) { } else { @@ -227,7 +229,7 @@ func (r *VirtualMachineSetReconciler) Reconcile(ctx context.Context, req ctrl.Re log.Log.Info(fmt.Sprintf("Deleting VirtualMachineSet %s ", vmSet.Name)) // Remove finalizer controllerutil.RemoveFinalizer(vmSet, virtualMachineSetFinalizerName) - if err := r.Update(ctx, vmSet); err != nil { + if err = r.Update(ctx, vmSet); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } } @@ -243,7 +245,8 @@ func (r *VirtualMachineSetReconciler) Reconcile(ctx context.Context, req ctrl.Re func (r *VirtualMachineSetReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&proxmoxv1alpha1.VirtualMachineSet{}). - WithEventFilter(predicate.GenerationChangedPredicate{}). // --> This was needed for reconcile loop to work properly, otherwise it was reconciling 3-4 times every 10 seconds + WithEventFilter(predicate.GenerationChangedPredicate{}). + // --> This was needed for reconcile loop to work properly, otherwise it was reconciling 3-4 times every 10 seconds WithOptions(controller.Options{MaxConcurrentReconciles: VMSetmaxConcurrentReconciles}). 
Complete(&VirtualMachineSetReconciler{ Client: mgr.GetClient(), diff --git a/internal/controller/proxmox/virtualmachinesnapshot_controller.go b/internal/controller/proxmox/virtualmachinesnapshot_controller.go index 1a52468..3fc4640 100644 --- a/internal/controller/proxmox/virtualmachinesnapshot_controller.go +++ b/internal/controller/proxmox/virtualmachinesnapshot_controller.go @@ -88,7 +88,7 @@ func (r *VirtualMachineSnapshotReconciler) Reconcile(ctx context.Context, req ct // UID: vm.ObjectMeta.UID, // } // Set ownerRef for the VirtualMachineSnapshot - if err := controllerutil.SetControllerReference(vm, vmSnapshot, r.Scheme); err != nil { + if err = controllerutil.SetControllerReference(vm, vmSnapshot, r.Scheme); err != nil { Log.Error(err, "unable to set owner reference for VirtualMachineSnapshot") return ctrl.Result{}, err } @@ -98,7 +98,12 @@ func (r *VirtualMachineSnapshotReconciler) Reconcile(ctx context.Context, req ct snapshotName = fmt.Sprintf("snapshot_%s", time.Now().Format("2006_01_02T15_04_05Z07_00")) } // If the snapshot is already created, return - if vmSnapshot.Status.Status == "" { + switch vmSnapshot.Status.Status { + case "": + if snapshotName == "" { + // If snapshot name is not specified, use the timestamp as the snapshot name + snapshotName = fmt.Sprintf("snapshot_%s", time.Now().Format("2006_01_02T15_04_05Z07_00")) + } // Create the snapshot StatusCode = proxmox.CreateVMSnapshot(vmName, snapshotName) // Set the status to created @@ -114,17 +119,17 @@ func (r *VirtualMachineSnapshotReconciler) Reconcile(ctx context.Context, req ct return ctrl.Result{}, client.IgnoreNotFound(err) } } - } else if vmSnapshot.Status.Status == "Created" { + case "Created": if StatusCode == 2 { // Snapshot is already created, return vmSnapshot.Status.ErrorMessage = "Snapshot is already created" - err := r.Status().Update(ctx, vmSnapshot) + err = r.Status().Update(ctx, vmSnapshot) if err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } } return ctrl.Result{}, nil - } else { + default: // Snapshot creation failed, return if StatusCode == 1 { vmSnapshot.Status.ErrorMessage = "Snapshot creation failed" @@ -136,7 +141,8 @@ func (r *VirtualMachineSnapshotReconciler) Reconcile(ctx context.Context, req ct return ctrl.Result{}, nil } - return ctrl.Result{Requeue: true, RequeueAfter: VMSnapshotreconcilationPeriod * time.Second}, client.IgnoreNotFound(r.Get(ctx, req.NamespacedName, &proxmoxv1alpha1.VirtualMachineSnapshotPolicy{})) + return ctrl.Result{Requeue: true, RequeueAfter: VMSnapshotreconcilationPeriod * time.Second}, + client.IgnoreNotFound(r.Get(ctx, req.NamespacedName, &proxmoxv1alpha1.VirtualMachineSnapshotPolicy{})) } // SetupWithManager sets up the controller with the Manager. 
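
Note on the controllers above: container_controller.go, managedvirtualmachine_controller.go, virtualmachine_controller.go, virtualmachineset_controller.go and virtualmachinesnapshot_controller.go all repeat the same finalizer flow around controllerutil. A minimal sketch of that shared pattern, assuming controller-runtime's client and controllerutil packages; the finalizer value and the cleanupExternal hook are illustrative placeholders, not code from this repo:

    package sketch

    import (
    	"context"

    	"sigs.k8s.io/controller-runtime/pkg/client"
    	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
    )

    // Illustrative finalizer value; each controller in the patch defines its own.
    const finalizerName = "proxmox.alperen.cloud/finalizer"

    // reconcileFinalizer mirrors the shared pattern: while the object is live,
    // ensure the finalizer is present; once the deletion timestamp is set, run
    // the external cleanup and only then release the finalizer so the API
    // server can actually delete the object.
    func reconcileFinalizer(ctx context.Context, c client.Client, obj client.Object,
    	cleanupExternal func(context.Context) error) (deleted bool, err error) {
    	if obj.GetDeletionTimestamp().IsZero() {
    		if !controllerutil.ContainsFinalizer(obj, finalizerName) {
    			controllerutil.AddFinalizer(obj, finalizerName)
    			return false, c.Update(ctx, obj)
    		}
    		return false, nil
    	}
    	if controllerutil.ContainsFinalizer(obj, finalizerName) {
    		// The pre-delete logic must stay idempotent: it runs again if the
    		// Update below fails and the reconcile is retried.
    		if err := cleanupExternal(ctx); err != nil {
    			return false, err
    		}
    		controllerutil.RemoveFinalizer(obj, finalizerName)
    		if err := c.Update(ctx, obj); err != nil {
    			return false, err
    		}
    	}
    	return true, nil
    }

Each controller inlines this flow with its own cleanup step (DeleteVM, DeleteContainer, and so on), which is why keeping that cleanup idempotent matters for the retry paths.
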
diff --git a/internal/controller/proxmox/virtualmachinesnapshotpolicy_controller.go b/internal/controller/proxmox/virtualmachinesnapshotpolicy_controller.go index ffd86be..d909ace 100644 --- a/internal/controller/proxmox/virtualmachinesnapshotpolicy_controller.go +++ b/internal/controller/proxmox/virtualmachinesnapshotpolicy_controller.go @@ -86,7 +86,8 @@ func (r *VirtualMachineSnapshotPolicyReconciler) Reconcile(ctx context.Context, // Iterate over matching VirtualMachines to create snapshot c := cron.New() if _, err := c.AddFunc(cronSpec, func() { - for _, vm := range MatchingVirtualMachines { + for i := range MatchingVirtualMachines { + vm := &MatchingVirtualMachines[i] // Create snapshot vmName := vm.Spec.Name snapshotName := fmt.Sprintf("snapshot_%s", time.Now().Format("2006_01_02T15_04_05Z07_00")) @@ -112,7 +113,6 @@ func (r *VirtualMachineSnapshotPolicyReconciler) Reconcile(ctx context.Context, processedResources[snapshotKey] = true } } - }); err != nil { log.Log.Error(err, "unable to add cronjob") return ctrl.Result{}, err @@ -121,7 +121,8 @@ func (r *VirtualMachineSnapshotPolicyReconciler) Reconcile(ctx context.Context, c.Start() // Create snapshot - return ctrl.Result{Requeue: true, RequeueAfter: VMSnapshotPolicyreconcilationPeriod * time.Second}, client.IgnoreNotFound(r.Get(ctx, req.NamespacedName, &proxmoxv1alpha1.VirtualMachineSnapshotPolicy{})) + return ctrl.Result{Requeue: true, RequeueAfter: VMSnapshotPolicyreconcilationPeriod * time.Second}, + client.IgnoreNotFound(r.Get(ctx, req.NamespacedName, &proxmoxv1alpha1.VirtualMachineSnapshotPolicy{})) } // SetupWithManager sets up the controller with the Manager. @@ -134,5 +135,4 @@ func (r *VirtualMachineSnapshotPolicyReconciler) SetupWithManager(mgr ctrl.Manag Client: mgr.GetClient(), Scheme: mgr.GetScheme(), }) - } diff --git a/pkg/kubernetes/kubernetes.go b/pkg/kubernetes/kubernetes.go index ce5ca00..61fbf13 100644 --- a/pkg/kubernetes/kubernetes.go +++ b/pkg/kubernetes/kubernetes.go @@ -22,6 +22,10 @@ var ( Clientset, DynamicClient = GetKubeconfig() ) +const ( + eventTypeNormal = "Normal" +) + func InsideCluster() bool { // Check if kubeconfig exists under home directory homeDir, err := os.UserHomeDir() @@ -37,7 +41,7 @@ func InsideCluster() bool { return false } -func ClientConfig() interface{} { +func ClientConfig() any { if InsideCluster() { config, err := rest.InClusterConfig() if err != nil { @@ -62,9 +66,7 @@ func ClientConfig() interface{} { } func GetKubeconfig() (*kubernetes.Clientset, dynamic.Interface) { - config := ClientConfig().(*rest.Config) - clientset, err := kubernetes.NewForConfig(config) if err != nil { panic(err.Error()) @@ -77,11 +79,11 @@ func GetKubeconfig() (*kubernetes.Clientset, dynamic.Interface) { return clientset, dynamicClient } -func CreateVMKubernetesEvent(vm *proxmoxv1alpha1.VirtualMachine, Clientset *kubernetes.Clientset, Action string) { +func CreateVMKubernetesEvent(vm *proxmoxv1alpha1.VirtualMachine, clientset *kubernetes.Clientset, action string) { // Create a new event Event := &corev1.Event{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%s-%s", vm.ObjectMeta.Name, Action, time.Now()), + Name: fmt.Sprintf("%s-%s-%s", vm.ObjectMeta.Name, action, time.Now()), Namespace: vm.ObjectMeta.Namespace, Labels: map[string]string{ "app": "kube-proxmox-operator", @@ -99,31 +101,34 @@ func CreateVMKubernetesEvent(vm *proxmoxv1alpha1.VirtualMachine, Clientset *kube }, FirstTimestamp: metav1.Time{Time: time.Now()}, } - if Action == "Created" { + switch action { + case "Created": Event.Reason 
= "Created" Event.Message = fmt.Sprintf("VirtualMachine %s has been created", vm.Spec.Name) - Event.Type = "Normal" - } else if Action == "Creating" { + Event.Type = eventTypeNormal + case "Creating": Event.Reason = "Creating" - Event.Message = fmt.Sprintf("VirtualMachine %s is being creating", vm.Spec.Name) - Event.Type = "Normal" - } else if Action == "Deleting" { + Event.Message = fmt.Sprintf("VirtualMachine %s is being created", vm.Spec.Name) + Event.Type = eventTypeNormal + case "Deleting": Event.Reason = "Deleting" Event.Message = fmt.Sprintf("VirtualMachine %s is being deleted", vm.Spec.Name) - Event.Type = "Normal" + Event.Type = eventTypeNormal + default: + // Do nothing } - _, err := Clientset.CoreV1().Events(vm.ObjectMeta.Namespace).Create(context.Background(), Event, metav1.CreateOptions{}) + _, err := clientset.CoreV1().Events(vm.ObjectMeta.Namespace).Create(context.Background(), Event, metav1.CreateOptions{}) if err != nil { panic(err) } } -func CreateManagedVMKubernetesEvent(managedVM *proxmoxv1alpha1.ManagedVirtualMachine, Clientset *kubernetes.Clientset, Action string) { +func CreateManagedVMKubernetesEvent(managedVM *proxmoxv1alpha1.ManagedVirtualMachine, clientset *kubernetes.Clientset, action string) { // Create event Event := &corev1.Event{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%s-%s", managedVM.Name, Action, time.Now()), + Name: fmt.Sprintf("%s-%s-%s", managedVM.Name, action, time.Now()), Namespace: os.Getenv("POD_NAMESPACE"), Labels: map[string]string{ "app": "kube-proxmox-operator", @@ -140,12 +145,12 @@ func CreateManagedVMKubernetesEvent(managedVM *proxmoxv1alpha1.ManagedVirtualMac Component: "kube-proxmox-operator", }, FirstTimestamp: metav1.Time{Time: time.Now()}, - Reason: Action, - Message: fmt.Sprintf("ManagedVirtualMachine %s has been %s", managedVM.Name, Action), + Reason: action, + Message: fmt.Sprintf("ManagedVirtualMachine %s has been %s", managedVM.Name, action), Type: "Normal", } // Send event - _, err := Clientset.CoreV1().Events(os.Getenv("POD_NAMESPACE")).Create(context.Background(), Event, metav1.CreateOptions{}) + _, err := clientset.CoreV1().Events(os.Getenv("POD_NAMESPACE")).Create(context.Background(), Event, metav1.CreateOptions{}) if err != nil { panic(err) } diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index 323ec1a..d532e83 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -32,7 +32,7 @@ var ( }, []string{"name", "namespace"}) ) -func init() { +func init() { //nolint:gochecknoinits // This is required by kubebuilder metrics.Registry.MustRegister(virtualMachineCount) metrics.Registry.MustRegister(virtualMachineCPUCores) metrics.Registry.MustRegister(virtualMachineMemory) diff --git a/pkg/proxmox/proxmox.go b/pkg/proxmox/proxmox.go index 8ead941..d56b473 100644 --- a/pkg/proxmox/proxmox.go +++ b/pkg/proxmox/proxmox.go @@ -41,7 +41,13 @@ var mutex = &sync.Mutex{} const ( // The tag that will be added to VMs in Proxmox cluster - virtualMachineTag = "kube-proxmox-operator" + virtualMachineTag = "kube-proxmox-operator" + virtualMachineRunningState = "running" + virtualMachineStoppedState = "stopped" + virtualMachineTemplateType = "template" + virtualMachineScratchType = "scratch" + virtualMachineCPUOption = "cores" + virtualMachineMemoryOption = "memory" // The timeout for qemu-agent to start in seconds AgentTimeoutSeconds = 10 // The timeouts for VirtualMachine operations @@ -78,14 +84,15 @@ func CreateProxmoxClient() *proxmox.Client { httpClient = &http.Client{ Transport: &http.Transport{ 
TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, + InsecureSkipVerify: true, //nolint:gosec // Skipping linting for InsecureSkipVerify due to user choice }, }, } } var client *proxmox.Client - if ProxmoxConfig.Username != "" && ProxmoxConfig.Password != "" { + switch { + case ProxmoxConfig.Username != "" && ProxmoxConfig.Password != "": client = proxmox.NewClient(ProxmoxConfig.APIEndpoint, proxmox.WithCredentials(&proxmox.Credentials{ Username: ProxmoxConfig.Username, @@ -93,12 +100,12 @@ func CreateProxmoxClient() *proxmox.Client { Password: ProxmoxConfig.Password, }), proxmox.WithHTTPClient(httpClient), ) - } else if ProxmoxConfig.TokenID != "" && ProxmoxConfig.Secret != "" { + case ProxmoxConfig.TokenID != "" && ProxmoxConfig.Secret != "": client = proxmox.NewClient(ProxmoxConfig.APIEndpoint, proxmox.WithAPIToken(ProxmoxConfig.TokenID, ProxmoxConfig.Secret), proxmox.WithHTTPClient(httpClient), ) - } else { + default: panic("Proxmox credentials are not defined") } return client @@ -127,7 +134,6 @@ func GetNodes() ([]string, error) { } func CreateVMFromTemplate(vm *proxmoxv1alpha1.VirtualMachine) { - nodeName := vm.Spec.NodeName node, err := Client.Node(ctx, nodeName) if err != nil { @@ -152,42 +158,24 @@ func CreateVMFromTemplate(vm *proxmoxv1alpha1.VirtualMachine) { } log.Log.Info(fmt.Sprintf("New VM %s is being created with ID: %d", vm.Name, newID)) mutex.Unlock() - // Lock VM creation process - // LockVM(vm.Spec.Name) - // UPID := task.UPID - // log.Log.Info(fmt.Sprintf("VM creation task UPID: %s", UPID)) - // TODO: Implement a better way to watch the task + // TODO: Implement a better way to watch the tasks. logChan, err := task.Watch(ctx, 0) if err != nil { panic(err) } for logEntry := range logChan { - // log.Log.Info(logEntry) log.Log.Info(fmt.Sprintf("Virtual Machine %s, creation process: %s", vm.Name, logEntry)) } - - // logChan, err := task.Watch(0) - // if err != nil { - // panic(err) - // } - // var wg sync.WaitGroup - // go func() { - // for logEntry := range logChan { - // log.Log.Info(logEntry) - // wg.Done() - // } - // }() - // wg.Add(500) - // wg.Wait() mutex.Lock() _, taskCompleted, taskErr := task.WaitForCompleteStatus(ctx, virtualMachineCreateTimesNum, virtualMachineCreateSteps) - if !taskCompleted { + switch { + case !taskCompleted: log.Log.Error(taskErr, "Error creating VM") - } else if taskCompleted { + case taskCompleted: log.Log.Info(fmt.Sprintf("VM %s has been created", vm.Name)) // Unlock VM creation process // UnlockVM(vm.Spec.Name) - } else { + default: log.Log.Info("VM creation task is still running") } @@ -202,7 +190,6 @@ func CreateVMFromTemplate(vm *proxmoxv1alpha1.VirtualMachine) { if err != nil { panic(err) } - } func GetVMID(vmName, nodeName string) int { @@ -320,18 +307,18 @@ func DeleteVM(vmName, nodeName string) { mutex.Unlock() // Stop VM vmStatus := VirtualMachine.Status - if vmStatus == "running" { - task, err := VirtualMachine.Stop(ctx) - if err != nil { + if vmStatus == virtualMachineRunningState { + stopTask, stopErr := VirtualMachine.Stop(ctx) + if stopErr != nil { - panic(err) + panic(stopErr) } - _, taskCompleted, taskErr := task.WaitForCompleteStatus(ctx, virtualMachineStopTimesNum, virtualMachineStopSteps) - - if !taskCompleted { + _, taskCompleted, taskErr := stopTask.WaitForCompleteStatus(ctx, virtualMachineStopTimesNum, virtualMachineStopSteps) + switch taskCompleted { + case false: log.Log.Error(taskErr, "Can't stop VM") - } else if taskCompleted { + case true: log.Log.Info(fmt.Sprintf("VM %s has been stopped", vmName)) - } else { + default: log.Log.Info("VM is already 
stopped") } } @@ -341,11 +328,12 @@ func DeleteVM(vmName, nodeName string) { panic(err) } _, taskCompleted, taskErr := task.WaitForCompleteStatus(ctx, 3, 20) - if !taskCompleted { + switch { + case !taskCompleted: log.Log.Error(taskErr, "Can't delete VM") - } else if taskCompleted { + case taskCompleted: log.Log.Info(fmt.Sprintf("VM %s has been deleted", vmName)) - } else { + default: log.Log.Info("VM is already deleted") } } @@ -367,11 +355,12 @@ func StartVM(vmName, nodeName string) { panic(err) } _, taskCompleted, taskErr := task.WaitForCompleteStatus(ctx, virtualMachineStartTimesNum, virtualMachineStartSteps) - if !taskCompleted { + switch { + case !taskCompleted: log.Log.Error(taskErr, "Can't start VM") - } else if taskCompleted { + case taskCompleted: log.Log.Info(fmt.Sprintf("VM %s has been started", vmName)) - } else { + default: log.Log.Info("VM is already started") } } @@ -395,7 +384,7 @@ func RestartVM(vmName, nodeName string) *proxmox.Task { return task } -func GetVMState(vmName string, nodeName string) string { +func GetVMState(vmName, nodeName string) string { // Gets the VMstate from Proxmox API node, err := Client.Node(ctx, nodeName) if err != nil { @@ -407,11 +396,12 @@ func GetVMState(vmName string, nodeName string) string { if err != nil { panic(err) } - if VirtualMachineState == "running" { - return "running" - } else if VirtualMachineState == "stopped" { - return "stopped" - } else { + switch VirtualMachineState { + case virtualMachineRunningState: + return virtualMachineRunningState + case virtualMachineStoppedState: + return virtualMachineStoppedState + default: return "unknown" } } @@ -430,44 +420,43 @@ func AgentIsRunning(vmName, nodeName string) bool { } func CreateVMFromScratch(vm *proxmoxv1alpha1.VirtualMachine) { - nodeName := vm.Spec.NodeName node, err := Client.Node(ctx, nodeName) if err != nil { panic(err) } - cores := vm.Spec.VmSpec.Cores - memory := vm.Spec.VmSpec.Memory - diskName := vm.Spec.VmSpec.Disk.Name - diskSize := vm.Spec.VmSpec.Disk.Value - networkName := vm.Spec.VmSpec.Network.Name - networkValue := vm.Spec.VmSpec.Network.Value - osName := vm.Spec.VmSpec.OSImage.Name - osValue := vm.Spec.VmSpec.OSImage.Value + cores := vm.Spec.VMSpec.Cores + memory := vm.Spec.VMSpec.Memory + diskName := vm.Spec.VMSpec.Disk.Name + diskSize := vm.Spec.VMSpec.Disk.Value + networkName := vm.Spec.VMSpec.Network.Name + networkValue := vm.Spec.VMSpec.Network.Value + osName := vm.Spec.VMSpec.OSImage.Name + osValue := vm.Spec.VMSpec.OSImage.Value // Create VM from scratch VMOptions := []proxmox.VirtualMachineOption{ - proxmox.VirtualMachineOption{ - Name: "cores", + { + Name: virtualMachineCPUOption, Value: cores, }, - proxmox.VirtualMachineOption{ - Name: "memory", + { + Name: virtualMachineMemoryOption, Value: memory, }, - proxmox.VirtualMachineOption{ + { Name: diskName, Value: diskSize, }, - proxmox.VirtualMachineOption{ + { Name: networkName, Value: networkValue, }, - proxmox.VirtualMachineOption{ + { Name: osName, Value: osValue, }, - proxmox.VirtualMachineOption{ + { Name: "name", Value: vm.Spec.Name, }, @@ -487,11 +476,12 @@ func CreateVMFromScratch(vm *proxmoxv1alpha1.VirtualMachine) { panic(err) } _, taskCompleted, taskErr := task.WaitForCompleteStatus(ctx, 10, 10) - if !taskCompleted { + switch taskCompleted { + case false: log.Log.Error(taskErr, "Can't create VM") - } else if taskCompleted { + case true: log.Log.Info(fmt.Sprintf("VM %s has been created", vm.Spec.Name)) - } else { + default: log.Log.Info("VM is already created") } VirtualMachine, err := 
node.VirtualMachine(ctx, vmID) @@ -506,36 +496,35 @@ func CreateVMFromScratch(vm *proxmoxv1alpha1.VirtualMachine) { if err != nil { log.Log.Error(taskErr, "Can't add tag to VM") } - } func CheckVMType(vm *proxmoxv1alpha1.VirtualMachine) string { var VMType string - if !reflect.ValueOf(vm.Spec.Template).IsZero() { - VMType = "template" - } else if !reflect.ValueOf(vm.Spec.VmSpec).IsZero() { - VMType = "scratch" - } else if !reflect.ValueOf(vm.Spec.Template).IsZero() && !reflect.ValueOf(vm.Spec.VmSpec).IsZero() { - VMType = "faulty" - } else { + switch { + // Check the combined case first; if it came after the single-field cases it would be unreachable + case !reflect.ValueOf(vm.Spec.Template).IsZero() && !reflect.ValueOf(vm.Spec.VMSpec).IsZero(): + VMType = "faulty" + case !reflect.ValueOf(vm.Spec.Template).IsZero(): + VMType = virtualMachineTemplateType + case !reflect.ValueOf(vm.Spec.VMSpec).IsZero(): + VMType = virtualMachineScratchType + default: VMType = "undefined" } return VMType - } -type VmMutex struct { +type VMMutex struct { vmName string mutex sync.Mutex locked bool } -var vmMutexes = make(map[string]*VmMutex) +var vmMutexes = make(map[string]*VMMutex) func LockVM(vmName string) { vmMutex, ok := vmMutexes[vmName] if !ok { - vmMutex = &VmMutex{ + vmMutex = &VMMutex{ vmName: vmName, } vmMutexes[vmName] = vmMutex @@ -581,7 +570,6 @@ func GetProxmoxVMs() []string { } func GetOnlineNodes() []string { - nodes, err := Client.Nodes(ctx) var OnlineNodes []string if err != nil { @@ -596,7 +584,6 @@ func GetOnlineNodes() []string { } func GetControllerVMs() []string { - // From proxmox get VM's that has tag "kube-proxmox-operator" nodes := GetOnlineNodes() var ControllerVMs []string @@ -619,12 +606,12 @@ func GetControllerVMs() []string { return ControllerVMs } -func CheckManagedVMExists(ManagedVM string) bool { +func CheckManagedVMExists(managedVM string) bool { // Get managed VMs - ManagedVMs := GetManagedVMs() + managedVMs := GetManagedVMs() // Check if ManagedVM exists in ManagedVMs - for _, VM := range ManagedVMs { - if VM == strings.ToLower(ManagedVM) { + for _, VM := range managedVMs { + if strings.EqualFold(VM, managedVM) { return true } } @@ -653,30 +640,31 @@ func GetNodeOfVM(vmName string) string { } } return "" - } -func GetManagedVMSpec(ManagedVMName, nodeName string) (int, int, int) { - +func GetManagedVMSpec(managedVMName, nodeName string) (cores, memory, disk int) { // Get spec of VM node, err := Client.Node(ctx, nodeName) if err != nil { panic(err) } vmID := GetVMID(managedVMName, nodeName) VirtualMachine, err := node.VirtualMachine(ctx, vmID) if err != nil { log.Log.Error(err, "Error getting VM") } - cores := VirtualMachine.CPUs - memory := int(VirtualMachine.MaxMem / 1024 / 1024) // As MB - disk := int(VirtualMachine.MaxDisk / 1024 / 1024 / 1024) + cores = VirtualMachine.CPUs + memory = int(VirtualMachine.MaxMem / 1024 / 1024) // As MB + disk = int(VirtualMachine.MaxDisk / 1024 / 1024 / 1024) return cores, memory, disk } -func UpdateVMStatus(vmName, nodeName string) (string, int, string, string, string, string, string) { - +func UpdateVMStatus(vmName, nodeName string) (*proxmoxv1alpha1.VirtualMachineStatus, error) { + var VirtualMachineIP string + var VirtualMachineOS string + var VirtualmachineStatus *proxmoxv1alpha1.VirtualMachineStatus + // Get VM status node, err := Client.Node(ctx, nodeName) if err != nil { panic(err) } @@ -689,23 +677,34 @@ func UpdateVMStatus(vmName, nodeName string) (string, int, string, string, strin if err != nil { panic(err) } - // Get VM status - VirtualMachineState := VirtualMachine.Status - VirtualMachineID := 
int(VirtualMachine.VMID) - VirtualMachineNode := VirtualMachine.Node - VirtualMachineName := VirtualMachine.Name - VirtualMachineUptime := GetVMUptime(vmName, nodeName) if AgentIsRunning(vmName, nodeName) { - VirtualMachineIP := GetVMIPAddress(vmName, nodeName) - VirtualMachineOS := GetOSInfo(vmName, nodeName) - return VirtualMachineState, VirtualMachineID, VirtualMachineUptime, VirtualMachineNode, VirtualMachineName, VirtualMachineIP, VirtualMachineOS + VirtualMachineIP = GetVMIPAddress(vmName, nodeName) + VirtualMachineOS = GetOSInfo(vmName, nodeName) } else { - VirtualMachineIP := "nil" - VirtualMachineOS := "nil" - return VirtualMachineState, VirtualMachineID, VirtualMachineUptime, VirtualMachineNode, VirtualMachineName, VirtualMachineIP, VirtualMachineOS + VirtualMachineIP = "nil" + VirtualMachineOS = "nil" } + VirtualmachineStatus = &proxmoxv1alpha1.VirtualMachineStatus{ + State: VirtualMachine.Status, + ID: int(VirtualMachine.VMID), + Node: VirtualMachine.Node, + Name: VirtualMachine.Name, + Uptime: GetVMUptime(vmName, nodeName), + IPAddress: VirtualMachineIP, + OSInfo: VirtualMachineOS, + } + return VirtualmachineStatus, nil } else { - return "VM not found", 0, "0", "0", "0", "0", "0" + VirtualmachineStatus = &proxmoxv1alpha1.VirtualMachineStatus{ + State: "nil", + ID: 0, + Node: "nil", + Name: "nil", + Uptime: "nil", + IPAddress: "nil", + OSInfo: "nil", + } + return VirtualmachineStatus, nil } } @@ -726,9 +725,10 @@ func UpdateVM(vmName, nodeName string, vm *proxmoxv1alpha1.VirtualMachine) { var memoryOption proxmox.VirtualMachineOption var Disk, DiskSize string var DiskSizeInt int - cpuOption.Name = "cores" - memoryOption.Name = "memory" - if CheckVMType(vm) == "template" { + cpuOption.Name = virtualMachineCPUOption + memoryOption.Name = virtualMachineMemoryOption + switch CheckVMType(vm) { + case virtualMachineTemplateType: cpuOption.Value = vm.Spec.Template.Cores memoryOption.Value = uint64(vm.Spec.Template.Memory) DiskSize = strconv.Itoa(vm.Spec.Template.Disk[0].Size) + "G" @@ -736,40 +736,32 @@ func UpdateVM(vmName, nodeName string, vm *proxmoxv1alpha1.VirtualMachine) { DiskSizeInt = vm.Spec.Template.Disk[0].Size metrics.SetVirtualMachineCPUCores(vmName, vm.Namespace, float64(vm.Spec.Template.Cores)) metrics.SetVirtualMachineMemory(vmName, vm.Namespace, float64(vm.Spec.Template.Memory)) - } else if CheckVMType(vm) == "scratch" { - cpuOption.Value = vm.Spec.VmSpec.Cores - memoryOption.Value = uint64(vm.Spec.VmSpec.Memory) - // DiskValue := strings.Split(vm.Spec.VmSpec.Disk.Value, ":")[1] - DiskValue := vm.Spec.VmSpec.Disk.Value + case virtualMachineScratchType: + cpuOption.Value = vm.Spec.VMSpec.Cores + memoryOption.Value = uint64(vm.Spec.VMSpec.Memory) + DiskValue := vm.Spec.VMSpec.Disk.Value DiskSize = DiskValue + "G" DiskSizeInt, _ = strconv.Atoi(DiskValue) - Disk = vm.Spec.VmSpec.Disk.Name - metrics.SetVirtualMachineCPUCores(vmName, vm.Namespace, float64(vm.Spec.VmSpec.Cores)) - metrics.SetVirtualMachineMemory(vmName, vm.Namespace, float64(vm.Spec.VmSpec.Memory)) - } else { + Disk = vm.Spec.VMSpec.Disk.Name + metrics.SetVirtualMachineCPUCores(vmName, vm.Namespace, float64(vm.Spec.VMSpec.Cores)) + metrics.SetVirtualMachineMemory(vmName, vm.Namespace, float64(vm.Spec.VMSpec.Memory)) + default: log.Log.Info(fmt.Sprintf("VM %s doesn't have any template or vmSpec defined", vmName)) } - _, _, _ = Disk, DiskSize, DiskSizeInt // Convert disk size to string VirtualMachineMaxDisk := VirtualMachine.MaxDisk / 1024 / 1024 / 1024 // As GB //// log.Log.Info(fmt.Sprintf("Resizing disk %s 
to %s", disk, diskSize)) //// if current disk is lower than the updated disk size then resize the disk else don't do anything if VirtualMachineMaxDisk <= uint64(DiskSizeInt) { - //// Resize Disk + // Resize Disk err = VirtualMachine.ResizeDisk(ctx, Disk, DiskSize) if err != nil { log.Log.Error(err, "Can't resize disk") } - - } else { - // log.Log.Info(fmt.Sprintf("VirtualMachineMaxDisk: %d , DiskSizeInt: %d", VirtualMachineMaxDisk, DiskSizeInt)) - // Revert the update on VM object - // TODO --> This doesn't work for scratch VM's. - if CheckVMType(vm) == "template" { - log.Log.Info(fmt.Sprintf("VirtualMachine %s disk %s can't shrink.", vmName, Disk)) - vm.Spec.Template.Disk[0].Size = int(VirtualMachineMaxDisk) - } + } else if CheckVMType(vm) == virtualMachineTemplateType { + log.Log.Info(fmt.Sprintf("VirtualMachine %s disk %s can't shrink.", vmName, Disk)) + vm.Spec.Template.Disk[0].Size = int(VirtualMachineMaxDisk) } VirtualMachineMem := VirtualMachine.MaxMem / 1024 / 1024 // As MB @@ -782,11 +774,12 @@ func UpdateVM(vmName, nodeName string, vm *proxmoxv1alpha1.VirtualMachine) { } _, taskCompleted, taskErr := task.WaitForCompleteStatus(ctx, virtualMachineUpdateTimesNum, virtualMachineUpdateSteps) - if !taskCompleted { + switch taskCompleted { + case false: log.Log.Error(taskErr, "Can't update VM") - } else if taskCompleted { + case true: log.Log.Info(fmt.Sprintf("VM %s has been updated", vmName)) - } else { + default: log.Log.Info("VM is already updated") } // After config update, restart VM @@ -795,14 +788,12 @@ func UpdateVM(vmName, nodeName string, vm *proxmoxv1alpha1.VirtualMachine) { if !taskCompleted { log.Log.Error(taskErr, "Can't restart VM") } - } } -func CreateManagedVM(ManagedVM string) *proxmoxv1alpha1.ManagedVirtualMachine { - - nodeName := GetNodeOfVM(ManagedVM) - cores, memory, disk := GetManagedVMSpec(ManagedVM, nodeName) +func CreateManagedVM(managedVM string) *proxmoxv1alpha1.ManagedVirtualMachine { + nodeName := GetNodeOfVM(managedVM) + cores, memory, disk := GetManagedVMSpec(managedVM, nodeName) // IF POD_NAMESPACE is not set, set it to default if os.Getenv("POD_NAMESPACE") == "" { @@ -816,18 +807,18 @@ func CreateManagedVM(ManagedVM string) *proxmoxv1alpha1.ManagedVirtualMachine { Kind: "ManagedVirtualMachine", }, ObjectMeta: metav1.ObjectMeta{ - Name: strings.ToLower(ManagedVM), + Name: strings.ToLower(managedVM), Namespace: os.Getenv("POD_NAMESPACE"), }, Spec: proxmoxv1alpha1.ManagedVirtualMachineSpec{ - Name: ManagedVM, + Name: managedVM, NodeName: nodeName, Cores: cores, Memory: memory, Disk: disk, }, - Status: proxmoxv1alpha1.ManagedVirtualMachineStatus{ + Status: proxmoxv1alpha1.VirtualMachineStatus{ ID: 0, }, } @@ -835,7 +826,6 @@ func CreateManagedVM(ManagedVM string) *proxmoxv1alpha1.ManagedVirtualMachine { } func GetManagedVMs() []string { - // Get my custom resource "ManagedVirtualMachine" customResource := schema.GroupVersionResource{ Group: "proxmox.alperen.cloud", @@ -854,16 +844,13 @@ func GetManagedVMs() []string { ManagedVMs = append(ManagedVMs, ManagedVMName) } return ManagedVMs - } func UpdateManagedVM(managedVMName, nodeName string, managedVM *proxmoxv1alpha1.ManagedVirtualMachine) { - - if GetVMState(managedVMName, nodeName) != "running" { + if GetVMState(managedVMName, nodeName) != virtualMachineRunningState { // Break if VM is not running return } else { - node, err := Client.Node(ctx, nodeName) if err != nil { panic(err) @@ -877,9 +864,9 @@ func UpdateManagedVM(managedVMName, nodeName string, managedVM *proxmoxv1alpha1. 
VirtualMachineMem := VirtualMachine.MaxMem / 1024 / 1024 // As MB var cpuOption proxmox.VirtualMachineOption var memoryOption proxmox.VirtualMachineOption - cpuOption.Name = "cores" + cpuOption.Name = virtualMachineCPUOption cpuOption.Value = managedVM.Spec.Cores - memoryOption.Name = "memory" + memoryOption.Name = virtualMachineMemoryOption memoryOption.Value = managedVM.Spec.Memory // Disk diskSize := managedVM.Spec.Disk @@ -905,17 +892,19 @@ func UpdateManagedVM(managedVMName, nodeName string, managedVM *proxmoxv1alpha1. if VirtualMachine.CPUs != managedVM.Spec.Cores || VirtualMachineMem != uint64(managedVM.Spec.Memory) { // Update VM - // log.Log.Info(fmt.Sprintf("The comparison between CR and external resource: CPU: %d, %d || Memory: %d, %d", managedVM.Spec.Cores, VirtualMachine.CPUs, managedVM.Spec.Memory, VirtualMachineMem)) + // log.Log.Info(fmt.Sprintf("The comparison between CR and external resource: CPU: %d, %d + // || Memory: %d, %d", managedVM.Spec.Cores, VirtualMachine.CPUs, managedVM.Spec.Memory, VirtualMachineMem)) task, err := VirtualMachine.Config(ctx, cpuOption, memoryOption) if err != nil { panic(err) } _, taskCompleted, taskErr := task.WaitForCompleteStatus(ctx, virtualMachineUpdateTimesNum, virtualMachineUpdateSteps) - if !taskCompleted { + switch taskCompleted { + case false: log.Log.Error(taskErr, "Can't update VM") - } else if taskCompleted { + case true: log.Log.Info(fmt.Sprintf("VM %s has been updated", managedVMName)) - } else { + default: log.Log.Info("VM is already updated") } task = RestartVM(managedVMName, nodeName) @@ -961,7 +950,6 @@ func SubstractLowercaseSlices(slice1, slice2 []string) []string { } func CreateVMSnapshot(vmName, snapshotName string) (statusCode int) { - nodeName := GetNodeOfVM(vmName) node, err := Client.Node(ctx, nodeName) if err != nil { @@ -979,13 +967,14 @@ func CreateVMSnapshot(vmName, snapshotName string) (statusCode int) { panic(err) } _, taskCompleted, taskErr := task.WaitForCompleteStatus(ctx, 3, 10) - if !taskCompleted { + switch taskCompleted { + case false: log.Log.Error(taskErr, fmt.Sprintf("Can't create snapshot for the VirtualMachine %s", vmName)) return 1 - } else if taskCompleted { + case true: log.Log.Info(fmt.Sprintf("VirtualMachine %s has been snapshotted with %s name", vmName, snapshotName)) return 0 - } else { + default: log.Log.Info("VirtualMachine already has a snapshot with the same name") return 2 } @@ -1077,13 +1066,13 @@ func StopContainer(containerName, nodeName string) (*proxmox.ContainerStatus, er log.Log.Info(fmt.Sprintf("Stopping container %s", containerName)) container := GetContainer(containerName, nodeName) // Stop container - if container.Status == "running" { + if container.Status == virtualMachineRunningState { // Stop container called status, err := container.Stop(ctx) // Retry method to understand if container is stopped for i := 0; i < 5; i++ { contStatus := GetContainerState(containerName, nodeName) - if contStatus == "stopped" { + if contStatus == virtualMachineStoppedState { break } else { time.Sleep(5 * time.Second) } } @@ -1101,13 +1090,12 @@ func DeleteContainer(containerName, nodeName string) { container := GetContainer(containerName, nodeName) mutex.Unlock() containerStatus := container.Status - if containerStatus == "running" { + if containerStatus == virtualMachineRunningState { // Stop container _, err := StopContainer(containerName, nodeName) if err != nil { panic(err) } - } log.Log.Info(fmt.Sprintf("Deleting container %s", containerName)) // Delete container @@ -1118,11 +1106,12 @@ func 
DeleteContainer(containerName, nodeName string) { panic(err) } _, taskCompleted, taskErr := task.WaitForCompleteStatus(ctx, 5, 5) - if !taskCompleted { + switch taskCompleted { + case false: log.Log.Error(taskErr, "Can't delete container") - } else if taskCompleted { + case true: log.Log.Info(fmt.Sprintf("Container %s has been deleted", containerName)) - } else { + default: log.Log.Info("Container is already deleted") } mutex.Unlock() @@ -1157,7 +1146,6 @@ func UpdateContainerStatus(containerName, nodeName string) proxmoxv1alpha1.Conta containerStatus.Name = container.Name return containerStatus - } func UpdateContainer(container *proxmoxv1alpha1.Container) { @@ -1166,8 +1154,8 @@ func UpdateContainer(container *proxmoxv1alpha1.Container) { nodeName := container.Spec.NodeName var cpuOption proxmox.ContainerOption var memoryOption proxmox.ContainerOption - cpuOption.Name = "cores" - memoryOption.Name = "memory" + cpuOption.Name = virtualMachineCPUOption + memoryOption.Name = virtualMachineMemoryOption ProxmoxContainer := GetContainer(containerName, nodeName) // Check if update is needed if container.Spec.Template.Cores != ProxmoxContainer.CPUs || container.Spec.Template.Memory != int(ProxmoxContainer.MaxMem/1024/1024) { @@ -1182,11 +1170,9 @@ func UpdateContainer(container *proxmoxv1alpha1.Container) { } // Config of container doesn't require restart } - } func RestartContainer(containerName, nodeName string) bool { - // Get container container := GetContainer(containerName, nodeName) // Restart container @@ -1197,14 +1183,13 @@ func RestartContainer(containerName, nodeName string) bool { // Retry method to understand if container is stopped for i := 0; i < 5; i++ { contStatus := GetContainerState(containerName, nodeName) - if contStatus == "running" { + if contStatus == virtualMachineRunningState { return true } else { time.Sleep(5 * time.Second) } } return false - } func FormatUptime(uptime int) string {
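
Note on CreateProxmoxClient above: the new switch gives username/password credentials precedence over an API token and treats missing credentials as fatal. A stdlib-only sketch of that selection logic and of the opt-in insecure HTTP client; the proxmoxConfig struct and helper names are illustrative stand-ins (in particular, the InsecureSkipVerify knob is assumed here, since the condition guarding the custom http.Client sits outside the hunk):

    package sketch

    import (
    	"crypto/tls"
    	"errors"
    	"net/http"
    )

    // Illustrative stand-in for the patch's ProxmoxConfig.
    type proxmoxConfig struct {
    	APIEndpoint        string
    	Username, Password string
    	TokenID, Secret    string
    	InsecureSkipVerify bool // assumed knob; not visible in the hunk
    }

    type authMode int

    const (
    	authNone authMode = iota
    	authPassword
    	authToken
    )

    // pickAuth mirrors the switch in CreateProxmoxClient: password credentials
    // win over an API token, and having neither is a configuration error
    // (the patch panics; a sketch can return an error instead).
    func pickAuth(cfg proxmoxConfig) (authMode, error) {
    	switch {
    	case cfg.Username != "" && cfg.Password != "":
    		return authPassword, nil
    	case cfg.TokenID != "" && cfg.Secret != "":
    		return authToken, nil
    	default:
    		return authNone, errors.New("proxmox credentials are not defined")
    	}
    }

    // httpClientFor builds the *http.Client handed to the Proxmox client.
    // TLS verification is only disabled on explicit opt-in, which is what the
    // //nolint:gosec comment in the patch documents.
    func httpClientFor(cfg proxmoxConfig) *http.Client {
    	if !cfg.InsecureSkipVerify {
    		return http.DefaultClient
    	}
    	return &http.Client{
    		Transport: &http.Transport{
    			TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec
    		},
    	}
    }

The chosen mode then maps onto the go-proxmox options visible in the hunk (WithCredentials, WithAPIToken, WithHTTPClient).
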