From 508f81ab56798d01405277addf3a2d1dc18165c2 Mon Sep 17 00:00:00 2001
From: joowon <85204858+joowons@users.noreply.github.com>
Date: Fri, 18 Nov 2022 14:42:18 +0900
Subject: [PATCH] deps: Update to latest version of spider/tumblebug

---
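Notes:

Tumblebug's latest API renames "vmGroup" to "subGroup": the JSON tags in
types.go change accordingly, and a VM created under a subGroup now appears
to get an index-suffixed name, which is why worker machines are registered
as name+"-1". tumblebug.NewNLB() now takes the connection config as a
parameter so CSP-specific health-check defaults can be applied while the
request is built, the NLB-creation failure now reports the POST error as
its cause, and the REST layer rejects etcd modes other than local/external
before provisioning starts.

A minimal sketch of the new NewNLB call (the namespace, MCIS, and
connection names below are made up for illustration):

    nlb := tumblebug.NewNLB("ns-01", "mcis-01", "mcis-01-cp", "config-gcp-seoul")
    // a connection name containing "gcp" switches the health checker to
    // HTTP on port 80, presumably because GCP network LBs health-check
    // over HTTP; NCP VPC and Azure connections instead get Timeout "-1"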
(k8s-init.sh)") + } else if strings.Contains(output, "Your Kubernetes control-plane has initialized successfully") { + joinCmd = getJoinCmd(output) + } else { + return nil, "", errors.New("to initialize control-plane (the output not contains 'Your Kubernetes control-plane has initialized successfully')") + } } ouput, _ := self.leader.executeSSH("sudo cat /etc/kubernetes/admin.conf") diff --git a/src/core/service/cluster.go b/src/core/service/cluster.go index 453be66..d3da7032 100644 --- a/src/core/service/cluster.go +++ b/src/core/service/cluster.go @@ -158,7 +158,7 @@ func CreateCluster(namespace string, req *app.ClusterReq) (*model.Cluster, error for i := 0; i < mcir.vmCount; i++ { name := lang.GenerateNewNodeName(string(app.WORKER), idx+1) mcis.VMs = append(mcis.VMs, mcir.NewVM(namespace, name, mcisName, "", worker.RootDisk.Type, worker.RootDisk.Size)) - provisioner.AppendWorkerNodeMachine(name, mcir.csp, mcir.region, mcir.zone, mcir.credential) + provisioner.AppendWorkerNodeMachine(name+"-1", mcir.csp, mcir.region, mcir.zone, mcir.credential) idx = idx + 1 } } @@ -179,12 +179,11 @@ func CreateCluster(namespace string, req *app.ClusterReq) (*model.Cluster, error logger.Infof("[%s.%s] MCIS creation has been completed.", namespace, clusterName) cluster.CpLeader = mcis.VMs[0].Name - for i := 0; i < len(mcis.VMs); i++ { - if cluster.CpGroup == mcis.VMs[i].VmGroupId { - provisioner.AppendControlPlaneMachine(mcis.VMs[i].Name, mcir.csp, mcir.region, mcir.zone, mcir.credential) + for _, vms := range mcis.VMs { + if cluster.CpGroup == vms.VmGroupId { + provisioner.AppendControlPlaneMachine(vms.Name, mcir.csp, mcir.region, mcir.zone, mcir.credential) } } - //create a NLB (contains control-plane) if cluster.Loadbalancer != app.LB_HAPROXY { NLB := mcir.NewNLB(namespace, mcisName, cluster.CpGroup) @@ -193,7 +192,7 @@ func CreateCluster(namespace string, req *app.ClusterReq) (*model.Cluster, error return nil, errors.New(cluster.Status.Message) } else if !exists { if err := NLB.POST(); err != nil { - cluster.FailReason(model.CreateNLBFailedReason, fmt.Sprintf("Failed to create a NLB. (cause='%v')", err)) + cluster.FailReason(model.CreateNLBFailedReason, fmt.Sprintf("Failed to create a NLB. (cause='%v')", NLB)) return nil, errors.New(cluster.Status.Message) } logger.Infof("[%s] NLB creation has been completed. 
(%s)", req.ControlPlane[0].Connection, NLB.TargetGroup.VmGroupId) diff --git a/src/core/service/mcir.go b/src/core/service/mcir.go index 6054cfc..791eb8b 100644 --- a/src/core/service/mcir.go +++ b/src/core/service/mcir.go @@ -204,8 +204,7 @@ func (self *MCIR) NewVM(namespace string, name string, mcisName string, vmCount } func (self *MCIR) NewNLB(namespace string, mcisName string, groupId string) tumblebug.NLBReq { - nlb := tumblebug.NewNLB(namespace, mcisName, groupId) - nlb.Config = self.config + nlb := tumblebug.NewNLB(namespace, mcisName, groupId, self.config) nlb.VPC = self.vpcName return *nlb } diff --git a/src/core/service/node.go b/src/core/service/node.go index 16bb208..8670433 100644 --- a/src/core/service/node.go +++ b/src/core/service/node.go @@ -113,7 +113,7 @@ func AddNode(namespace string, clusterName string, req *app.NodeReq) (*model.Nod return nil, err } vms = append(vms, vm) - provisioner.AppendWorkerNodeMachine(name, mcir.csp, mcir.region, mcir.zone, mcir.credential) + provisioner.AppendWorkerNodeMachine(vm.Name, mcir.csp, mcir.region, mcir.zone, mcir.credential) idx = idx + 1 } } diff --git a/src/core/tumblebug/mcis.go b/src/core/tumblebug/mcis.go index 7f97fba..b822062 100644 --- a/src/core/tumblebug/mcis.go +++ b/src/core/tumblebug/mcis.go @@ -26,12 +26,13 @@ func NewVM(namespace string, name string, mcisName string) *VM { } /* new instance of NLB */ -func NewNLB(ns string, mcisName string, groupId string) *NLBReq { +func NewNLB(ns string, mcisName string, groupId string, config string) *NLBReq { nlb := &NLBReq{ NLBBase: NLBBase{ - Model: Model{Name: groupId, Namespace: ns}, - Type: "PUBLIC", - Scope: "REGION", Listener: NLBProtocolBase{Protocol: "TCP", Port: "6443"}, + Model: Model{Name: groupId, Namespace: ns}, + Config: config, + Type: "PUBLIC", + Scope: "REGION", Listener: NLBProtocolBase{Protocol: "TCP", Port: "6443"}, TargetGroup: TargetGroup{NLBProtocolBase: NLBProtocolBase{Protocol: "TCP", Port: "6443"}, MCIS: mcisName, VmGroupId: groupId}, }, HealthChecker: HealthCheckReq{ @@ -39,11 +40,9 @@ func NewNLB(ns string, mcisName string, groupId string) *NLBReq { Interval: "default", Threshold: "default", Timeout: "default", }, } - - if strings.Contains(nlb.NLBBase.Config, string(app.CSP_NCPVPC)) { + if strings.Contains(config, string(app.CSP_NCPVPC)) || strings.Contains(config, string(app.CSP_AZURE)) { nlb.HealthChecker.Timeout = "-1" } - if strings.Contains(nlb.NLBBase.Config, string(app.CSP_GCP)) { nlb.HealthChecker.NLBProtocolBase.Protocol = "HTTP" nlb.HealthChecker.NLBProtocolBase.Port = "80" diff --git a/src/core/tumblebug/types.go b/src/core/tumblebug/types.go index f9093ca..dc9b776 100644 --- a/src/core/tumblebug/types.go +++ b/src/core/tumblebug/types.go @@ -164,8 +164,8 @@ type MCIS struct { type VM struct { Model mcisName string //private - VmGroupId string `json:"vmGroupId"` - VmGroupSize string `json:"vmGroupSize"` + VmGroupId string `json:"subGroupId"` + VmGroupSize string `json:"subGroupSize"` Config string `json:"connectionName"` VPC string `json:"vNetId"` Subnet string `json:"subnetId"` @@ -215,7 +215,7 @@ type HealthCheckRes struct { type TargetGroup struct { NLBProtocolBase MCIS string `json:"mcis"` - VmGroupId string `json:"vmGroupId"` + VmGroupId string `json:"subGroupId"` } // NLB diff --git a/src/rest-api/router/cluster.go b/src/rest-api/router/cluster.go index 9fc70f5..bf9fd14 100644 --- a/src/rest-api/router/cluster.go +++ b/src/rest-api/router/cluster.go @@ -161,6 +161,9 @@ func validateCreateClusterReq(clusterReq *app.ClusterReq) error { 
+	if strings.Contains(config, string(app.CSP_NCPVPC)) || strings.Contains(config, string(app.CSP_AZURE)) {
 		nlb.HealthChecker.Timeout = "-1"
 	}
-
 	if strings.Contains(nlb.NLBBase.Config, string(app.CSP_GCP)) {
 		nlb.HealthChecker.NLBProtocolBase.Protocol = "HTTP"
 		nlb.HealthChecker.NLBProtocolBase.Port = "80"
diff --git a/src/core/tumblebug/types.go b/src/core/tumblebug/types.go
index f9093ca..dc9b776 100644
--- a/src/core/tumblebug/types.go
+++ b/src/core/tumblebug/types.go
@@ -164,8 +164,8 @@ type MCIS struct {
 type VM struct {
 	Model
 	mcisName    string //private
-	VmGroupId   string `json:"vmGroupId"`
-	VmGroupSize string `json:"vmGroupSize"`
+	VmGroupId   string `json:"subGroupId"`
+	VmGroupSize string `json:"subGroupSize"`
 	Config      string `json:"connectionName"`
 	VPC         string `json:"vNetId"`
 	Subnet      string `json:"subnetId"`
@@ -215,7 +215,7 @@ type HealthCheckRes struct {
 type TargetGroup struct {
 	NLBProtocolBase
 	MCIS      string `json:"mcis"`
-	VmGroupId string `json:"vmGroupId"`
+	VmGroupId string `json:"subGroupId"`
 }
 
 // NLB
diff --git a/src/rest-api/router/cluster.go b/src/rest-api/router/cluster.go
index 9fc70f5..bf9fd14 100644
--- a/src/rest-api/router/cluster.go
+++ b/src/rest-api/router/cluster.go
@@ -161,6 +161,9 @@ func validateCreateClusterReq(clusterReq *app.ClusterReq) error {
 	if len(clusterReq.Config.Kubernetes.Loadbalancer) != 0 && !(clusterReq.Config.Kubernetes.Loadbalancer == app.LB_HAPROXY || clusterReq.Config.Kubernetes.Loadbalancer == app.LB_NLB) {
 		return errors.New("loadbalancer allows only haproxy or nlb")
 	}
+	if !(clusterReq.Config.Kubernetes.Etcd == app.ETCD_LOCAL || clusterReq.Config.Kubernetes.Etcd == app.ETCD_EXTERNAL) {
+		return errors.New("etcd allows only local or external")
+	}
 	if clusterReq.Config.Kubernetes.Etcd == app.ETCD_EXTERNAL && (clusterReq.ControlPlane[0].Count != 3 && clusterReq.ControlPlane[0].Count != 5 && clusterReq.ControlPlane[0].Count != 7) {
 		return errors.New("External etcd must have 3,5,7 controlPlane count")
 	}