Skip to content

Commit

Permalink
Fix readability of API responses (oceanbase#288)
Browse files — browse the repository at this point in the history
  • Loading branch information
powerfooI authored Apr 8, 2024
1 parent 07d3a54 commit ebe0de5
Show file tree
Hide file tree
Showing 21 changed files with 1,896 additions and 299 deletions.
3 changes: 2 additions & 1 deletion internal/clients/clients.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,5 +26,6 @@ var (
BackupJobClient = client.NewDynamicResourceClient[*v1alpha1.OBTenantBackup](schema.OBTenantBackupGVR, schema.OBTenantBackupKind)
OperationClient = client.NewDynamicResourceClient[*v1alpha1.OBTenantOperation](schema.OBTenantOperationGVR, schema.OBTenantOperationKind)
BackupPolicyClient = client.NewDynamicResourceClient[*v1alpha1.OBTenantBackupPolicy](schema.OBTenantBackupPolicyGVR, schema.OBTenantBackupPolicyKind)
RescueClient = client.NewDynamicResourceClient[*v1alpha1.OBResourceRescue](schema.OBResourceRescueGVR, schema.OBResourceRescueResource)
RescueClient = client.NewDynamicResourceClient[*v1alpha1.OBResourceRescue](schema.OBResourceRescueGVR, schema.OBResourceRescueKind)
RestoreJobClient = client.NewDynamicResourceClient[*v1alpha1.OBTenantRestore](schema.OBTenantRestoreGVR, schema.OBTenantRestoreKind)
)
33 changes: 33 additions & 0 deletions internal/clients/schema/obtenantrestore.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
/*
Copyright (c) 2023 OceanBase
ob-operator is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
*/

package schema

import "k8s.io/apimachinery/pkg/runtime/schema"

const (
// OBTenantRestoreKind is the Kubernetes Kind name of the OBTenantRestore custom resource.
OBTenantRestoreKind = "OBTenantRestore"
// OBTenantRestoreResource is the lowercase plural resource name used in REST API paths.
OBTenantRestoreResource = "obtenantrestores"
)

var (
// OBTenantRestoreGVR is the GroupVersionResource used to address OBTenantRestore
// objects through dynamic clients (Group/Version come from this package's constants).
OBTenantRestoreGVR = schema.GroupVersionResource{
Group: Group,
Version: Version,
Resource: OBTenantRestoreResource,
}
// OBTenantRestoreGVK is the GroupVersionKind used to identify the OBTenantRestore
// type, e.g. for scheme registration and typed object construction.
OBTenantRestoreGVK = schema.GroupVersionKind{
Group: Group,
Version: Version,
Kind: OBTenantRestoreKind,
}
)
105 changes: 26 additions & 79 deletions internal/dashboard/business/oceanbase/obcluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,6 @@ import (
clusterstatus "github.com/oceanbase/ob-operator/internal/const/status/obcluster"
"github.com/oceanbase/ob-operator/internal/dashboard/business/common"
"github.com/oceanbase/ob-operator/internal/dashboard/business/constant"
"github.com/oceanbase/ob-operator/internal/dashboard/business/k8s"
modelcommon "github.com/oceanbase/ob-operator/internal/dashboard/model/common"
"github.com/oceanbase/ob-operator/internal/dashboard/model/param"
"github.com/oceanbase/ob-operator/internal/dashboard/model/response"
Expand Down Expand Up @@ -72,6 +71,19 @@ func buildOBClusterOverview(ctx context.Context, obcluster *v1alpha1.OBCluster)
if err != nil {
return nil, errors.Wrap(err, "failed to build obcluster topology")
}
clusterMode := modelcommon.ClusterModeNormal
annotations := obcluster.GetAnnotations()
if annotations != nil {
if mode, ok := annotations[oceanbaseconst.AnnotationsMode]; ok {
switch mode {
case oceanbaseconst.ModeStandalone:
clusterMode = modelcommon.ClusterModeStandalone
case oceanbaseconst.ModeService:
clusterMode = modelcommon.ClusterModeService
default:
}
}
}
return &response.OBClusterOverview{
UID: string(obcluster.UID),
Namespace: obcluster.Namespace,
Expand All @@ -83,6 +95,7 @@ func buildOBClusterOverview(ctx context.Context, obcluster *v1alpha1.OBCluster)
CreateTime: obcluster.ObjectMeta.CreationTimestamp.Unix(),
Image: obcluster.Status.Image,
Topology: topology,
Mode: clusterMode,
}, nil
}

Expand Down Expand Up @@ -122,22 +135,6 @@ func buildOBClusterResponse(ctx context.Context, obcluster *v1alpha1.OBCluster)
respCluster.BackupVolume.Address = obcluster.Spec.BackupVolume.Volume.NFS.Server
respCluster.BackupVolume.Path = obcluster.Spec.BackupVolume.Volume.NFS.Path
}
labels := obcluster.GetLabels()
if labels != nil {
if mode, ok := labels[oceanbaseconst.AnnotationsMode]; ok {
switch mode {
case oceanbaseconst.ModeStandalone:
respCluster.Mode = modelcommon.ClusterModeStandalone
case oceanbaseconst.ModeService:
respCluster.Mode = modelcommon.ClusterModeService
default:
respCluster.Mode = modelcommon.ClusterModeNormal
}
}
}
if respCluster.Mode == "" {
respCluster.Mode = modelcommon.ClusterModeNormal
}
if obcluster.Spec.OBServerTemplate != nil {
respCluster.OBClusterExtra.Resource = response.ResourceSpecRender{
Cpu: obcluster.Spec.OBServerTemplate.Resource.Cpu.Value(),
Expand Down Expand Up @@ -467,9 +464,9 @@ func generateOBClusterInstance(param *param.CreateOBClusterParam) *v1alpha1.OBCl
topology := buildOBClusterTopology(param.Topology)
obcluster := &v1alpha1.OBCluster{
ObjectMeta: metav1.ObjectMeta{
Namespace: param.Namespace,
Name: param.Name,
Labels: map[string]string{},
Namespace: param.Namespace,
Name: param.Name,
Annotations: map[string]string{},
},
Spec: v1alpha1.OBClusterSpec{
ClusterName: param.ClusterName,
Expand All @@ -484,54 +481,17 @@ func generateOBClusterInstance(param *param.CreateOBClusterParam) *v1alpha1.OBCl
}
switch param.Mode {
case modelcommon.ClusterModeStandalone:
obcluster.Labels[oceanbaseconst.AnnotationsMode] = oceanbaseconst.ModeStandalone
obcluster.Annotations[oceanbaseconst.AnnotationsMode] = oceanbaseconst.ModeStandalone
case modelcommon.ClusterModeService:
obcluster.Labels[oceanbaseconst.AnnotationsMode] = oceanbaseconst.ModeService
obcluster.Annotations[oceanbaseconst.AnnotationsMode] = oceanbaseconst.ModeService
default:
}
return obcluster
}

func JudgeResourceEnoughForOBCluster(ctx context.Context, obcluster *v1alpha1.OBCluster) error {
nodes, err := k8s.ListNodeResources(ctx)
if err != nil {
return oberr.Wrap(err, oberr.ErrInternal, "List resource of nodes")
}
requiredMem := obcluster.Spec.OBServerTemplate.Resource.Memory.AsApproximateFloat64() / constant.GB
requiredCpu := obcluster.Spec.OBServerTemplate.Resource.Cpu.AsApproximateFloat64()

// Judge whether the remain resource is enough for monitor
if obcluster.Spec.MonitorTemplate != nil {
requiredMem += obcluster.Spec.MonitorTemplate.Resource.Memory.AsApproximateFloat64() / constant.GB
requiredCpu += obcluster.Spec.MonitorTemplate.Resource.Cpu.AsApproximateFloat64()
}
unmetCount := 0
for _, zone := range obcluster.Spec.Topology {
unmetCount += zone.Replica
}
for _, node := range nodes {
if unmetCount == 0 {
break
}
for node.MemoryFree > requiredMem && node.CpuFree > requiredCpu && unmetCount > 0 {
unmetCount--
node.MemoryFree -= requiredMem
node.CpuFree -= requiredCpu
}
}
if unmetCount > 0 {
return oberr.NewBadRequest("Resource not enough in k8s cluster")
}
return nil
}

func CreateOBCluster(ctx context.Context, param *param.CreateOBClusterParam) (*response.OBCluster, error) {
obcluster := generateOBClusterInstance(param)
err := JudgeResourceEnoughForOBCluster(ctx, obcluster)
if err != nil {
return nil, err
}
err = clients.CreateSecretsForOBCluster(ctx, obcluster, param.RootPassword)
err := clients.CreateSecretsForOBCluster(ctx, obcluster, param.RootPassword)
if err != nil {
return nil, errors.Wrap(err, "Create secrets for obcluster")
}
Expand Down Expand Up @@ -569,13 +529,11 @@ func ScaleOBServer(ctx context.Context, obzoneIdentity *param.OBZoneIdentity, sc
}
found := false
replicaChanged := false
var scaleDelta int
for idx, obzone := range obcluster.Spec.Topology {
if obzone.Zone == obzoneIdentity.OBZoneName {
found = true
if obzone.Replica != scaleParam.Replicas {
replicaChanged = true
scaleDelta = scaleParam.Replicas - obzone.Replica
logger.Infof("Scale obzone %s from %d to %d", obzone.Zone, obzone.Replica, scaleParam.Replicas)
obcluster.Spec.Topology[idx].Replica = scaleParam.Replicas
}
Expand All @@ -587,13 +545,6 @@ func ScaleOBServer(ctx context.Context, obzoneIdentity *param.OBZoneIdentity, sc
if !replicaChanged {
return nil, errors.Errorf("obzone %s replica already satisfied in obcluster %s %s", obzoneIdentity.OBZoneName, obzoneIdentity.Namespace, obzoneIdentity.Name)
}
// Judge whether the resource is enough for obcluster if the replica increases
if scaleDelta > 0 {
err := JudgeResourceEnoughForOBCluster(ctx, obcluster)
if err != nil {
return nil, err
}
}
cluster, err := clients.UpdateOBCluster(ctx, obcluster)
if err != nil {
return nil, oberr.NewInternal(err.Error())
Expand Down Expand Up @@ -650,10 +601,6 @@ func AddOBZone(ctx context.Context, obclusterIdentity *param.K8sObjectIdentity,
NodeSelector: common.KVsToMap(zone.NodeSelector),
Replica: zone.Replicas,
})
err = JudgeResourceEnoughForOBCluster(ctx, obcluster)
if err != nil {
return nil, err
}
cluster, err := clients.UpdateOBCluster(ctx, obcluster)
if err != nil {
return nil, oberr.NewInternal(err.Error())
Expand All @@ -674,8 +621,8 @@ func DeleteOBCluster(ctx context.Context, obclusterIdentity *param.K8sObjectIden
return err == nil, err
}

func GetOBClusterStatistic(ctx context.Context) ([]response.OBClusterStastistic, error) {
statisticResult := make([]response.OBClusterStastistic, 0)
func GetOBClusterStatistic(ctx context.Context) ([]response.OBClusterStatistic, error) {
statisticResult := make([]response.OBClusterStatistic, 0)
obclusterList, err := clients.ListAllOBClusters(ctx)
if err != nil {
return statisticResult, errors.Wrap(err, "failed to list obclusters")
Expand All @@ -699,16 +646,16 @@ func GetOBClusterStatistic(ctx context.Context) ([]response.OBClusterStastistic,
}
}
statisticResult = append(statisticResult,
response.OBClusterStastistic{
response.OBClusterStatistic{
Status: StatusRunning,
Count: runningCount,
}, response.OBClusterStastistic{
}, response.OBClusterStatistic{
Status: StatusDeleting,
Count: deletingCount,
}, response.OBClusterStastistic{
}, response.OBClusterStatistic{
Status: StatusOperating,
Count: operatingCount,
}, response.OBClusterStastistic{
}, response.OBClusterStatistic{
Status: StatusFailed,
Count: failedCount,
})
Expand Down
10 changes: 5 additions & 5 deletions internal/dashboard/business/oceanbase/obcluster_usage.go
Original file line number Diff line number Diff line change
Expand Up @@ -87,11 +87,11 @@ func GetOBClusterUsages(ctx context.Context, nn *param.K8sObjectIdentity) (*resp
return essentials, nil
}

func getServerUsages(gvservers []model.GVOBServer) ([]response.OBServerAvailableResource, map[string]*response.OBZoneAvaiableResource) {
zoneMapping := make(map[string]*response.OBZoneAvaiableResource)
func getServerUsages(gvservers []model.GVOBServer) ([]response.OBServerAvailableResource, map[string]*response.OBZoneAvailableResource) {
zoneMapping := make(map[string]*response.OBZoneAvailableResource)
serverUsages := make([]response.OBServerAvailableResource, 0, len(gvservers))
for _, gvserver := range gvservers {
zoneResource := &response.OBZoneAvaiableResource{
zoneResource := &response.OBZoneAvailableResource{
ServerCount: 1,
OBZone: gvserver.Zone,
AvailableCPU: max(gvserver.CPUCapacity-gvserver.CPUAssigned, 0),
Expand All @@ -100,8 +100,8 @@ func getServerUsages(gvservers []model.GVOBServer) ([]response.OBServerAvailable
AvailableDataDisk: max(gvserver.DataDiskCapacity-gvserver.DataDiskAllocated, 0),
}
serverUsage := response.OBServerAvailableResource{
OBServerIP: gvserver.ServerIP,
OBZoneAvaiableResource: *zoneResource,
OBServerIP: gvserver.ServerIP,
OBZoneAvailableResource: *zoneResource,
}
if _, ok := zoneMapping[gvserver.Zone]; !ok {
zoneMapping[gvserver.Zone] = zoneResource
Expand Down
Loading

0 comments on commit ebe0de5

Please sign in to comment.