Commit

Merge remote-tracking branch 'upstream/master'
yang1666204 committed May 22, 2024
2 parents 0844c59 + 1e38756 commit 6313f26
Showing 46 changed files with 1,139 additions and 214 deletions.
2 changes: 1 addition & 1 deletion api/types/storage.go
@@ -17,6 +17,6 @@ import (
)

type StorageSpec struct {
StorageClass string `json:"storageClass"`
StorageClass string `json:"storageClass,omitempty"`
Size resource.Quantity `json:"size"`
}
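For reference, a minimal sketch of what the new `omitempty` tag changes, assuming a throwaway main package and an arbitrary 50Gi size (the real type lives in api/types/storage.go):

```go
package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// Illustration-only copy of the struct; the real type lives in api/types/storage.go.
type StorageSpec struct {
	StorageClass string            `json:"storageClass,omitempty"`
	Size         resource.Quantity `json:"size"`
}

func main() {
	// With omitempty, an unset storageClass is dropped from the serialized
	// spec instead of appearing as "storageClass":"".
	spec := StorageSpec{Size: resource.MustParse("50Gi")}
	out, _ := json.Marshal(spec)
	fmt.Println(string(out)) // {"size":"50Gi"}
}
```

Dropping the empty field is what lets the defaulting webhook below pick a storage class when none is specified.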
54 changes: 52 additions & 2 deletions api/v1alpha1/obcluster_webhook.go
Expand Up @@ -20,6 +20,7 @@ import (
"context"
"errors"
"fmt"
"sort"

v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
@@ -58,6 +59,7 @@ var _ webhook.Defaulter = &OBCluster{}
// Default implements webhook.Defaulter so a webhook will be registered for the type
func (r *OBCluster) Default() {
// fill default essential parameters, memory_limit, datafile_maxsize and datafile_next
logger := obclusterlog.WithValues("namespace", r.Namespace, "name", r.Name)

parameterMap := make(map[string]apitypes.Parameter, 0)
memorySize, ok := r.Spec.OBServerTemplate.Resource.Memory.AsInt64()
@@ -68,7 +70,7 @@ func (r *OBCluster) Default() {
Value: memoryLimit,
}
} else {
obclusterlog.Error(errors.New("Failed to parse memory size"), "parse observer's memory size failed")
logger.Error(errors.New("Failed to parse memory size"), "parse observer's memory size failed")
}
datafileDiskSize, ok := r.Spec.OBServerTemplate.Storage.DataStorage.Size.AsInt64()
if ok {
@@ -83,7 +85,7 @@ func (r *OBCluster) Default() {
Value: datafileNextSize,
}
} else {
obclusterlog.Error(errors.New("Failed to parse datafile size"), "parse observer's datafile size failed")
logger.Error(errors.New("Failed to parse datafile size"), "parse observer's datafile size failed")
}
parameterMap["enable_syslog_recycle"] = apitypes.Parameter{
Name: "enable_syslog_recycle",
@@ -122,6 +124,41 @@ func (r *OBCluster) Default() {
if r.Spec.ServiceAccount == "" {
r.Spec.ServiceAccount = "default"
}
if r.Spec.OBServerTemplate.Storage.DataStorage.StorageClass == "" ||
r.Spec.OBServerTemplate.Storage.LogStorage.StorageClass == "" ||
r.Spec.OBServerTemplate.Storage.RedoLogStorage.StorageClass == "" {
scList := &storagev1.StorageClassList{}
err := clt.List(context.TODO(), scList)
var defaults []string
if err != nil {
logger.Error(err, "Failed to list storage class")
} else {
sort.SliceStable(scList.Items, func(i, j int) bool {
return scList.Items[i].Name < scList.Items[j].Name
})
for _, sc := range scList.Items {
if sc.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" {
defaults = append(defaults, sc.Name)
}
}
if len(defaults) == 0 {
logger.Error(nil, "No default storage class found")
} else {
if len(defaults) > 1 {
logger.Info("Multiple default storage class found", "storageClasses", defaults, "selected", defaults[0])
}
if r.Spec.OBServerTemplate.Storage.DataStorage.StorageClass == "" {
r.Spec.OBServerTemplate.Storage.DataStorage.StorageClass = defaults[0]
}
if r.Spec.OBServerTemplate.Storage.LogStorage.StorageClass == "" {
r.Spec.OBServerTemplate.Storage.LogStorage.StorageClass = defaults[0]
}
if r.Spec.OBServerTemplate.Storage.RedoLogStorage.StorageClass == "" {
r.Spec.OBServerTemplate.Storage.RedoLogStorage.StorageClass = defaults[0]
}
}
}
}
}
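The defaulting added above boils down to a deterministic pick: sort the classes by name, then take the first one annotated as the cluster default. A standalone sketch of that selection with a hypothetical helper name (the webhook additionally warns when more than one default exists):

```go
package main

import (
	"fmt"
	"sort"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// pickDefaultStorageClass is an illustration-only helper distilling the logic
// above: sort by name for determinism, then return the first class carrying
// the default-class annotation.
func pickDefaultStorageClass(items []storagev1.StorageClass) (string, bool) {
	sort.SliceStable(items, func(i, j int) bool {
		return items[i].Name < items[j].Name
	})
	for _, sc := range items {
		if sc.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" {
			return sc.Name, true
		}
	}
	return "", false
}

func main() {
	classes := []storagev1.StorageClass{
		{ObjectMeta: metav1.ObjectMeta{Name: "standard"}},
		{ObjectMeta: metav1.ObjectMeta{
			Name:        "local-path",
			Annotations: map[string]string{"storageclass.kubernetes.io/is-default-class": "true"},
		}},
	}
	fmt.Println(pickDefaultStorageClass(classes)) // local-path true
}
```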

// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
@@ -248,6 +285,19 @@ func (r *OBCluster) validateMutation() error {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("topology"), r.Spec.Topology, "empty topology is not permitted"))
}

if r.Spec.OBServerTemplate.Storage.DataStorage.StorageClass == "" {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("observer").Child("storage").Child("dataStorage").Child("storageClass"), "", "storageClass is required, default storage class is not found"))
}
if r.Spec.OBServerTemplate.Storage.LogStorage.StorageClass == "" {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("observer").Child("storage").Child("logStorage").Child("storageClass"), "", "storageClass is required, default storage class is not found"))
}
if r.Spec.OBServerTemplate.Storage.RedoLogStorage.StorageClass == "" {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("observer").Child("storage").Child("redoLogStorage").Child("storageClass"), "", "storageClass is required, default storage class is not found"))
}
if len(allErrs) != 0 {
return allErrs.ToAggregate()
}

// Validate storageClasses
storageClassMapping := make(map[string]bool)
storageClassMapping[r.Spec.OBServerTemplate.Storage.DataStorage.StorageClass] = true
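The field.Invalid entries added to validateMutation aggregate into a single admission error. A minimal sketch of the resulting message shape, using only the apimachinery validation helpers (path and wording mirror the dataStorage case above):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	var allErrs field.ErrorList
	allErrs = append(allErrs, field.Invalid(
		field.NewPath("spec").Child("observer").Child("storage").Child("dataStorage").Child("storageClass"),
		"",
		"storageClass is required, default storage class is not found",
	))
	// ToAggregate folds the list into the single error the API server returns to the client.
	fmt.Println(allErrs.ToAggregate())
	// Prints roughly:
	// spec.observer.storage.dataStorage.storageClass: Invalid value: "": storageClass is required, default storage class is not found
}
```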
8 changes: 5 additions & 3 deletions api/v1alpha1/obtenantbackuppolicy_types.go
@@ -47,10 +47,12 @@ type OBTenantBackupPolicyStatus struct {
Status apitypes.BackupPolicyStatusType `json:"status"`
OperationContext *tasktypes.OperationContext `json:"operationContext,omitempty"`

ObservedGeneration int64 `json:"observedGeneration,omitempty"`
NextFull string `json:"nextFull,omitempty"`
NextIncremental string `json:"nextIncremental,omitempty"`
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
NextFull string `json:"nextFull,omitempty"`
NextIncremental string `json:"nextIncremental,omitempty"`
// Deprecated
TenantCR *OBTenant `json:"tenantCR,omitempty"`
TenantName string `json:"tenantName,omitempty"`
TenantInfo *model.OBTenant `json:"tenantInfo,omitempty"`
LatestFullBackupJob *model.OBBackupJob `json:"latestFullBackupJob,omitempty"`
LatestIncrementalJob *model.OBBackupJob `json:"latestIncrementalJob,omitempty"`
48 changes: 37 additions & 11 deletions cmd/operator/main.go
@@ -19,6 +19,8 @@ package main
import (
"context"
"os"
"os/signal"
"syscall"

//+kubebuilder:scaffold:imports

@@ -37,8 +39,11 @@ import (
oceanbaseconst "github.com/oceanbase/ob-operator/internal/const/oceanbase"
"github.com/oceanbase/ob-operator/internal/controller"
"github.com/oceanbase/ob-operator/internal/controller/config"
"github.com/oceanbase/ob-operator/internal/debug"
"github.com/oceanbase/ob-operator/internal/telemetry"
"github.com/oceanbase/ob-operator/pkg/coordinator"
"github.com/oceanbase/ob-operator/pkg/database"
"github.com/oceanbase/ob-operator/pkg/task"
)

var (
@@ -94,6 +99,9 @@ func main() {
coordinator.SetRetryBackoffThreshold(cfg.Time.TaskRetryBackoffThreshold)
coordinator.SetIgnoreDeletionAnnotation(oceanbaseconst.AnnotationsIgnoreDeletion)
coordinator.SetPausedAnnotation(oceanbaseconst.AnnotationsPauseReconciling)
task.SetDebugTask(cfg.Task.Debug)
task.SetTaskPoolSize(cfg.Task.PoolSize)
database.SetLRUCacheSize(cfg.Database.ConnectionLRUCacheSize)

ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))

@@ -123,79 +131,85 @@ func main() {
os.Exit(1)
}

ctx, cancel := context.WithCancel(context.TODO())

if err = (&controller.OBClusterReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor(config.OBClusterControllerName),
Recorder: telemetry.NewRecorder(ctx, mgr.GetEventRecorderFor(config.OBClusterControllerName)),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "Unable to create controller", "controller", "OBCluster")
os.Exit(1)
}
if err = (&controller.OBZoneReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor(config.OBZoneControllerName),
Recorder: telemetry.NewRecorder(ctx, mgr.GetEventRecorderFor(config.OBZoneControllerName)),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "Unable to create controller", "controller", "OBZone")
os.Exit(1)
}
if err = (&controller.OBServerReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor(config.OBServerControllerName),
Recorder: telemetry.NewRecorder(ctx, mgr.GetEventRecorderFor(config.OBServerControllerName)),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "Unable to create controller", "controller", "OBServer")
os.Exit(1)
}
if err = (&controller.OBParameterReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor(config.OBParameterControllerName),
Recorder: telemetry.NewRecorder(ctx, mgr.GetEventRecorderFor(config.OBParameterControllerName)),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "Unable to create controller", "controller", "OBParameter")
os.Exit(1)
}
if err = (&controller.OBTenantReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor(config.OBTenantControllerName),
Recorder: telemetry.NewRecorder(ctx, mgr.GetEventRecorderFor(config.OBTenantControllerName)),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "Unable to create controller", "controller", "OBTenant")
os.Exit(1)
}
if err = (&controller.OBTenantBackupReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Recorder: telemetry.NewRecorder(context.Background(), mgr.GetEventRecorderFor(config.OBTenantBackupControllerName)),
Recorder: telemetry.NewRecorder(ctx, mgr.GetEventRecorderFor(config.OBTenantBackupControllerName)),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "Unable to create controller", "controller", "OBTenantBackup")
os.Exit(1)
}
if err = (&controller.OBTenantRestoreReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor(config.OBTenantRestoreControllerName),
Recorder: telemetry.NewRecorder(ctx, mgr.GetEventRecorderFor(config.OBTenantRestoreControllerName)),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "Unable to create controller", "controller", "OBTenantRestore")
os.Exit(1)
}
if err = (&controller.OBTenantBackupPolicyReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor(config.OBTenantBackupPolicyControllerName),
Recorder: telemetry.NewRecorder(ctx, mgr.GetEventRecorderFor(config.OBTenantBackupPolicyControllerName)),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "Unable to create controller", "controller", "OBTenantBackupPolicy")
os.Exit(1)
}
if err = (&controller.OBTenantOperationReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor(config.OBTenantOperationControllerName),
Recorder: telemetry.NewRecorder(ctx, mgr.GetEventRecorderFor(config.OBTenantOperationControllerName)),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "Unable to create controller", "controller", "OBTenantOperation")
os.Exit(1)
}
if err = (controller.NewOBResourceRescueReconciler(mgr)).SetupWithManager(mgr); err != nil {
if err = (&controller.OBResourceRescueReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Recorder: telemetry.NewRecorder(ctx, mgr.GetEventRecorderFor(config.OBResourceRescueControllerName)),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "Unable to create controller", "controller", "OBResourceRescue")
os.Exit(1)
}
@@ -232,7 +246,19 @@ func main() {
os.Exit(1)
}

rcd := telemetry.NewRecorder(context.Background(), mgr.GetEventRecorderFor("ob-operator"))
signalCh := make(chan os.Signal, 1)
signal.Notify(signalCh, syscall.SIGINT, syscall.SIGTERM)

go func() {
<-signalCh
cancel()
}()

if obcfg.GetConfig().Manager.Debug {
go debug.PollingRuntimeStats(ctx.Done())
}

rcd := telemetry.NewRecorder(ctx, mgr.GetEventRecorderFor("ob-operator"))
rcd.GenerateTelemetryRecord(nil, telemetry.ObjectTypeOperator, "Start", "", "Start ob-operator", nil)

setupLog.WithValues(
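The signal channel plus cancel wired into main above follows the standard graceful-shutdown pattern; as a side note, Go 1.16+ offers an equivalent one-call shorthand (a sketch, not what the operator uses):

```go
package main

import (
	"context"
	"fmt"
	"os/signal"
	"syscall"
)

func main() {
	// signal.NotifyContext cancels ctx on SIGINT/SIGTERM, collapsing the
	// explicit channel-and-goroutine pattern into a single call.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()

	<-ctx.Done() // blocks until a signal arrives
	fmt.Println("shutting down:", ctx.Err())
}
```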
3 changes: 0 additions & 3 deletions config/crd/bases/oceanbase.oceanbase.com_obclusters.yaml
@@ -1696,7 +1696,6 @@ spec:
type: string
required:
- size
- storageClass
type: object
logStorage:
properties:
@@ -1710,7 +1709,6 @@ spec:
type: string
required:
- size
- storageClass
type: object
redoLogStorage:
properties:
@@ -1724,7 +1722,6 @@ spec:
type: string
required:
- size
- storageClass
type: object
required:
- dataStorage
3 changes: 0 additions & 3 deletions config/crd/bases/oceanbase.oceanbase.com_observers.yaml
@@ -2541,7 +2541,6 @@ spec:
type: string
required:
- size
- storageClass
type: object
logStorage:
properties:
@@ -2555,7 +2554,6 @@ spec:
type: string
required:
- size
- storageClass
type: object
redoLogStorage:
properties:
@@ -2569,7 +2567,6 @@ spec:
type: string
required:
- size
- storageClass
type: object
required:
- dataStorage
config/crd/bases/oceanbase.oceanbase.com_obtenantbackuppolicies.yaml
@@ -429,7 +429,7 @@ spec:
status:
type: string
tenantCR:
description: OBTenant is the Schema for the obtenants API
description: Deprecated
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this
@@ -1027,6 +1027,8 @@ spec:
- tenant_role
- tenant_type
type: object
tenantName:
type: string
required:
- status
type: object
3 changes: 0 additions & 3 deletions config/crd/bases/oceanbase.oceanbase.com_obzones.yaml
@@ -1702,7 +1702,6 @@ spec:
type: string
required:
- size
- storageClass
type: object
logStorage:
properties:
@@ -1716,7 +1715,6 @@ spec:
type: string
required:
- size
- storageClass
type: object
redoLogStorage:
properties:
@@ -1730,7 +1728,6 @@ spec:
type: string
required:
- size
- storageClass
type: object
required:
- dataStorage
6 changes: 2 additions & 4 deletions example/backup/backup_policy.yaml
@@ -5,8 +5,7 @@ metadata:
namespace: oceanbase
spec:
obClusterName: "test"
tenantName: "t1"
tenantSecret: "t1-credential"
tenantCRName: "t1"
jobKeepWindow: "1d"
dataClean:
recoveryWindow: "8d"
@@ -20,5 +19,4 @@
type: "NFS"
path: "t1/data_backup_custom_enc"
fullCrontab: "30 0 * * 6"
incrementalCrontab: "30 1 * * *"
encryptionSecret: t1-ro
incrementalCrontab: "30 1 * * *"
3 changes: 1 addition & 2 deletions example/backup/backup_policy_oss.yaml
@@ -5,8 +5,7 @@ metadata:
namespace: oceanbase
spec:
obClusterName: "test"
tenantName: "t1"
tenantSecret: "t1-credential"
tenantCRName: "t1"
jobKeepWindow: "1d"
dataClean:
recoveryWindow: "8d"
2 changes: 1 addition & 1 deletion example/obcluster/obcluster.yaml
@@ -34,7 +34,7 @@ spec:
- zone: zone3
replica: 1
observer:
image: oceanbase/oceanbase-cloud-native:4.2.1.1-101010012023111012
image: oceanbase/oceanbase-cloud-native:4.2.1.6-106000012024042515
resource:
cpu: 2
memory: 10Gi