diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 8eb4c2ac..ce5fcf31 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -59,7 +59,7 @@ jobs: uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: - go-version: '1.21' + go-version: '1.22' - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index d21043aa..4f5ad0cd 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -25,7 +25,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v5 with: - go-version: 1.21 + go-version: 1.22 - name: Build for all platforms run: | make build-all diff --git a/Dockerfile b/Dockerfile index c73ab5b6..469ade00 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # mysql backup image -FROM golang:1.21.13-alpine3.20 AS build +FROM golang:1.22.9-alpine3.20 AS build COPY . /src/mysql-backup WORKDIR /src/mysql-backup diff --git a/cmd/common_test.go b/cmd/common_test.go index 05790e95..341c4897 100644 --- a/cmd/common_test.go +++ b/cmd/common_test.go @@ -1,6 +1,7 @@ package cmd import ( + "context" "reflect" "github.com/databacker/mysql-backup/pkg/core" @@ -19,17 +20,17 @@ func newMockExecs() *mockExecs { return m } -func (m *mockExecs) Dump(opts core.DumpOptions) (core.DumpResults, error) { +func (m *mockExecs) Dump(ctx context.Context, opts core.DumpOptions) (core.DumpResults, error) { args := m.Called(opts) return core.DumpResults{}, args.Error(0) } -func (m *mockExecs) Restore(opts core.RestoreOptions) error { +func (m *mockExecs) Restore(ctx context.Context, opts core.RestoreOptions) error { args := m.Called(opts) return args.Error(0) } -func (m *mockExecs) Prune(opts core.PruneOptions) error { +func (m *mockExecs) Prune(ctx context.Context, opts core.PruneOptions) error { args := m.Called(opts) return args.Error(0) } diff --git a/cmd/dump.go b/cmd/dump.go index cc34fe1a..24205abe 100644 --- a/cmd/dump.go +++ 
b/cmd/dump.go @@ -1,16 +1,20 @@ package cmd import ( + "context" "fmt" "strings" "github.com/google/uuid" "github.com/spf13/cobra" "github.com/spf13/viper" + "go.opentelemetry.io/otel/codes" + "github.com/databacker/api/go/api" "github.com/databacker/mysql-backup/pkg/compression" "github.com/databacker/mysql-backup/pkg/core" "github.com/databacker/mysql-backup/pkg/storage" + "github.com/databacker/mysql-backup/pkg/util" ) const ( @@ -38,83 +42,75 @@ func dumpCmd(passedExecs execs, cmdConfig *cmdConfiguration) (*cobra.Command, er bindFlags(cmd, v) }, RunE: func(cmd *cobra.Command, args []string) error { + ctx := context.Background() + // this is the tracer that we will use throughout the entire run + tracer := getTracer("dump") + ctx = util.ContextWithTracer(ctx, tracer) + _, startupSpan := tracer.Start(ctx, "startup") cmdConfig.logger.Debug("starting dump") + defer func() { + tp := getTracerProvider() + tp.ForceFlush(ctx) + _ = tp.Shutdown(ctx) + }() // check targets targetURLs := v.GetStringSlice("target") var ( - targets []storage.Storage - err error + dumpConfig *api.Dump + scriptsConfig *api.Scripts ) - if len(targetURLs) > 0 { - for _, t := range targetURLs { - store, err := storage.ParseURL(t, cmdConfig.creds) - if err != nil { - return fmt.Errorf("invalid target url: %v", err) - } - targets = append(targets, store) - } - } else { - // try the config file - if cmdConfig.configuration != nil { - // parse the target objects, then the ones listed for the backup - targetStructures := cmdConfig.configuration.Targets - dumpTargets := cmdConfig.configuration.Dump.Targets - for _, t := range dumpTargets { - var store storage.Storage - if target, ok := targetStructures[t]; !ok { - return fmt.Errorf("target %s from dump configuration not found in targets configuration", t) - } else { - store, err = target.Storage.Storage() - if err != nil { - return fmt.Errorf("target %s from dump configuration has invalid URL: %v", t, err) - } - } - targets = append(targets, store) - 
} + if cmdConfig.configuration != nil { + dumpConfig = cmdConfig.configuration.Dump + if dumpConfig != nil { + scriptsConfig = dumpConfig.Scripts } } + targets, err := parseTargets(targetURLs, cmdConfig) + if err != nil { + return fmt.Errorf("error parsing targets: %v", err) + } if len(targets) == 0 { return fmt.Errorf("no targets specified") } safechars := v.GetBool("safechars") - if !v.IsSet("safechars") && cmdConfig.configuration != nil { - safechars = cmdConfig.configuration.Dump.Safechars + if !v.IsSet("safechars") && dumpConfig != nil && dumpConfig.Safechars != nil { + safechars = *dumpConfig.Safechars } include := v.GetStringSlice("include") - if len(include) == 0 && cmdConfig.configuration != nil { - include = cmdConfig.configuration.Dump.Include + if len(include) == 0 && dumpConfig != nil && dumpConfig.Include != nil { + include = *dumpConfig.Include } // make this slice nil if it's empty, so it is consistent; used mainly for test consistency if len(include) == 0 { include = nil } exclude := v.GetStringSlice("exclude") - if len(exclude) == 0 && cmdConfig.configuration != nil { - exclude = cmdConfig.configuration.Dump.Exclude + if len(exclude) == 0 && dumpConfig != nil && dumpConfig.Exclude != nil { + exclude = *dumpConfig.Exclude } // make this slice nil if it's empty, so it is consistent; used mainly for test consistency if len(exclude) == 0 { exclude = nil } preBackupScripts := v.GetString("pre-backup-scripts") - if preBackupScripts == "" && cmdConfig.configuration != nil { - preBackupScripts = cmdConfig.configuration.Dump.Scripts.PreBackup + if preBackupScripts == "" && scriptsConfig != nil && scriptsConfig.PreBackup != nil { + preBackupScripts = *scriptsConfig.PreBackup } postBackupScripts := v.GetString("post-backup-scripts") - if postBackupScripts == "" && cmdConfig.configuration != nil { - postBackupScripts = cmdConfig.configuration.Dump.Scripts.PostBackup + if postBackupScripts == "" && scriptsConfig != nil && scriptsConfig.PostBackup != nil { + 
postBackupScripts = *scriptsConfig.PostBackup } noDatabaseName := v.GetBool("no-database-name") - if !v.IsSet("no-database-name") && cmdConfig.configuration != nil { - noDatabaseName = cmdConfig.configuration.Dump.NoDatabaseName + if !v.IsSet("no-database-name") && dumpConfig != nil && dumpConfig.NoDatabaseName != nil { + noDatabaseName = *dumpConfig.NoDatabaseName } compact := v.GetBool("compact") - if !v.IsSet("compact") && cmdConfig.configuration != nil { - compact = cmdConfig.configuration.Dump.Compact + if !v.IsSet("compact") && dumpConfig != nil && dumpConfig.Compact != nil { + compact = *dumpConfig.Compact } maxAllowedPacket := v.GetInt("max-allowed-packet") - if !v.IsSet("max-allowed-packet") && cmdConfig.configuration != nil && cmdConfig.configuration.Dump.MaxAllowedPacket != 0 { - maxAllowedPacket = cmdConfig.configuration.Dump.MaxAllowedPacket + if !v.IsSet("max-allowed-packet") && dumpConfig != nil && dumpConfig.MaxAllowedPacket != nil && *dumpConfig.MaxAllowedPacket != 0 { + maxAllowedPacket = *dumpConfig.MaxAllowedPacket } // compression algorithm: check config, then CLI/env var overrides @@ -122,8 +118,8 @@ func dumpCmd(passedExecs execs, cmdConfig *cmdConfiguration) (*cobra.Command, er compressionAlgo string compressor compression.Compressor ) - if cmdConfig.configuration != nil { - compressionAlgo = cmdConfig.configuration.Dump.Compression + if dumpConfig != nil && dumpConfig.Compression != nil { + compressionAlgo = *dumpConfig.Compression } compressionVar := v.GetString("compression") if compressionVar != "" { @@ -138,41 +134,21 @@ func dumpCmd(passedExecs execs, cmdConfig *cmdConfiguration) (*cobra.Command, er // retention, if enabled retention := v.GetString("retention") - if retention == "" && cmdConfig.configuration != nil { - retention = cmdConfig.configuration.Prune.Retention + if retention == "" && cmdConfig.configuration != nil && cmdConfig.configuration.Prune.Retention != nil { + retention = 
*cmdConfig.configuration.Prune.Retention } filenamePattern := v.GetString("filename-pattern") - if !v.IsSet("filename-pattern") && cmdConfig.configuration != nil { - filenamePattern = cmdConfig.configuration.Dump.FilenamePattern + if !v.IsSet("filename-pattern") && dumpConfig != nil && dumpConfig.FilenamePattern != nil { + filenamePattern = *dumpConfig.FilenamePattern } if filenamePattern == "" { filenamePattern = defaultFilenamePattern } // timer options - once := v.GetBool("once") - if !v.IsSet("once") && cmdConfig.configuration != nil { - once = cmdConfig.configuration.Dump.Schedule.Once - } - cron := v.GetString("cron") - if cron == "" && cmdConfig.configuration != nil { - cron = cmdConfig.configuration.Dump.Schedule.Cron - } - begin := v.GetString("begin") - if begin == "" && cmdConfig.configuration != nil { - begin = cmdConfig.configuration.Dump.Schedule.Begin - } - frequency := v.GetInt("frequency") - if frequency == 0 && cmdConfig.configuration != nil { - frequency = cmdConfig.configuration.Dump.Schedule.Frequency - } - timerOpts := core.TimerOptions{ - Once: once, - Cron: cron, - Begin: begin, - Frequency: frequency, - } + timerOpts := parseTimerOptions(v, cmdConfig.configuration) + var executor execs executor = &core.Executor{} if passedExecs != nil { @@ -182,7 +158,14 @@ func dumpCmd(passedExecs execs, cmdConfig *cmdConfiguration) (*cobra.Command, er // at this point, any errors should not have usage cmd.SilenceUsage = true + + // done with the startup + startupSpan.End() + if err := executor.Timer(timerOpts, func() error { + // start a new span for the dump, should not be a child of the startup one + tracerCtx, dumpSpan := tracer.Start(ctx, "run") + defer dumpSpan.End() uid := uuid.New() dumpOpts := core.DumpOptions{ Targets: targets, @@ -199,15 +182,18 @@ func dumpCmd(passedExecs execs, cmdConfig *cmdConfiguration) (*cobra.Command, er Run: uid, FilenamePattern: filenamePattern, } - _, err := executor.Dump(dumpOpts) + _, err := executor.Dump(tracerCtx, 
dumpOpts) if err != nil { + dumpSpan.SetStatus(codes.Error, fmt.Sprintf("error running dump: %v", err)) return fmt.Errorf("error running dump: %w", err) } if retention != "" { - if err := executor.Prune(core.PruneOptions{Targets: targets, Retention: retention}); err != nil { + if err := executor.Prune(tracerCtx, core.PruneOptions{Targets: targets, Retention: retention}); err != nil { + dumpSpan.SetStatus(codes.Error, fmt.Sprintf("error running prune: %v", err)) return fmt.Errorf("error running prune: %w", err) } } + dumpSpan.SetStatus(codes.Ok, "dump complete") return nil }); err != nil { return fmt.Errorf("error running command: %w", err) @@ -278,3 +264,80 @@ S3: If it is a URL of the format s3://bucketname/path then it will connect via S return cmd, nil } + +func parseTimerOptions(v *viper.Viper, config *api.ConfigSpec) core.TimerOptions { + var scheduleConfig *api.Schedule + if config != nil { + dumpConfig := config.Dump + if dumpConfig != nil { + scheduleConfig = dumpConfig.Schedule + } + } + once := v.GetBool("once") + if !v.IsSet("once") && scheduleConfig != nil && scheduleConfig.Once != nil { + once = *scheduleConfig.Once + } + cron := v.GetString("cron") + if cron == "" && scheduleConfig != nil && scheduleConfig.Cron != nil { + cron = *scheduleConfig.Cron + } + begin := v.GetString("begin") + if begin == "" && scheduleConfig != nil && scheduleConfig.Begin != nil { + begin = fmt.Sprintf("%d", *scheduleConfig.Begin) + } + frequency := v.GetInt("frequency") + if frequency == 0 && scheduleConfig != nil && scheduleConfig.Frequency != nil { + frequency = *scheduleConfig.Frequency + } + return core.TimerOptions{ + Once: once, + Cron: cron, + Begin: begin, + Frequency: frequency, + } + +} + +func parseTargets(urls []string, cmdConfig *cmdConfiguration) ([]storage.Storage, error) { + var targets []storage.Storage + if len(urls) > 0 { + for _, t := range urls { + store, err := storage.ParseURL(t, cmdConfig.creds) + if err != nil { + return nil, fmt.Errorf("invalid 
target url: %v", err) + } + targets = append(targets, store) + } + } else { + // try the config file + if cmdConfig.configuration != nil { + // parse the target objects, then the ones listed for the backup + var ( + targetStructures map[string]api.Target + dumpTargets []string + ) + if cmdConfig.configuration.Targets != nil { + targetStructures = *cmdConfig.configuration.Targets + } + if cmdConfig.configuration != nil && cmdConfig.configuration.Dump != nil && cmdConfig.configuration.Dump.Targets != nil { + dumpTargets = *cmdConfig.configuration.Dump.Targets + } + for _, t := range dumpTargets { + var ( + store storage.Storage + err error + ) + if target, ok := targetStructures[t]; !ok { + return nil, fmt.Errorf("target %s from dump configuration not found in targets configuration", t) + } else { + store, err = storage.FromTarget(target) + if err != nil { + return nil, fmt.Errorf("target %s from dump configuration has invalid URL: %v", t, err) + } + } + targets = append(targets, store) + } + } + } + return targets, nil +} diff --git a/cmd/prune.go b/cmd/prune.go index 1eabe5a8..2a78b31c 100644 --- a/cmd/prune.go +++ b/cmd/prune.go @@ -1,6 +1,7 @@ package cmd import ( + "context" "fmt" "strings" @@ -9,7 +10,7 @@ import ( "github.com/spf13/viper" "github.com/databacker/mysql-backup/pkg/core" - "github.com/databacker/mysql-backup/pkg/storage" + "github.com/databacker/mysql-backup/pkg/util" ) func pruneCmd(passedExecs execs, cmdConfig *cmdConfiguration) (*cobra.Command, error) { @@ -32,69 +33,33 @@ func pruneCmd(passedExecs execs, cmdConfig *cmdConfiguration) (*cobra.Command, e }, RunE: func(cmd *cobra.Command, args []string) error { cmdConfig.logger.Debug("starting prune") + ctx := context.Background() + // this is the tracer that we will use throughout the entire run + defer func() { + tp := getTracerProvider() + tp.ForceFlush(ctx) + _ = tp.Shutdown(ctx) + }() + tracer := getTracer("prune") + ctx = util.ContextWithTracer(ctx, tracer) + _, startupSpan := 
tracer.Start(ctx, "startup") retention := v.GetString("retention") targetURLs := v.GetStringSlice("target") - var ( - targets []storage.Storage - err error - ) - - if len(targetURLs) > 0 { - for _, t := range targetURLs { - store, err := storage.ParseURL(t, cmdConfig.creds) - if err != nil { - return fmt.Errorf("invalid target url: %v", err) - } - targets = append(targets, store) - } - } else { - // try the config file - if cmdConfig.configuration != nil { - // parse the target objects, then the ones listed for the backup - targetStructures := cmdConfig.configuration.Targets - dumpTargets := cmdConfig.configuration.Dump.Targets - for _, t := range dumpTargets { - var store storage.Storage - if target, ok := targetStructures[t]; !ok { - return fmt.Errorf("target %s from dump configuration not found in targets configuration", t) - } else { - store, err = target.Storage.Storage() - if err != nil { - return fmt.Errorf("target %s from dump configuration has invalid URL: %v", t, err) - } - } - targets = append(targets, store) - } - } + + targets, err := parseTargets(targetURLs, cmdConfig) + if err != nil { + return fmt.Errorf("error parsing targets: %v", err) + } + if len(targets) == 0 { + return fmt.Errorf("no targets specified") } - if retention == "" && cmdConfig.configuration != nil { - retention = cmdConfig.configuration.Prune.Retention + if retention == "" && cmdConfig.configuration != nil && cmdConfig.configuration.Prune.Retention != nil { + retention = *cmdConfig.configuration.Prune.Retention } // timer options - once := v.GetBool("once") - if !v.IsSet("once") && cmdConfig.configuration != nil { - once = cmdConfig.configuration.Dump.Schedule.Once - } - cron := v.GetString("cron") - if cron == "" && cmdConfig.configuration != nil { - cron = cmdConfig.configuration.Dump.Schedule.Cron - } - begin := v.GetString("begin") - if begin == "" && cmdConfig.configuration != nil { - begin = cmdConfig.configuration.Dump.Schedule.Begin - } - frequency := v.GetInt("frequency") 
- if frequency == 0 && cmdConfig.configuration != nil { - frequency = cmdConfig.configuration.Dump.Schedule.Frequency - } - timerOpts := core.TimerOptions{ - Once: once, - Cron: cron, - Begin: begin, - Frequency: frequency, - } + timerOpts := parseTimerOptions(v, cmdConfig.configuration) var executor execs executor = &core.Executor{} @@ -102,10 +67,12 @@ func pruneCmd(passedExecs execs, cmdConfig *cmdConfiguration) (*cobra.Command, e executor = passedExecs } executor.SetLogger(cmdConfig.logger) + // done with the startup + startupSpan.End() if err := executor.Timer(timerOpts, func() error { uid := uuid.New() - return executor.Prune(core.PruneOptions{Targets: targets, Retention: retention, Run: uid}) + return executor.Prune(ctx, core.PruneOptions{Targets: targets, Retention: retention, Run: uid}) }); err != nil { return fmt.Errorf("error running prune: %w", err) } diff --git a/cmd/restore.go b/cmd/restore.go index cdaf9b0e..54a46897 100644 --- a/cmd/restore.go +++ b/cmd/restore.go @@ -1,6 +1,7 @@ package cmd import ( + "context" "fmt" "strings" @@ -8,6 +9,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" + "github.com/databacker/api/go/api" "github.com/databacker/mysql-backup/pkg/compression" "github.com/databacker/mysql-backup/pkg/core" "github.com/databacker/mysql-backup/pkg/storage" @@ -29,6 +31,15 @@ func restoreCmd(passedExecs execs, cmdConfig *cmdConfiguration) (*cobra.Command, Args: cobra.MinimumNArgs(1), RunE: func(cmd *cobra.Command, args []string) error { cmdConfig.logger.Debug("starting restore") + ctx := context.Background() + tracer := getTracer("restore") + defer func() { + tp := getTracerProvider() + tp.ForceFlush(ctx) + _ = tp.Shutdown(ctx) + }() + ctx = util.ContextWithTracer(ctx, tracer) + _, startupSpan := tracer.Start(ctx, "startup") targetFile := args[0] target := v.GetString("target") // get databases namesand mappings @@ -50,8 +61,8 @@ func restoreCmd(passedExecs execs, cmdConfig *cmdConfiguration) (*cobra.Command, compressor 
compression.Compressor err error ) - if cmdConfig.configuration != nil { - compressionAlgo = cmdConfig.configuration.Dump.Compression + if cmdConfig.configuration != nil && cmdConfig.configuration.Dump != nil && cmdConfig.configuration.Dump.Compression != nil { + compressionAlgo = *cmdConfig.configuration.Dump.Compression } compressionVar := v.GetString("compression") if compressionVar != "" { @@ -79,10 +90,15 @@ func restoreCmd(passedExecs execs, cmdConfig *cmdConfiguration) (*cobra.Command, if cmdConfig.configuration == nil { return fmt.Errorf("no configuration file found") } - if target, ok := cmdConfig.configuration.Targets[targetName]; !ok { + var targetStructures map[string]api.Target + if cmdConfig.configuration.Targets != nil { + targetStructures = *cmdConfig.configuration.Targets + } + + if target, ok := targetStructures[targetName]; !ok { return fmt.Errorf("target %s not found in configuration", targetName) } else { - if store, err = target.Storage.Storage(); err != nil { + if store, err = storage.FromTarget(target); err != nil { return fmt.Errorf("error creating storage for target %s: %v", targetName, err) } } @@ -112,7 +128,8 @@ func restoreCmd(passedExecs execs, cmdConfig *cmdConfiguration) (*cobra.Command, DBConn: cmdConfig.dbconn, Run: uid, } - if err := executor.Restore(restoreOpts); err != nil { + startupSpan.End() + if err := executor.Restore(ctx, restoreOpts); err != nil { return fmt.Errorf("error restoring: %v", err) } passedExecs.GetLogger().Info("Restore complete") diff --git a/cmd/root.go b/cmd/root.go index f21350bf..c11dbe35 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -1,27 +1,35 @@ package cmd import ( + "context" "fmt" + "net/url" "os" "strings" - "github.com/databacker/mysql-backup/pkg/config" - "github.com/databacker/mysql-backup/pkg/core" - "github.com/databacker/mysql-backup/pkg/database" - databacklog "github.com/databacker/mysql-backup/pkg/log" - "github.com/databacker/mysql-backup/pkg/storage/credentials" + "github.com/databacker/api/go/api" log 
"github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/spf13/viper" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + + "github.com/databacker/mysql-backup/pkg/config" + "github.com/databacker/mysql-backup/pkg/core" + "github.com/databacker/mysql-backup/pkg/database" + "github.com/databacker/mysql-backup/pkg/remote" + "github.com/databacker/mysql-backup/pkg/storage/credentials" ) type execs interface { SetLogger(logger *log.Logger) GetLogger() *log.Logger - Dump(opts core.DumpOptions) (core.DumpResults, error) - Restore(opts core.RestoreOptions) error - Prune(opts core.PruneOptions) error + Dump(ctx context.Context, opts core.DumpOptions) (core.DumpResults, error) + Restore(ctx context.Context, opts core.RestoreOptions) error + Prune(ctx context.Context, opts core.PruneOptions) error Timer(timerOpts core.TimerOptions, cmd func() error) error } @@ -32,7 +40,7 @@ var subCommands = []subCommand{dumpCmd, restoreCmd, pruneCmd} type cmdConfiguration struct { dbconn database.Connection creds credentials.Creds - configuration *config.ConfigSpec + configuration *api.ConfigSpec logger *log.Logger } @@ -45,6 +53,7 @@ func rootCmd(execs execs) (*cobra.Command, error) { v *viper.Viper cmd *cobra.Command cmdConfig = &cmdConfiguration{} + ctx = context.Background() ) cmd = &cobra.Command{ Use: "mysql-backup", @@ -79,7 +88,10 @@ func rootCmd(execs execs) (*cobra.Command, error) { // read the config file, if needed; the structure of the config differs quite some // from the necessarily flat env vars/CLI flags, so we can't just use viper's // automatic config file support. 
- var actualConfig *config.ConfigSpec + var ( + actualConfig *api.ConfigSpec + tracerExporters []sdktrace.SpanExporter + ) if configFilePath := v.GetString("config-file"); configFilePath != "" { var ( @@ -101,27 +113,39 @@ func rootCmd(execs execs) (*cobra.Command, error) { // set up database connection if actualConfig != nil { - if actualConfig.Database.Server != "" { - cmdConfig.dbconn.Host = actualConfig.Database.Server - } - if actualConfig.Database.Port != 0 { - cmdConfig.dbconn.Port = actualConfig.Database.Port - } - if actualConfig.Database.Credentials.Username != "" { - cmdConfig.dbconn.User = actualConfig.Database.Credentials.Username - } - if actualConfig.Database.Credentials.Password != "" { - cmdConfig.dbconn.Pass = actualConfig.Database.Credentials.Password + if actualConfig.Database != nil { + if actualConfig.Database.Server != nil && *actualConfig.Database.Server != "" { + cmdConfig.dbconn.Host = *actualConfig.Database.Server + } + if actualConfig.Database.Port != nil && *actualConfig.Database.Port != 0 { + cmdConfig.dbconn.Port = *actualConfig.Database.Port + } + if actualConfig.Database.Credentials.Username != nil && *actualConfig.Database.Credentials.Username != "" { + cmdConfig.dbconn.User = *actualConfig.Database.Credentials.Username + } + if actualConfig.Database.Credentials.Password != nil && *actualConfig.Database.Credentials.Password != "" { + cmdConfig.dbconn.Pass = *actualConfig.Database.Credentials.Password + } } cmdConfig.configuration = actualConfig - if actualConfig.Telemetry.URL != "" { - // set up telemetry - loggerHook, err := databacklog.NewTelemetry(actualConfig.Telemetry, nil) + if actualConfig.Telemetry != nil && actualConfig.Telemetry.URL != nil && *actualConfig.Telemetry.URL != "" { + + // set up telemetry with tracing + u, err := url.Parse(*actualConfig.Telemetry.URL) + if err != nil { + return fmt.Errorf("invalid telemetry URL: %w", err) + } + tlsConfig, err := remote.GetTLSConfig(u.Hostname(), 
*actualConfig.Telemetry.Certificates, *actualConfig.Telemetry.Credentials) + if err != nil { + return fmt.Errorf("unable to set up telemetry: %w", err) + } + + tracerExporter, err := otlptracehttp.New(ctx, otlptracehttp.WithEndpoint(*actualConfig.Telemetry.URL), otlptracehttp.WithTLSClientConfig(tlsConfig)) if err != nil { return fmt.Errorf("unable to set up telemetry: %w", err) } - logger.AddHook(loggerHook) + tracerExporters = append(tracerExporters, tracerExporter) } } @@ -160,6 +184,20 @@ func rootCmd(execs execs) (*cobra.Command, error) { }, } cmdConfig.logger = logger + + if v.GetBool("trace-stderr") { + exp, err := stdouttrace.New(stdouttrace.WithPrettyPrint(), stdouttrace.WithWriter(os.Stderr)) + if err != nil { + return fmt.Errorf("failed to initialize stdouttrace exporter: %w", err) + } + tracerExporters = append(tracerExporters, exp) + } + var tracerProviderOpts []sdktrace.TracerProviderOption + for _, exp := range tracerExporters { + tracerProviderOpts = append(tracerProviderOpts, sdktrace.WithBatcher(exp)) + } + otel.SetTracerProvider(sdktrace.NewTracerProvider(tracerProviderOpts...)) + return nil }, } @@ -187,6 +225,7 @@ func rootCmd(execs execs) (*cobra.Command, error) { // debug via CLI or env var or default pflags.IntP("verbose", "v", 0, "set log level, 1 is debug, 2 is trace") pflags.Bool("debug", false, "set log level to debug, equivalent of --verbose=1; if both set, --version always overrides") + pflags.Bool("trace-stderr", false, "trace to stderr, in addition to any configured telemetry") // aws options pflags.String("aws-endpoint-url", "", "Specify an alternative endpoint for s3 interoperable systems e.g. 
Digitalocean; ignored if not using s3.") diff --git a/cmd/tracer.go b/cmd/tracer.go new file mode 100644 index 00000000..2b4a2d9c --- /dev/null +++ b/cmd/tracer.go @@ -0,0 +1,27 @@ +package cmd + +import ( + "fmt" + + "go.opentelemetry.io/otel" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/trace" +) + +const ( + appName = "mysql-backup" +) + +// getTracer get a global tracer for the application, which incorporates both the name of the application +// and the command that is being run. +func getTracer(cmd string) trace.Tracer { + return getTracerProvider().Tracer(fmt.Sprintf("%s/%s", appName, cmd)) +} + +func getTracerProvider() *sdktrace.TracerProvider { + tp, ok := otel.GetTracerProvider().(*sdktrace.TracerProvider) + if !ok { + return nil + } + return tp +} diff --git a/go.mod b/go.mod index 9b427921..fa70d69c 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module github.com/databacker/mysql-backup -go 1.21 +go 1.22 + +toolchain go1.23.1 require ( github.com/aws/aws-sdk-go-v2 v1.32.3 @@ -17,7 +19,7 @@ require ( github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.6.3 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 gopkg.in/yaml.v3 v3.0.1 ) @@ -29,17 +31,34 @@ require ( ) require ( + github.com/databacker/api/go/api v0.0.0-20241107135529-dabd5b1d3c68 + github.com/google/go-cmp v0.6.0 + go.opentelemetry.io/otel v1.31.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 + go.opentelemetry.io/otel/sdk v1.31.0 + go.opentelemetry.io/otel/trace v1.31.0 +) + +require ( + github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/containerd/log v0.1.0 // indirect github.com/distribution/reference v0.6.0 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect + 
github.com/golang/protobuf v1.5.3 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/sys/user v0.2.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect - go.opentelemetry.io/otel v1.19.0 // indirect - go.opentelemetry.io/otel/metric v1.19.0 // indirect - go.opentelemetry.io/otel/trace v1.19.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.31.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/grpc v1.58.3 // indirect + google.golang.org/protobuf v1.31.0 // indirect ) require ( @@ -69,7 +88,6 @@ require ( github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/klauspost/compress v1.16.5 // indirect - github.com/kr/pretty v0.3.0 // indirect github.com/magiconair/properties v1.8.1 // indirect github.com/mitchellh/mapstructure v1.1.2 // indirect github.com/moby/patternmatcher v0.5.0 // indirect @@ -86,12 +104,12 @@ require ( github.com/spf13/afero v1.2.2 // indirect github.com/spf13/cast v1.3.0 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.2.0 // indirect golang.org/x/crypto v0.22.0 // indirect golang.org/x/mod v0.11.0 // indirect golang.org/x/net v0.24.0 // indirect - golang.org/x/sys v0.19.0 // indirect + golang.org/x/sys v0.26.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.10.0 // indirect gopkg.in/ini.v1 v1.51.0 // indirect diff --git a/go.sum b/go.sum index f285ad3e..49c1830a 100644 --- a/go.sum +++ b/go.sum @@ -70,7 +70,10 
@@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/databacker/api/go/api v0.0.0-20240527190403-4c5485cad742 h1:oNmyT4PkoPiZr1GADEb8L66nMVIKxCENrn6tURBp4tM= +github.com/databacker/api/go/api v0.0.0-20240527190403-4c5485cad742/go.mod h1:bQhbl71Lk1ATni0H+u249hjoQ8ShAdVNcNjnw6z+SbE= +github.com/databacker/api/go/api v0.0.0-20241107135529-dabd5b1d3c68 h1:xxLIWTReBE6tS0TrVbrGIPhuoaIbtB9hHOhe887EUTQ= +github.com/databacker/api/go/api v0.0.0-20241107135529-dabd5b1d3c68/go.mod h1:bQhbl71Lk1ATni0H+u249hjoQ8ShAdVNcNjnw6z+SbE= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -99,8 +102,8 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 
h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= @@ -113,17 +116,20 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -132,7 +138,6 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0 h1:bM6ZAFZmc/wPFaRDi0d5L7hGEZEx/2u+Tmr2evNHDiI= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= @@ -160,8 +165,8 @@ github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgo github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -214,8 +219,8 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8= @@ -247,17 +252,14 @@ github.com/spf13/viper v1.6.3 h1:pDDu1OyEDTKzpJwdq4TiuLyMsUgRa/BT5cn5O62NoHs= github.com/spf13/viper v1.6.3/go.mod h1:jUMtyi0/lB5yZH/FjyGAoH7IMNrIhlBf6pXZmbMDvzw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod 
h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -271,18 +273,20 @@ go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= -go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= -go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 
h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= -go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= -go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= -go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= -go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= -go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= -go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 h1:UGZ1QwZWY67Z6BmckTU+9Rxn04m2bD3gD6Mk0OIOCPk= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0/go.mod h1:fcwWuDuaObkkChiDlhEpSq9+X1C0omv+s5mBtToAQ64= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ 
-349,8 +353,8 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -389,6 +393,7 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g= +google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0= google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw= google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= @@ -397,13 +402,15 @@ google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZi google.golang.org/grpc v1.21.0/go.mod 
h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= diff --git a/pkg/config/local.go b/pkg/config/local.go deleted file mode 100644 index 904d5811..00000000 --- a/pkg/config/local.go +++ /dev/null @@ -1,208 +0,0 @@ -package config - -import ( - "fmt" - - "github.com/databacker/mysql-backup/pkg/remote" - "github.com/databacker/mysql-backup/pkg/storage" - "github.com/databacker/mysql-backup/pkg/storage/credentials" - "github.com/databacker/mysql-backup/pkg/storage/s3" - 
"github.com/databacker/mysql-backup/pkg/storage/smb" - "github.com/databacker/mysql-backup/pkg/util" - "gopkg.in/yaml.v3" -) - -type ConfigSpec struct { - Logging logLevel `yaml:"logging"` - Dump Dump `yaml:"dump"` - Restore Restore `yaml:"restore"` - Database Database `yaml:"database"` - Targets Targets `yaml:"targets"` - Prune Prune `yaml:"prune"` - Telemetry Telemetry `yaml:"telemetry"` -} - -type Dump struct { - Include []string `yaml:"include"` - Exclude []string `yaml:"exclude"` - Safechars bool `yaml:"safechars"` - NoDatabaseName bool `yaml:"noDatabaseName"` - Schedule Schedule `yaml:"schedule"` - Compression string `yaml:"compression"` - Compact bool `yaml:"compact"` - MaxAllowedPacket int `yaml:"maxAllowedPacket"` - FilenamePattern string `yaml:"filenamePattern"` - Scripts BackupScripts `yaml:"scripts"` - Targets []string `yaml:"targets"` -} - -type Prune struct { - Retention string `yaml:"retention"` -} - -type Schedule struct { - Once bool `yaml:"once"` - Cron string `yaml:"cron"` - Frequency int `yaml:"frequency"` - Begin string `yaml:"begin"` -} - -type BackupScripts struct { - PreBackup string `yaml:"preBackup"` - PostBackup string `yaml:"postBackup"` -} - -type Restore struct { - Scripts RestoreScripts `yaml:"scripts"` -} - -type RestoreScripts struct { - PreRestore string `yaml:"preRestore"` - PostRestore string `yaml:"postRestore"` -} - -type Database struct { - Server string `yaml:"server"` - Port int `yaml:"port"` - Credentials DBCredentials `yaml:"credentials"` -} - -type DBCredentials struct { - Username string `yaml:"username"` - Password string `yaml:"password"` -} - -type Telemetry struct { - remote.Connection - // BufferSize is the size of the buffer for telemetry messages. It keeps BufferSize messages - // in memory before sending them remotely. The default of 0 is the same as 1, i.e. send every message. 
- BufferSize int `yaml:"bufferSize"` -} - -var _ yaml.Unmarshaler = &Target{} - -type Targets map[string]Target - -type Target struct { - Storage -} - -type Storage interface { - Storage() (storage.Storage, error) // convert to a storage.Storage instance -} - -func (t *Target) UnmarshalYAML(n *yaml.Node) error { - type T struct { - Type string `yaml:"type"` - URL string `yaml:"url"` - Details yaml.Node `yaml:",inline"` - } - obj := &T{} - if err := n.Decode(obj); err != nil { - return err - } - // based on the type, load the rest of the data - switch obj.Type { - case "s3": - var s3Target S3Target - if err := n.Decode(&s3Target); err != nil { - return err - } - t.Storage = s3Target - case "smb": - var smbTarget SMBTarget - if err := n.Decode(&smbTarget); err != nil { - return err - } - t.Storage = smbTarget - case "file": - var fileTarget FileTarget - if err := n.Decode(&fileTarget); err != nil { - return err - } - t.Storage = fileTarget - default: - return fmt.Errorf("unknown target type: %s", obj.Type) - } - - return nil -} - -type S3Target struct { - Type string `yaml:"type"` - URL string `yaml:"url"` - Region string `yaml:"region"` - Endpoint string `yaml:"endpoint"` - PathStyle bool `yaml:"pathStyle"` - Credentials AWSCredentials `yaml:"credentials"` -} - -func (s S3Target) Storage() (storage.Storage, error) { - u, err := util.SmartParse(s.URL) - if err != nil { - return nil, fmt.Errorf("invalid target url%v", err) - } - opts := []s3.Option{} - if s.Region != "" { - opts = append(opts, s3.WithRegion(s.Region)) - } - if s.Endpoint != "" { - opts = append(opts, s3.WithEndpoint(s.Endpoint)) - } - if s.PathStyle { - opts = append(opts, s3.WithPathStyle()) - } - if s.Credentials.AccessKeyId != "" { - opts = append(opts, s3.WithAccessKeyId(s.Credentials.AccessKeyId)) - } - if s.Credentials.SecretAccessKey != "" { - opts = append(opts, s3.WithSecretAccessKey(s.Credentials.SecretAccessKey)) - } - store := s3.New(*u, opts...) 
- return store, nil -} - -type AWSCredentials struct { - AccessKeyId string `yaml:"accessKeyId"` - SecretAccessKey string `yaml:"secretAccessKey"` -} - -type SMBTarget struct { - Type string `yaml:"type"` - URL string `yaml:"url"` - Credentials SMBCredentials `yaml:"credentials"` -} - -func (s SMBTarget) Storage() (storage.Storage, error) { - u, err := util.SmartParse(s.URL) - if err != nil { - return nil, fmt.Errorf("invalid target url%v", err) - } - opts := []smb.Option{} - if s.Credentials.Domain != "" { - opts = append(opts, smb.WithDomain(s.Credentials.Domain)) - } - if s.Credentials.Username != "" { - opts = append(opts, smb.WithUsername(s.Credentials.Username)) - } - if s.Credentials.Password != "" { - opts = append(opts, smb.WithPassword(s.Credentials.Password)) - } - store := smb.New(*u, opts...) - return store, nil -} - -type SMBCredentials struct { - Domain string `yaml:"domain"` - Username string `yaml:"username"` - Password string `yaml:"password"` -} - -type FileTarget struct { - Type string `yaml:"type"` - URL string `yaml:"url"` -} - -func (f FileTarget) Storage() (storage.Storage, error) { - return storage.ParseURL(f.URL, credentials.Creds{}) -} diff --git a/pkg/config/process.go b/pkg/config/process.go index f59c7441..40249295 100644 --- a/pkg/config/process.go +++ b/pkg/config/process.go @@ -1,42 +1,60 @@ package config import ( + "encoding/json" + "errors" "fmt" "io" - "github.com/databacker/mysql-backup/pkg/remote" - + "github.com/databacker/api/go/api" "gopkg.in/yaml.v3" + + "github.com/databacker/mysql-backup/pkg/remote" ) // ProcessConfig reads the configuration from a stream and returns the parsed configuration. // If the configuration is of type remote, it will retrieve the remote configuration. // Continues to process remotes until it gets a final valid ConfigSpec or fails. 
-func ProcessConfig(r io.Reader) (actualConfig *ConfigSpec, err error) { - var conf Config +func ProcessConfig(r io.Reader) (actualConfig *api.ConfigSpec, err error) { + var conf api.Config decoder := yaml.NewDecoder(r) if err := decoder.Decode(&conf); err != nil { return nil, fmt.Errorf("fatal error reading config file: %w", err) } // check that the version is something we recognize - if conf.Version != ConfigVersion { + if conf.Version != api.ConfigDatabackIoV1 { return nil, fmt.Errorf("unknown config version: %s", conf.Version) } + specBytes, err := yaml.Marshal(conf.Spec) + if err != nil { + return nil, fmt.Errorf("error marshalling spec part of configuration: %w", err) + } // if the config type is remote, retrieve our remote configuration // repeat until we end up with a configuration that is of type local for { switch conf.Kind { - case KindLocal: - // parse the config.Config - spec, ok := conf.Spec.(ConfigSpec) - if !ok { + case api.Local: + var spec api.ConfigSpec + // there is a problem that api.ConfigSpec has json tags but not yaml tags. + // This is because github.com/databacker/api uses oapi-codegen to generate the api + // which creates json tags and not yaml tags. There is a PR to get them in. + // http://github.com/oapi-codegen/oapi-codegen/pull/1798 + // Once that is in, and databacker/api uses them, this will work directly with yaml. + // For now, because there are no yaml tags, it defaults to just lowercasing the + // field. That means anything camelcase will be lowercased, which does not always + // parse properly. For example, `thisField` will expect `thisfield` in the yaml, which + // is incorrect. + // We fix this by converting the spec part of the config into json, + // as yaml is a valid subset of json, and then unmarshalling that. 
+ jsonBytes := yamlToJSON(specBytes) + if err := json.Unmarshal(jsonBytes, &spec); err != nil { return nil, fmt.Errorf("parsed yaml had kind local, but spec invalid") } actualConfig = &spec - case KindRemote: - spec, ok := conf.Spec.(RemoteSpec) - if !ok { + case api.Remote: + var spec api.RemoteSpec + if err := yaml.Unmarshal(specBytes, &spec); err != nil { return nil, fmt.Errorf("parsed yaml had kind remote, but spec invalid") } remoteConfig, err := getRemoteConfig(spec) @@ -56,15 +74,18 @@ func ProcessConfig(r io.Reader) (actualConfig *ConfigSpec, err error) { // getRemoteConfig given a RemoteSpec for a config, retrieve the config from the remote // and parse it into a Config struct. -func getRemoteConfig(spec RemoteSpec) (conf Config, err error) { - resp, err := remote.OpenConnection(spec.URL, spec.Certificates, spec.Credentials) +func getRemoteConfig(spec api.RemoteSpec) (conf api.Config, err error) { + if spec.URL == nil || spec.Certificates == nil || spec.Credentials == nil { + return conf, errors.New("empty fields for components") + } + resp, err := remote.OpenConnection(*spec.URL, *spec.Certificates, *spec.Credentials) if err != nil { return conf, fmt.Errorf("error getting reader: %w", err) } defer resp.Body.Close() // Read the body of the response and convert to a config.Config struct - var baseConf Config + var baseConf api.Config decoder := yaml.NewDecoder(resp.Body) if err := decoder.Decode(&baseConf); err != nil { return conf, fmt.Errorf("invalid config file retrieved from server: %w", err) @@ -72,3 +93,15 @@ func getRemoteConfig(spec RemoteSpec) (conf Config, err error) { return baseConf, nil } + +func yamlToJSON(yamlBytes []byte) []byte { + var m map[string]interface{} + if err := yaml.Unmarshal(yamlBytes, &m); err != nil { + return nil + } + jsonBytes, err := json.Marshal(m) + if err != nil { + return nil + } + return jsonBytes +} diff --git a/pkg/config/process_test.go b/pkg/config/process_test.go new file mode 100644 index 00000000..4d20c53e --- 
/dev/null +++ b/pkg/config/process_test.go @@ -0,0 +1,90 @@ +package config + +import ( + "bytes" + "encoding/base64" + "net/http" + "os" + "strings" + "testing" + + utiltest "github.com/databacker/mysql-backup/pkg/internal/test" + "gopkg.in/yaml.v3" + + "github.com/databacker/api/go/api" + "github.com/google/go-cmp/cmp" +) + +func TestGetRemoteConfig(t *testing.T) { + configFile := "./testdata/config.yml" + content, err := os.ReadFile(configFile) + if err != nil { + t.Fatalf("failed to read config file: %v", err) + } + var validConfig api.Config + if err := yaml.Unmarshal(content, &validConfig); err != nil { + t.Fatalf("failed to unmarshal config: %v", err) + } + // start the server before the tests + server, fingerprint, clientKeys, err := utiltest.StartServer(1, func(w http.ResponseWriter, r *http.Request) { + var buf bytes.Buffer + f, err := os.Open(configFile) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + _, _ = w.Write([]byte(err.Error())) + return + } + if _, err = buf.ReadFrom(f); err != nil { + w.WriteHeader(http.StatusInternalServerError) + _, _ = w.Write([]byte(err.Error())) + return + } + if _, err := w.Write(buf.Bytes()); err != nil { + w.WriteHeader(http.StatusInternalServerError) + _, _ = w.Write([]byte(err.Error())) + return + } + w.WriteHeader(http.StatusOK) + }) + if err != nil { + t.Fatalf("failed to start server: %v", err) + } + defer server.Close() + tests := []struct { + name string + url string + err string + config api.Config + }{ + {"no url", "", "unsupported protocol scheme", api.Config{}}, + {"invalid server", "https://foo.bar/com", "no such host", api.Config{}}, + {"no path", "https://google.com/foo/bar/abc", "invalid config file", api.Config{}}, + {"nothing listening", "https://localhost:12345/foo/bar/abc", "connection refused", api.Config{}}, + {"valid", server.URL, "", validConfig}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + creds := base64.StdEncoding.EncodeToString(clientKeys[0]) + 
spec := api.RemoteSpec{ + URL: &tt.url, + Certificates: &[]string{fingerprint}, + Credentials: &creds, + } + conf, err := getRemoteConfig(spec) + switch { + case tt.err == "" && err != nil: + t.Fatalf("unexpected error: %v", err) + case tt.err != "" && err == nil: + t.Fatalf("expected error: %s", tt.err) + case tt.err != "" && !strings.Contains(err.Error(), tt.err): + t.Fatalf("mismatched error: %s, got: %v", tt.err, err) + default: + diff := cmp.Diff(tt.config, conf) + if diff != "" { + t.Fatalf("mismatched config: %s", diff) + } + } + }) + } + +} diff --git a/pkg/config/remote.go b/pkg/config/remote.go deleted file mode 100644 index 15dbac39..00000000 --- a/pkg/config/remote.go +++ /dev/null @@ -1,9 +0,0 @@ -package config - -import ( - "github.com/databacker/mysql-backup/pkg/remote" -) - -type RemoteSpec struct { - remote.Connection -} diff --git a/pkg/config/testdata/config.yml b/pkg/config/testdata/config.yml new file mode 100644 index 00000000..62d9893c --- /dev/null +++ b/pkg/config/testdata/config.yml @@ -0,0 +1,25 @@ +version: config.databack.io/v1 +kind: local + +spec: + database: + server: abcd + port: 3306 + credentials: + username: user2 + password: xxxx2 + + targets: + local: + type: file + url: file:///foo/bar + other: + type: file + url: /foo/bar + + dump: + targets: + - local + + prune: + retention: "1h" \ No newline at end of file diff --git a/pkg/config/type.go b/pkg/config/type.go deleted file mode 100644 index 4edb7d37..00000000 --- a/pkg/config/type.go +++ /dev/null @@ -1,73 +0,0 @@ -package config - -import ( - "fmt" - - "gopkg.in/yaml.v3" -) - -type logLevel string - -//nolint:unused // we expect to use these going forward -const ( - logLevelError logLevel = "error" - logLevelWarning logLevel = "warning" - logLevelInfo logLevel = "info" - logLevelDebug logLevel = "debug" - logLevelTrace logLevel = "trace" - logLevelDefault logLevel = logLevelInfo - - ConfigVersion = "config.databack.io/v1" - KindLocal = "local" - KindRemote = "remote" -) - 
-type Config struct { - Kind string `yaml:"kind"` - Version string `yaml:"version"` - Metadata Metadata `yaml:"metadata"` - Spec any `yaml:"spec"` -} - -var _ yaml.Unmarshaler = &Config{} - -type Metadata struct { - Name string `yaml:"name"` - Description string `yaml:"description"` - Digest string `yaml:"digest"` -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface, so we can handle the Spec correctly -func (c *Config) UnmarshalYAML(n *yaml.Node) error { - type T struct { - Kind string `yaml:"kind"` - Version string `yaml:"version"` - Metadata Metadata `yaml:"metadata"` - Spec yaml.Node `yaml:"spec"` - } - obj := &T{} - if err := n.Decode(obj); err != nil { - return err - } - switch obj.Kind { - case KindLocal: - // parse the config.Spec - var spec ConfigSpec - if err := obj.Spec.Decode(&spec); err != nil { - return err - } - c.Spec = spec - case KindRemote: - var spec RemoteSpec - if err := obj.Spec.Decode(&spec); err != nil { - return err - } - c.Spec = spec - default: - return fmt.Errorf("unknown config type: %s", obj.Kind) - } - c.Kind = obj.Kind - c.Version = obj.Version - c.Metadata = obj.Metadata - return nil -} diff --git a/pkg/core/dump.go b/pkg/core/dump.go index a6d395a0..c355e9c0 100644 --- a/pkg/core/dump.go +++ b/pkg/core/dump.go @@ -1,6 +1,7 @@ package core import ( + "context" "fmt" "os" "path" @@ -10,15 +11,23 @@ import ( "time" log "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" "github.com/databacker/mysql-backup/pkg/archive" "github.com/databacker/mysql-backup/pkg/database" + "github.com/databacker/mysql-backup/pkg/util" ) // Dump run a single dump, based on the provided opts -func (e *Executor) Dump(opts DumpOptions) (DumpResults, error) { +func (e *Executor) Dump(ctx context.Context, opts DumpOptions) (DumpResults, error) { results := DumpResults{Start: time.Now()} - defer func() { results.End = time.Now() }() + tracer := util.GetTracerFromContext(ctx) + ctx, span := 
tracer.Start(ctx, "dump") + defer func() { + results.End = time.Now() + span.End() + }() targets := opts.Targets safechars := opts.Safechars @@ -41,6 +50,7 @@ func (e *Executor) Dump(opts DumpOptions) (DumpResults, error) { timepart = strings.ReplaceAll(timepart, ":", "-") } results.Timestamp = timepart + span.SetAttributes(attribute.String("timestamp", timepart)) // sourceFilename: file that the uploader looks for when performing the upload // targetFilename: the remote file that is actually uploaded @@ -49,6 +59,7 @@ func (e *Executor) Dump(opts DumpOptions) (DumpResults, error) { if err != nil { return results, fmt.Errorf("failed to process filename pattern: %v", err) } + span.SetAttributes(attribute.String("source-filename", sourceFilename), attribute.String("target-filename", targetFilename)) // create a temporary working directory tmpdir, err := os.MkdirTemp("", "databacker_backup") @@ -57,7 +68,7 @@ func (e *Executor) Dump(opts DumpOptions) (DumpResults, error) { } defer os.RemoveAll(tmpdir) // execute pre-backup scripts if any - if err := preBackup(timepart, path.Join(tmpdir, sourceFilename), tmpdir, opts.PreBackupScripts, logger.Level == log.DebugLevel); err != nil { + if err := preBackup(ctx, timepart, path.Join(tmpdir, sourceFilename), tmpdir, opts.PreBackupScripts, logger.Level == log.DebugLevel); err != nil { return results, fmt.Errorf("error running pre-restore: %v", err) } @@ -70,12 +81,14 @@ func (e *Executor) Dump(opts DumpOptions) (DumpResults, error) { dw := make([]database.DumpWriter, 0) - // do we split the output by schema, or one big dump file? 
+ // do we back up all schemas, or just provided ones + span.SetAttributes(attribute.Bool("provided-schemas", len(dbnames) == 0)) if len(dbnames) == 0 { if dbnames, err = database.GetSchemas(dbconn); err != nil { return results, fmt.Errorf("failed to list database schemas: %v", err) } } + span.SetAttributes(attribute.StringSlice("actual-schemas", dbnames)) for _, s := range dbnames { outFile := path.Join(workdir, fmt.Sprintf("%s_%s.sql", s, timepart)) f, err := os.Create(outFile) @@ -88,45 +101,62 @@ func (e *Executor) Dump(opts DumpOptions) (DumpResults, error) { }) } results.DumpStart = time.Now() - if err := database.Dump(dbconn, database.DumpOpts{ + dbDumpCtx, dbDumpSpan := tracer.Start(ctx, "database_dump") + if err := database.Dump(dbDumpCtx, dbconn, database.DumpOpts{ Compact: compact, SuppressUseDatabase: suppressUseDatabase, MaxAllowedPacket: maxAllowedPacket, }, dw); err != nil { + dbDumpSpan.SetStatus(codes.Error, err.Error()) + dbDumpSpan.End() return results, fmt.Errorf("failed to dump database: %v", err) } results.DumpEnd = time.Now() + dbDumpSpan.SetStatus(codes.Ok, "completed") + dbDumpSpan.End() // create my tar writer to archive it all together // WRONG: THIS WILL CAUSE IT TO TRY TO LOOP BACK ON ITSELF + _, tarSpan := tracer.Start(ctx, "output_tar") outFile := path.Join(tmpdir, sourceFilename) f, err := os.OpenFile(outFile, os.O_CREATE|os.O_WRONLY, 0o644) if err != nil { + tarSpan.SetStatus(codes.Error, err.Error()) + tarSpan.End() return results, fmt.Errorf("failed to open output file '%s': %v", outFile, err) } defer f.Close() cw, err := compressor.Compress(f) if err != nil { + tarSpan.SetStatus(codes.Error, err.Error()) + tarSpan.End() return results, fmt.Errorf("failed to create compressor: %v", err) } if err := archive.Tar(workdir, cw); err != nil { + tarSpan.SetStatus(codes.Error, err.Error()) + tarSpan.End() return results, fmt.Errorf("error creating the compressed archive: %v", err) } // we need to close it explicitly before moving ahead 
f.Close() + tarSpan.SetStatus(codes.Ok, "completed") + tarSpan.End() // execute post-backup scripts if any - if err := postBackup(timepart, path.Join(tmpdir, sourceFilename), tmpdir, opts.PostBackupScripts, logger.Level == log.DebugLevel); err != nil { + if err := postBackup(ctx, timepart, path.Join(tmpdir, sourceFilename), tmpdir, opts.PostBackupScripts, logger.Level == log.DebugLevel); err != nil { return results, fmt.Errorf("error running pre-restore: %v", err) } // upload to each destination + uploadCtx, uploadSpan := tracer.Start(ctx, "upload") for _, t := range targets { uploadResult := UploadResult{Target: t.URL(), Start: time.Now()} targetCleanFilename := t.Clean(targetFilename) logger.Debugf("uploading via protocol %s from %s to %s", t.Protocol(), sourceFilename, targetCleanFilename) - copied, err := t.Push(targetCleanFilename, filepath.Join(tmpdir, sourceFilename), logger) + copied, err := t.Push(uploadCtx, targetCleanFilename, filepath.Join(tmpdir, sourceFilename), logger) if err != nil { + uploadSpan.SetStatus(codes.Error, err.Error()) + uploadSpan.End() return results, fmt.Errorf("failed to push file: %v", err) } logger.Debugf("completed copying %d bytes", copied) @@ -134,12 +164,14 @@ func (e *Executor) Dump(opts DumpOptions) (DumpResults, error) { uploadResult.End = time.Now() results.Uploads = append(results.Uploads, uploadResult) } + uploadSpan.SetStatus(codes.Ok, "completed") + uploadSpan.End() return results, nil } // run pre-backup scripts, if they exist -func preBackup(timestamp, dumpfile, dumpdir, preBackupDir string, debug bool) error { +func preBackup(ctx context.Context, timestamp, dumpfile, dumpdir, preBackupDir string, debug bool) error { // construct any additional environment env := map[string]string{ "NOW": timestamp, @@ -147,10 +179,12 @@ func preBackup(timestamp, dumpfile, dumpdir, preBackupDir string, debug bool) er "DUMPDIR": dumpdir, "DB_DUMP_DEBUG": fmt.Sprintf("%v", debug), } - return runScripts(preBackupDir, env) + ctx, span := 
util.GetTracerFromContext(ctx).Start(ctx, "pre-backup") + defer span.End() + return runScripts(ctx, preBackupDir, env) } -func postBackup(timestamp, dumpfile, dumpdir, postBackupDir string, debug bool) error { +func postBackup(ctx context.Context, timestamp, dumpfile, dumpdir, postBackupDir string, debug bool) error { // construct any additional environment env := map[string]string{ "NOW": timestamp, @@ -158,7 +192,9 @@ func postBackup(timestamp, dumpfile, dumpdir, postBackupDir string, debug bool) "DUMPDIR": dumpdir, "DB_DUMP_DEBUG": fmt.Sprintf("%v", debug), } - return runScripts(postBackupDir, env) + ctx, span := util.GetTracerFromContext(ctx).Start(ctx, "post-backup") + defer span.End() + return runScripts(ctx, postBackupDir, env) } // ProcessFilenamePattern takes a template pattern and processes it with the current time. diff --git a/pkg/core/prune.go b/pkg/core/prune.go index 365afca4..2cc1506e 100644 --- a/pkg/core/prune.go +++ b/pkg/core/prune.go @@ -1,119 +1,148 @@ package core import ( + "context" "errors" "fmt" "regexp" "slices" "strconv" "time" + + "github.com/databacker/mysql-backup/pkg/storage" + "github.com/databacker/mysql-backup/pkg/util" + "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" ) // filenameRE is a regular expression to match a backup filename var filenameRE = regexp.MustCompile(`^db_backup_(\d{4})-(\d{2})-(\d{2})T(\d{2})[:-](\d{2})[:-](\d{2})Z\.\w+$`) // Prune prune older backups -func (e *Executor) Prune(opts PruneOptions) error { +func (e *Executor) Prune(ctx context.Context, opts PruneOptions) error { + tracer := util.GetTracerFromContext(ctx) + tracerCtx, span := tracer.Start(ctx, "prune") + defer span.End() logger := e.Logger.WithField("run", opts.Run.String()) logger.Level = e.Logger.Level logger.Info("beginning prune") var ( - candidates []string - now = opts.Now + now = opts.Now ) if now.IsZero() { now = time.Now() } + if len(opts.Targets) == 0 { + return errors.New("no 
targets") + } + retainHours, err1 := convertToHours(opts.Retention) retainCount, err2 := convertToCount(opts.Retention) - if err1 != nil && err2 != nil { + if (err1 != nil && err2 != nil) || (retainHours <= 0 && retainCount <= 0) { return fmt.Errorf("invalid retention string: %s", opts.Retention) } - if len(opts.Targets) == 0 { - return errors.New("no targets") - } for _, target := range opts.Targets { - var pruned int + if err := pruneTarget(tracerCtx, logger, target, now, retainHours, retainCount); err != nil { + return fmt.Errorf("failed to prune target %s: %v", target, err) + } + } - logger.Debugf("pruning target %s", target) - files, err := target.ReadDir(".", logger) - if err != nil { - return fmt.Errorf("failed to read directory: %v", err) + return nil +} + +// pruneTarget prunes an individual target +func pruneTarget(ctx context.Context, logger *logrus.Entry, target storage.Storage, now time.Time, retainHours, retainCount int) error { + var ( + pruned int + candidates, ignored, invalidDate []string + ) + ctx, span := util.GetTracerFromContext(ctx).Start(ctx, fmt.Sprintf("pruneTarget %s", target.URL())) + defer span.End() + + logger.Debugf("pruning target %s", target) + files, err := target.ReadDir(ctx, ".", logger) + if err != nil { + span.SetStatus(codes.Error, fmt.Sprintf("failed to read directory: %v", err)) + return fmt.Errorf("failed to read directory: %v", err) + } + + // create a slice with the filenames and their calculated times - these are *not* the timestamp times, but the times calculated from the filenames + var filesWithTimes []fileWithTime + + for _, fileInfo := range files { + filename := fileInfo.Name() + matches := filenameRE.FindStringSubmatch(filename) + if matches == nil { + logger.Debugf("ignoring filename that is not standard backup pattern: %s", filename) + ignored = append(ignored, filename) + continue } + logger.Debugf("checking filename that is standard backup pattern: %s", filename) - // create a slice with the filenames and 
their calculated times - these are *not* the timestamp times, but the times calculated from the filenames - var filesWithTimes []fileWithTime + // Parse the date from the filename + year, month, day, hour, minute, second := matches[1], matches[2], matches[3], matches[4], matches[5], matches[6] + dateTimeStr := fmt.Sprintf("%s-%s-%sT%s:%s:%sZ", year, month, day, hour, minute, second) + filetime, err := time.Parse(time.RFC3339, dateTimeStr) + if err != nil { + logger.Debugf("Error parsing date from filename %s: %v; ignoring", filename, err) + invalidDate = append(invalidDate, filename) + continue + } + filesWithTimes = append(filesWithTimes, fileWithTime{ + filename: filename, + filetime: filetime, + }) + } - for _, fileInfo := range files { - filename := fileInfo.Name() - matches := filenameRE.FindStringSubmatch(filename) - if matches == nil { - logger.Debugf("ignoring filename that is not standard backup pattern: %s", filename) + switch { + case retainHours > 0: + // if we had retainHours, we go through all of the files and find any whose timestamp is older than now-retainHours + for _, f := range filesWithTimes { + // Check if the file is within 'retain' hours from 'now' + age := now.Sub(f.filetime).Hours() + if age < float64(retainHours) { + logger.Debugf("file %s is %f hours old", f.filename, age) + logger.Debugf("keeping file %s", f.filename) continue } - logger.Debugf("checking filename that is standard backup pattern: %s", filename) - - // Parse the date from the filename - year, month, day, hour, minute, second := matches[1], matches[2], matches[3], matches[4], matches[5], matches[6] - dateTimeStr := fmt.Sprintf("%s-%s-%sT%s:%s:%sZ", year, month, day, hour, minute, second) - filetime, err := time.Parse(time.RFC3339, dateTimeStr) - if err != nil { - logger.Debugf("Error parsing date from filename %s: %v; ignoring", filename, err) - continue - } - filesWithTimes = append(filesWithTimes, fileWithTime{ - filename: filename, - filetime: filetime, - }) + 
logger.Debugf("Adding candidate file: %s", f.filename) + candidates = append(candidates, f.filename) } - - switch { - case retainHours > 0: - // if we had retainHours, we go through all of the files and find any whose timestamp is older than now-retainHours - for _, f := range filesWithTimes { - // Check if the file is within 'retain' hours from 'now' - age := now.Sub(f.filetime).Hours() - if age < float64(retainHours) { - logger.Debugf("file %s is %f hours old", f.filename, age) - logger.Debugf("keeping file %s", f.filename) - continue - } - logger.Debugf("Adding candidate file: %s", f.filename) - candidates = append(candidates, f.filename) + case retainCount > 0: + // if we had retainCount, we sort all of the files by timestamp, and add to the list all except the retainCount most recent + slices.SortFunc(filesWithTimes, func(i, j fileWithTime) int { + switch { + case i.filetime.Before(j.filetime): + return -1 + case i.filetime.After(j.filetime): + return 1 } - case retainCount > 0: - // if we had retainCount, we sort all of the files by timestamp, and add to the list all except the retainCount most recent - slices.SortFunc(filesWithTimes, func(i, j fileWithTime) int { - switch { - case i.filetime.Before(j.filetime): - return -1 - case i.filetime.After(j.filetime): - return 1 - } - return 0 - }) - slices.Reverse(filesWithTimes) - if retainCount >= len(filesWithTimes) { - for i := 0 + retainCount; i < len(filesWithTimes); i++ { - logger.Debugf("Adding candidate file %s:", filesWithTimes[i].filename) - candidates = append(candidates, filesWithTimes[i].filename) - } + return 0 + }) + slices.Reverse(filesWithTimes) + if retainCount >= len(filesWithTimes) { + for i := 0 + retainCount; i < len(filesWithTimes); i++ { + logger.Debugf("Adding candidate file %s:", filesWithTimes[i].filename) + candidates = append(candidates, filesWithTimes[i].filename) } - default: - return fmt.Errorf("invalid retention string: %s", opts.Retention) } + default: + span.SetStatus(codes.Error, 
"invalid retention time") + return fmt.Errorf("invalid retention time %d count %d hours", retainCount, retainHours) + } - // we have the list, remove them all - for _, filename := range candidates { - if err := target.Remove(filename, logger); err != nil { - return fmt.Errorf("failed to remove file %s: %v", filename, err) - } - pruned++ + // we have the list, remove them all + span.SetAttributes(attribute.StringSlice("candidates", candidates), attribute.StringSlice("ignored", ignored), attribute.StringSlice("invalidDate", invalidDate)) + for _, filename := range candidates { + if err := target.Remove(ctx, filename, logger); err != nil { + return fmt.Errorf("failed to remove file %s: %v", filename, err) } - logger.Debugf("pruning %d files from target %s", pruned, target) + pruned++ } - + logger.Debugf("pruning %d files from target %s", pruned, target) + span.SetStatus(codes.Ok, fmt.Sprintf("pruned %d files", pruned)) return nil } diff --git a/pkg/core/prune_test.go b/pkg/core/prune_test.go index 9593b225..cf5efe5a 100644 --- a/pkg/core/prune_test.go +++ b/pkg/core/prune_test.go @@ -1,6 +1,7 @@ package core import ( + "context" "fmt" "io" "os" @@ -75,9 +76,10 @@ func TestPrune(t *testing.T) { afterFiles []string err error }{ - {"invalid format", PruneOptions{Retention: "100x", Now: now}, nil, nil, fmt.Errorf("invalid retention string: 100x")}, {"no targets", PruneOptions{Retention: "1h", Now: now}, nil, nil, fmt.Errorf("no targets")}, // 1 hour - file[1] is 1h+30m = 1.5h, so it should be pruned + {"invalid format", PruneOptions{Retention: "100x", Now: now}, filenames, filenames[0:1], fmt.Errorf("invalid retention string: 100x")}, + // 1 hour - file[1] is 1h+30m = 1.5h, so it should be pruned {"1 hour", PruneOptions{Retention: "1h", Now: now}, filenames, filenames[0:1], nil}, // 2 hours - file[2] is 2h+30m = 2.5h, so it should be pruned {"2 hours", PruneOptions{Retention: "2h", Now: now}, filenames, filenames[0:2], nil}, @@ -96,6 +98,7 @@ func TestPrune(t *testing.T) 
{ } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + ctx := context.Background() // create a temporary directory workDir := t.TempDir() // create beforeFiles in the directory and create a target, but only if there are beforeFiles @@ -124,12 +127,14 @@ func TestPrune(t *testing.T) { executor := Executor{ Logger: logger, } - err := executor.Prune(tt.opts) + err := executor.Prune(ctx, tt.opts) switch { case (err == nil && tt.err != nil) || (err != nil && tt.err == nil): t.Errorf("expected error %v, got %v", tt.err, err) case err != nil && tt.err != nil && err.Error() != tt.err.Error(): t.Errorf("expected error %v, got %v", tt.err, err) + case err != nil: + return } // check files match files, err := os.ReadDir(workDir) diff --git a/pkg/core/restore.go b/pkg/core/restore.go index d19f8860..730a530e 100644 --- a/pkg/core/restore.go +++ b/pkg/core/restore.go @@ -1,6 +1,7 @@ package core import ( + "context" "fmt" "io" "os" @@ -8,6 +9,9 @@ import ( "github.com/databacker/mysql-backup/pkg/archive" "github.com/databacker/mysql-backup/pkg/database" + "github.com/databacker/mysql-backup/pkg/util" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" ) const ( @@ -17,22 +21,38 @@ const ( ) // Restore restore a specific backup into the database -func (e *Executor) Restore(opts RestoreOptions) error { +func (e *Executor) Restore(ctx context.Context, opts RestoreOptions) error { + tracer := util.GetTracerFromContext(ctx) + ctx, span := tracer.Start(ctx, "restore") + defer span.End() logger := e.Logger.WithField("run", opts.Run.String()) logger.Level = e.Logger.Level logger.Info("beginning restore") // execute pre-restore scripts if any - if err := preRestore(opts.Target.URL()); err != nil { + if err := preRestore(ctx, opts.Target.URL()); err != nil { return fmt.Errorf("error running pre-restore: %v", err) } logger.Debugf("restoring via %s protocol, temporary file location %s", opts.Target.Protocol(), tmpRestoreFile) - copied, err := 
opts.Target.Pull(opts.TargetFile, tmpRestoreFile, logger) + _, pullSpan := tracer.Start(ctx, "pull file") + pullSpan.SetAttributes( + attribute.String("target", opts.Target.URL()), + attribute.String("targetfile", opts.TargetFile), + attribute.String("tmpfile", tmpRestoreFile), + ) + copied, err := opts.Target.Pull(ctx, opts.TargetFile, tmpRestoreFile, logger) if err != nil { + pullSpan.RecordError(err) + pullSpan.End() return fmt.Errorf("failed to pull target %s: %v", opts.Target, err) } + pullSpan.SetAttributes( + attribute.Int64("copied", copied), + ) + pullSpan.SetStatus(codes.Ok, "completed") + pullSpan.End() logger.Debugf("completed copying %d bytes", copied) // successfully download file, now restore it @@ -46,23 +66,36 @@ func (e *Executor) Restore(opts RestoreOptions) error { return fmt.Errorf("unable to read the temporary download file: %v", err) } defer f.Close() - os.Remove(tmpRestoreFile) + defer os.Remove(tmpRestoreFile) // create my tar reader to put the files in the directory + _, tarSpan := tracer.Start(ctx, "input_tar") cr, err := opts.Compressor.Uncompress(f) if err != nil { + tarSpan.SetStatus(codes.Error, fmt.Sprintf("unable to create an uncompressor: %v", err)) + tarSpan.End() return fmt.Errorf("unable to create an uncompressor: %v", err) } if err := archive.Untar(cr, tmpdir); err != nil { + tarSpan.SetStatus(codes.Error, fmt.Sprintf("error extracting the file: %v", err)) + tarSpan.End() return fmt.Errorf("error extracting the file: %v", err) } + tarSpan.SetStatus(codes.Ok, "completed") + tarSpan.End() // run through each file and apply it + dbRestoreCtx, dbRestoreSpan := tracer.Start(ctx, "database_restore") files, err := os.ReadDir(tmpdir) if err != nil { + dbRestoreSpan.SetStatus(codes.Error, fmt.Sprintf("failed to find extracted files to restore: %v", err)) + dbRestoreSpan.End() return fmt.Errorf("failed to find extracted files to restore: %v", err) } - readers := make([]io.ReadSeeker, 0) + var ( + readers = make([]io.ReadSeeker, 0) + 
fileNames []string + ) for _, f := range files { // ignore directories if f.IsDir() { @@ -74,31 +107,41 @@ func (e *Executor) Restore(opts RestoreOptions) error { } defer file.Close() readers = append(readers, file) + fileNames = append(fileNames, f.Name()) } - if err := database.Restore(opts.DBConn, opts.DatabasesMap, readers); err != nil { + dbRestoreSpan.SetAttributes(attribute.StringSlice("files", fileNames)) + if err := database.Restore(dbRestoreCtx, opts.DBConn, opts.DatabasesMap, readers); err != nil { + dbRestoreSpan.SetStatus(codes.Error, fmt.Sprintf("failed to restore database: %v", err)) + dbRestoreSpan.End() return fmt.Errorf("failed to restore database: %v", err) } + dbRestoreSpan.SetStatus(codes.Ok, "completed") + dbRestoreSpan.End() // execute post-restore scripts if any - if err := postRestore(opts.Target.URL()); err != nil { + if err := postRestore(ctx, opts.Target.URL()); err != nil { return fmt.Errorf("error running post-restove: %v", err) } return nil } // run pre-restore scripts, if they exist -func preRestore(target string) error { +func preRestore(ctx context.Context, target string) error { // construct any additional environment env := map[string]string{ "DB_RESTORE_TARGET": target, } - return runScripts(preRestoreDir, env) + ctx, span := util.GetTracerFromContext(ctx).Start(ctx, "pre-restore") + defer span.End() + return runScripts(ctx, preRestoreDir, env) } -func postRestore(target string) error { +func postRestore(ctx context.Context, target string) error { // construct any additional environment env := map[string]string{ "DB_RESTORE_TARGET": target, } - return runScripts(postRestoreDir, env) + ctx, span := util.GetTracerFromContext(ctx).Start(ctx, "post-restore") + defer span.End() + return runScripts(ctx, postRestoreDir, env) } diff --git a/pkg/core/scripts.go b/pkg/core/scripts.go index c538b334..a23073b8 100644 --- a/pkg/core/scripts.go +++ b/pkg/core/scripts.go @@ -1,37 +1,56 @@ package core import ( + "context" "fmt" + "io/fs" "os" 
"os/exec" "path" + + "github.com/databacker/mysql-backup/pkg/util" + "go.opentelemetry.io/otel/codes" ) -func runScripts(dir string, env map[string]string) error { +// runScripts run scripts in a directory with a given environment. +func runScripts(ctx context.Context, dir string, env map[string]string) error { + tracer := util.GetTracerFromContext(ctx) + files, err := os.ReadDir(dir) // if the directory does not exist, do not worry about it if err != nil && os.IsNotExist(err) { return nil } for _, f := range files { - // ignore directories and any files we cannot execute - fi, err := f.Info() - if err != nil { - return fmt.Errorf("error getting file info %s: %v", f.Name(), err) - } - if f.IsDir() || fi.Mode()&0111 == 0 { - continue - } - // execute the file - envSlice := os.Environ() - for k, v := range env { - envSlice = append(envSlice, fmt.Sprintf("%s=%s", k, v)) - } - cmd := exec.Command(path.Join(dir, f.Name())) - cmd.Env = envSlice - if err := cmd.Run(); err != nil { - return fmt.Errorf("error running file %s: %v", f.Name(), err) + _, span := tracer.Start(ctx, f.Name()) + if err := runScript(ctx, dir, f, env); err != nil { + span.SetStatus(codes.Error, err.Error()) + return err } + span.SetStatus(codes.Ok, "completed") + span.End() + } + return nil +} + +func runScript(ctx context.Context, dir string, f fs.DirEntry, env map[string]string) error { + // ignore directories and any files we cannot execute + fi, err := f.Info() + if err != nil { + return fmt.Errorf("error getting file info %s: %v", f.Name(), err) + } + if f.IsDir() || fi.Mode()&0111 == 0 { + return nil + } + // execute the file + envSlice := os.Environ() + for k, v := range env { + envSlice = append(envSlice, fmt.Sprintf("%s=%s", k, v)) + } + cmd := exec.Command(path.Join(dir, f.Name())) + cmd.Env = envSlice + if err := cmd.Run(); err != nil { + return fmt.Errorf("error running file %s: %v", f.Name(), err) } return nil } diff --git a/pkg/database/dump.go b/pkg/database/dump.go index 
52195272..8233c0b3 100644 --- a/pkg/database/dump.go +++ b/pkg/database/dump.go @@ -1,6 +1,7 @@ package database import ( + "context" "database/sql" "fmt" @@ -13,7 +14,7 @@ type DumpOpts struct { MaxAllowedPacket int } -func Dump(dbconn Connection, opts DumpOpts, writers []DumpWriter) error { +func Dump(ctx context.Context, dbconn Connection, opts DumpOpts, writers []DumpWriter) error { // TODO: dump data for each writer: // per schema diff --git a/pkg/database/restore.go b/pkg/database/restore.go index 39375756..d03c1290 100644 --- a/pkg/database/restore.go +++ b/pkg/database/restore.go @@ -14,7 +14,7 @@ var ( createRegex = regexp.MustCompile(`(?i)^(CREATE\s+DATABASE\s*(\/\*.*\*\/\s*)?` + "`" + `)([^\s]+)(` + "`" + `\s*(\s*\/\*.*\*\/\s*)?\s*;$)`) ) -func Restore(dbconn Connection, databasesMap map[string]string, readers []io.ReadSeeker) error { +func Restore(ctx context.Context, dbconn Connection, databasesMap map[string]string, readers []io.ReadSeeker) error { db, err := sql.Open("mysql", dbconn.MySQL()) if err != nil { return fmt.Errorf("failed to open connection to database: %v", err) @@ -22,7 +22,6 @@ func Restore(dbconn Connection, databasesMap map[string]string, readers []io.Rea defer db.Close() // load data into database by reading from each reader - ctx := context.Background() for _, r := range readers { tx, err := db.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable}) if err != nil { diff --git a/pkg/log/telemetry.go b/pkg/log/telemetry.go deleted file mode 100644 index d46ca379..00000000 --- a/pkg/log/telemetry.go +++ /dev/null @@ -1,120 +0,0 @@ -package log - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - - "github.com/databacker/mysql-backup/pkg/config" - "github.com/databacker/mysql-backup/pkg/remote" - log "github.com/sirupsen/logrus" -) - -const ( - sourceField = "source" - sourceTelemetry = "telemetry" -) - -// NewTelemetry creates a new telemetry writer, which writes to the configured telemetry endpoint. 
-// NewTelemetry creates an initial connection, which it keeps open and then can reopen as needed for each write. -func NewTelemetry(conf config.Telemetry, ch chan<- int) (log.Hook, error) { - client, err := remote.GetTLSClient(conf.Certificates, conf.Credentials) - if err != nil { - return nil, err - } - req, err := http.NewRequest(http.MethodGet, conf.URL, nil) - if err != nil { - return nil, fmt.Errorf("error creating HTTP request: %w", err) - } - - // GET the telemetry endpoint; this is just done to check that it is valid. - // Other requests will be POSTs. - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("error requesting telemetry endpoint: %w", err) - } - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("error requesting telemetry endpoint: %s", resp.Status) - } - return &telemetry{conf: conf, client: client, ch: ch}, nil -} - -type telemetry struct { - conf config.Telemetry - client *http.Client - buffer []*log.Entry - // ch channel to indicate when done sending a message, in case needed for synchronization, e.g. testing. - // sends a count down the channel when done sending a message to the remote. The count is the number. - // of messages sent. - ch chan<- int -} - -// Levels the levels for which the hook should fire -func (t *telemetry) Levels() []log.Level { - return []log.Level{log.PanicLevel, log.FatalLevel, log.ErrorLevel, log.WarnLevel, log.InfoLevel, log.DebugLevel} -} - -// Fire send off a log entry. -func (t *telemetry) Fire(entry *log.Entry) error { - // send the log entry to the telemetry endpoint - // this is blocking, and we do not want to do so, so do it in a go routine - // and do not wait for the response. 
- - // if this message is from ourself, do not try to send it again - if entry.Data[sourceField] == sourceTelemetry { - return nil - } - t.buffer = append(t.buffer, entry) - if t.conf.BufferSize <= 1 || len(t.buffer) >= t.conf.BufferSize { - entries := t.buffer - t.buffer = nil - go func(entries []*log.Entry, ch chan<- int) { - if ch != nil { - defer func() { ch <- len(entries) }() - } - l := entry.Logger.WithField(sourceField, sourceTelemetry) - l.Level = entry.Logger.Level - remoteEntries := make([]LogEntry, len(entries)) - for i, entry := range entries { - // send the structured data to the telemetry endpoint - var runID string - if v, ok := entry.Data["run"]; ok { - runID = v.(string) - } - remoteEntries[i] = LogEntry{ - Run: runID, - Timestamp: entry.Time.Format("2006-01-02T15:04:05.000Z"), - Level: entry.Level.String(), - Fields: entry.Data, - Message: entry.Message, - } - } - // marshal to json - b, err := json.Marshal(remoteEntries) - if err != nil { - l.Errorf("error marshalling log entry: %v", err) - return - } - req, err := http.NewRequest(http.MethodPost, t.conf.URL, bytes.NewReader(b)) - if err != nil { - l.Errorf("error creating telemetry HTTP request: %v", err) - return - } - req.Header.Set("Content-Type", "application/json") - - // POST to the telemetry endpoint - resp, err := t.client.Do(req) - if err != nil { - l.Errorf("error connecting to telemetry endpoint: %v", err) - return - } - - if resp.StatusCode != http.StatusCreated { - l.Errorf("failed sending data telemetry endpoint: %s", resp.Status) - return - } - }(entries, t.ch) - } - return nil -} diff --git a/pkg/log/telemetry_test.go b/pkg/log/telemetry_test.go deleted file mode 100644 index a87ad1e0..00000000 --- a/pkg/log/telemetry_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package log - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "math/rand" - "net/http" - "testing" - "time" - - "github.com/databacker/mysql-backup/pkg/config" - utiltest 
"github.com/databacker/mysql-backup/pkg/internal/test" - "github.com/databacker/mysql-backup/pkg/remote" - - log "github.com/sirupsen/logrus" -) - -// TestSendLog tests sending logs. There is no `SendLog` function in the codebase, -// as it is all just a hook for logrus. This test is a test of the actual functionality. -func TestSendLog(t *testing.T) { - tests := []struct { - name string - level log.Level - fields map[string]interface{} - bufSize int - expected bool - }{ - {"normal", log.InfoLevel, nil, 1, true}, - {"fatal", log.FatalLevel, nil, 1, true}, - {"error", log.ErrorLevel, nil, 1, true}, - {"warn", log.WarnLevel, nil, 1, true}, - {"debug", log.DebugLevel, nil, 1, true}, - {"debug", log.DebugLevel, nil, 3, true}, - {"trace", log.TraceLevel, nil, 1, false}, - {"self-log", log.InfoLevel, map[string]interface{}{ - sourceField: sourceTelemetry, - }, 1, false}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var buf bytes.Buffer - server, fingerprint, clientKeys, err := utiltest.StartServer(1, func(w http.ResponseWriter, r *http.Request) { - _, err := buf.ReadFrom(r.Body) - if err != nil { - w.WriteHeader(http.StatusBadRequest) - return - } - }) - if err != nil { - t.Fatalf("failed to start server: %v", err) - } - defer server.Close() - - ch := make(chan int, 1) - logger := log.New() - hook, err := NewTelemetry(config.Telemetry{ - Connection: remote.Connection{ - URL: server.URL, - Certificates: []string{fingerprint}, - Credentials: base64.StdEncoding.EncodeToString(clientKeys[0]), - }, - BufferSize: tt.bufSize, - }, ch) - if err != nil { - t.Fatalf("failed to create telemetry hook: %v", err) - } - // add the hook and set the writer - logger.SetLevel(log.TraceLevel) - logger.AddHook(hook) - var localBuf bytes.Buffer - logger.SetOutput(&localBuf) - - buf.Reset() - var msgs []string - for i := 0; i < tt.bufSize; i++ { - msg := fmt.Sprintf("test message %d random %d", i, rand.Intn(1000)) - msgs = append(msgs, msg) - 
logger.WithFields(tt.fields).Log(tt.level, msg) - } - // wait for the message to get across, but only one second maximum, as it should be quick - // this allows us to handle those that should not have a message and never send anything - var msgCount int - select { - case msgCount = <-ch: - case <-time.After(1 * time.Second): - } - if tt.expected { - if buf.Len() == 0 { - t.Fatalf("expected log message, got none") - } - // message is sent as json, so convert to our structure and compare - var entries []LogEntry - if err := json.Unmarshal(buf.Bytes(), &entries); err != nil { - t.Fatalf("failed to unmarshal log entries: %v", err) - } - if len(entries) != msgCount { - t.Fatalf("channel sent %d log entries, actual got %d", msgCount, len(entries)) - } - if len(entries) != tt.bufSize { - t.Fatalf("expected %d log entries, got %d", tt.bufSize, len(entries)) - } - for i, le := range entries { - if le.Message != msgs[i] { - t.Errorf("message %d: expected message %q, got %q", i, msgs[i], le.Message) - } - if le.Level != tt.level.String() { - t.Errorf("expected level %q, got %q", tt.level.String(), le.Level) - } - } - } else { - if buf.Len() != 0 { - t.Fatalf("expected no log message, got one") - } - } - }) - } -} diff --git a/pkg/log/type.go b/pkg/log/type.go deleted file mode 100644 index ffb57f1b..00000000 --- a/pkg/log/type.go +++ /dev/null @@ -1,9 +0,0 @@ -package log - -type LogEntry struct { - Run string `json:"run"` - Timestamp string `json:"timestamp"` - Level string `json:"level"` - Fields map[string]interface{} `json:"fields"` - Message string `json:"message"` -} diff --git a/pkg/remote/get.go b/pkg/remote/get.go index 2a5e9631..8c2971c0 100644 --- a/pkg/remote/get.go +++ b/pkg/remote/get.go @@ -10,6 +10,7 @@ import ( "fmt" "net" "net/http" + "net/url" "strings" "time" @@ -31,16 +32,20 @@ func init() { // The credentials should be base64-encoded curve25519 private key. This is curve25519 and *not* ed25519; ed25519 calls this // the "seed key". 
It must be 32 bytes long. // The certs should be a list of fingerprints in the format "algo:hex-fingerprint". -func OpenConnection(u string, certs []string, credentials string) (resp *http.Response, err error) { +func OpenConnection(urlString string, certs []string, credentials string) (resp *http.Response, err error) { // open a connection to the URL. // Uses mTLS, but rather than verifying the CA that signed the client cert, // server should accept a self-signed cert. It then should check if the client's public key is in a known good list. - client, err := GetTLSClient(certs, credentials) + u, err := url.Parse(urlString) + if err != nil { + return nil, fmt.Errorf("error parsing URL: %w", err) + } + client, err := GetTLSClient(u.Hostname(), certs, credentials) if err != nil { return nil, fmt.Errorf("error creating TLS client: %w", err) } - req, err := http.NewRequest(http.MethodGet, u, nil) + req, err := http.NewRequest(http.MethodGet, urlString, nil) if err != nil { return nil, fmt.Errorf("error creating HTTP request: %w", err) } @@ -52,8 +57,27 @@ func OpenConnection(u string, certs []string, credentials string) (resp *http.Re // The credentials should be base64-encoded curve25519 private key. This is curve25519 and *not* ed25519; ed25519 calls this // the "seed key". It must be 32 bytes long. // The certs should be a list of fingerprints in the format "algo:hex-fingerprint". -func GetTLSClient(certs []string, credentials string) (client *http.Client, err error) { - // open a connection to the URL. 
+func GetTLSClient(serverName string, certs []string, credentials string) (client *http.Client, err error) { + tlsConfig, err := GetTLSConfig(serverName, certs, credentials) + if err != nil { + return nil, fmt.Errorf("error creating TLS config: %w", err) + } + client = &http.Client{ + Transport: &http.Transport{ + // Configure TLS via DialTLS + DialTLSContext: func(ctx context.Context, network, addr string) (net.Conn, error) { + return tls.Dial(network, addr, tlsConfig) + }, + }, + } + return client, nil +} + +// GetTLSConfig gets the TLS config a TLS client for a connection to a TLS server, given the digests of acceptable certs, and curve25519 key for authentication. +// The credentials should be base64-encoded curve25519 private key. This is curve25519 and *not* ed25519; ed25519 calls this +// the "seed key". It must be 32 bytes long. +// The certs should be a list of fingerprints in the format "algo:hex-fingerprint". +func GetTLSConfig(serverName string, certs []string, credentials string) (tlsConfig *tls.Config, err error) { // Uses mTLS, but rather than verifying the CA that signed the client cert, // server should accept a self-signed cert. It then should check if the client's public key is in a known good list. @@ -87,74 +111,61 @@ func GetTLSClient(certs []string, credentials string) (client *http.Client, err return nil, fmt.Errorf("error creating client certificate: %w", err) } - client = &http.Client{ - Transport: &http.Transport{ - // Configure TLS via DialTLS - DialTLSContext: func(ctx context.Context, network, addr string) (net.Conn, error) { - tlsConfig := &tls.Config{ - InsecureSkipVerify: true, // disable regular verification, because go has no way to do regular verification and only fallback to my function - VerifyPeerCertificate: func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { - // If verifiedChains is not empty, then the normal verification has passed. 
- if len(verifiedChains) > 0 { - return nil - } - - // get the address part of addr - host, _, err := net.SplitHostPort(addr) - if err != nil { - return fmt.Errorf("failed to parse address: %v", err) - } - - certs := make([]*x509.Certificate, len(rawCerts)) - for i, asn1Data := range rawCerts { - cert, err := x509.ParseCertificate(asn1Data) - if err != nil { - return fmt.Errorf("failed to parse certificate: %v", err) - } - certs[i] = cert - } - - // Try to verify the certificate chain using the system pool - opts := x509.VerifyOptions{ - Intermediates: x509.NewCertPool(), - DNSName: host, - } - for i, cert := range certs { - // skip the first cert, because it's the one we're trying to verify - if i == 0 { - continue - } - // add every other cert as a valid intermediate - opts.Intermediates.AddCert(cert) - } - - // if one of the certs is valid and verified, accept it - if _, err := certs[0].Verify(opts); err == nil { + tlsConfig = &tls.Config{ + ServerName: serverName, + InsecureSkipVerify: true, // disable regular verification, because go has no way to do regular verification and only fallback to my function + VerifyPeerCertificate: func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + // If verifiedChains is not empty, then the normal verification has passed. 
+ if len(verifiedChains) > 0 { + return nil + } + + certs := make([]*x509.Certificate, len(rawCerts)) + for i, asn1Data := range rawCerts { + cert, err := x509.ParseCertificate(asn1Data) + if err != nil { + return fmt.Errorf("failed to parse certificate: %v", err) + } + certs[i] = cert + } + + // Try to verify the certificate chain using the system pool + opts := x509.VerifyOptions{ + Intermediates: x509.NewCertPool(), + DNSName: tlsConfig.ServerName, + } + for i, cert := range certs { + // skip the first cert, because it's the one we're trying to verify + if i == 0 { + continue + } + // add every other cert as a valid intermediate + opts.Intermediates.AddCert(cert) + } + + // if one of the certs is valid and verified, accept it + if _, err := certs[0].Verify(opts); err == nil { + return nil + } + + // the cert presented by the server was not signed by a known CA, so fall back to our own list + for _, rawCert := range rawCerts { + fingerprint := fmt.Sprintf("%x", sha256.Sum256(rawCert)) + if trustedFingerprints, ok := trustedCertsByAlgo[utilremote.DigestSha256]; ok { + if _, ok := trustedFingerprints[fingerprint]; ok { + if validateCert(certs[0], tlsConfig.ServerName) { return nil } - - // the cert presented by the server was not signed by a known CA, so fall back to our own list - for _, rawCert := range rawCerts { - fingerprint := fmt.Sprintf("%x", sha256.Sum256(rawCert)) - if trustedFingerprints, ok := trustedCertsByAlgo[utilremote.DigestSha256]; ok { - if _, ok := trustedFingerprints[fingerprint]; ok { - if validateCert(certs[0], host) { - return nil - } - } - } - } - - // not in system or in the approved list - return fmt.Errorf("certificate not trusted") - }, - Certificates: []tls.Certificate{*clientCert}, + } } - return tls.Dial(network, addr, tlsConfig) - }, + } + + // not in system or in the approved list + return fmt.Errorf("certificate not trusted") }, + Certificates: []tls.Certificate{*clientCert}, } - return client, nil + return tlsConfig, nil } // 
validateCert given a cert that we decided to trust its cert or signature, make sure its properties are correct: diff --git a/pkg/storage/file/file.go b/pkg/storage/file/file.go index dda95ea6..c1f21786 100644 --- a/pkg/storage/file/file.go +++ b/pkg/storage/file/file.go @@ -1,6 +1,7 @@ package file import ( + "context" "io" "io/fs" "net/url" @@ -20,11 +21,11 @@ func New(u url.URL) *File { return &File{u, u.Path} } -func (f *File) Pull(source, target string, logger *log.Entry) (int64, error) { +func (f *File) Pull(ctx context.Context, source, target string, logger *log.Entry) (int64, error) { return copyFile(path.Join(f.path, source), target) } -func (f *File) Push(target, source string, logger *log.Entry) (int64, error) { +func (f *File) Push(ctx context.Context, target, source string, logger *log.Entry) (int64, error) { return copyFile(source, filepath.Join(f.path, target)) } @@ -40,7 +41,7 @@ func (f *File) URL() string { return f.url.String() } -func (f *File) ReadDir(dirname string, logger *log.Entry) ([]fs.FileInfo, error) { +func (f *File) ReadDir(ctx context.Context, dirname string, logger *log.Entry) ([]fs.FileInfo, error) { entries, err := os.ReadDir(filepath.Join(f.path, dirname)) if err != nil { @@ -57,7 +58,7 @@ func (f *File) ReadDir(dirname string, logger *log.Entry) ([]fs.FileInfo, error) return files, nil } -func (f *File) Remove(target string, logger *log.Entry) error { +func (f *File) Remove(ctx context.Context, target string, logger *log.Entry) error { return os.Remove(filepath.Join(f.path, target)) } diff --git a/pkg/storage/parse.go b/pkg/storage/parse.go index d5126b26..97c55ef6 100644 --- a/pkg/storage/parse.go +++ b/pkg/storage/parse.go @@ -3,11 +3,13 @@ package storage import ( "fmt" + "github.com/databacker/api/go/api" "github.com/databacker/mysql-backup/pkg/storage/credentials" "github.com/databacker/mysql-backup/pkg/storage/file" "github.com/databacker/mysql-backup/pkg/storage/s3" "github.com/databacker/mysql-backup/pkg/storage/smb" 
"github.com/databacker/mysql-backup/pkg/util" + "gopkg.in/yaml.v3" ) func ParseURL(url string, creds credentials.Creds) (Storage, error) { @@ -57,3 +59,66 @@ func ParseURL(url string, creds credentials.Creds) (Storage, error) { } return store, nil } + +// FromTarget parse an api.Target and return something that implements the Storage interface +func FromTarget(target api.Target) (store Storage, err error) { + u, err := util.SmartParse(target.URL) + if err != nil { + return nil, err + } + switch target.Type { + case api.TargetTypeS3: + var spec api.S3 + specBytes, err := yaml.Marshal(target.Spec) + if err != nil { + return nil, fmt.Errorf("error marshalling spec part of target: %w", err) + } + if err := yaml.Unmarshal(specBytes, &spec); err != nil { + return nil, fmt.Errorf("parsed yaml had kind S3, but spec invalid") + } + + opts := []s3.Option{} + if spec.Region != nil && *spec.Region != "" { + opts = append(opts, s3.WithRegion(*spec.Region)) + } + if spec.Endpoint != nil && *spec.Endpoint != "" { + opts = append(opts, s3.WithEndpoint(*spec.Endpoint)) + } + if spec.AccessKeyID != nil && *spec.AccessKeyID != "" { + opts = append(opts, s3.WithAccessKeyId(*spec.AccessKeyID)) + } + if spec.SecretAccessKey != nil && *spec.SecretAccessKey != "" { + opts = append(opts, s3.WithSecretAccessKey(*spec.SecretAccessKey)) + } + store = s3.New(*u, opts...) 
+ case api.TargetTypeSmb: + var spec api.SMB + specBytes, err := yaml.Marshal(target.Spec) + if err != nil { + return nil, fmt.Errorf("error marshalling spec part of target: %w", err) + } + if err := yaml.Unmarshal(specBytes, &spec); err != nil { + return nil, fmt.Errorf("parsed yaml had kind SMB, but spec invalid") + } + + opts := []smb.Option{} + if spec.Domain != nil && *spec.Domain != "" { + opts = append(opts, smb.WithDomain(*spec.Domain)) + } + if spec.Username != nil && *spec.Username != "" { + opts = append(opts, smb.WithUsername(*spec.Username)) + } + if spec.Password != nil && *spec.Password != "" { + opts = append(opts, smb.WithPassword(*spec.Password)) + } + store = smb.New(*u, opts...) + case api.TargetTypeFile: + store, err = ParseURL(target.URL, credentials.Creds{}) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unknown target type: %s", target.Type) + } + return store, nil +} diff --git a/pkg/storage/s3/s3.go b/pkg/storage/s3/s3.go index d187dc34..95cba29e 100644 --- a/pkg/storage/s3/s3.go +++ b/pkg/storage/s3/s3.go @@ -65,7 +65,7 @@ func New(u url.URL, opts ...Option) *S3 { return s } -func (s *S3) Pull(source, target string, logger *log.Entry) (int64, error) { +func (s *S3) Pull(ctx context.Context, source, target string, logger *log.Entry) (int64, error) { // get the s3 client client, err := s.getClient(logger) if err != nil { @@ -95,7 +95,7 @@ func (s *S3) Pull(source, target string, logger *log.Entry) (int64, error) { return n, nil } -func (s *S3) Push(target, source string, logger *log.Entry) (int64, error) { +func (s *S3) Push(ctx context.Context, target, source string, logger *log.Entry) (int64, error) { // get the s3 client client, err := s.getClient(logger) if err != nil { @@ -142,7 +142,7 @@ func (s *S3) URL() string { return s.url.String() } -func (s *S3) ReadDir(dirname string, logger *log.Entry) ([]fs.FileInfo, error) { +func (s *S3) ReadDir(ctx context.Context, dirname string, logger *log.Entry) 
([]fs.FileInfo, error) { // get the s3 client client, err := s.getClient(logger) if err != nil { @@ -168,7 +168,7 @@ func (s *S3) ReadDir(dirname string, logger *log.Entry) ([]fs.FileInfo, error) { return files, nil } -func (s *S3) Remove(target string, logger *log.Entry) error { +func (s *S3) Remove(ctx context.Context, target string, logger *log.Entry) error { // Get the AWS client client, err := s.getClient(logger) if err != nil { diff --git a/pkg/storage/smb/smb.go b/pkg/storage/smb/smb.go index 8f70d870..0ecbde74 100644 --- a/pkg/storage/smb/smb.go +++ b/pkg/storage/smb/smb.go @@ -1,6 +1,7 @@ package smb import ( + "context" "fmt" "io" "net" @@ -50,7 +51,7 @@ func New(u url.URL, opts ...Option) *SMB { return s } -func (s *SMB) Pull(source, target string, logger *log.Entry) (int64, error) { +func (s *SMB) Pull(ctx context.Context, source, target string, logger *log.Entry) (int64, error) { var ( copied int64 err error @@ -74,7 +75,7 @@ func (s *SMB) Pull(source, target string, logger *log.Entry) (int64, error) { return copied, err } -func (s *SMB) Push(target, source string, logger *log.Entry) (int64, error) { +func (s *SMB) Push(ctx context.Context, target, source string, logger *log.Entry) (int64, error) { var ( copied int64 err error @@ -109,7 +110,7 @@ func (s *SMB) URL() string { return s.url.String() } -func (s *SMB) ReadDir(dirname string, logger *log.Entry) ([]os.FileInfo, error) { +func (s *SMB) ReadDir(ctx context.Context, dirname string, logger *log.Entry) ([]os.FileInfo, error) { var ( err error infos []os.FileInfo @@ -121,7 +122,7 @@ func (s *SMB) ReadDir(dirname string, logger *log.Entry) ([]os.FileInfo, error) return infos, err } -func (s *SMB) Remove(target string, logger *log.Entry) error { +func (s *SMB) Remove(ctx context.Context, target string, logger *log.Entry) error { return s.exec(s.url, func(fs *smb2.Share, sharepath string) error { smbFilename := fmt.Sprintf("%s%c%s", sharepath, smb2.PathSeparator, 
filepath.Base(strings.ReplaceAll(target, ":", "-"))) return fs.Remove(smbFilename) diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index 48752668..86d5147f 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -1,6 +1,7 @@ package storage import ( + "context" "io/fs" log "github.com/sirupsen/logrus" @@ -10,9 +11,9 @@ type Storage interface { Protocol() string URL() string Clean(filename string) string - Push(target, source string, logger *log.Entry) (int64, error) - Pull(source, target string, logger *log.Entry) (int64, error) - ReadDir(dirname string, logger *log.Entry) ([]fs.FileInfo, error) + Push(ctx context.Context, target, source string, logger *log.Entry) (int64, error) + Pull(ctx context.Context, source, target string, logger *log.Entry) (int64, error) + ReadDir(ctx context.Context, dirname string, logger *log.Entry) ([]fs.FileInfo, error) // Remove remove a particular file - Remove(target string, logger *log.Entry) error + Remove(ctx context.Context, target string, logger *log.Entry) error } diff --git a/pkg/util/tracer.go b/pkg/util/tracer.go new file mode 100644 index 00000000..afec62a5 --- /dev/null +++ b/pkg/util/tracer.go @@ -0,0 +1,33 @@ +package util + +import ( + "context" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/trace" +) + +type contextKey string + +const ( + tracerKey contextKey = "mysql-backup-tracer-name" +) + +// ContextWithTracer adds a tracer to the context, using a key known only internally to this package. +func ContextWithTracer(ctx context.Context, tracer trace.Tracer) context.Context { + return context.WithValue(ctx, tracerKey, tracer) +} + +// GetTracerFromContext retrieves a tracer from the context, or returns a default tracer if none is found. 
+func GetTracerFromContext(ctx context.Context) trace.Tracer { + tracerAny := ctx.Value(tracerKey) + if tracerAny == nil { + return otel.Tracer("default") + } + tracer, ok := tracerAny.(trace.Tracer) + if !ok { + return otel.Tracer("default") + } + + return tracer +} diff --git a/test/backup_test.go b/test/backup_test.go index c1e5ff9e..8e712f8f 100644 --- a/test/backup_test.go +++ b/test/backup_test.go @@ -640,7 +640,8 @@ func runTest(t *testing.T, opts testOptions) { var results core.DumpResults if err := executor.Timer(timerOpts, func() error { - ret, err := executor.Dump(opts.dumpOptions) + ctx := context.Background() + ret, err := executor.Dump(ctx, opts.dumpOptions) results = ret return err }); err != nil {