From 2913a205bf8f3860db9d27ea78041e6c1b9671bb Mon Sep 17 00:00:00 2001 From: Chris Davis Date: Wed, 15 May 2024 20:44:36 +0000 Subject: [PATCH] b/340244046 initial port collect command from master to 2.4 Although this port copies the code from master to 2.4, there are issues related to permissions making it non-functional. This is a partial cherry-pick of the following PR's on master: DAOS-10625 control: Create the tool to collect the logs/config for support purpose (#11094) DAOS-13759 control: Update support collect-log tool. (#12906) DAOS-13763 control: Fix daos_metrics collection for support collect-log. (#12555) DAOS-13936 support: Collect the specific logs and Time range log for support (#13325) Change-Id: I168c14e177a5003c4e315595b1bf154e84cef473 --- src/control/cmd/daos_agent/main.go | 20 + src/control/cmd/daos_agent/support.go | 111 ++ src/control/cmd/daos_server/json_test.go | 79 ++ src/control/cmd/daos_server/main.go | 15 +- src/control/cmd/daos_server/support.go | 120 ++ src/control/cmd/daos_server/support_test.go | 29 + src/control/cmd/dmg/json_test.go | 2 +- src/control/cmd/dmg/main.go | 1 + src/control/cmd/dmg/pretty/printers.go | 39 + src/control/cmd/dmg/support.go | 267 ++++ src/control/common/archive.go | 62 + src/control/common/cmdutil/logging.go | 76 + src/control/common/file_utils.go | 71 + src/control/common/proto/ctl/common.pb.go | 4 +- src/control/common/proto/ctl/ctl_grpc.pb.go | 127 +- src/control/common/proto/ctl/support.pb.go | 315 +++++ src/control/common/proto/mgmt/mgmt_grpc.pb.go | 178 ++- src/control/lib/control/support.go | 79 ++ src/control/lib/support/README.md | 76 + src/control/lib/support/log.go | 985 +++++++++++++ src/control/lib/support/log_test.go | 1232 +++++++++++++++++ src/control/logging/syslog_test.go | 2 +- src/control/security/grpc_authorization.go | 1 + .../security/grpc_authorization_test.go | 1 + src/control/server/config/server.go | 1 + src/control/server/ctl_support_rpc.go | 40 + src/proto/Makefile | 16 +- src/proto/ctl/ctl.proto | 3 + src/proto/ctl/support.proto | 30 + .../control/daos_agent_support_collect_log.py | 51 + .../daos_agent_support_collect_log.yaml | 20 + .../daos_server_support_collect_log.py | 54 + .../daos_server_support_collect_log.yaml | 19 + .../ftest/control/dmg_support_collect_log.py | 50 + .../control/dmg_support_collect_log.yaml | 19 + src/tests/ftest/util/server_utils.py | 22 + src/tests/ftest/util/server_utils_base.py | 36 + 37 files changed, 4141 insertions(+), 112 deletions(-) create mode 100644 src/control/cmd/daos_agent/support.go create mode 100644 src/control/cmd/daos_server/json_test.go create mode 100644 src/control/cmd/daos_server/support.go create mode 100644 src/control/cmd/daos_server/support_test.go create mode 100644 src/control/cmd/dmg/support.go create mode 100644 src/control/common/archive.go create mode 100644 src/control/common/proto/ctl/support.pb.go create mode 100644 src/control/lib/control/support.go create mode 100644 src/control/lib/support/README.md create mode 100644 src/control/lib/support/log.go create mode 100644 src/control/lib/support/log_test.go create mode 100644 src/control/server/ctl_support_rpc.go create mode 100644 src/proto/ctl/support.proto create mode 100644 src/tests/ftest/control/daos_agent_support_collect_log.py create mode 100644 src/tests/ftest/control/daos_agent_support_collect_log.yaml create mode 100644 src/tests/ftest/control/daos_server_support_collect_log.py create mode 100644 src/tests/ftest/control/daos_server_support_collect_log.yaml create mode 100644 
src/tests/ftest/control/dmg_support_collect_log.py create mode 100644 src/tests/ftest/control/dmg_support_collect_log.yaml diff --git a/src/control/cmd/daos_agent/main.go b/src/control/cmd/daos_agent/main.go index 1518207a3cbb..fc961833f5f9 100644 --- a/src/control/cmd/daos_agent/main.go +++ b/src/control/cmd/daos_agent/main.go @@ -39,6 +39,7 @@ type cliOptions struct { DumpInfo dumpAttachInfoCmd `command:"dump-attachinfo" description:"Dump system attachinfo"` DumpTopo hwprov.DumpTopologyCmd `command:"dump-topology" description:"Dump system topology"` NetScan netScanCmd `command:"net-scan" description:"Perform local network fabric scan"` + Support supportCmd `command:"support" description:"Perform debug tasks to help support team"` } type ( @@ -95,6 +96,25 @@ func exitWithError(log logging.Logger, err error) { os.Exit(1) } +type ( + supportAgentConfig interface { + setSupportConf(string) + getSupportConf() string + } + + supportAgentConfigCmd struct { + supportCfgPath string + } +) + +func (cmd *supportAgentConfigCmd) setSupportConf(cfgPath string) { + cmd.supportCfgPath = cfgPath +} + +func (cmd *supportAgentConfigCmd) getSupportConf() string { + return cmd.supportCfgPath +} + func parseOpts(args []string, opts *cliOptions, invoker control.Invoker, log *logging.LeveledLogger) error { var wroteJSON atm.Bool p := flags.NewParser(opts, flags.Default) diff --git a/src/control/cmd/daos_agent/support.go b/src/control/cmd/daos_agent/support.go new file mode 100644 index 000000000000..59f4a8d2eaa9 --- /dev/null +++ b/src/control/cmd/daos_agent/support.go @@ -0,0 +1,111 @@ +// +// (C) Copyright 2022-2023 Intel Corporation. +// +// SPDX-License-Identifier: BSD-2-Clause-Patent +// + +package main + +import ( + "fmt" + "os" + "path/filepath" + "time" + + "github.com/daos-stack/daos/src/control/common/cmdutil" + "github.com/daos-stack/daos/src/control/lib/support" +) + +// supportCmd is the struct representing the top-level support subcommand. +type supportCmd struct { + CollectLog collectLogCmd `command:"collect-log" description:"Collect logs from client"` + agentConfigPath string +} + +// collectLogCmd is the struct representing the command to collect the log from client side. +type collectLogCmd struct { + supportAgentConfigCmd + cmdutil.LogCmd + support.CollectLogSubCmd +} + +func (cmd *collectLogCmd) Execute(_ []string) error { + err := cmd.DateTimeValidate() + if err != nil { + return err + } + + var LogCollection = map[int32][]string{ + support.CopyAgentConfigEnum: {""}, + support.CollectAgentLogEnum: {""}, + support.CollectAgentCmdEnum: support.AgentCmd, + support.CollectClientLogEnum: {""}, + support.CollectSystemCmdEnum: support.SystemCmd, + } + + // Default 3 steps of log/conf collection. 
+ progress := support.ProgressBar{ + Total: len(LogCollection), + NoDisplay: false, + } + + if cmd.Archive { + progress.Total++ + } + + // Copy the custom log folder + if cmd.ExtraLogsDir != "" { + LogCollection[support.CollectExtraLogsDirEnum] = []string{""} + progress.Total++ + } + + if cmd.TargetFolder == "" { + folderName := fmt.Sprintf("daos_support_client_logs_%s", time.Now().Format(time.RFC3339)) + cmd.TargetFolder = filepath.Join(os.TempDir(), folderName) + } + + cmd.Infof("Support Logs will be copied to %s", cmd.TargetFolder) + + progress.Steps = 100 / progress.Total + params := support.CollectLogsParams{} + params.TargetFolder = cmd.TargetFolder + params.ExtraLogsDir = cmd.ExtraLogsDir + params.Config = cmd.getSupportConf() + params.LogStartDate = cmd.LogStartDate + params.LogEndDate = cmd.LogEndDate + params.LogStartTime = cmd.LogStartTime + params.LogEndTime = cmd.LogEndTime + for logFunc, logCmdSet := range LogCollection { + for _, logCmd := range logCmdSet { + cmd.Debugf("Log Function Enum = %d -- Log Collect Cmd = %s ", logFunc, logCmd) + params.LogFunction = logFunc + params.LogCmd = logCmd + + err := support.CollectSupportLog(cmd.Logger, params) + if err != nil { + fmt.Println(err) + if cmd.StopOnError { + return err + } + } + } + fmt.Printf(progress.Display()) + } + + if cmd.Archive { + cmd.Debugf("Archiving the Log Folder %s", cmd.TargetFolder) + err := support.ArchiveLogs(cmd.Logger, params) + if err != nil { + return err + } + + // FIXME: DAOS-13290 Workaround for files held open + for i := 1; i < 3; i++ { + os.RemoveAll(cmd.TargetFolder) + } + } + + fmt.Printf(progress.Display()) + + return nil +} diff --git a/src/control/cmd/daos_server/json_test.go b/src/control/cmd/daos_server/json_test.go new file mode 100644 index 000000000000..89ba7fde598a --- /dev/null +++ b/src/control/cmd/daos_server/json_test.go @@ -0,0 +1,79 @@ +// +// (C) Copyright 2024 Intel Corporation. +// +// SPDX-License-Identifier: BSD-2-Clause-Patent +// + +package main + +import ( + "bytes" + "encoding/json" + "io" + "os" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + + "github.com/daos-stack/daos/src/control/common/cmdutil" + "github.com/daos-stack/daos/src/control/common/test" + "github.com/daos-stack/daos/src/control/logging" +) + +type jsonCmdTest struct { + name string + cmd string + setHelpers func(opts *mainOpts) + expOut interface{} // JSON encoded data should output. + expErr error +} + +func runJSONCmdTests(t *testing.T, log *logging.LeveledLogger, cmdTests []jsonCmdTest) { + t.Helper() + + for _, tc := range cmdTests { + t.Run(tc.name, func(t *testing.T) { + t.Helper() + + // Replace os.Stdout so that we can verify the generated output. + var result bytes.Buffer + r, w, _ := os.Pipe() + done := make(chan struct{}) + go func() { + _, _ = io.Copy(&result, r) + close(done) + }() + stdout := os.Stdout + defer func() { + os.Stdout = stdout + }() + os.Stdout = w + + var opts mainOpts + if tc.setHelpers != nil { + tc.setHelpers(&opts) + } + test.CmpErr(t, tc.expErr, parseOpts(strings.Split(tc.cmd, " "), &opts, log)) + + w.Close() + <-done + + // Verify only JSON gets printed. 
+ if !json.Valid(result.Bytes()) { + t.Fatalf("invalid JSON in response: %s", result.String()) + } + + var sb strings.Builder + if err := cmdutil.OutputJSON(&sb, tc.expOut, tc.expErr); err != nil { + if err != tc.expErr { + t.Fatalf("OutputJSON: %s", err) + } + } + + if diff := cmp.Diff(sb.String(), result.String()); diff != "" { + t.Fatalf("unexpected stdout (-want, +got):\n%s\n", diff) + } + }) + } +} diff --git a/src/control/cmd/daos_server/main.go b/src/control/cmd/daos_server/main.go index 0de6f55bd6b1..763e58901b6f 100644 --- a/src/control/cmd/daos_server/main.go +++ b/src/control/cmd/daos_server/main.go @@ -28,6 +28,8 @@ import ( const defaultConfigFile = "daos_server.yml" +var errJSONOutputNotSupported = errors.New("this subcommand does not support JSON output") + type execTestFn func() error type mainOpts struct { @@ -51,6 +53,7 @@ type mainOpts struct { MgmtSvc msCmdRoot `command:"ms" description:"Perform tasks related to management service replicas"` DumpTopo hwprov.DumpTopologyCmd `command:"dump-topology" description:"Dump system topology"` Config configCmd `command:"config" alias:"cfg" description:"Perform tasks related to configuration of hardware on the local server"` + Support supportCmd `command:"support" description:"Perform debug tasks to help support team"` // Allow a set of tests to be run before executing commands. preExecTests []execTestFn @@ -92,10 +95,14 @@ func parseOpts(args []string, opts *mainOpts, log *logging.LeveledLogger) error return errors.Errorf("unexpected commandline arguments: %v", cmdArgs) } - if jsonCmd, ok := cmd.(cmdutil.JSONOutputter); ok && opts.JSON { - jsonCmd.EnableJSONOutput(os.Stdout, &wroteJSON) - // disable output on stdout other than JSON - log.ClearLevel(logging.LogLevelInfo) + if opts.JSON { + if jsonCmd, ok := cmd.(cmdutil.JSONOutputter); ok { + jsonCmd.EnableJSONOutput(os.Stdout, &wroteJSON) + // disable output on stdout other than JSON + log.ClearLevel(logging.LogLevelInfo) + } else { + return errJSONOutputNotSupported + } } switch cmd.(type) { diff --git a/src/control/cmd/daos_server/support.go b/src/control/cmd/daos_server/support.go new file mode 100644 index 000000000000..77736748f569 --- /dev/null +++ b/src/control/cmd/daos_server/support.go @@ -0,0 +1,120 @@ +// +// (C) Copyright 2022-2023 Intel Corporation. +// +// SPDX-License-Identifier: BSD-2-Clause-Patent +// + +package main + +import ( + "fmt" + "os" + "path/filepath" + "time" + + "github.com/daos-stack/daos/src/control/common/cmdutil" + "github.com/daos-stack/daos/src/control/lib/support" +) + +// supportCmd is the struct representing the top-level support subcommand. +type supportCmd struct { + CollectLog collectLogCmd `command:"collect-log" description:"Collect logs from server"` +} + +// collectLogCmd is the struct representing the command to collect the Logs/config for support purpose +type collectLogCmd struct { + cfgCmd + cmdutil.LogCmd + support.CollectLogSubCmd + support.LogTypeSubCmd +} + +func (cmd *collectLogCmd) Execute(_ []string) error { + var LogCollection = map[int32][]string{} + err := cmd.DateTimeValidate() + if err != nil { + return err + } + + // Only collect the specific logs Admin,Control or Engine. + // This will ignore the system information collection. 
+ if cmd.LogType != "" { + LogCollection[support.CollectServerLogEnum], err = cmd.LogTypeValidate() + if err != nil { + return err + } + } else { + LogCollection[support.CopyServerConfigEnum] = []string{""} + LogCollection[support.CollectSystemCmdEnum] = support.SystemCmd + LogCollection[support.CollectDaosServerCmdEnum] = support.DaosServerCmd + LogCollection[support.CollectServerLogEnum], err = cmd.LogTypeValidate() + if err != nil { + return err + } + } + + // Default 4 steps of log/conf collection. + progress := support.ProgressBar{ + Total: len(LogCollection), + NoDisplay: false, + } + + if cmd.Archive { + progress.Total++ + } + + // Copy custom log folder + if cmd.ExtraLogsDir != "" { + LogCollection[support.CollectExtraLogsDirEnum] = []string{""} + progress.Total++ + } + + if cmd.TargetFolder == "" { + folderName := fmt.Sprintf("daos_support_server_logs_%s", time.Now().Format(time.RFC3339)) + cmd.TargetFolder = filepath.Join(os.TempDir(), folderName) + } + cmd.Infof("Support logs will be copied to %s", cmd.TargetFolder) + + progress.Steps = 100 / progress.Total + params := support.CollectLogsParams{} + params.Config = cmd.configPath() + params.TargetFolder = cmd.TargetFolder + params.ExtraLogsDir = cmd.ExtraLogsDir + params.LogStartDate = cmd.LogStartDate + params.LogEndDate = cmd.LogEndDate + params.LogStartTime = cmd.LogStartTime + params.LogEndTime = cmd.LogEndTime + for logFunc, logCmdSet := range LogCollection { + for _, logCmd := range logCmdSet { + cmd.Debugf("Log Function Enum = %d -- Log Collect Cmd = %s ", logFunc, logCmd) + params.LogFunction = logFunc + params.LogCmd = logCmd + + err := support.CollectSupportLog(cmd.Logger, params) + if err != nil { + fmt.Println(err) + if cmd.StopOnError { + return err + } + } + } + fmt.Printf(progress.Display()) + } + + if cmd.Archive { + cmd.Debugf("Archiving the Log Folder %s", cmd.TargetFolder) + err := support.ArchiveLogs(cmd.Logger, params) + if err != nil { + return err + } + + // FIXME: DAOS-13290 Workaround for files held open + for i := 1; i < 3; i++ { + os.RemoveAll(cmd.TargetFolder) + } + } + + fmt.Printf(progress.Display()) + + return nil +} diff --git a/src/control/cmd/daos_server/support_test.go b/src/control/cmd/daos_server/support_test.go new file mode 100644 index 000000000000..d73878c0e253 --- /dev/null +++ b/src/control/cmd/daos_server/support_test.go @@ -0,0 +1,29 @@ +// +// (C) Copyright 2024 Intel Corporation. +// +// SPDX-License-Identifier: BSD-2-Clause-Patent +// + +package main + +import ( + "testing" + + "github.com/daos-stack/daos/src/control/logging" +) + +// TestDaosServer_Support_Commands_JSON verifies that the JSON-output flag is disabled for support +// command syntax. +func TestDaosServer_Support_Commands_JSON(t *testing.T) { + log := logging.NewCommandLineLogger() + + runJSONCmdTests(t, log, []jsonCmdTest{ + { + "Collect-log; JSON", + "support collect-log -j", + nil, + nil, + errJSONOutputNotSupported, + }, + }) +} diff --git a/src/control/cmd/dmg/json_test.go b/src/control/cmd/dmg/json_test.go index 6e053cb44ac4..b4b14b8f19d0 100644 --- a/src/control/cmd/dmg/json_test.go +++ b/src/control/cmd/dmg/json_test.go @@ -67,7 +67,7 @@ func TestDmg_JsonOutput(t *testing.T) { testArgs := append([]string{"-i", "--json"}, args...) 
switch strings.Join(args, " ") { case "version", "telemetry config", "telemetry run", "config generate", - "manpage", "system set-prop": + "manpage", "system set-prop", "support collect-log": return case "storage nvme-rebind": testArgs = append(testArgs, "-l", "foo.com", "-a", diff --git a/src/control/cmd/dmg/main.go b/src/control/cmd/dmg/main.go index a088e0f80223..a15c65867a46 100644 --- a/src/control/cmd/dmg/main.go +++ b/src/control/cmd/dmg/main.go @@ -122,6 +122,7 @@ type cliOptions struct { Config configCmd `command:"config" alias:"cfg" description:"Perform tasks related to configuration of hardware on remote servers"` System SystemCmd `command:"system" alias:"sys" description:"Perform distributed tasks related to DAOS system"` Network NetCmd `command:"network" alias:"net" description:"Perform tasks related to network devices attached to remote servers"` + Support supportCmd `command:"support" alias:"supp" description:"Perform debug tasks to help support team"` Pool PoolCmd `command:"pool" description:"Perform tasks related to DAOS pools"` Cont ContCmd `command:"container" alias:"cont" description:"Perform tasks related to DAOS containers"` Version versionCmd `command:"version" description:"Print dmg version"` diff --git a/src/control/cmd/dmg/pretty/printers.go b/src/control/cmd/dmg/pretty/printers.go index c0b9d33cb893..b9e7ffc9846e 100644 --- a/src/control/cmd/dmg/pretty/printers.go +++ b/src/control/cmd/dmg/pretty/printers.go @@ -14,6 +14,7 @@ import ( "github.com/pkg/errors" + "github.com/daos-stack/daos/src/control/common" "github.com/daos-stack/daos/src/control/fault" "github.com/daos-stack/daos/src/control/lib/control" "github.com/daos-stack/daos/src/control/lib/txtfmt" @@ -155,3 +156,41 @@ func PrintResponseErrors(resp hostErrorsGetter, out io.Writer, opts ...PrintConf return nil } + +// PrintErrorsSummary generates a human-readable representation of the supplied +// HostErrorsMap summary struct and writes it to the supplied io.Writer. +func UpdateErrorSummary(resp hostErrorsGetter, cmd string, out io.Writer, opts ...PrintConfigOption) error { + if common.InterfaceIsNil(resp) { + return errors.Errorf("nil %T", resp) + } + + if len(resp.GetHostErrors()) > 0 { + setTitle := "Hosts" + cmdTitle := "Command" + errTitle := "Error" + + tablePrint := txtfmt.NewTableFormatter(setTitle, cmdTitle, errTitle) + tablePrint.InitWriter(out) + table := []txtfmt.TableRow{} + + for _, errStr := range resp.GetHostErrors().Keys() { + errHosts := getPrintHosts(resp.GetHostErrors()[errStr].HostSet.RangedString(), opts...) + row := txtfmt.TableRow{setTitle: errHosts} + + // Unpack the root cause error. If it's a fault, + // just print the description. + hostErr := errors.Cause(resp.GetHostErrors()[errStr].HostError) + row[cmdTitle] = cmd + row[errTitle] = hostErr.Error() + if f, ok := hostErr.(*fault.Fault); ok { + row[errTitle] = f.Description + } + + table = append(table, row) + } + + tablePrint.Format(table) + } + + return nil +} diff --git a/src/control/cmd/dmg/support.go b/src/control/cmd/dmg/support.go new file mode 100644 index 000000000000..9bc7a5d91cec --- /dev/null +++ b/src/control/cmd/dmg/support.go @@ -0,0 +1,267 @@ +// +// (C) Copyright 2022-2023 Intel Corporation. 
+// +// SPDX-License-Identifier: BSD-2-Clause-Patent +// + +package main + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/daos-stack/daos/src/control/cmd/dmg/pretty" + "github.com/daos-stack/daos/src/control/common/cmdutil" + "github.com/daos-stack/daos/src/control/lib/control" + "github.com/daos-stack/daos/src/control/lib/support" +) + +// supportCmd is the struct representing the top-level support subcommand. +type supportCmd struct { + CollectLog collectLogCmd `command:"collect-log" description:"Collect logs from servers"` +} + +// collectLogCmd is the struct representing the command to collect the Logs/config for support purpose +type collectLogCmd struct { + baseCmd + cfgCmd + ctlInvokerCmd + hostListCmd + cmdutil.JSONOutputCmd + support.CollectLogSubCmd + bld strings.Builder + support.LogTypeSubCmd +} + +// gRPC call to initiate the rsync and copy the logs to Admin (central location). +func (cmd *collectLogCmd) rsyncLog() error { + hostName, err := support.GetHostName() + if err != nil { + return err + } + + req := &control.CollectLogReq{ + TargetFolder: cmd.TargetFolder, + AdminNode: hostName, + LogFunction: support.RsyncLogEnum, + } + cmd.Debugf("Rsync logs from servers to %s:%s ", hostName, cmd.TargetFolder) + resp, err := control.CollectLog(cmd.MustLogCtx(), cmd.ctlInvoker, req) + if err != nil && cmd.StopOnError { + return err + } + if len(resp.GetHostErrors()) > 0 { + if err := pretty.UpdateErrorSummary(resp, "rsync", &cmd.bld); err != nil { + return err + } + return resp.Errors() + } + + return nil +} + +// gRPC call to Archive the logs on individual servers. +func (cmd *collectLogCmd) archLogsOnServer() error { + hostName, err := support.GetHostName() + if err != nil { + return err + } + + req := &control.CollectLogReq{ + TargetFolder: cmd.TargetFolder, + AdminNode: hostName, + LogFunction: support.ArchiveLogsEnum, + } + cmd.Debugf("Archiving the Log Folder %s", cmd.TargetFolder) + resp, err := control.CollectLog(cmd.MustLogCtx(), cmd.ctlInvoker, req) + if err != nil && cmd.StopOnError { + return err + } + if len(resp.GetHostErrors()) > 0 { + if err := pretty.UpdateErrorSummary(resp, "archive", &cmd.bld); err != nil { + return err + } + return resp.Errors() + } + + return nil +} + +// Execute is run when supportCmd activates. +func (cmd *collectLogCmd) Execute(_ []string) error { + // Default log collection set + var LogCollection = map[int32][]string{} + var DmgInfoCollection = map[int32][]string{} + + err := cmd.DateTimeValidate() + if err != nil { + return err + } + + // Only collect the specific logs Admin,Control or Engine. + // This will ignore the system information collection. + if cmd.LogType != "" { + LogCollection[support.CollectServerLogEnum], err = cmd.LogTypeValidate() + if err != nil { + return err + } + } else { + // Default collect everything from servers + LogCollection[support.CollectSystemCmdEnum] = support.SystemCmd + LogCollection[support.CollectDaosServerCmdEnum] = support.DaosServerCmd + LogCollection[support.CopyServerConfigEnum] = []string{""} + LogCollection[support.CollectServerLogEnum], err = cmd.LogTypeValidate() + if err != nil { + return err + } + + // dmg command info collection set + DmgInfoCollection[support.CollectDmgCmdEnum] = support.DmgCmd + DmgInfoCollection[support.CollectDmgDiskInfoEnum] = []string{""} + } + + // set of support collection steps to show in progress bar + progress := support.ProgressBar{ + Total: len(LogCollection) + len(DmgInfoCollection) + 1, // Extra 1 is for rsync operation. 
+ NoDisplay: cmd.JSONOutputEnabled(), + } + + // Add custom log location + if cmd.ExtraLogsDir != "" { + LogCollection[support.CollectExtraLogsDirEnum] = []string{""} + progress.Total++ + } + + // Increase progress counter for Archive if enabled + if cmd.Archive { + progress.Total++ + } + progress.Steps = 100 / progress.Total + + // Default TargetFolder location where logs will be copied. + // Included Date and time stamp to the log folder. + if cmd.TargetFolder == "" { + folderName := fmt.Sprintf("daos_support_server_logs_%s", time.Now().Format(time.RFC3339)) + cmd.TargetFolder = filepath.Join(os.TempDir(), folderName) + } + cmd.Infof("Support logs will be copied to %s", cmd.TargetFolder) + if err := os.Mkdir(cmd.TargetFolder, 0700); err != nil && !os.IsExist(err) { + return err + } + + // Check if DAOS Management Service is up and running + params := support.CollectLogsParams{} + params.Config = cmd.cfgCmd.config.Path + params.LogFunction = support.CollectDmgCmdEnum + params.TargetFolder = cmd.TargetFolder + params.LogCmd = "dmg system query" + + err = support.CollectSupportLog(cmd.Logger, params) + + if err != nil { + return err + } + + // Copy log/config file to TargetFolder on all servers + for logFunc, logCmdSet := range LogCollection { + for _, logCmd := range logCmdSet { + cmd.Debugf("Log Function %d -- Log Collect Cmd %s ", logFunc, logCmd) + ctx := cmd.MustLogCtx() + req := &control.CollectLogReq{ + TargetFolder: cmd.TargetFolder, + ExtraLogsDir: cmd.ExtraLogsDir, + LogFunction: logFunc, + LogCmd: logCmd, + LogStartDate: cmd.LogStartDate, + LogEndDate: cmd.LogEndDate, + LogStartTime: cmd.LogStartTime, + LogEndTime: cmd.LogEndTime, + StopOnError: cmd.StopOnError, + } + req.SetHostList(cmd.hostlist) + + resp, err := control.CollectLog(ctx, cmd.ctlInvoker, req) + if err != nil && cmd.StopOnError { + return err + } + if len(resp.GetHostErrors()) > 0 { + if err := pretty.UpdateErrorSummary(resp, logCmd, &cmd.bld); err != nil { + return err + } + + if cmd.StopOnError { + return resp.Errors() + } + } + } + fmt.Printf(progress.Display()) + } + + // Run dmg command info collection set + params = support.CollectLogsParams{} + params.Config = cmd.cfgCmd.config.Path + params.TargetFolder = cmd.TargetFolder + params.ExtraLogsDir = cmd.ExtraLogsDir + params.JsonOutput = cmd.JSONOutputEnabled() + params.Hostlist = strings.Join(cmd.hostlist, " ") + for logFunc, logCmdSet := range DmgInfoCollection { + for _, logCmd := range logCmdSet { + params.LogFunction = logFunc + params.LogCmd = logCmd + + err := support.CollectSupportLog(cmd.Logger, params) + if err != nil { + if cmd.StopOnError { + return err + } + } + } + fmt.Printf(progress.Display()) + } + + // R sync the logs from servers + rsyncerr := cmd.rsyncLog() + fmt.Printf(progress.Display()) + if rsyncerr != nil && cmd.StopOnError { + return rsyncerr + } + + // Archive the logs + if cmd.Archive { + // Archive the logs on Admin Node + cmd.Debugf("Archiving the Log Folder on Admin Node%s", cmd.TargetFolder) + err := support.ArchiveLogs(cmd.Logger, params) + if err != nil && cmd.StopOnError { + return err + } + + // Archive the logs on Server node via gRPC in case of rsync failure and logs can not be + // copied to central/Admin node. + if rsyncerr != nil { + err = cmd.archLogsOnServer() + if err != nil && cmd.StopOnError { + return err + } + } + fmt.Printf(progress.Display()) + } + + fmt.Printf(progress.Display()) + + if cmd.JSONOutputEnabled() { + return cmd.OutputJSON(nil, err) + } + + // Print the support command summary. 
+ if len(cmd.bld.String()) == 0 { + fmt.Println("Summary : All Commands Successfully Executed") + } else { + fmt.Println("Summary :") + cmd.Info(cmd.bld.String()) + } + + return nil +} diff --git a/src/control/common/archive.go b/src/control/common/archive.go new file mode 100644 index 000000000000..19b91315c3d5 --- /dev/null +++ b/src/control/common/archive.go @@ -0,0 +1,62 @@ +// +// (C) Copyright 2022-2023 Intel Corporation. +// +// SPDX-License-Identifier: BSD-2-Clause-Patent +// + +package common + +import ( + "archive/tar" + "compress/gzip" + "io" + "os" + "path/filepath" +) + +// Archive and create the *tar.gz of the given folder. +func FolderCompress(src string, buf io.Writer) error { + gzipWriter := gzip.NewWriter(buf) + tarWriter := tar.NewWriter(gzipWriter) + + // Loop thorough the folder + filepath.Walk(src, func(file string, fi os.FileInfo, err error) error { + // generate tar File header + header, err := tar.FileInfoHeader(fi, file) + if err != nil { + return err + } + + // Convert filepath / to slash + header.Name = filepath.ToSlash(file) + + // write the tar header + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + + // Write file content if it's not directory + if !fi.IsDir() { + data, err := os.Open(file) + if err != nil { + return err + } + if _, err := io.Copy(tarWriter, data); err != nil { + return err + } + } + return nil + }) + + // Create the tar + if err := tarWriter.Close(); err != nil { + return err + } + + // Create the gzip + if err := gzipWriter.Close(); err != nil { + return err + } + + return nil +} diff --git a/src/control/common/cmdutil/logging.go b/src/control/common/cmdutil/logging.go index 5c713d5dca77..cb87ab7ea521 100644 --- a/src/control/common/cmdutil/logging.go +++ b/src/control/common/cmdutil/logging.go @@ -8,7 +8,11 @@ package cmdutil import ( "context" + "os" + "github.com/pkg/errors" + + "github.com/daos-stack/daos/src/control/common" "github.com/daos-stack/daos/src/control/logging" ) @@ -26,6 +30,12 @@ type ( LogCmd struct { logging.Logger } + // LogConfig contains parameters used to configure the logger. + LogConfig struct { + LogFile string + LogLevel common.ControlLogLevel + JSON bool + } ) // SetLog sets the logger for the command. @@ -47,3 +57,69 @@ func (cmd *LogCmd) MustLogCtx() context.Context { } return ctx } + +// ConfigureLogger configures the logger according to the requested config. +func ConfigureLogger(logIn logging.Logger, cfg LogConfig) error { + log, ok := logIn.(*logging.LeveledLogger) + if !ok { + return errors.New("logger is not a LeveledLogger") + } + + // Set log level mask for default logger from config, + // unless it was explicitly set to debug via CLI flag. 
+ applyLogConfig := func() error { + switch logging.LogLevel(cfg.LogLevel) { + case logging.LogLevelTrace: + log.SetLevel(logging.LogLevelTrace) + log.Debugf("Switching control log level to TRACE") + case logging.LogLevelDebug: + log.SetLevel(logging.LogLevelDebug) + log.Debugf("Switching control log level to DEBUG") + case logging.LogLevelNotice: + log.Debugf("Switching control log level to NOTICE") + log.SetLevel(logging.LogLevelNotice) + case logging.LogLevelError: + log.Debugf("Switching control log level to ERROR") + log.SetLevel(logging.LogLevelError) + } + + if cfg.JSON { + log = log.WithJSONOutput() + } + + log.Debugf("configured logging: level=%s, file=%s, json=%v", + cfg.LogLevel, cfg.LogFile, cfg.JSON) + + return nil + } + + hostname, err := os.Hostname() + if err != nil { + return errors.Wrap(err, "getting hostname") + } + + // Set log file for default logger if specified in config. + if cfg.LogFile != "" { + f, err := common.AppendFile(cfg.LogFile) + if err != nil { + return errors.Wrap(err, "create log file") + } + + log.Infof("%s logging to file %s", os.Args[0], cfg.LogFile) + + // Create an additional set of loggers which append everything + // to the specified file. + log = log. + WithErrorLogger(logging.NewErrorLogger(hostname, f)). + WithNoticeLogger(logging.NewNoticeLogger(hostname, f)). + WithInfoLogger(logging.NewInfoLogger(hostname, f)). + WithDebugLogger(logging.NewDebugLogger(f)). + WithTraceLogger(logging.NewTraceLogger(f)) + + return applyLogConfig() + } + + log.Info("no control log file specified; logging to stdout") + + return applyLogConfig() +} diff --git a/src/control/common/file_utils.go b/src/control/common/file_utils.go index 8b5eda54b558..8b5ebe818759 100644 --- a/src/control/common/file_utils.go +++ b/src/control/common/file_utils.go @@ -8,6 +8,7 @@ package common import ( "fmt" + "io" "io/ioutil" "os" "os/exec" @@ -258,6 +259,76 @@ func FindBinary(binName string) (string, error) { return adjPath, nil } +// CpFile copies a file from src to dst. +func CpFile(src, dst string) error { + in, err := os.Open(src) + if err != nil { + return err + } + defer in.Close() + + out, err := os.Create(dst) + if err != nil { + return err + } + defer out.Close() + + _, err = io.Copy(out, in) + if err != nil { + return err + } + + return nil +} + +// Copy the Directory from source to destination. +func CpDir(source string, dest string) error { + // get properties of source dir + sourceinfo, err := os.Stat(source) + if err != nil { + return errors.Wrap(err, "unable to get FileInfo structure") + } + + // create dest dir + err = os.MkdirAll(dest, sourceinfo.Mode()) + if err != nil { + return errors.Wrap(err, "unable to create destination Folder") + } + + directory, _ := os.Open(source) + objects, err := directory.Readdir(-1) + + for _, obj := range objects { + sourceFile := source + "/" + obj.Name() + destinationFile := dest + "/" + obj.Name() + + if obj.IsDir() { + // create sub-directories - recursively + err = CpDir(sourceFile, destinationFile) + if err != nil { + return errors.Wrap(err, "unable to Copy Dir") + } + } else { + // perform the file copy + err = CpFile(sourceFile, destinationFile) + if err != nil { + return errors.Wrap(err, "unable to Copy File") + } + } + + } + return nil +} + +// Check if file or directory that starts with . which is hidden +func IsHidden(filename string) bool { + if filename != "" && filename[0:1] == "." 
{ + return true + } + + return false +} + // Normalize the input path with removing redundant separators, up-level reference, changing relative // path to absolute one, etc. func NormalizePath(p string) (np string, err error) { diff --git a/src/control/common/proto/ctl/common.pb.go b/src/control/common/proto/ctl/common.pb.go index 6c2562644078..f1d7650fe11d 100644 --- a/src/control/common/proto/ctl/common.pb.go +++ b/src/control/common/proto/ctl/common.pb.go @@ -5,8 +5,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.5.0 +// protoc-gen-go v1.33.0 +// protoc v5.28.0--dev // source: ctl/common.proto package ctl diff --git a/src/control/common/proto/ctl/ctl_grpc.pb.go b/src/control/common/proto/ctl/ctl_grpc.pb.go index c50ad2b32136..759c15c5f0d9 100644 --- a/src/control/common/proto/ctl/ctl_grpc.pb.go +++ b/src/control/common/proto/ctl/ctl_grpc.pb.go @@ -1,7 +1,13 @@ +// +// (C) Copyright 2019-2022 Intel Corporation. +// +// SPDX-License-Identifier: BSD-2-Clause-Patent +// + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.5.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v5.28.0--dev // source: ctl/ctl.proto package ctl @@ -18,6 +24,25 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + CtlSvc_StorageScan_FullMethodName = "/ctl.CtlSvc/StorageScan" + CtlSvc_StorageFormat_FullMethodName = "/ctl.CtlSvc/StorageFormat" + CtlSvc_StorageNvmeRebind_FullMethodName = "/ctl.CtlSvc/StorageNvmeRebind" + CtlSvc_StorageNvmeAddDevice_FullMethodName = "/ctl.CtlSvc/StorageNvmeAddDevice" + CtlSvc_NetworkScan_FullMethodName = "/ctl.CtlSvc/NetworkScan" + CtlSvc_FirmwareQuery_FullMethodName = "/ctl.CtlSvc/FirmwareQuery" + CtlSvc_FirmwareUpdate_FullMethodName = "/ctl.CtlSvc/FirmwareUpdate" + CtlSvc_SmdQuery_FullMethodName = "/ctl.CtlSvc/SmdQuery" + CtlSvc_SmdManage_FullMethodName = "/ctl.CtlSvc/SmdManage" + CtlSvc_SetEngineLogMasks_FullMethodName = "/ctl.CtlSvc/SetEngineLogMasks" + CtlSvc_PrepShutdownRanks_FullMethodName = "/ctl.CtlSvc/PrepShutdownRanks" + CtlSvc_StopRanks_FullMethodName = "/ctl.CtlSvc/StopRanks" + CtlSvc_PingRanks_FullMethodName = "/ctl.CtlSvc/PingRanks" + CtlSvc_ResetFormatRanks_FullMethodName = "/ctl.CtlSvc/ResetFormatRanks" + CtlSvc_StartRanks_FullMethodName = "/ctl.CtlSvc/StartRanks" + CtlSvc_CollectLog_FullMethodName = "/ctl.CtlSvc/CollectLog" +) + // CtlSvcClient is the client API for CtlSvc service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -52,6 +77,8 @@ type CtlSvcClient interface { ResetFormatRanks(ctx context.Context, in *RanksReq, opts ...grpc.CallOption) (*RanksResp, error) // Start DAOS I/O Engines on a host. (gRPC fanout) StartRanks(ctx context.Context, in *RanksReq, opts ...grpc.CallOption) (*RanksResp, error) + // Perform a Log collection on Servers for support/debug purpose + CollectLog(ctx context.Context, in *CollectLogReq, opts ...grpc.CallOption) (*CollectLogResp, error) } type ctlSvcClient struct { @@ -64,7 +91,7 @@ func NewCtlSvcClient(cc grpc.ClientConnInterface) CtlSvcClient { func (c *ctlSvcClient) StorageScan(ctx context.Context, in *StorageScanReq, opts ...grpc.CallOption) (*StorageScanResp, error) { out := new(StorageScanResp) - err := c.cc.Invoke(ctx, "/ctl.CtlSvc/StorageScan", in, out, opts...) + err := c.cc.Invoke(ctx, CtlSvc_StorageScan_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -73,7 +100,7 @@ func (c *ctlSvcClient) StorageScan(ctx context.Context, in *StorageScanReq, opts func (c *ctlSvcClient) StorageFormat(ctx context.Context, in *StorageFormatReq, opts ...grpc.CallOption) (*StorageFormatResp, error) { out := new(StorageFormatResp) - err := c.cc.Invoke(ctx, "/ctl.CtlSvc/StorageFormat", in, out, opts...) + err := c.cc.Invoke(ctx, CtlSvc_StorageFormat_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -82,7 +109,7 @@ func (c *ctlSvcClient) StorageFormat(ctx context.Context, in *StorageFormatReq, func (c *ctlSvcClient) StorageNvmeRebind(ctx context.Context, in *NvmeRebindReq, opts ...grpc.CallOption) (*NvmeRebindResp, error) { out := new(NvmeRebindResp) - err := c.cc.Invoke(ctx, "/ctl.CtlSvc/StorageNvmeRebind", in, out, opts...) + err := c.cc.Invoke(ctx, CtlSvc_StorageNvmeRebind_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -91,7 +118,7 @@ func (c *ctlSvcClient) StorageNvmeRebind(ctx context.Context, in *NvmeRebindReq, func (c *ctlSvcClient) StorageNvmeAddDevice(ctx context.Context, in *NvmeAddDeviceReq, opts ...grpc.CallOption) (*NvmeAddDeviceResp, error) { out := new(NvmeAddDeviceResp) - err := c.cc.Invoke(ctx, "/ctl.CtlSvc/StorageNvmeAddDevice", in, out, opts...) + err := c.cc.Invoke(ctx, CtlSvc_StorageNvmeAddDevice_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -100,7 +127,7 @@ func (c *ctlSvcClient) StorageNvmeAddDevice(ctx context.Context, in *NvmeAddDevi func (c *ctlSvcClient) NetworkScan(ctx context.Context, in *NetworkScanReq, opts ...grpc.CallOption) (*NetworkScanResp, error) { out := new(NetworkScanResp) - err := c.cc.Invoke(ctx, "/ctl.CtlSvc/NetworkScan", in, out, opts...) + err := c.cc.Invoke(ctx, CtlSvc_NetworkScan_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -109,7 +136,7 @@ func (c *ctlSvcClient) NetworkScan(ctx context.Context, in *NetworkScanReq, opts func (c *ctlSvcClient) FirmwareQuery(ctx context.Context, in *FirmwareQueryReq, opts ...grpc.CallOption) (*FirmwareQueryResp, error) { out := new(FirmwareQueryResp) - err := c.cc.Invoke(ctx, "/ctl.CtlSvc/FirmwareQuery", in, out, opts...) + err := c.cc.Invoke(ctx, CtlSvc_FirmwareQuery_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -118,7 +145,7 @@ func (c *ctlSvcClient) FirmwareQuery(ctx context.Context, in *FirmwareQueryReq, func (c *ctlSvcClient) FirmwareUpdate(ctx context.Context, in *FirmwareUpdateReq, opts ...grpc.CallOption) (*FirmwareUpdateResp, error) { out := new(FirmwareUpdateResp) - err := c.cc.Invoke(ctx, "/ctl.CtlSvc/FirmwareUpdate", in, out, opts...) + err := c.cc.Invoke(ctx, CtlSvc_FirmwareUpdate_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -127,7 +154,7 @@ func (c *ctlSvcClient) FirmwareUpdate(ctx context.Context, in *FirmwareUpdateReq func (c *ctlSvcClient) SmdQuery(ctx context.Context, in *SmdQueryReq, opts ...grpc.CallOption) (*SmdQueryResp, error) { out := new(SmdQueryResp) - err := c.cc.Invoke(ctx, "/ctl.CtlSvc/SmdQuery", in, out, opts...) + err := c.cc.Invoke(ctx, CtlSvc_SmdQuery_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -136,7 +163,7 @@ func (c *ctlSvcClient) SmdQuery(ctx context.Context, in *SmdQueryReq, opts ...gr func (c *ctlSvcClient) SmdManage(ctx context.Context, in *SmdManageReq, opts ...grpc.CallOption) (*SmdManageResp, error) { out := new(SmdManageResp) - err := c.cc.Invoke(ctx, "/ctl.CtlSvc/SmdManage", in, out, opts...) 
+ err := c.cc.Invoke(ctx, CtlSvc_SmdManage_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -145,7 +172,7 @@ func (c *ctlSvcClient) SmdManage(ctx context.Context, in *SmdManageReq, opts ... func (c *ctlSvcClient) SetEngineLogMasks(ctx context.Context, in *SetLogMasksReq, opts ...grpc.CallOption) (*SetLogMasksResp, error) { out := new(SetLogMasksResp) - err := c.cc.Invoke(ctx, "/ctl.CtlSvc/SetEngineLogMasks", in, out, opts...) + err := c.cc.Invoke(ctx, CtlSvc_SetEngineLogMasks_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -154,7 +181,7 @@ func (c *ctlSvcClient) SetEngineLogMasks(ctx context.Context, in *SetLogMasksReq func (c *ctlSvcClient) PrepShutdownRanks(ctx context.Context, in *RanksReq, opts ...grpc.CallOption) (*RanksResp, error) { out := new(RanksResp) - err := c.cc.Invoke(ctx, "/ctl.CtlSvc/PrepShutdownRanks", in, out, opts...) + err := c.cc.Invoke(ctx, CtlSvc_PrepShutdownRanks_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -163,7 +190,7 @@ func (c *ctlSvcClient) PrepShutdownRanks(ctx context.Context, in *RanksReq, opts func (c *ctlSvcClient) StopRanks(ctx context.Context, in *RanksReq, opts ...grpc.CallOption) (*RanksResp, error) { out := new(RanksResp) - err := c.cc.Invoke(ctx, "/ctl.CtlSvc/StopRanks", in, out, opts...) + err := c.cc.Invoke(ctx, CtlSvc_StopRanks_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -172,7 +199,7 @@ func (c *ctlSvcClient) StopRanks(ctx context.Context, in *RanksReq, opts ...grpc func (c *ctlSvcClient) PingRanks(ctx context.Context, in *RanksReq, opts ...grpc.CallOption) (*RanksResp, error) { out := new(RanksResp) - err := c.cc.Invoke(ctx, "/ctl.CtlSvc/PingRanks", in, out, opts...) + err := c.cc.Invoke(ctx, CtlSvc_PingRanks_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -181,7 +208,7 @@ func (c *ctlSvcClient) PingRanks(ctx context.Context, in *RanksReq, opts ...grpc func (c *ctlSvcClient) ResetFormatRanks(ctx context.Context, in *RanksReq, opts ...grpc.CallOption) (*RanksResp, error) { out := new(RanksResp) - err := c.cc.Invoke(ctx, "/ctl.CtlSvc/ResetFormatRanks", in, out, opts...) + err := c.cc.Invoke(ctx, CtlSvc_ResetFormatRanks_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -190,7 +217,16 @@ func (c *ctlSvcClient) ResetFormatRanks(ctx context.Context, in *RanksReq, opts func (c *ctlSvcClient) StartRanks(ctx context.Context, in *RanksReq, opts ...grpc.CallOption) (*RanksResp, error) { out := new(RanksResp) - err := c.cc.Invoke(ctx, "/ctl.CtlSvc/StartRanks", in, out, opts...) + err := c.cc.Invoke(ctx, CtlSvc_StartRanks_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *ctlSvcClient) CollectLog(ctx context.Context, in *CollectLogReq, opts ...grpc.CallOption) (*CollectLogResp, error) { + out := new(CollectLogResp) + err := c.cc.Invoke(ctx, CtlSvc_CollectLog_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -231,6 +267,8 @@ type CtlSvcServer interface { ResetFormatRanks(context.Context, *RanksReq) (*RanksResp, error) // Start DAOS I/O Engines on a host. 
(gRPC fanout) StartRanks(context.Context, *RanksReq) (*RanksResp, error) + // Perform a Log collection on Servers for support/debug purpose + CollectLog(context.Context, *CollectLogReq) (*CollectLogResp, error) mustEmbedUnimplementedCtlSvcServer() } @@ -283,6 +321,9 @@ func (UnimplementedCtlSvcServer) ResetFormatRanks(context.Context, *RanksReq) (* func (UnimplementedCtlSvcServer) StartRanks(context.Context, *RanksReq) (*RanksResp, error) { return nil, status.Errorf(codes.Unimplemented, "method StartRanks not implemented") } +func (UnimplementedCtlSvcServer) CollectLog(context.Context, *CollectLogReq) (*CollectLogResp, error) { + return nil, status.Errorf(codes.Unimplemented, "method CollectLog not implemented") +} func (UnimplementedCtlSvcServer) mustEmbedUnimplementedCtlSvcServer() {} // UnsafeCtlSvcServer may be embedded to opt out of forward compatibility for this service. @@ -306,7 +347,7 @@ func _CtlSvc_StorageScan_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ctl.CtlSvc/StorageScan", + FullMethod: CtlSvc_StorageScan_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CtlSvcServer).StorageScan(ctx, req.(*StorageScanReq)) @@ -324,7 +365,7 @@ func _CtlSvc_StorageFormat_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ctl.CtlSvc/StorageFormat", + FullMethod: CtlSvc_StorageFormat_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CtlSvcServer).StorageFormat(ctx, req.(*StorageFormatReq)) @@ -342,7 +383,7 @@ func _CtlSvc_StorageNvmeRebind_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ctl.CtlSvc/StorageNvmeRebind", + FullMethod: CtlSvc_StorageNvmeRebind_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CtlSvcServer).StorageNvmeRebind(ctx, req.(*NvmeRebindReq)) @@ -360,7 +401,7 @@ func _CtlSvc_StorageNvmeAddDevice_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ctl.CtlSvc/StorageNvmeAddDevice", + FullMethod: CtlSvc_StorageNvmeAddDevice_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CtlSvcServer).StorageNvmeAddDevice(ctx, req.(*NvmeAddDeviceReq)) @@ -378,7 +419,7 @@ func _CtlSvc_NetworkScan_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ctl.CtlSvc/NetworkScan", + FullMethod: CtlSvc_NetworkScan_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CtlSvcServer).NetworkScan(ctx, req.(*NetworkScanReq)) @@ -396,7 +437,7 @@ func _CtlSvc_FirmwareQuery_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ctl.CtlSvc/FirmwareQuery", + FullMethod: CtlSvc_FirmwareQuery_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CtlSvcServer).FirmwareQuery(ctx, req.(*FirmwareQueryReq)) @@ -414,7 +455,7 @@ func _CtlSvc_FirmwareUpdate_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ctl.CtlSvc/FirmwareUpdate", + FullMethod: CtlSvc_FirmwareUpdate_FullMethodName, } handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { return srv.(CtlSvcServer).FirmwareUpdate(ctx, req.(*FirmwareUpdateReq)) @@ -432,7 +473,7 @@ func _CtlSvc_SmdQuery_Handler(srv interface{}, ctx context.Context, dec func(int } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ctl.CtlSvc/SmdQuery", + FullMethod: CtlSvc_SmdQuery_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CtlSvcServer).SmdQuery(ctx, req.(*SmdQueryReq)) @@ -450,7 +491,7 @@ func _CtlSvc_SmdManage_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ctl.CtlSvc/SmdManage", + FullMethod: CtlSvc_SmdManage_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CtlSvcServer).SmdManage(ctx, req.(*SmdManageReq)) @@ -468,7 +509,7 @@ func _CtlSvc_SetEngineLogMasks_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ctl.CtlSvc/SetEngineLogMasks", + FullMethod: CtlSvc_SetEngineLogMasks_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CtlSvcServer).SetEngineLogMasks(ctx, req.(*SetLogMasksReq)) @@ -486,7 +527,7 @@ func _CtlSvc_PrepShutdownRanks_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ctl.CtlSvc/PrepShutdownRanks", + FullMethod: CtlSvc_PrepShutdownRanks_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CtlSvcServer).PrepShutdownRanks(ctx, req.(*RanksReq)) @@ -504,7 +545,7 @@ func _CtlSvc_StopRanks_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ctl.CtlSvc/StopRanks", + FullMethod: CtlSvc_StopRanks_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CtlSvcServer).StopRanks(ctx, req.(*RanksReq)) @@ -522,7 +563,7 @@ func _CtlSvc_PingRanks_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ctl.CtlSvc/PingRanks", + FullMethod: CtlSvc_PingRanks_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CtlSvcServer).PingRanks(ctx, req.(*RanksReq)) @@ -540,7 +581,7 @@ func _CtlSvc_ResetFormatRanks_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ctl.CtlSvc/ResetFormatRanks", + FullMethod: CtlSvc_ResetFormatRanks_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CtlSvcServer).ResetFormatRanks(ctx, req.(*RanksReq)) @@ -558,7 +599,7 @@ func _CtlSvc_StartRanks_Handler(srv interface{}, ctx context.Context, dec func(i } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ctl.CtlSvc/StartRanks", + FullMethod: CtlSvc_StartRanks_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CtlSvcServer).StartRanks(ctx, req.(*RanksReq)) @@ -566,6 +607,24 @@ func _CtlSvc_StartRanks_Handler(srv interface{}, ctx context.Context, dec func(i return interceptor(ctx, in, info, handler) } +func _CtlSvc_CollectLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CollectLogReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil 
{ + return srv.(CtlSvcServer).CollectLog(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: CtlSvc_CollectLog_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CtlSvcServer).CollectLog(ctx, req.(*CollectLogReq)) + } + return interceptor(ctx, in, info, handler) +} + // CtlSvc_ServiceDesc is the grpc.ServiceDesc for CtlSvc service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -633,6 +692,10 @@ var CtlSvc_ServiceDesc = grpc.ServiceDesc{ MethodName: "StartRanks", Handler: _CtlSvc_StartRanks_Handler, }, + { + MethodName: "CollectLog", + Handler: _CtlSvc_CollectLog_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "ctl/ctl.proto", diff --git a/src/control/common/proto/ctl/support.pb.go b/src/control/common/proto/ctl/support.pb.go new file mode 100644 index 000000000000..420e8a5dcac6 --- /dev/null +++ b/src/control/common/proto/ctl/support.pb.go @@ -0,0 +1,315 @@ +// +// (C) Copyright 2022-2023 Intel Corporation. +// +// SPDX-License-Identifier: BSD-2-Clause-Patent +// + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.33.0 +// protoc v5.28.0--dev +// source: ctl/support.proto + +package ctl + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CollectLogReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TargetFolder string `protobuf:"bytes,1,opt,name=TargetFolder,proto3" json:"TargetFolder,omitempty"` + ExtraLogsDir string `protobuf:"bytes,2,opt,name=ExtraLogsDir,proto3" json:"ExtraLogsDir,omitempty"` + AdminNode string `protobuf:"bytes,3,opt,name=AdminNode,proto3" json:"AdminNode,omitempty"` + JsonOutput bool `protobuf:"varint,4,opt,name=JsonOutput,proto3" json:"JsonOutput,omitempty"` + LogFunction int32 `protobuf:"varint,5,opt,name=LogFunction,proto3" json:"LogFunction,omitempty"` + LogCmd string `protobuf:"bytes,6,opt,name=LogCmd,proto3" json:"LogCmd,omitempty"` + LogStartDate string `protobuf:"bytes,7,opt,name=LogStartDate,proto3" json:"LogStartDate,omitempty"` + LogEndDate string `protobuf:"bytes,8,opt,name=LogEndDate,proto3" json:"LogEndDate,omitempty"` + LogStartTime string `protobuf:"bytes,9,opt,name=LogStartTime,proto3" json:"LogStartTime,omitempty"` + LogEndTime string `protobuf:"bytes,10,opt,name=LogEndTime,proto3" json:"LogEndTime,omitempty"` + StopOnError bool `protobuf:"varint,11,opt,name=StopOnError,proto3" json:"StopOnError,omitempty"` +} + +func (x *CollectLogReq) Reset() { + *x = CollectLogReq{} + if protoimpl.UnsafeEnabled { + mi := &file_ctl_support_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CollectLogReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectLogReq) ProtoMessage() {} + +func (x *CollectLogReq) ProtoReflect() protoreflect.Message { + mi := &file_ctl_support_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { 
+ ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CollectLogReq.ProtoReflect.Descriptor instead. +func (*CollectLogReq) Descriptor() ([]byte, []int) { + return file_ctl_support_proto_rawDescGZIP(), []int{0} +} + +func (x *CollectLogReq) GetTargetFolder() string { + if x != nil { + return x.TargetFolder + } + return "" +} + +func (x *CollectLogReq) GetExtraLogsDir() string { + if x != nil { + return x.ExtraLogsDir + } + return "" +} + +func (x *CollectLogReq) GetAdminNode() string { + if x != nil { + return x.AdminNode + } + return "" +} + +func (x *CollectLogReq) GetJsonOutput() bool { + if x != nil { + return x.JsonOutput + } + return false +} + +func (x *CollectLogReq) GetLogFunction() int32 { + if x != nil { + return x.LogFunction + } + return 0 +} + +func (x *CollectLogReq) GetLogCmd() string { + if x != nil { + return x.LogCmd + } + return "" +} + +func (x *CollectLogReq) GetLogStartDate() string { + if x != nil { + return x.LogStartDate + } + return "" +} + +func (x *CollectLogReq) GetLogEndDate() string { + if x != nil { + return x.LogEndDate + } + return "" +} + +func (x *CollectLogReq) GetLogStartTime() string { + if x != nil { + return x.LogStartTime + } + return "" +} + +func (x *CollectLogReq) GetLogEndTime() string { + if x != nil { + return x.LogEndTime + } + return "" +} + +func (x *CollectLogReq) GetStopOnError() bool { + if x != nil { + return x.StopOnError + } + return false +} + +type CollectLogResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status int32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` // DAOS error code +} + +func (x *CollectLogResp) Reset() { + *x = CollectLogResp{} + if protoimpl.UnsafeEnabled { + mi := &file_ctl_support_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CollectLogResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectLogResp) ProtoMessage() {} + +func (x *CollectLogResp) ProtoReflect() protoreflect.Message { + mi := &file_ctl_support_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CollectLogResp.ProtoReflect.Descriptor instead. 
+func (*CollectLogResp) Descriptor() ([]byte, []int) { + return file_ctl_support_proto_rawDescGZIP(), []int{1} +} + +func (x *CollectLogResp) GetStatus() int32 { + if x != nil { + return x.Status + } + return 0 +} + +var File_ctl_support_proto protoreflect.FileDescriptor + +var file_ctl_support_proto_rawDesc = []byte{ + 0x0a, 0x11, 0x63, 0x74, 0x6c, 0x2f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x03, 0x63, 0x74, 0x6c, 0x22, 0xf9, 0x02, 0x0a, 0x0d, 0x43, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x12, 0x22, 0x0a, 0x0c, 0x54, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x22, + 0x0a, 0x0c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x4c, 0x6f, 0x67, 0x73, 0x44, 0x69, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x4c, 0x6f, 0x67, 0x73, 0x44, + 0x69, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x4e, 0x6f, 0x64, 0x65, + 0x12, 0x1e, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x12, 0x20, 0x0a, 0x0b, 0x4c, 0x6f, 0x67, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x4c, 0x6f, 0x67, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x43, 0x6d, 0x64, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x4c, 0x6f, 0x67, 0x43, 0x6d, 0x64, 0x12, 0x22, 0x0a, 0x0c, 0x4c, 0x6f, + 0x67, 0x53, 0x74, 0x61, 0x72, 0x74, 0x44, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x4c, 0x6f, 0x67, 0x53, 0x74, 0x61, 0x72, 0x74, 0x44, 0x61, 0x74, 0x65, 0x12, 0x1e, + 0x0a, 0x0a, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x64, 0x44, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x64, 0x44, 0x61, 0x74, 0x65, 0x12, 0x22, + 0x0a, 0x0c, 0x4c, 0x6f, 0x67, 0x53, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x4c, 0x6f, 0x67, 0x53, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x64, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x53, 0x74, 0x6f, 0x70, 0x4f, 0x6e, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x53, 0x74, 0x6f, 0x70, 0x4f, 0x6e, 0x45, + 0x72, 0x72, 0x6f, 0x72, 0x22, 0x28, 0x0a, 0x0e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x4c, + 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x39, + 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x61, 0x6f, + 0x73, 0x2d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x2f, 0x64, 0x61, 0x6f, 0x73, 0x2f, 0x73, 0x72, 0x63, + 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x74, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_ctl_support_proto_rawDescOnce sync.Once + 
file_ctl_support_proto_rawDescData = file_ctl_support_proto_rawDesc +) + +func file_ctl_support_proto_rawDescGZIP() []byte { + file_ctl_support_proto_rawDescOnce.Do(func() { + file_ctl_support_proto_rawDescData = protoimpl.X.CompressGZIP(file_ctl_support_proto_rawDescData) + }) + return file_ctl_support_proto_rawDescData +} + +var file_ctl_support_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_ctl_support_proto_goTypes = []interface{}{ + (*CollectLogReq)(nil), // 0: ctl.CollectLogReq + (*CollectLogResp)(nil), // 1: ctl.CollectLogResp +} +var file_ctl_support_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_ctl_support_proto_init() } +func file_ctl_support_proto_init() { + if File_ctl_support_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_ctl_support_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectLogReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_ctl_support_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectLogResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_ctl_support_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_ctl_support_proto_goTypes, + DependencyIndexes: file_ctl_support_proto_depIdxs, + MessageInfos: file_ctl_support_proto_msgTypes, + }.Build() + File_ctl_support_proto = out.File + file_ctl_support_proto_rawDesc = nil + file_ctl_support_proto_goTypes = nil + file_ctl_support_proto_depIdxs = nil +} diff --git a/src/control/common/proto/mgmt/mgmt_grpc.pb.go b/src/control/common/proto/mgmt/mgmt_grpc.pb.go index 99ef1e8e95bf..60cd1494e519 100644 --- a/src/control/common/proto/mgmt/mgmt_grpc.pb.go +++ b/src/control/common/proto/mgmt/mgmt_grpc.pb.go @@ -1,7 +1,13 @@ +// +// (C) Copyright 2019-2022 Intel Corporation. +// +// SPDX-License-Identifier: BSD-2-Clause-Patent +// + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.5.0 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v5.28.0--dev // source: mgmt/mgmt.proto package mgmt @@ -19,6 +25,42 @@ import ( // Requires gRPC-Go v1.32.0 or later. 
const _ = grpc.SupportPackageIsVersion7 +const ( + MgmtSvc_Join_FullMethodName = "/mgmt.MgmtSvc/Join" + MgmtSvc_ClusterEvent_FullMethodName = "/mgmt.MgmtSvc/ClusterEvent" + MgmtSvc_LeaderQuery_FullMethodName = "/mgmt.MgmtSvc/LeaderQuery" + MgmtSvc_PoolCreate_FullMethodName = "/mgmt.MgmtSvc/PoolCreate" + MgmtSvc_PoolDestroy_FullMethodName = "/mgmt.MgmtSvc/PoolDestroy" + MgmtSvc_PoolEvict_FullMethodName = "/mgmt.MgmtSvc/PoolEvict" + MgmtSvc_PoolExclude_FullMethodName = "/mgmt.MgmtSvc/PoolExclude" + MgmtSvc_PoolDrain_FullMethodName = "/mgmt.MgmtSvc/PoolDrain" + MgmtSvc_PoolExtend_FullMethodName = "/mgmt.MgmtSvc/PoolExtend" + MgmtSvc_PoolReintegrate_FullMethodName = "/mgmt.MgmtSvc/PoolReintegrate" + MgmtSvc_PoolQuery_FullMethodName = "/mgmt.MgmtSvc/PoolQuery" + MgmtSvc_PoolQueryTarget_FullMethodName = "/mgmt.MgmtSvc/PoolQueryTarget" + MgmtSvc_PoolSetProp_FullMethodName = "/mgmt.MgmtSvc/PoolSetProp" + MgmtSvc_PoolGetProp_FullMethodName = "/mgmt.MgmtSvc/PoolGetProp" + MgmtSvc_PoolGetACL_FullMethodName = "/mgmt.MgmtSvc/PoolGetACL" + MgmtSvc_PoolOverwriteACL_FullMethodName = "/mgmt.MgmtSvc/PoolOverwriteACL" + MgmtSvc_PoolUpdateACL_FullMethodName = "/mgmt.MgmtSvc/PoolUpdateACL" + MgmtSvc_PoolDeleteACL_FullMethodName = "/mgmt.MgmtSvc/PoolDeleteACL" + MgmtSvc_GetAttachInfo_FullMethodName = "/mgmt.MgmtSvc/GetAttachInfo" + MgmtSvc_ListPools_FullMethodName = "/mgmt.MgmtSvc/ListPools" + MgmtSvc_ListContainers_FullMethodName = "/mgmt.MgmtSvc/ListContainers" + MgmtSvc_ContSetOwner_FullMethodName = "/mgmt.MgmtSvc/ContSetOwner" + MgmtSvc_SystemQuery_FullMethodName = "/mgmt.MgmtSvc/SystemQuery" + MgmtSvc_SystemStop_FullMethodName = "/mgmt.MgmtSvc/SystemStop" + MgmtSvc_SystemStart_FullMethodName = "/mgmt.MgmtSvc/SystemStart" + MgmtSvc_SystemExclude_FullMethodName = "/mgmt.MgmtSvc/SystemExclude" + MgmtSvc_SystemErase_FullMethodName = "/mgmt.MgmtSvc/SystemErase" + MgmtSvc_SystemCleanup_FullMethodName = "/mgmt.MgmtSvc/SystemCleanup" + MgmtSvc_PoolUpgrade_FullMethodName = "/mgmt.MgmtSvc/PoolUpgrade" + MgmtSvc_SystemSetAttr_FullMethodName = "/mgmt.MgmtSvc/SystemSetAttr" + MgmtSvc_SystemGetAttr_FullMethodName = "/mgmt.MgmtSvc/SystemGetAttr" + MgmtSvc_SystemSetProp_FullMethodName = "/mgmt.MgmtSvc/SystemSetProp" + MgmtSvc_SystemGetProp_FullMethodName = "/mgmt.MgmtSvc/SystemGetProp" +) + // MgmtSvcClient is the client API for MgmtSvc service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -102,7 +144,7 @@ func NewMgmtSvcClient(cc grpc.ClientConnInterface) MgmtSvcClient { func (c *mgmtSvcClient) Join(ctx context.Context, in *JoinReq, opts ...grpc.CallOption) (*JoinResp, error) { out := new(JoinResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/Join", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_Join_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -111,7 +153,7 @@ func (c *mgmtSvcClient) Join(ctx context.Context, in *JoinReq, opts ...grpc.Call func (c *mgmtSvcClient) ClusterEvent(ctx context.Context, in *shared.ClusterEventReq, opts ...grpc.CallOption) (*shared.ClusterEventResp, error) { out := new(shared.ClusterEventResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/ClusterEvent", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_ClusterEvent_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -120,7 +162,7 @@ func (c *mgmtSvcClient) ClusterEvent(ctx context.Context, in *shared.ClusterEven func (c *mgmtSvcClient) LeaderQuery(ctx context.Context, in *LeaderQueryReq, opts ...grpc.CallOption) (*LeaderQueryResp, error) { out := new(LeaderQueryResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/LeaderQuery", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_LeaderQuery_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -129,7 +171,7 @@ func (c *mgmtSvcClient) LeaderQuery(ctx context.Context, in *LeaderQueryReq, opt func (c *mgmtSvcClient) PoolCreate(ctx context.Context, in *PoolCreateReq, opts ...grpc.CallOption) (*PoolCreateResp, error) { out := new(PoolCreateResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/PoolCreate", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_PoolCreate_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -138,7 +180,7 @@ func (c *mgmtSvcClient) PoolCreate(ctx context.Context, in *PoolCreateReq, opts func (c *mgmtSvcClient) PoolDestroy(ctx context.Context, in *PoolDestroyReq, opts ...grpc.CallOption) (*PoolDestroyResp, error) { out := new(PoolDestroyResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/PoolDestroy", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_PoolDestroy_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -147,7 +189,7 @@ func (c *mgmtSvcClient) PoolDestroy(ctx context.Context, in *PoolDestroyReq, opt func (c *mgmtSvcClient) PoolEvict(ctx context.Context, in *PoolEvictReq, opts ...grpc.CallOption) (*PoolEvictResp, error) { out := new(PoolEvictResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/PoolEvict", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_PoolEvict_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -156,7 +198,7 @@ func (c *mgmtSvcClient) PoolEvict(ctx context.Context, in *PoolEvictReq, opts .. func (c *mgmtSvcClient) PoolExclude(ctx context.Context, in *PoolExcludeReq, opts ...grpc.CallOption) (*PoolExcludeResp, error) { out := new(PoolExcludeResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/PoolExclude", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_PoolExclude_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -165,7 +207,7 @@ func (c *mgmtSvcClient) PoolExclude(ctx context.Context, in *PoolExcludeReq, opt func (c *mgmtSvcClient) PoolDrain(ctx context.Context, in *PoolDrainReq, opts ...grpc.CallOption) (*PoolDrainResp, error) { out := new(PoolDrainResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/PoolDrain", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_PoolDrain_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -174,7 +216,7 @@ func (c *mgmtSvcClient) PoolDrain(ctx context.Context, in *PoolDrainReq, opts .. func (c *mgmtSvcClient) PoolExtend(ctx context.Context, in *PoolExtendReq, opts ...grpc.CallOption) (*PoolExtendResp, error) { out := new(PoolExtendResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/PoolExtend", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_PoolExtend_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -183,7 +225,7 @@ func (c *mgmtSvcClient) PoolExtend(ctx context.Context, in *PoolExtendReq, opts func (c *mgmtSvcClient) PoolReintegrate(ctx context.Context, in *PoolReintegrateReq, opts ...grpc.CallOption) (*PoolReintegrateResp, error) { out := new(PoolReintegrateResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/PoolReintegrate", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_PoolReintegrate_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -192,7 +234,7 @@ func (c *mgmtSvcClient) PoolReintegrate(ctx context.Context, in *PoolReintegrate func (c *mgmtSvcClient) PoolQuery(ctx context.Context, in *PoolQueryReq, opts ...grpc.CallOption) (*PoolQueryResp, error) { out := new(PoolQueryResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/PoolQuery", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_PoolQuery_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -201,7 +243,7 @@ func (c *mgmtSvcClient) PoolQuery(ctx context.Context, in *PoolQueryReq, opts .. func (c *mgmtSvcClient) PoolQueryTarget(ctx context.Context, in *PoolQueryTargetReq, opts ...grpc.CallOption) (*PoolQueryTargetResp, error) { out := new(PoolQueryTargetResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/PoolQueryTarget", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_PoolQueryTarget_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -210,7 +252,7 @@ func (c *mgmtSvcClient) PoolQueryTarget(ctx context.Context, in *PoolQueryTarget func (c *mgmtSvcClient) PoolSetProp(ctx context.Context, in *PoolSetPropReq, opts ...grpc.CallOption) (*PoolSetPropResp, error) { out := new(PoolSetPropResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/PoolSetProp", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_PoolSetProp_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -219,7 +261,7 @@ func (c *mgmtSvcClient) PoolSetProp(ctx context.Context, in *PoolSetPropReq, opt func (c *mgmtSvcClient) PoolGetProp(ctx context.Context, in *PoolGetPropReq, opts ...grpc.CallOption) (*PoolGetPropResp, error) { out := new(PoolGetPropResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/PoolGetProp", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_PoolGetProp_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -228,7 +270,7 @@ func (c *mgmtSvcClient) PoolGetProp(ctx context.Context, in *PoolGetPropReq, opt func (c *mgmtSvcClient) PoolGetACL(ctx context.Context, in *GetACLReq, opts ...grpc.CallOption) (*ACLResp, error) { out := new(ACLResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/PoolGetACL", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_PoolGetACL_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -237,7 +279,7 @@ func (c *mgmtSvcClient) PoolGetACL(ctx context.Context, in *GetACLReq, opts ...g func (c *mgmtSvcClient) PoolOverwriteACL(ctx context.Context, in *ModifyACLReq, opts ...grpc.CallOption) (*ACLResp, error) { out := new(ACLResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/PoolOverwriteACL", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_PoolOverwriteACL_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -246,7 +288,7 @@ func (c *mgmtSvcClient) PoolOverwriteACL(ctx context.Context, in *ModifyACLReq, func (c *mgmtSvcClient) PoolUpdateACL(ctx context.Context, in *ModifyACLReq, opts ...grpc.CallOption) (*ACLResp, error) { out := new(ACLResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/PoolUpdateACL", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_PoolUpdateACL_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -255,7 +297,7 @@ func (c *mgmtSvcClient) PoolUpdateACL(ctx context.Context, in *ModifyACLReq, opt func (c *mgmtSvcClient) PoolDeleteACL(ctx context.Context, in *DeleteACLReq, opts ...grpc.CallOption) (*ACLResp, error) { out := new(ACLResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/PoolDeleteACL", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_PoolDeleteACL_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -264,7 +306,7 @@ func (c *mgmtSvcClient) PoolDeleteACL(ctx context.Context, in *DeleteACLReq, opt func (c *mgmtSvcClient) GetAttachInfo(ctx context.Context, in *GetAttachInfoReq, opts ...grpc.CallOption) (*GetAttachInfoResp, error) { out := new(GetAttachInfoResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/GetAttachInfo", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_GetAttachInfo_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -273,7 +315,7 @@ func (c *mgmtSvcClient) GetAttachInfo(ctx context.Context, in *GetAttachInfoReq, func (c *mgmtSvcClient) ListPools(ctx context.Context, in *ListPoolsReq, opts ...grpc.CallOption) (*ListPoolsResp, error) { out := new(ListPoolsResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/ListPools", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_ListPools_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -282,7 +324,7 @@ func (c *mgmtSvcClient) ListPools(ctx context.Context, in *ListPoolsReq, opts .. func (c *mgmtSvcClient) ListContainers(ctx context.Context, in *ListContReq, opts ...grpc.CallOption) (*ListContResp, error) { out := new(ListContResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/ListContainers", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_ListContainers_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -291,7 +333,7 @@ func (c *mgmtSvcClient) ListContainers(ctx context.Context, in *ListContReq, opt func (c *mgmtSvcClient) ContSetOwner(ctx context.Context, in *ContSetOwnerReq, opts ...grpc.CallOption) (*ContSetOwnerResp, error) { out := new(ContSetOwnerResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/ContSetOwner", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_ContSetOwner_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -300,7 +342,7 @@ func (c *mgmtSvcClient) ContSetOwner(ctx context.Context, in *ContSetOwnerReq, o func (c *mgmtSvcClient) SystemQuery(ctx context.Context, in *SystemQueryReq, opts ...grpc.CallOption) (*SystemQueryResp, error) { out := new(SystemQueryResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/SystemQuery", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_SystemQuery_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -309,7 +351,7 @@ func (c *mgmtSvcClient) SystemQuery(ctx context.Context, in *SystemQueryReq, opt func (c *mgmtSvcClient) SystemStop(ctx context.Context, in *SystemStopReq, opts ...grpc.CallOption) (*SystemStopResp, error) { out := new(SystemStopResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/SystemStop", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_SystemStop_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -318,7 +360,7 @@ func (c *mgmtSvcClient) SystemStop(ctx context.Context, in *SystemStopReq, opts func (c *mgmtSvcClient) SystemStart(ctx context.Context, in *SystemStartReq, opts ...grpc.CallOption) (*SystemStartResp, error) { out := new(SystemStartResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/SystemStart", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_SystemStart_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -327,7 +369,7 @@ func (c *mgmtSvcClient) SystemStart(ctx context.Context, in *SystemStartReq, opt func (c *mgmtSvcClient) SystemExclude(ctx context.Context, in *SystemExcludeReq, opts ...grpc.CallOption) (*SystemExcludeResp, error) { out := new(SystemExcludeResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/SystemExclude", in, out, opts...) 
+ err := c.cc.Invoke(ctx, MgmtSvc_SystemExclude_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -336,7 +378,7 @@ func (c *mgmtSvcClient) SystemExclude(ctx context.Context, in *SystemExcludeReq, func (c *mgmtSvcClient) SystemErase(ctx context.Context, in *SystemEraseReq, opts ...grpc.CallOption) (*SystemEraseResp, error) { out := new(SystemEraseResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/SystemErase", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_SystemErase_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -345,7 +387,7 @@ func (c *mgmtSvcClient) SystemErase(ctx context.Context, in *SystemEraseReq, opt func (c *mgmtSvcClient) SystemCleanup(ctx context.Context, in *SystemCleanupReq, opts ...grpc.CallOption) (*SystemCleanupResp, error) { out := new(SystemCleanupResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/SystemCleanup", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_SystemCleanup_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -354,7 +396,7 @@ func (c *mgmtSvcClient) SystemCleanup(ctx context.Context, in *SystemCleanupReq, func (c *mgmtSvcClient) PoolUpgrade(ctx context.Context, in *PoolUpgradeReq, opts ...grpc.CallOption) (*PoolUpgradeResp, error) { out := new(PoolUpgradeResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/PoolUpgrade", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_PoolUpgrade_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -363,7 +405,7 @@ func (c *mgmtSvcClient) PoolUpgrade(ctx context.Context, in *PoolUpgradeReq, opt func (c *mgmtSvcClient) SystemSetAttr(ctx context.Context, in *SystemSetAttrReq, opts ...grpc.CallOption) (*DaosResp, error) { out := new(DaosResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/SystemSetAttr", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_SystemSetAttr_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -372,7 +414,7 @@ func (c *mgmtSvcClient) SystemSetAttr(ctx context.Context, in *SystemSetAttrReq, func (c *mgmtSvcClient) SystemGetAttr(ctx context.Context, in *SystemGetAttrReq, opts ...grpc.CallOption) (*SystemGetAttrResp, error) { out := new(SystemGetAttrResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/SystemGetAttr", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_SystemGetAttr_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -381,7 +423,7 @@ func (c *mgmtSvcClient) SystemGetAttr(ctx context.Context, in *SystemGetAttrReq, func (c *mgmtSvcClient) SystemSetProp(ctx context.Context, in *SystemSetPropReq, opts ...grpc.CallOption) (*DaosResp, error) { out := new(DaosResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/SystemSetProp", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_SystemSetProp_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -390,7 +432,7 @@ func (c *mgmtSvcClient) SystemSetProp(ctx context.Context, in *SystemSetPropReq, func (c *mgmtSvcClient) SystemGetProp(ctx context.Context, in *SystemGetPropReq, opts ...grpc.CallOption) (*SystemGetPropResp, error) { out := new(SystemGetPropResp) - err := c.cc.Invoke(ctx, "/mgmt.MgmtSvc/SystemGetProp", in, out, opts...) + err := c.cc.Invoke(ctx, MgmtSvc_SystemGetProp_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -597,7 +639,7 @@ func _MgmtSvc_Join_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/Join", + FullMethod: MgmtSvc_Join_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).Join(ctx, req.(*JoinReq)) @@ -615,7 +657,7 @@ func _MgmtSvc_ClusterEvent_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/ClusterEvent", + FullMethod: MgmtSvc_ClusterEvent_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).ClusterEvent(ctx, req.(*shared.ClusterEventReq)) @@ -633,7 +675,7 @@ func _MgmtSvc_LeaderQuery_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/LeaderQuery", + FullMethod: MgmtSvc_LeaderQuery_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).LeaderQuery(ctx, req.(*LeaderQueryReq)) @@ -651,7 +693,7 @@ func _MgmtSvc_PoolCreate_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/PoolCreate", + FullMethod: MgmtSvc_PoolCreate_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).PoolCreate(ctx, req.(*PoolCreateReq)) @@ -669,7 +711,7 @@ func _MgmtSvc_PoolDestroy_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/PoolDestroy", + FullMethod: MgmtSvc_PoolDestroy_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).PoolDestroy(ctx, req.(*PoolDestroyReq)) @@ -687,7 +729,7 @@ func _MgmtSvc_PoolEvict_Handler(srv interface{}, ctx context.Context, dec func(i } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/PoolEvict", + FullMethod: MgmtSvc_PoolEvict_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).PoolEvict(ctx, req.(*PoolEvictReq)) @@ -705,7 +747,7 @@ func _MgmtSvc_PoolExclude_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/PoolExclude", + FullMethod: MgmtSvc_PoolExclude_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).PoolExclude(ctx, req.(*PoolExcludeReq)) @@ -723,7 +765,7 @@ func _MgmtSvc_PoolDrain_Handler(srv interface{}, ctx context.Context, dec func(i } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/PoolDrain", + FullMethod: MgmtSvc_PoolDrain_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).PoolDrain(ctx, req.(*PoolDrainReq)) @@ -741,7 +783,7 @@ func _MgmtSvc_PoolExtend_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/PoolExtend", + FullMethod: MgmtSvc_PoolExtend_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).PoolExtend(ctx, req.(*PoolExtendReq)) @@ -759,7 +801,7 @@ func _MgmtSvc_PoolReintegrate_Handler(srv interface{}, ctx 
context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/PoolReintegrate", + FullMethod: MgmtSvc_PoolReintegrate_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).PoolReintegrate(ctx, req.(*PoolReintegrateReq)) @@ -777,7 +819,7 @@ func _MgmtSvc_PoolQuery_Handler(srv interface{}, ctx context.Context, dec func(i } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/PoolQuery", + FullMethod: MgmtSvc_PoolQuery_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).PoolQuery(ctx, req.(*PoolQueryReq)) @@ -795,7 +837,7 @@ func _MgmtSvc_PoolQueryTarget_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/PoolQueryTarget", + FullMethod: MgmtSvc_PoolQueryTarget_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).PoolQueryTarget(ctx, req.(*PoolQueryTargetReq)) @@ -813,7 +855,7 @@ func _MgmtSvc_PoolSetProp_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/PoolSetProp", + FullMethod: MgmtSvc_PoolSetProp_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).PoolSetProp(ctx, req.(*PoolSetPropReq)) @@ -831,7 +873,7 @@ func _MgmtSvc_PoolGetProp_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/PoolGetProp", + FullMethod: MgmtSvc_PoolGetProp_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).PoolGetProp(ctx, req.(*PoolGetPropReq)) @@ -849,7 +891,7 @@ func _MgmtSvc_PoolGetACL_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/PoolGetACL", + FullMethod: MgmtSvc_PoolGetACL_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).PoolGetACL(ctx, req.(*GetACLReq)) @@ -867,7 +909,7 @@ func _MgmtSvc_PoolOverwriteACL_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/PoolOverwriteACL", + FullMethod: MgmtSvc_PoolOverwriteACL_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).PoolOverwriteACL(ctx, req.(*ModifyACLReq)) @@ -885,7 +927,7 @@ func _MgmtSvc_PoolUpdateACL_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/PoolUpdateACL", + FullMethod: MgmtSvc_PoolUpdateACL_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).PoolUpdateACL(ctx, req.(*ModifyACLReq)) @@ -903,7 +945,7 @@ func _MgmtSvc_PoolDeleteACL_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/PoolDeleteACL", + FullMethod: MgmtSvc_PoolDeleteACL_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).PoolDeleteACL(ctx, req.(*DeleteACLReq)) @@ -921,7 +963,7 @@ func _MgmtSvc_GetAttachInfo_Handler(srv interface{}, ctx context.Context, dec fu } info := 
&grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/GetAttachInfo", + FullMethod: MgmtSvc_GetAttachInfo_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).GetAttachInfo(ctx, req.(*GetAttachInfoReq)) @@ -939,7 +981,7 @@ func _MgmtSvc_ListPools_Handler(srv interface{}, ctx context.Context, dec func(i } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/ListPools", + FullMethod: MgmtSvc_ListPools_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).ListPools(ctx, req.(*ListPoolsReq)) @@ -957,7 +999,7 @@ func _MgmtSvc_ListContainers_Handler(srv interface{}, ctx context.Context, dec f } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/ListContainers", + FullMethod: MgmtSvc_ListContainers_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).ListContainers(ctx, req.(*ListContReq)) @@ -975,7 +1017,7 @@ func _MgmtSvc_ContSetOwner_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/ContSetOwner", + FullMethod: MgmtSvc_ContSetOwner_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).ContSetOwner(ctx, req.(*ContSetOwnerReq)) @@ -993,7 +1035,7 @@ func _MgmtSvc_SystemQuery_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/SystemQuery", + FullMethod: MgmtSvc_SystemQuery_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).SystemQuery(ctx, req.(*SystemQueryReq)) @@ -1011,7 +1053,7 @@ func _MgmtSvc_SystemStop_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/SystemStop", + FullMethod: MgmtSvc_SystemStop_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).SystemStop(ctx, req.(*SystemStopReq)) @@ -1029,7 +1071,7 @@ func _MgmtSvc_SystemStart_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/SystemStart", + FullMethod: MgmtSvc_SystemStart_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).SystemStart(ctx, req.(*SystemStartReq)) @@ -1047,7 +1089,7 @@ func _MgmtSvc_SystemExclude_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/SystemExclude", + FullMethod: MgmtSvc_SystemExclude_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).SystemExclude(ctx, req.(*SystemExcludeReq)) @@ -1065,7 +1107,7 @@ func _MgmtSvc_SystemErase_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/SystemErase", + FullMethod: MgmtSvc_SystemErase_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).SystemErase(ctx, req.(*SystemEraseReq)) @@ -1083,7 +1125,7 @@ func _MgmtSvc_SystemCleanup_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: 
"/mgmt.MgmtSvc/SystemCleanup", + FullMethod: MgmtSvc_SystemCleanup_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).SystemCleanup(ctx, req.(*SystemCleanupReq)) @@ -1101,7 +1143,7 @@ func _MgmtSvc_PoolUpgrade_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/PoolUpgrade", + FullMethod: MgmtSvc_PoolUpgrade_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).PoolUpgrade(ctx, req.(*PoolUpgradeReq)) @@ -1119,7 +1161,7 @@ func _MgmtSvc_SystemSetAttr_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/SystemSetAttr", + FullMethod: MgmtSvc_SystemSetAttr_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).SystemSetAttr(ctx, req.(*SystemSetAttrReq)) @@ -1137,7 +1179,7 @@ func _MgmtSvc_SystemGetAttr_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/SystemGetAttr", + FullMethod: MgmtSvc_SystemGetAttr_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).SystemGetAttr(ctx, req.(*SystemGetAttrReq)) @@ -1155,7 +1197,7 @@ func _MgmtSvc_SystemSetProp_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/SystemSetProp", + FullMethod: MgmtSvc_SystemSetProp_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).SystemSetProp(ctx, req.(*SystemSetPropReq)) @@ -1173,7 +1215,7 @@ func _MgmtSvc_SystemGetProp_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/mgmt.MgmtSvc/SystemGetProp", + FullMethod: MgmtSvc_SystemGetProp_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MgmtSvcServer).SystemGetProp(ctx, req.(*SystemGetPropReq)) diff --git a/src/control/lib/control/support.go b/src/control/lib/control/support.go new file mode 100644 index 000000000000..ee9d25d9bb21 --- /dev/null +++ b/src/control/lib/control/support.go @@ -0,0 +1,79 @@ +// +// (C) Copyright 2022-2023 Intel Corporation. +// +// SPDX-License-Identifier: BSD-2-Clause-Patent +// + +package control + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/protobuf/proto" + + ctlpb "github.com/daos-stack/daos/src/control/common/proto/ctl" +) + +type ( + // CollectLogReq contains the parameters for a collect-log request. + CollectLogReq struct { + unaryRequest + TargetFolder string + AdminNode string + ExtraLogsDir string + JsonOutput bool + LogFunction int32 + LogCmd string + LogStartDate string + LogEndDate string + LogStartTime string + LogEndTime string + StopOnError bool + } + + // CollectLogResp contains the results of a collect-log + CollectLogResp struct { + HostErrorsResp + } +) + +// CollectLog concurrently performs log collection across all hosts +// supplied in the request's hostlist, or all configured hosts if not +// explicitly specified. The function blocks until all results (successful +// or otherwise) are received, and returns a single response structure +// containing results for all host log collection operations. 
+func CollectLog(ctx context.Context, rpcClient UnaryInvoker, req *CollectLogReq) (*CollectLogResp, error) { + req.setRPC(func(ctx context.Context, conn *grpc.ClientConn) (proto.Message, error) { + return ctlpb.NewCtlSvcClient(conn).CollectLog(ctx, &ctlpb.CollectLogReq{ + TargetFolder: req.TargetFolder, + AdminNode: req.AdminNode, + ExtraLogsDir: req.ExtraLogsDir, + JsonOutput: req.JsonOutput, + LogFunction: req.LogFunction, + LogCmd: req.LogCmd, + LogStartDate: req.LogStartDate, + LogEndDate: req.LogEndDate, + LogStartTime: req.LogStartTime, + LogEndTime: req.LogEndTime, + StopOnError: req.StopOnError, + }) + }) + + ur, err := rpcClient.InvokeUnaryRPC(ctx, req) + if err != nil { + return nil, err + } + + scr := new(CollectLogResp) + for _, hostResp := range ur.Responses { + if hostResp.Error != nil { + if err := scr.addHostError(hostResp.Addr, hostResp.Error); err != nil { + return nil, err + } + continue + } + + } + + return scr, nil +} diff --git a/src/control/lib/support/README.md b/src/control/lib/support/README.md new file mode 100644 index 000000000000..9f894f5f7c19 --- /dev/null +++ b/src/control/lib/support/README.md @@ -0,0 +1,76 @@ +# support collect-log command + +The support collect-log command collects the logs on the servers or on individual clients +for debugging purposes. This option is available for the `daos_server`, `dmg` and `daos_agent` binaries. +It collects the specific logs, configs and other DAOS-related metrics and system information. + +`dmg support collect-log` is the single command that initiates the log collection +over gRPC and collects and copies the logs on each server. The logs are then rsync'd to the admin node. + +The `daos_server support collect-log` command collects the information on that particular DAOS server. It does not collect the `dmg` command information, as `dmg` needs to be run from the admin node. + +`daos_agent support collect-log` collects the information on the client, including the +DAOS client-side log and other system-related information. + +## List of items collected as part of `dmg support collect-log` + +* dmg network, storage and system command output +* daos server config +* helper_log_file mentioned in the daos server config +* control_log_file mentioned in the daos server config +* engines log_file mentioned in the daos server config +* daos metrics for all the engines +* daos_server dump-topology, version output +* system information + +## List of items collected as part of `daos_server support collect-log` + +* daos server config +* helper_log_file mentioned in the daos server config +* control_log_file mentioned in the daos server config +* engines log_file mentioned in the daos server config +* daos metrics for all the engines +* daos_server dump-topology, version output +* system information + +## List of items collected as part of `daos_agent support collect-log` + +* daos agent config +* log_file mentioned in the daos agent config +* daos client log, if `D_LOG_FILE` is set +* daos_agent dump-topology, net-scan, version output +* system information + +# support collect-log command options + +The support collect-log help output describes the use of each option.
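For example, an invocation along the following lines (host names and the target path are illustrative only) collects logs from two servers into a target folder on each host and archives the result; the full option listing follows below:

```
# dmg support collect-log -l server-01,server-02 -t /tmp/daos_support_logs -z
```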
+ +``` +# dmg support collect-log --help +Usage: + dmg [OPTIONS] support collect-log [collect-log-OPTIONS] + +Application Options: + --allow-proxy Allow proxy configuration via environment + -i, --insecure Have dmg attempt to connect without certificates + -d, --debug Enable debug output + --log-file= Log command output to the specified file + -j, --json Enable JSON output + -J, --json-logging Enable JSON-formatted log output + -o, --config-path= Client config file path + +Help Options: + -h, --help Show this help message + +[collect-log command options] + -l, --host-list= A comma separated list of addresses to connect to + -s, --stop-on-error Stop the collect-log command on very first error + -t, --target-folder= Target Folder location where log will be copied + -z, --archive Archive the log/config files + -c, --extra-logs-dir= Collect the Logs from given directory + -D, --start-date= Specify the start date, the day from log will be collected, Format: MM-DD + -F, --end-date= Specify the end date, the day till the log will be collected, Format: MM-DD + -S, --log-start-time= Specify the log collection start time, Format: HH:MM:SS + -E, --log-end-time= Specify the log collection end time, Format: HH:MM:SS + -e, --log-type= collect specific logs only admin,control,server and ignore everything else +``` diff --git a/src/control/lib/support/log.go b/src/control/lib/support/log.go new file mode 100644 index 000000000000..bd6cb696cf1e --- /dev/null +++ b/src/control/lib/support/log.go @@ -0,0 +1,985 @@ +// +// (C) Copyright 2022-2024 Intel Corporation. +// +// SPDX-License-Identifier: BSD-2-Clause-Patent +// + +package support + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/pkg/errors" + "gopkg.in/yaml.v2" + + "github.com/daos-stack/daos/src/control/common" + "github.com/daos-stack/daos/src/control/lib/hardware" + "github.com/daos-stack/daos/src/control/lib/hardware/hwprov" + "github.com/daos-stack/daos/src/control/logging" + "github.com/daos-stack/daos/src/control/server/config" +) + +const ( + CopyServerConfigEnum int32 = iota + CollectSystemCmdEnum + CollectServerLogEnum + CollectExtraLogsDirEnum + CollectDaosServerCmdEnum + CollectDmgCmdEnum + CollectDmgDiskInfoEnum + CollectAgentCmdEnum + CollectClientLogEnum + CollectAgentLogEnum + CopyAgentConfigEnum + RsyncLogEnum + ArchiveLogsEnum +) + +type CollectLogSubCmd struct { + StopOnError bool `short:"s" long:"stop-on-error" description:"Stop the collect-log command on very first error"` + TargetFolder string `short:"t" long:"target-folder" description:"Target Folder location where log will be copied"` + Archive bool `short:"z" long:"archive" description:"Archive the log/config files"` + ExtraLogsDir string `short:"c" long:"extra-logs-dir" description:"Collect the Logs from given directory"` + LogStartDate string `short:"D" long:"start-date" description:"Specify the start date, the day from log will be collected, Format: MM-DD"` + LogEndDate string `short:"F" long:"end-date" description:"Specify the end date, the day till the log will be collected, Format: MM-DD"` + LogStartTime string `short:"S" long:"log-start-time" description:"Specify the log collection start time, Format: HH:MM:SS"` + LogEndTime string `short:"E" long:"log-end-time" description:"Specify the log collection end time, Format: HH:MM:SS"` +} + +type LogTypeSubCmd struct { + LogType string `short:"e" long:"log-type" description:"collect specific logs only 
admin,control,server and ignore everything else"` +} + +const ( + MMDDYYYY = "1-2-2006" + HHMMSS = "15:4:5" + MMDDHHMMSS = "1/2-15:4:5" + MMDDYYYY_HHMMSS = "1-2-2006 15:4:5" + YYYYMMDD_HHMMSS = "2006/1/2 15:4:5" +) + +// Folder names to copy logs and configs +const ( + dmgSystemLogs = "DmgSystemLogs" // Copy the dmg command output for DAOS system + dmgNodeLogs = "DmgNodeLogs" // Copy the dmg command output specific to the server. + daosAgentCmdInfo = "DaosAgentCmdInfo" // Copy the daos_agent command output specific to the node. + genSystemInfo = "GenSystemInfo" // Copy the system related information + engineLogs = "EngineLogs" // Copy the engine logs + controlLogs = "ControlLogs" // Copy the control logs + adminLogs = "AdminLogs" // Copy the helper logs + clientLogs = "ClientLogs" // Copy the DAOS client logs + DaosServerConfig = "DaosServerConfig" // Copy the server config + agentConfig = "AgentConfig" // Copy the Agent config + agentLogs = "AgentLogs" // Copy the Agent log + extraLogs = "ExtraLogs" // Copy the Custom logs +) + +const DmgListDeviceCmd = "dmg storage query list-devices" +const DmgDeviceHealthCmd = "dmg storage query device-health" + +var DmgCmd = []string{ + "dmg system get-prop", + "dmg system query", + "dmg system list-pools", + "dmg system leader-query", + "dmg system get-attr", + "dmg network scan", + "dmg storage scan", + "dmg storage scan -n", + "dmg storage scan -m", + "dmg storage query list-pools -v", + "dmg storage query usage", +} + +var AgentCmd = []string{ + "daos_agent version", + "daos_agent net-scan", + "daos_agent dump-topology", +} + +var SystemCmd = []string{ + "dmesg", + "df -h", + "mount", + "ps axf", + "top -bcn1 -w512", + "lspci -D", + "sysctl -a", + "printenv", + "rpm -qa --qf '(%{INSTALLTIME:date}): %{NAME}-%{VERSION}\n'", +} + +var ServerLog = []string{ + "EngineLog", + "ControlLog", + "HelperLog", +} + +var DaosServerCmd = []string{ + "daos_server version", + "daos_metrics", + "dump-topology", +} + +type ProgressBar struct { + Start int // start int number + Total int // end int number + Steps int // number to be increased per step + NoDisplay bool // Option to skip progress bar if Json output is enabled +} + +type CollectLogsParams struct { + Config string + Hostlist string + TargetFolder string + AdminNode string + ExtraLogsDir string + JsonOutput bool + LogFunction int32 + LogCmd string + LogStartDate string + LogEndDate string + LogStartTime string + LogEndTime string + StopOnError bool +} + +type logCopy struct { + cmd string + option string +} + +// Verify if the date and time argument is valid and return error if it's invalid +func (cmd *CollectLogSubCmd) DateTimeValidate() error { + if cmd.LogStartDate != "" || cmd.LogEndDate != "" { + startDate, err := time.Parse(MMDDYYYY, cmd.LogStartDate) + if err != nil { + return errors.New("Invalid date, please provide the startDate in MM-DD-YYYY format") + } + + endDate, err := time.Parse(MMDDYYYY, cmd.LogEndDate) + if err != nil { + return errors.New("Invalid date, please provide the endDate in MM-DD-YYYY format") + } + + if startDate.After(endDate) { + return errors.New("start-date can not be after end-date") + } + } + + if cmd.LogStartTime != "" { + _, err := time.Parse(HHMMSS, cmd.LogStartTime) + if err != nil { + return errors.New("Invalid log-start-time, please provide the time in HH:MM:SS format") + } + } + + if cmd.LogEndTime != "" { + _, err := time.Parse(HHMMSS, cmd.LogEndTime) + if err != nil { + return errors.New("Invalid log-end-time, please provide the time in HH:MM:SS format") + } + 
} + + return nil +} + +// Verify LogType argument is valid.Return error, if it's not matching as describer in help +func (cmd *LogTypeSubCmd) LogTypeValidate() ([]string, error) { + if cmd.LogType == "" { + return ServerLog, nil + } + + logType := []string{} + logTypeIn := strings.FieldsFunc(cmd.LogType, logTypeSplit) + + for _, value := range logTypeIn { + if value != "admin" && value != "control" && value != "server" { + return nil, errors.New("Invalid log-type, please use admin,control,server log-type only") + } + + switch value { + case "admin": + logType = append(logType, "HelperLog") + case "control": + logType = append(logType, "ControlLog") + case "server": + logType = append(logType, "EngineLog") + } + } + + return logType, nil +} + +func logTypeSplit(r rune) bool { + return r == ',' +} + +// Print the progress while collect-log command is in progress +func (p *ProgressBar) Display() string { + if !(p.NoDisplay) { + // Return the progress End string. + if p.Start == p.Total { + printString := fmt.Sprintf("\r[%-100s] %8d/%d\n", strings.Repeat("=", 100), p.Start, p.Total) + return printString + } + // Return the current progress string. + p.Start = p.Start + 1 + printString := fmt.Sprintf("\r[%-100s] %8d/%d", strings.Repeat("=", p.Steps*p.Start), p.Start, p.Total) + return printString + } + + return "" +} + +// Check if daos_engine process is running on server and return the bool value accordingly. +func checkEngineState(log logging.Logger) (bool, error) { + _, err := exec.Command("bash", "-c", "pidof daos_engine").Output() + if err != nil { + return false, errors.Wrap(err, "daos_engine is not running on server") + } + + return true, nil +} + +// Get the server config from the running daos engine process +func getRunningConf(log logging.Logger) (string, error) { + running_config := "" + runState, err := checkEngineState(log) + if err != nil { + return "", err + } + + if runState { + cmd := "ps -eo args | grep daos_engine | head -n 1 | grep -oP '(?<=-d )[^ ]*'" + stdout, err := exec.Command("bash", "-c", cmd).Output() + if err != nil { + return "", errors.Wrap(err, "daos_engine is not running on server") + } + running_config = filepath.Join(strings.TrimSpace(string(stdout)), config.ConfigOut) + } + + return running_config, nil +} + +// Get the server config, either from the running daos engine or default +func getServerConf(log logging.Logger, opts ...CollectLogsParams) (string, error) { + cfgPath, err := getRunningConf(log) + + if cfgPath == "" { + cfgPath = filepath.Join(config.DefaultServer().SocketDir, config.ConfigOut) + } + + if err != nil { + return cfgPath, nil + } + + log.Debugf(" -- Server Config File is %s", cfgPath) + return cfgPath, nil +} + +// Copy file from source to destination +func cpLogFile(src, dst string, log logging.Logger) error { + log_file_name := filepath.Base(src) + log.Debugf(" -- Copy File %s to %s\n", log_file_name, dst) + + err := common.CpFile(src, filepath.Join(dst, log_file_name)) + if err != nil { + return errors.Wrap(err, "unable to Copy File") + } + + return nil +} + +// Copy Command output to the file +func cpOutputToFile(target string, log logging.Logger, cp ...logCopy) (string, error) { + // Run command and copy output to the file + // executing as sub shell enables pipes in cmd string + runCmd := strings.Join([]string{cp[0].cmd, cp[0].option}, " ") + out, err := exec.Command("sh", "-c", runCmd).CombinedOutput() + if err != nil { + return "", errors.New(string(out)) + } + + cmd := strings.ReplaceAll(cp[0].cmd, " -", "_") + cmd = 
strings.ReplaceAll(cmd, " ", "_") + log.Debugf("Collecting DAOS command output = %s > %s ", runCmd, filepath.Join(target, cmd)) + + if err := ioutil.WriteFile(filepath.Join(target, cmd), out, 0644); err != nil { + return "", errors.Wrapf(err, "failed to write %s", filepath.Join(target, cmd)) + } + + return string(out), nil +} + +// Create the Archive of log folder. +func ArchiveLogs(log logging.Logger, opts ...CollectLogsParams) error { + var buf bytes.Buffer + err := common.FolderCompress(opts[0].TargetFolder, &buf) + if err != nil { + return err + } + + // write to the the .tar.gz + tarFileName := fmt.Sprintf("%s.tar.gz", opts[0].TargetFolder) + log.Debugf("Archiving the log folder %s", tarFileName) + fileToWrite, err := os.OpenFile(tarFileName, os.O_CREATE|os.O_RDWR, os.FileMode(0600)) + if err != nil { + return err + } + defer fileToWrite.Close() + + _, err = io.Copy(fileToWrite, &buf) + if err != nil { + return err + } + + return nil +} + +// Get the system hostname +func GetHostName() (string, error) { + hn, err := exec.Command("hostname", "-s").Output() + if err != nil { + return "", errors.Wrapf(err, "Error running hostname -s command %s", hn) + } + out := strings.Split(string(hn), "\n") + + return out[0], nil +} + +// Create the local folder on each servers +func createFolder(target string, log logging.Logger) error { + if _, err := os.Stat(target); err != nil { + log.Debugf("Log folder is not Exists, so creating %s", target) + + if err := os.MkdirAll(target, 0700); err != nil { + return err + } + } + + return nil +} + +// Create the individual folder on each server based on hostname +func createHostFolder(dst string, log logging.Logger) (string, error) { + hn, err := GetHostName() + if err != nil { + return "", err + } + + targetLocation := filepath.Join(dst, hn) + err = createFolder(targetLocation, log) + if err != nil { + return "", err + } + + return targetLocation, nil +} + +// Create the TargetFolder on each server +func createHostLogFolder(dst string, log logging.Logger, opts ...CollectLogsParams) (string, error) { + targetLocation, err := createHostFolder(opts[0].TargetFolder, log) + if err != nil { + return "", err + } + + targetDst := filepath.Join(targetLocation, dst) + err = createFolder(targetDst, log) + if err != nil { + return "", err + } + + return targetDst, nil + +} + +// Get all the servers name from the dmg query +func getSysNameFromQuery(configPath string, log logging.Logger) ([]string, error) { + var hostNames []string + + dName, err := exec.Command("sh", "-c", "domainname").Output() + if err != nil { + return nil, errors.Wrapf(err, "Error running command domainname with %s", dName) + } + domainName := strings.Split(string(dName), "\n") + + cmd := strings.Join([]string{"dmg", "system", "query", "-v", "-o", configPath}, " ") + out, err := exec.Command("sh", "-c", cmd).Output() + if err != nil { + return nil, errors.Wrapf(err, "Error running command %s with %s", cmd, out) + } + temp := strings.Split(string(out), "\n") + + if len(temp) > 0 { + for _, hn := range temp[2 : len(temp)-2] { + hn = strings.ReplaceAll(strings.Fields(hn)[3][1:], domainName[0], "") + hn = strings.TrimSuffix(hn, ".") + hostNames = append(hostNames, hn) + } + } else { + return nil, errors.Wrapf(err, "No system found for command %s", cmd) + } + + return hostNames, nil +} + +// R sync logs from individual servers to Admin node +func rsyncLog(log logging.Logger, opts ...CollectLogsParams) error { + targetLocation, err := createHostFolder(opts[0].TargetFolder, log) + if err != nil { + return 
err + } + + cmd := strings.Join([]string{ + "rsync", + "-av", + "--blocking-io", + targetLocation, + opts[0].AdminNode + ":" + opts[0].TargetFolder}, + " ") + + out, err := exec.Command("sh", "-c", cmd).Output() + if err != nil { + return errors.Wrapf(err, "Error running command %s %s", cmd, string(out)) + } + log.Infof("rsyncCmd:= %s stdout:\n%s\n\n", cmd, string(out)) + + return nil +} + +// Collect the custom log folder +func collectExtraLogsDir(log logging.Logger, opts ...CollectLogsParams) error { + log.Infof("Log will be collected from custom location %s", opts[0].ExtraLogsDir) + + hn, err := GetHostName() + if err != nil { + return err + } + + customLogFolder := filepath.Join(opts[0].TargetFolder, hn, extraLogs) + err = createFolder(customLogFolder, log) + if err != nil { + return err + } + + err = common.CpDir(opts[0].ExtraLogsDir, customLogFolder) + if err != nil { + return err + } + + return nil +} + +// Collect the disk info using dmg command from each server. +func collectDmgDiskInfo(log logging.Logger, opts ...CollectLogsParams) error { + var hostNames []string + var output string + + hostNames, err := getSysNameFromQuery(opts[0].Config, log) + if err != nil { + return err + } + if len(opts[0].Hostlist) > 0 { + hostNames = strings.Fields(opts[0].Hostlist) + } + + for _, hostName := range hostNames { + // Copy all the devices information for each server + dmg := logCopy{} + dmg.cmd = DmgListDeviceCmd + dmg.option = strings.Join([]string{"-o", opts[0].Config, "-l", hostName}, " ") + targetDmgLog := filepath.Join(opts[0].TargetFolder, hostName, dmgNodeLogs) + + // Create the Folder. + err := createFolder(targetDmgLog, log) + if err != nil { + return err + } + + output, err = cpOutputToFile(targetDmgLog, log, dmg) + if err != nil { + return err + } + + // Get each device health information from each server + for _, v1 := range strings.Split(output, "\n") { + if strings.Contains(v1, "UUID") { + device := strings.Fields(v1)[0][5:] + health := logCopy{} + health.cmd = strings.Join([]string{DmgDeviceHealthCmd, "-u", device}, " ") + health.option = strings.Join([]string{"-l", hostName, "-o", opts[0].Config}, " ") + _, err = cpOutputToFile(targetDmgLog, log, health) + if err != nil { + return err + } + } + } + } + + return nil +} + +// Run command and copy the output to file. +func collectCmdOutput(folderName string, log logging.Logger, opts ...CollectLogsParams) error { + nodeLocation, err := createHostLogFolder(folderName, log, opts...) + if err != nil { + return err + } + + agent := logCopy{} + agent.cmd = opts[0].LogCmd + _, err = cpOutputToFile(nodeLocation, log, agent) + if err != nil { + return err + } + + return nil +} + +// Collect client side log +func collectClientLog(log logging.Logger, opts ...CollectLogsParams) error { + clientLogFile := os.Getenv("D_LOG_FILE") + if clientLogFile != "" { + clientLogLocation, err := createHostLogFolder(clientLogs, log, opts...) + if err != nil { + return err + } + + matches, _ := filepath.Glob(clientLogFile + "*") + for _, logfile := range matches { + err = cpLinesFromLog(log, logfile, clientLogLocation, opts...) + if err != nil { + return err + } + } + } + + return nil +} + +// Collect Agent log +func collectAgentLog(log logging.Logger, opts ...CollectLogsParams) error { + // Create the individual folder on each client + targetAgentLog, err := createHostLogFolder(agentLogs, log, opts...) 
+ if err != nil { + return err + } + + agentFile, err := ioutil.ReadFile(opts[0].Config) + if err != nil { + return err + } + + data := make(map[interface{}]interface{}) + err = yaml.Unmarshal(agentFile, &data) + if err != nil { + return err + } + + err = cpLogFile(fmt.Sprintf("%s", data["log_file"]), targetAgentLog, log) + if err != nil { + return err + } + + return nil +} + +// Copy Agent config file. +func copyAgentConfig(log logging.Logger, opts ...CollectLogsParams) error { + // Create the individual folder on each client + targetConfig, err := createHostLogFolder(agentConfig, log, opts...) + if err != nil { + return err + } + + err = cpLogFile(opts[0].Config, targetConfig, log) + if err != nil { + return err + } + + return nil +} + +// Collect the output of all dmg command and copy into individual file. +func collectDmgCmd(log logging.Logger, opts ...CollectLogsParams) error { + targetDmgLog := filepath.Join(opts[0].TargetFolder, dmgSystemLogs) + err := createFolder(targetDmgLog, log) + if err != nil { + return err + } + + dmg := logCopy{} + dmg.cmd = opts[0].LogCmd + dmg.option = strings.Join([]string{"-o", opts[0].Config}, " ") + + if opts[0].JsonOutput { + dmg.option = strings.Join([]string{dmg.option, "-j"}, " ") + } + + _, err = cpOutputToFile(targetDmgLog, log, dmg) + if err != nil { + return err + } + + return nil +} + +// Copy server config file. +func copyServerConfig(log logging.Logger, opts ...CollectLogsParams) error { + var cfgPath string + + if opts[0].Config != "" { + cfgPath = opts[0].Config + } else { + cfgPath, _ = getServerConf(log) + } + + serverConfig := config.DefaultServer() + serverConfig.SetPath(cfgPath) + serverConfig.Load() + // Create the individual folder on each server + targetConfig, err := createHostLogFolder(DaosServerConfig, log, opts...) + if err != nil { + return err + } + + err = cpLogFile(cfgPath, targetConfig, log) + if err != nil { + return err + } + + // Rename the file if it's hidden + result := common.IsHidden(filepath.Base(cfgPath)) + if result { + hiddenConf := filepath.Join(targetConfig, filepath.Base(cfgPath)) + nonhiddenConf := filepath.Join(targetConfig, filepath.Base(cfgPath)[1:]) + os.Rename(hiddenConf, nonhiddenConf) + } + + return nil +} + +// Calculate the start/end time provided by user. +func getDateTime(log logging.Logger, opts ...CollectLogsParams) (time.Time, time.Time, error) { + // Default Start time, in case no start time provides on start dates.This will copy log start of the day. + if opts[0].LogStartTime == "" { + opts[0].LogStartTime = "00:00:00" + } + + // Default End time, in case no End time provides.This will copy log till the End of the day. + if opts[0].LogEndTime == "" { + opts[0].LogEndTime = "23:59:59" + } + + startTimeStr := fmt.Sprintf("%s %s", opts[0].LogStartDate, opts[0].LogStartTime) + endTimeStr := fmt.Sprintf("%s %s", opts[0].LogEndDate, opts[0].LogEndTime) + + actStartTime, err := time.Parse(MMDDYYYY_HHMMSS, startTimeStr) + if err != nil { + return time.Time{}, time.Time{}, err + } + + actEndTime, err := time.Parse(MMDDYYYY_HHMMSS, endTimeStr) + if err != nil { + return time.Time{}, time.Time{}, err + } + + return actStartTime, actEndTime, nil +} + +// Copy only specific lines from the server logs based on the Start/End date and time, provided by user. 
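Before the actual implementation below, a rough standalone illustration of the idea (the helper name is hypothetical, and the start/end values are assumed to be already normalized to the same year-less layout, as the real code does for engine logs): an engine log timestamp such as `02/27-10:15:30.12` is matched, trimmed of its sub-second part, parsed with the `MMDDHHMMSS` layout and compared against the requested window.

```go
package example

import (
	"regexp"
	"strings"
	"time"
)

// engineTimeFormat mirrors the MMDDHHMMSS layout defined earlier ("1/2-15:4:5").
const engineTimeFormat = "1/2-15:4:5"

// engineStamp matches the leading "MM/DD-HH:MM:SS.xx" timestamp of an engine log line.
var engineStamp = regexp.MustCompile(`^\d\d/\d\d-\d\d:\d\d:\d\d\.\d\d`)

// lineInWindow reports whether an engine log line carries a timestamp that
// falls inside the (start, end) window; lines without a timestamp return false
// (the real code keeps copying such lines while it is inside a matched range).
func lineInWindow(line string, start, end time.Time) (bool, error) {
	if !engineStamp.MatchString(line) {
		return false, nil
	}
	// "02/27-10:15:30.12 ..." -> "02/27-10:15:30"
	stamp := strings.SplitN(strings.Fields(line)[0], ".", 2)[0]
	ts, err := time.Parse(engineTimeFormat, stamp)
	if err != nil {
		return false, err
	}
	return ts.After(start) && ts.Before(end), nil
}
```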
+func cpLinesFromLog(log logging.Logger, srcFile string, destFile string, opts ...CollectLogsParams) error { + + // Copy the full log file in case of no dates provided + if opts[0].LogStartDate == "" && opts[0].LogEndDate == "" { + return cpLogFile(srcFile, destFile, log) + } + + // Get the start/end time provided by user for comparison. + actStartTime, actEndTime, err := getDateTime(log, opts...) + if err != nil { + return err + } + + // Create the new empty file, which will be used to copy the matching log lines. + logFileName := filepath.Join(destFile, filepath.Base(srcFile)) + writeFile, err := os.Create(logFileName) + if err != nil { + return err + } + defer writeFile.Close() + + // Open log file for reading. + readFile, err := os.Open(srcFile) + if err != nil { + return err + } + defer readFile.Close() + + // Loop through each line and identify the date and time of each log line. + // Compare the date/time stamp against user provided date/time. + scanner := bufio.NewScanner(readFile) + var cpLogLine bool + if opts[0].LogCmd == "EngineLog" { + // Remove year as engine log does not store the year information. + actStartTime, _ = time.Parse(MMDDHHMMSS, actStartTime.Format(MMDDHHMMSS)) + actEndTime, _ = time.Parse(MMDDHHMMSS, actEndTime.Format(MMDDHHMMSS)) + + var validDateTime = regexp.MustCompile(`^\d\d\/\d\d-\d\d:\d\d:\d\d.\d\d`) + for scanner.Scan() { + lineData := scanner.Text() + lineDataSlice := strings.Split(lineData, " ") + + // Verify if log line has date/time stamp and copy line if it's in range. + if validDateTime.MatchString(lineData) == false { + if cpLogLine { + _, err = writeFile.WriteString(lineData + "\n") + if err != nil { + return err + } + } + continue + } + + dateTime := strings.Split(lineDataSlice[0], "-") + timeOnly := strings.Split(dateTime[1], ".") + expDateTime := fmt.Sprintf("%s-%s", dateTime[0], timeOnly[0]) + expLogTime, _ := time.Parse(MMDDHHMMSS, expDateTime) + + // Copy line, if the log line has time stamp between the given range of start/end date and time. + if expLogTime.After(actStartTime) && expLogTime.Before(actEndTime) { + cpLogLine = true + _, err = writeFile.WriteString(lineData + "\n") + if err != nil { + return err + } + } + + if expLogTime.After(actEndTime) { + return nil + } + } + + if err := scanner.Err(); err != nil { + return err + } + + return nil + } + + // Copy log line for Helper and Control log + for scanner.Scan() { + var validDateTime = regexp.MustCompile(`\d\d\d\d/\d\d/\d\d \d\d:\d\d:\d\d`) + lineData := scanner.Text() + + // Verify if log line has date/time stamp and copy line if it's in range. + if validDateTime.MatchString(lineData) == false { + if cpLogLine { + _, err = writeFile.WriteString(lineData + "\n") + if err != nil { + return err + } + } + continue + } + + data := validDateTime.FindAllString(lineData, -1) + expLogTime, _ := time.Parse(YYYYMMDD_HHMMSS, data[0]) + // Copy line, if the log line has time stamp between the given range of start/end date and time. 
+ if expLogTime.After(actStartTime) && expLogTime.Before(actEndTime) { + cpLogLine = true + _, err = writeFile.WriteString(lineData + "\n") + if err != nil { + return err + } + } + if expLogTime.After(actEndTime) { + return nil + } + } + + if err := scanner.Err(); err != nil { + return err + } + + return nil +} + +// Collect all server side logs +func collectServerLog(log logging.Logger, opts ...CollectLogsParams) error { + var cfgPath string + + if opts[0].Config != "" { + cfgPath = opts[0].Config + } else { + cfgPath, _ = getServerConf(log) + } + serverConfig := config.DefaultServer() + serverConfig.SetPath(cfgPath) + serverConfig.Load() + + switch opts[0].LogCmd { + case "EngineLog": + if len(serverConfig.Engines) == 0 { + return errors.New("Engine count is 0 from server config") + } + + targetServerLogs, err := createHostLogFolder(engineLogs, log, opts...) + if err != nil { + return err + } + + for i := range serverConfig.Engines { + matches, _ := filepath.Glob(serverConfig.Engines[i].LogFile + "*") + for _, logFile := range matches { + err = cpLinesFromLog(log, logFile, targetServerLogs, opts...) + if err != nil && opts[0].StopOnError { + return err + } + } + } + + case "ControlLog": + targetControlLogs, err := createHostLogFolder(controlLogs, log, opts...) + if err != nil { + return err + } + + err = cpLinesFromLog(log, serverConfig.ControlLogFile, targetControlLogs, opts...) + if err != nil { + return err + } + + case "HelperLog": + targetAdminLogs, err := createHostLogFolder(adminLogs, log, opts...) + if err != nil { + return err + } + + err = cpLinesFromLog(log, serverConfig.HelperLogFile, targetAdminLogs, opts...) + if err != nil { + return err + } + } + + return nil +} + +// Collect daos server metrics. +func collectDaosMetrics(daosNodeLocation string, log logging.Logger, opts ...CollectLogsParams) error { + engineRunState, err := checkEngineState(log) + if err != nil { + return err + } + + if engineRunState { + daos := logCopy{} + var cfgPath string + if opts[0].Config != "" { + cfgPath = opts[0].Config + } else { + cfgPath, _ = getServerConf(log) + } + serverConfig := config.DefaultServer() + serverConfig.SetPath(cfgPath) + serverConfig.Load() + + for i := range serverConfig.Engines { + engineId := fmt.Sprintf("%d", i) + daos.cmd = strings.Join([]string{"daos_metrics", "-S", engineId}, " ") + + _, err := cpOutputToFile(daosNodeLocation, log, daos) + if err != nil { + log.Errorf("Failed to run %s: %v", daos.cmd, err) + } + } + } else { + return errors.New("-- FAIL -- Daos Engine is not Running, so daos_metrics will not be collected") + } + + return nil +} + +// Collect system side info of daos_server command. +func collectDaosServerCmd(log logging.Logger, opts ...CollectLogsParams) error { + daosNodeLocation, err := createHostLogFolder(dmgNodeLogs, log, opts...) + if err != nil { + return err + } + + switch opts[0].LogCmd { + case "daos_metrics": + err = collectDaosMetrics(daosNodeLocation, log, opts...) 
+ if err != nil { + return err + } + case "dump-topology": + hwlog := logging.NewCommandLineLogger() + hwProv := hwprov.DefaultTopologyProvider(hwlog) + topo, err := hwProv.GetTopology(context.Background()) + if err != nil { + return err + } + f, err := os.Create(filepath.Join(daosNodeLocation, "daos_server_dump-topology")) + if err != nil { + return err + } + defer f.Close() + hardware.PrintTopology(topo, f) + default: + daos := logCopy{} + daos.cmd = opts[0].LogCmd + _, err := cpOutputToFile(daosNodeLocation, log, daos) + if err != nil { + return err + } + } + + return nil +} + +// Common Entry/Exit point function. +func CollectSupportLog(log logging.Logger, opts ...CollectLogsParams) error { + switch opts[0].LogFunction { + case CopyServerConfigEnum: + return copyServerConfig(log, opts...) + case CollectSystemCmdEnum: + return collectCmdOutput(genSystemInfo, log, opts...) + case CollectServerLogEnum: + return collectServerLog(log, opts...) + case CollectExtraLogsDirEnum: + return collectExtraLogsDir(log, opts...) + case CollectDaosServerCmdEnum: + return collectDaosServerCmd(log, opts...) + case CollectDmgCmdEnum: + return collectDmgCmd(log, opts...) + case CollectDmgDiskInfoEnum: + return collectDmgDiskInfo(log, opts...) + case CollectAgentCmdEnum: + return collectCmdOutput(daosAgentCmdInfo, log, opts...) + case CollectClientLogEnum: + return collectClientLog(log, opts...) + case CollectAgentLogEnum: + return collectAgentLog(log, opts...) + case CopyAgentConfigEnum: + return copyAgentConfig(log, opts...) + case RsyncLogEnum: + return rsyncLog(log, opts...) + case ArchiveLogsEnum: + return ArchiveLogs(log, opts...) + } + + return nil +} diff --git a/src/control/lib/support/log_test.go b/src/control/lib/support/log_test.go new file mode 100644 index 000000000000..fd68639bf57e --- /dev/null +++ b/src/control/lib/support/log_test.go @@ -0,0 +1,1232 @@ +// +// (C) Copyright 2022-2024 Intel Corporation. 
+// +// SPDX-License-Identifier: BSD-2-Clause-Patent +// + +package support + +import ( + "io/ioutil" + "os" + "path/filepath" + "reflect" + "strings" + "testing" + "time" + + "github.com/pkg/errors" + + "github.com/daos-stack/daos/src/control/common/test" + "github.com/daos-stack/daos/src/control/logging" + "github.com/daos-stack/daos/src/control/server/config" +) + +const mockSocketDir = "/tmp/mock_socket_dir/" + +func TestSupport_Display(t *testing.T) { + progress := ProgressBar{ + Start: 1, + Total: 7, + NoDisplay: false, + } + + for name, tc := range map[string]struct { + Start int + Steps int + NoDisplay bool + expResult string + }{ + "Valid Step count progress": { + Start: 2, + Steps: 7, + NoDisplay: false, + expResult: "\r[===================== ] 3/7", + }, + "Valid progress end": { + Start: 7, + Steps: 7, + NoDisplay: false, + expResult: "\r[====================================================================================================] 7/7\n", + }, + "No Progress Bar if JsonOutput is Enabled": { + Start: 2, + Steps: 7, + NoDisplay: true, + expResult: "", + }, + } { + t.Run(name, func(t *testing.T) { + progress.Start = tc.Start + progress.Steps = tc.Steps + progress.NoDisplay = tc.NoDisplay + gotOutput := progress.Display() + test.AssertEqual(t, tc.expResult, gotOutput, "") + }) + } +} + +func TestSupport_checkEngineState(t *testing.T) { + log, buf := logging.NewTestLogger(t.Name()) + defer test.ShowBufferOnFailure(t, buf) + + for name, tc := range map[string]struct { + expResult bool + expErr error + }{ + "When process is not running": { + expResult: false, + expErr: errors.New("daos_engine is not running on server: exit status 1"), + }, + } { + t.Run(name, func(t *testing.T) { + gotOutput, gotErr := checkEngineState(log) + test.AssertEqual(t, tc.expResult, gotOutput, "Result is not as expected") + test.CmpErr(t, tc.expErr, gotErr) + }) + } +} + +func TestSupport_getRunningConf(t *testing.T) { + log, buf := logging.NewTestLogger(t.Name()) + defer test.ShowBufferOnFailure(t, buf) + + for name, tc := range map[string]struct { + expResult string + expErr error + }{ + "default config is null if no engine is running": { + expResult: "", + expErr: errors.New("daos_engine is not running on server: exit status 1"), + }, + } { + t.Run(name, func(t *testing.T) { + gotOutput, gotErr := getRunningConf(log) + test.AssertEqual(t, tc.expResult, gotOutput, "Result is not as expected") + test.CmpErr(t, tc.expErr, gotErr) + }) + } +} + +func TestSupport_getServerConf(t *testing.T) { + log, buf := logging.NewTestLogger(t.Name()) + defer test.ShowBufferOnFailure(t, buf) + + for name, tc := range map[string]struct { + expResult string + expErr error + }{ + "default config path if no engine is running": { + expResult: config.ConfigOut, + expErr: nil, + }, + } { + t.Run(name, func(t *testing.T) { + gotOutput, gotErr := getServerConf(log) + test.AssertEqual(t, tc.expResult, filepath.Base(gotOutput), "daos server config file is not what we expected") + test.CmpErr(t, tc.expErr, gotErr) + }) + } +} + +func TestSupport_cpLogFile(t *testing.T) { + log, buf := logging.NewTestLogger(t.Name()) + defer test.ShowBufferOnFailure(t, buf) + + srcTestDir, srcCleanup := test.CreateTestDir(t) + defer srcCleanup() + srcPath := test.CreateTestFile(t, srcTestDir, "Temp File\n") + + dstTestDir, dstCleanup := test.CreateTestDir(t) + defer dstCleanup() + + for name, tc := range map[string]struct { + src string + dst string + expErr error + }{ + "Copy file to valid Directory": { + src: srcPath, + dst: dstTestDir, + 
expErr: nil, + }, + "Copy file to in valid Directory": { + src: srcPath, + dst: dstTestDir + "/tmp", + expErr: errors.New("unable to Copy File"), + }, + } { + t.Run(name, func(t *testing.T) { + gotErr := cpLogFile(tc.src, tc.dst, log) + test.CmpErr(t, tc.expErr, gotErr) + }) + } +} + +func TestSupport_createFolder(t *testing.T) { + log, buf := logging.NewTestLogger(t.Name()) + defer test.ShowBufferOnFailure(t, buf) + targetTestDir, targetCleanup := test.CreateTestDir(t) + defer targetCleanup() + srcPath := test.CreateTestFile(t, targetTestDir, "Temp File\n") + + for name, tc := range map[string]struct { + target string + expErr error + }{ + "Create the Valid directory": { + target: targetTestDir + "/test1", + expErr: nil, + }, + "Create the directory with existing file name": { + target: srcPath + "/file1", + expErr: errors.New("mkdir " + srcPath + ": not a directory"), + }, + } { + t.Run(name, func(t *testing.T) { + gotErr := createFolder(tc.target, log) + test.CmpErr(t, tc.expErr, gotErr) + }) + } +} + +func TestSupport_GetHostName(t *testing.T) { + hostName, _ := os.Hostname() + for name, tc := range map[string]struct { + expResult string + expErr error + }{ + "Check Valid Hostname": { + expResult: hostName, + expErr: nil, + }, + } { + t.Run(name, func(t *testing.T) { + gotOutput, gotErr := GetHostName() + test.CmpErr(t, tc.expErr, gotErr) + + if !strings.Contains(tc.expResult, gotOutput) { + t.Errorf("Hostname '%s' is not part of full hostname '%s')", + gotOutput, tc.expResult) + } + }) + } +} + +func TestSupport_cpOutputToFile(t *testing.T) { + log, buf := logging.NewTestLogger(t.Name()) + defer test.ShowBufferOnFailure(t, buf) + targetTestDir, targetCleanup := test.CreateTestDir(t) + defer targetCleanup() + + hostName, _ := os.Hostname() + logCp := logCopy{} + + for name, tc := range map[string]struct { + target string + cmd string + option string + expResult string + expErr error + }{ + "Check valid Command without option": { + target: targetTestDir, + cmd: "hostname", + option: "", + expResult: hostName, + expErr: nil, + }, + "Check valid Command with option": { + target: targetTestDir, + cmd: "hostname", + option: "-s", + expResult: hostName, + expErr: nil, + }, + "Check invalid Command": { + target: targetTestDir, + cmd: "hostnamefoo", + option: "", + expResult: "", + expErr: errors.New("command not found"), + }, + "Check valid Command with invalid target directory": { + target: targetTestDir + "/dir1", + cmd: "hostname", + option: "", + expResult: "", + expErr: errors.New("failed to write"), + }, + } { + t.Run(name, func(t *testing.T) { + logCp.cmd = tc.cmd + logCp.option = tc.option + gotOutput, gotErr := cpOutputToFile(tc.target, log, logCp) + gotOutput = strings.TrimRight(gotOutput, "\n") + if !strings.Contains(tc.expResult, gotOutput) { + t.Errorf("Hostname '%s' is not part of full hostname '%s')", + gotOutput, tc.expResult) + } + test.CmpErr(t, tc.expErr, gotErr) + }) + } +} + +func TestSupport_ArchiveLogs(t *testing.T) { + log, buf := logging.NewTestLogger(t.Name()) + defer test.ShowBufferOnFailure(t, buf) + targetTestDir, targetCleanup := test.CreateTestDir(t) + defer targetCleanup() + test.CreateTestFile(t, targetTestDir, "Temp Log File\n") + + arcLog := CollectLogsParams{} + + for name, tc := range map[string]struct { + targetFolder string + expErr error + }{ + "Directory with valid log file": { + targetFolder: targetTestDir, + expErr: nil, + }, + "Invalid Directory": { + targetFolder: targetTestDir + "/foo/bar", + expErr: errors.New("no such file or directory"), + }, + 
} { + t.Run(name, func(t *testing.T) { + arcLog.TargetFolder = tc.targetFolder + gotErr := ArchiveLogs(log, arcLog) + test.CmpErr(t, tc.expErr, gotErr) + }) + } +} + +func TestSupport_createHostLogFolder(t *testing.T) { + log, buf := logging.NewTestLogger(t.Name()) + defer test.ShowBufferOnFailure(t, buf) + targetTestDir, targetCleanup := test.CreateTestDir(t) + defer targetCleanup() + srcPath := test.CreateTestFile(t, targetTestDir, "Temp File\n") + + collLogParams := CollectLogsParams{} + + for name, tc := range map[string]struct { + dst string + targetFolder string + expErr error + }{ + "Create the valid Log directory": { + dst: dmgSystemLogs, + targetFolder: targetTestDir, + expErr: nil, + }, + "Create the invalid Log directory": { + dst: dmgSystemLogs, + targetFolder: srcPath + "/file1", + expErr: errors.New("mkdir " + srcPath + ": not a directory"), + }, + } { + t.Run(name, func(t *testing.T) { + collLogParams.TargetFolder = tc.targetFolder + _, gotErr := createHostLogFolder(tc.dst, log, collLogParams) + test.CmpErr(t, tc.expErr, gotErr) + }) + } +} + +func TestSupport_rsyncLog(t *testing.T) { + log, buf := logging.NewTestLogger(t.Name()) + defer test.ShowBufferOnFailure(t, buf) + targetTestDir, targetCleanup := test.CreateTestDir(t) + defer targetCleanup() + srcPath := test.CreateTestFile(t, targetTestDir, "Temp File\n") + hostName, _ := os.Hostname() + + rsLog := CollectLogsParams{} + + for name, tc := range map[string]struct { + targetFolder string + AdminNode string + expErr error + }{ + "rsync to invalid Target directory": { + targetFolder: targetTestDir + "/foo/bar", + AdminNode: hostName + ":/tmp/foo/bar/", + expErr: errors.New("Error running command"), + }, + "rsync invalid log directory": { + targetFolder: srcPath + "/file1", + AdminNode: hostName, + expErr: errors.New("not a directory"), + }, + } { + t.Run(name, func(t *testing.T) { + rsLog.TargetFolder = tc.targetFolder + rsLog.AdminNode = tc.AdminNode + gotErr := rsyncLog(log, rsLog) + test.CmpErr(t, tc.expErr, gotErr) + }) + } +} + +func TestSupport_collectExtraLogsDir(t *testing.T) { + log, buf := logging.NewTestLogger(t.Name()) + defer test.ShowBufferOnFailure(t, buf) + targetTestDir, targetCleanup := test.CreateTestDir(t) + defer targetCleanup() + extraLogDir, extraLogCleanup := test.CreateTestDir(t) + defer extraLogCleanup() + srcPath := test.CreateTestFile(t, targetTestDir, "Temp File\n") + test.CreateTestFile(t, extraLogDir, "Extra Log File\n") + + rsLog := CollectLogsParams{} + + for name, tc := range map[string]struct { + targetFolder string + ExtraLogsDir string + expErr error + }{ + "Copy valid log directory": { + targetFolder: targetTestDir, + ExtraLogsDir: extraLogDir, + expErr: nil, + }, + "Copy to invalid target directory": { + targetFolder: srcPath + "/file1", + ExtraLogsDir: extraLogDir, + expErr: errors.New("not a directory"), + }, + "Copy invalid extra log directory": { + targetFolder: targetTestDir, + ExtraLogsDir: extraLogDir + "foo/bar", + expErr: errors.New("no such file or directory"), + }, + } { + t.Run(name, func(t *testing.T) { + rsLog.TargetFolder = tc.targetFolder + rsLog.ExtraLogsDir = tc.ExtraLogsDir + gotErr := collectExtraLogsDir(log, rsLog) + test.CmpErr(t, tc.expErr, gotErr) + }) + } +} + +func TestSupport_collectCmdOutput(t *testing.T) { + log, buf := logging.NewTestLogger(t.Name()) + defer test.ShowBufferOnFailure(t, buf) + targetTestDir, targetCleanup := test.CreateTestDir(t) + defer targetCleanup() + srcPath := test.CreateTestFile(t, targetTestDir, "Temp File\n") + + logCp := 
CollectLogsParams{} + + for name, tc := range map[string]struct { + folderName string + targetFolder string + cmd string + expErr error + }{ + "Valid command output": { + targetFolder: targetTestDir, + folderName: genSystemInfo, + cmd: "hostname", + expErr: nil, + }, + "Invalid command output": { + targetFolder: targetTestDir, + folderName: genSystemInfo, + cmd: "hostname-cmd-notfound", + expErr: errors.New("command not found"), + }, + "Invalid targetFolder": { + targetFolder: srcPath + "/file1", + folderName: genSystemInfo, + cmd: "hostname", + expErr: errors.New("not a directory"), + }, + } { + t.Run(name, func(t *testing.T) { + logCp.LogCmd = tc.cmd + logCp.TargetFolder = tc.targetFolder + gotErr := collectCmdOutput(tc.folderName, log, logCp) + test.CmpErr(t, tc.expErr, gotErr) + }) + } +} + +func TestSupport_collectClientLog(t *testing.T) { + log, buf := logging.NewTestLogger(t.Name()) + defer test.ShowBufferOnFailure(t, buf) + targetTestDir, targetCleanup := test.CreateTestDir(t) + defer targetCleanup() + srcPath := test.CreateTestFile(t, targetTestDir, "Temp File\n") + + collLogParams := CollectLogsParams{} + os.Setenv("D_LOG_FILE", srcPath) + + for name, tc := range map[string]struct { + targetFolder string + expErr error + }{ + "Collect valid client log": { + targetFolder: targetTestDir, + expErr: nil, + }, + "Collect invalid client log": { + targetFolder: srcPath + "/file1", + expErr: errors.New("mkdir " + srcPath + ": not a directory"), + }, + } { + t.Run(name, func(t *testing.T) { + collLogParams.TargetFolder = tc.targetFolder + gotErr := collectClientLog(log, collLogParams) + test.CmpErr(t, tc.expErr, gotErr) + }) + } +} + +func TestSupport_collectAgentLog(t *testing.T) { + log, buf := logging.NewTestLogger(t.Name()) + defer test.ShowBufferOnFailure(t, buf) + targetTestDir, targetCleanup := test.CreateTestDir(t) + defer targetCleanup() + + agentTestDir, agentCleanup := test.CreateTestDir(t) + defer agentCleanup() + + srcPath := test.CreateTestFile(t, targetTestDir, "Temp File\n") + agentConfig := test.CreateTestFile(t, agentTestDir, "log_file: "+srcPath+"\n") + agentInvalidConfig := test.CreateTestFile(t, agentTestDir, "invalid_log_file: "+srcPath+"\n") + + collLogParams := CollectLogsParams{} + + for name, tc := range map[string]struct { + targetFolder string + config string + expErr error + }{ + "Valid agent log collect": { + targetFolder: targetTestDir, + config: agentConfig, + expErr: nil, + }, + "Invalid agent log entry in yaml": { + targetFolder: targetTestDir, + config: agentInvalidConfig, + expErr: errors.New("no such file or directory"), + }, + "Without agent file": { + targetFolder: targetTestDir, + config: "", + expErr: errors.New("no such file or directory"), + }, + "Invalid agent yaml file format": { + targetFolder: targetTestDir, + config: srcPath, + expErr: errors.New("unmarshal errors"), + }, + "Invalid Agent target folder": { + targetFolder: srcPath + "/file1", + config: agentConfig, + expErr: errors.New("mkdir " + srcPath + ": not a directory"), + }, + } { + t.Run(name, func(t *testing.T) { + collLogParams.TargetFolder = tc.targetFolder + collLogParams.Config = tc.config + gotErr := collectAgentLog(log, collLogParams) + test.CmpErr(t, tc.expErr, gotErr) + }) + } +} + +func TestSupport_copyAgentConfig(t *testing.T) { + log, buf := logging.NewTestLogger(t.Name()) + defer test.ShowBufferOnFailure(t, buf) + targetTestDir, targetCleanup := test.CreateTestDir(t) + defer targetCleanup() + srcPath := test.CreateTestFile(t, targetTestDir, "Temp File\n") + agentConfig 
:= test.CreateTestFile(t, targetTestDir, "log_file: "+srcPath+"\n") + + collLogParams := CollectLogsParams{} + + for name, tc := range map[string]struct { + targetFolder string + config string + expErr error + }{ + "Valid agent log collect": { + targetFolder: targetTestDir, + config: agentConfig, + expErr: nil, + }, + "Invalid Agent log folder": { + targetFolder: srcPath + "/file1", + config: agentConfig, + expErr: errors.New("mkdir " + srcPath + ": not a directory"), + }, + } { + t.Run(name, func(t *testing.T) { + collLogParams.TargetFolder = tc.targetFolder + collLogParams.Config = tc.config + gotErr := copyAgentConfig(log, collLogParams) + test.CmpErr(t, tc.expErr, gotErr) + }) + } +} + +func TestSupport_copyServerConfig(t *testing.T) { + log, buf := logging.NewTestLogger(t.Name()) + defer test.ShowBufferOnFailure(t, buf) + targetTestDir, targetCleanup := test.CreateTestDir(t) + defer targetCleanup() + srcPath := test.CreateTestFile(t, targetTestDir, "Temp File\n") + serverConfig := test.CreateTestFile(t, targetTestDir, "log_file: "+srcPath+"\n") + + collLogParams := CollectLogsParams{} + defaultSeverConfig, _ := getServerConf(log, collLogParams) + + for name, tc := range map[string]struct { + createFile bool + targetFolder string + config string + expErr error + }{ + "Copy server file which is not available": { + createFile: false, + targetFolder: targetTestDir, + config: mockSocketDir + config.ConfigOut + "notavailable", + expErr: errors.New("no such file or directory"), + }, + "Copy to Invalid folder": { + createFile: false, + targetFolder: srcPath + "/file1", + config: serverConfig, + expErr: errors.New("mkdir " + srcPath + ": not a directory"), + }, + } { + t.Run(name, func(t *testing.T) { + if tc.createFile { + data := []byte("hello\nDAOS\n") + if err := os.WriteFile(defaultSeverConfig, data, 0644); err != nil { + t.Fatalf(err.Error()) + } + } + collLogParams.TargetFolder = tc.targetFolder + collLogParams.Config = tc.config + gotErr := copyServerConfig(log, collLogParams) + test.CmpErr(t, tc.expErr, gotErr) + + if tc.createFile { + if err := os.Remove(defaultSeverConfig); err != nil { + t.Fatalf(err.Error()) + } + } + }) + } +} + +func TestSupport_collectServerLog(t *testing.T) { + log, buf := logging.NewTestLogger(t.Name()) + defer test.ShowBufferOnFailure(t, buf) + targetTestDir, targetCleanup := test.CreateTestDir(t) + defer targetCleanup() + engineLog0 := test.CreateTestFile(t, targetTestDir, "Engine Log 0") + engineLog1 := test.CreateTestFile(t, targetTestDir, "Engine Log 1") + controlLog := test.CreateTestFile(t, targetTestDir, "Control Log") + helperLog := test.CreateTestFile(t, targetTestDir, "Helper Log") + + MockValidServerConfig := `port: 10001 +transport_config: + allow_insecure: false + client_cert_dir: /etc/daos/certs/clients + ca_cert: /etc/daos/certs/daosCA.crt + cert: /etc/daos/certs/server.crt + key: /etc/daos/certs/server.key +engines: +- targets: 12 + nr_xs_helpers: 2 + first_core: 0 + log_file: ` + engineLog0 + ` + storage: + - class: dcpm + scm_mount: /mnt/daos0 + scm_list: + - /dev/pmem0 + - class: nvme + bdev_list: + - "0000:00:00.0" + - "0000:01:00.0" + - "0000:02:00.0" + - "0000:03:00.0" + provider: ofi+verbs + fabric_iface: ib0 + fabric_iface_port: 31416 + pinned_numa_node: 0 +- targets: 6 + nr_xs_helpers: 0 + first_core: 0 + log_file: ` + engineLog1 + ` + storage: + - class: dcpm + scm_mount: /mnt/daos1 + scm_list: + - /dev/pmem1 + - class: nvme + bdev_list: + - "0000:04:00.0" + - "0000:05:00.0" + - "0000:06:00.0" + provider: ofi+verbs + fabric_iface: 
ib1 + fabric_iface_port: 32416 + pinned_numa_node: 1 +disable_vfio: false +disable_vmd: false +enable_hotplug: false +nr_hugepages: 6144 +disable_hugepages: false +control_log_mask: INFO +control_log_file: ` + controlLog + ` +helper_log_file: ` + helperLog + ` +core_dump_filter: 19 +name: daos_server +socket_dir: /var/run/daos_server +provider: ofi+verbs +access_points: +- hostX:10002 +fault_cb: "" +hyperthreads: false +` + + MockInvalidServerConfig := `port: 10001 +transport_config: + allow_insecure: false + client_cert_dir: /etc/daos/certs/clients + ca_cert: /etc/daos/certs/daosCA.crt + cert: /etc/daos/certs/server.crt + key: /etc/daos/certs/server.key +engines: +- targets: 12 + nr_xs_helpers: 2 + first_core: 0 + log_file: ` + targetTestDir + ` /dir1/invalid_engine0.log + storage: + - class: dcpm + scm_mount: /mnt/daos0 + scm_list: + - /dev/pmem0 + - class: nvme + bdev_list: + - "0000:00:00.0" + - "0000:01:00.0" + - "0000:02:00.0" + - "0000:03:00.0" + provider: ofi+verbs + fabric_iface: ib0 + fabric_iface_port: 31416 + pinned_numa_node: 0 +- targets: 6 + nr_xs_helpers: 0 + first_core: 0 + log_file: ` + targetTestDir + ` /dir1/invalid_engine1.log + storage: + - class: dcpm + scm_mount: /mnt/daos1 + scm_list: + - /dev/pmem1 + - class: nvme + bdev_list: + - "0000:04:00.0" + - "0000:05:00.0" + - "0000:06:00.0" + provider: ofi+verbs + fabric_iface: ib1 + fabric_iface_port: 32416 + pinned_numa_node: 1 +disable_vfio: false +disable_vmd: false +enable_hotplug: false +nr_hugepages: 6144 +disable_hugepages: false +control_log_mask: INFO +control_log_file: ` + targetTestDir + ` /dir1/invalid_control.log +helper_log_file: ` + targetTestDir + ` /dir1/invalid_helper.log +core_dump_filter: 19 +name: daos_server +socket_dir: /var/run/daos_server +provider: ofi+verbs +access_points: +- hostX:10002 +fault_cb: "" +hyperthreads: false +` + + MockZeroEngineServerConfig := `port: 10001 +transport_config: + allow_insecure: false + client_cert_dir: /etc/daos/certs/clients + ca_cert: /etc/daos/certs/daosCA.crt + cert: /etc/daos/certs/server.crt + key: /etc/daos/certs/server.key +` + + validConfig := test.CreateTestFile(t, targetTestDir, MockValidServerConfig) + invalidConfig := test.CreateTestFile(t, targetTestDir, MockInvalidServerConfig) + zeroEngineConfig := test.CreateTestFile(t, targetTestDir, MockZeroEngineServerConfig) + collLogParams := CollectLogsParams{} + + for name, tc := range map[string]struct { + logCmd string + targetFolder string + config string + expErr error + }{ + "Copy Server Logs": { + targetFolder: targetTestDir, + config: validConfig, + logCmd: "EngineLog", + expErr: nil, + }, + "Copy Control Logs": { + targetFolder: targetTestDir, + config: validConfig, + logCmd: "ControlLog", + expErr: nil, + }, + "Copy Helper Logs": { + targetFolder: targetTestDir, + config: validConfig, + logCmd: "HelperLog", + expErr: nil, + }, + "Copy Invalid Control Logs": { + targetFolder: targetTestDir, + config: invalidConfig, + logCmd: "ControlLog", + expErr: errors.New("no such file or directory"), + }, + "Copy Invalid Helper Logs": { + targetFolder: targetTestDir, + config: invalidConfig, + logCmd: "HelperLog", + expErr: errors.New("no such file or directory"), + }, + "Copy Server Logs with invalid config": { + targetFolder: targetTestDir, + config: zeroEngineConfig, + logCmd: "EngineLog", + expErr: errors.New("Engine count is 0 from server config"), + }, + } { + t.Run(name, func(t *testing.T) { + collLogParams.TargetFolder = tc.targetFolder + collLogParams.Config = tc.config + collLogParams.LogCmd = 
tc.logCmd + gotErr := collectServerLog(log, collLogParams) + test.CmpErr(t, tc.expErr, gotErr) + }) + } +} + +func TestSupport_DateTimeValidate(t *testing.T) { + for name, tc := range map[string]struct { + logStartDate string + logEndDate string + logStartTime string + logEndTime string + expErr error + }{ + "Empty Date and Time": { + expErr: nil, + }, + "Valid StartDate No EndDate": { + logStartDate: "12-01-2024", + expErr: errors.New("Invalid date, please provide the endDate in MM-DD-YYYY format"), + }, + "No StartDate Valid EndDate": { + logEndDate: "12-31-2024", + expErr: errors.New("Invalid date, please provide the startDate in MM-DD-YYYY format"), + }, + "Invalid StartDate No EndDate": { + logStartDate: "44-22-2024", + expErr: errors.New("Invalid date, please provide the startDate in MM-DD-YYYY format"), + }, + "Invalid EndDate": { + logStartDate: "12-01-2024", + logEndDate: "44-22-2024", + expErr: errors.New("Invalid date, please provide the endDate in MM-DD-YYYY format"), + }, + "StartDate after EndDate": { + logStartDate: "10-01-2024", + logEndDate: "05-06-2024", + expErr: errors.New("start-date can not be after end-date"), + }, + "Valid StartDate and EndDate": { + logStartDate: "12-01-2024", + logEndDate: "12-31-2024", + expErr: nil, + }, + "Valid StartTime No EndTime": { + logStartTime: "13:15:59", + expErr: nil, + }, + "No StartTime valid EndTime": { + logEndTime: "20:30:50", + expErr: nil, + }, + "Invalid StartTime": { + logStartTime: "25:99:67", + expErr: errors.New("Invalid log-start-time, please provide the time in HH:MM:SS format"), + }, + "Invalid EndTime": { + logStartTime: "13:15:59", + logEndTime: "25:99:67", + expErr: errors.New("Invalid log-end-time, please provide the time in HH:MM:SS format"), + }, + "Valid StartTime EndTime": { + logStartTime: "13:15:59", + logEndTime: "20:30:50", + expErr: nil, + }, + "Valid Date Time": { + logStartDate: "12-01-2024", + logEndDate: "12-31-2024", + logStartTime: "13:15:59", + logEndTime: "20:30:50", + expErr: nil, + }, + } { + t.Run(name, func(t *testing.T) { + var params CollectLogSubCmd + params.LogStartDate = tc.logStartDate + params.LogEndDate = tc.logEndDate + params.LogStartTime = tc.logStartTime + params.LogEndTime = tc.logEndTime + err := params.DateTimeValidate() + test.CmpErr(t, tc.expErr, err) + if err != nil { + return + } + }) + } +} + +func TestSupport_LogTypeValidate(t *testing.T) { + for name, tc := range map[string]struct { + logType string + expLogType []string + expErr error + }{ + "empty": { + expLogType: ServerLog, + expErr: nil, + }, + "Invalid LogType": { + logType: "INVALID_LOG", + expLogType: nil, + expErr: errors.New("Invalid log-type, please use admin,control,server log-type only"), + }, + "LogType Admin": { + logType: "admin", + expLogType: []string{"HelperLog"}, + expErr: nil, + }, + "LogType Control": { + logType: "control", + expLogType: []string{"ControlLog"}, + expErr: nil, + }, + "LogType Server": { + logType: "server", + expLogType: []string{"EngineLog"}, + expErr: nil, + }, + "LogType Admin Control": { + logType: "admin,control", + expLogType: []string{"HelperLog", "ControlLog"}, + expErr: nil, + }, + "LogType Admin Control Server": { + logType: "admin,control,server", + expLogType: []string{"HelperLog", "ControlLog", "EngineLog"}, + expErr: nil, + }, + } { + t.Run(name, func(t *testing.T) { + var params LogTypeSubCmd + params.LogType = tc.logType + logType, err := params.LogTypeValidate() + test.CmpErr(t, tc.expErr, err) + if err != nil { + return + } + + if reflect.DeepEqual(logType, 
tc.expLogType) == false { + t.Fatalf("logType Expected:%s Got:%s", tc.expLogType, logType) + } + + }) + } +} + +func TestSupport_cpLinesFromLog(t *testing.T) { + log, buf := logging.NewTestLogger(t.Name()) + defer test.ShowBufferOnFailure(t, buf) + targetTestDir, targetCleanup := test.CreateTestDir(t) + defer targetCleanup() + + srcPath := test.CreateTestFile(t, targetTestDir, "Temp File\n") + dstTestDir, dstCleanup := test.CreateTestDir(t) + defer dstCleanup() + + collLogParams := CollectLogsParams{} + + DummyEngineLog := `01/01-01:01:01.90 system-01 LOG LINE 1 +02/02-04:04:04.90 system-02 LOG LINE 2 +03/03-06:06:06.90 system-02 LOG LINE 3 +04/04-08:08:08.90 system-02 LOG LINE 4 +05/05-10:10:10.90 system-02 LOG LINE 5 +06/06-12:12:12.90 system-02 LOG LINE 6 +07/07-14:14:14.90 system-02 LOG LINE 7 +LINE WITHOUT DATE AND TIME +08/08-16:16:16.90 system-02 LOG LINE 8 +09/09-18:18:18.90 system-02 LOG LINE 9 +10/10-20:20:20.90 system-02 LOG LINE 10 +11/11-22:22:22.90 system-02 LOG LINE 11 +12/12-23:59:59.90 system-02 LOG LINE 12 +` + MockEngineLogFile := test.CreateTestFile(t, targetTestDir, DummyEngineLog) + + DummyControlLog := `hostname INFO 2023/01/01 01:01:01 LOG LINE 1 +hostname INFO 2023/02/02 04:04:04 LOG LINE 2 +hostname INFO 2023/03/03 06:06:06 LOG LINE 3 +hostname INFO 2023/04/04 08:08:08 LOG LINE 4 +hostname INFO 2023/05/05 10:10:10 LOG LINE 5 +hostname INFO 2023/06/06 12:12:12 LOG LINE 6 +hostname INFO 2023/07/07 14:14:14 LOG LINE 7 +LINE WITHOUT DATE AND TIME +hostname INFO 2023/08/08 16:16:16 LOG LINE 8 +hostname INFO 2023/09/09 18:18:18 LOG LINE 9 +hostname INFO 2023/10/10 20:20:20 LOG LINE 10 +hostname INFO 2023/11/11 22:22:22 LOG LINE 11 +hostname INFO 2023/12/12 23:59:59 LOG LINE 12 +` + MockControlLogFile := test.CreateTestFile(t, targetTestDir, DummyControlLog) + + DummyAdminLog := `INFO 2023/01/01 01:01:01.441231 LOG LINE 1 +INFO 2023/02/02 04:04:04.441232 LOG LINE 2 +INFO 2023/03/03 06:06:06.441233 LOG LINE 3 +INFO 2023/04/04 08:08:08.441234 LOG LINE 4 +INFO 2023/05/05 10:10:10.441235 LOG LINE 5 +INFO 2023/06/06 12:12:12.441235 LOG LINE 6 +INFO 2023/07/07 14:14:14.441236 LOG LINE 7 +LINE WITHOUT DATE AND TIME +INFO 2023/08/08 16:16:16.441237 LOG LINE 8 +INFO 2023/09/09 18:18:18.441238 LOG LINE 9 +INFO 2023/10/10 20:20:20.441239 LOG LINE 10 +INFO 2023/11/11 22:22:22.441240 LOG LINE 11 +INFO 2023/12/12 23:59:59.441241 LOG LINE 12 +` + MockAdminLogFile := test.CreateTestFile(t, targetTestDir, DummyAdminLog) + + for name, tc := range map[string]struct { + logStartDate string + logEndDate string + logStartTime string + logEndTime string + srcFile string + destFile string + expErr error + verifyLog string + logCmd string + }{ + "No startDate and EndDate": { + logStartDate: "", + logEndDate: "", + srcFile: srcPath, + destFile: dstTestDir, + expErr: nil, + }, + "Invalid Destination Directory": { + logStartDate: "", + logEndDate: "", + srcFile: srcPath, + destFile: dstTestDir + "/tmp", + expErr: errors.New("unable to Copy File"), + }, + "Invalid Source File": { + logStartDate: "01-01-2023", + logEndDate: "12-31-2023", + srcFile: srcPath + "unknownFile", + destFile: dstTestDir, + expErr: errors.New("no such file or directory"), + }, + "Valid date without any time": { + logStartDate: "01-01-2023", + logEndDate: "12-31-2023", + srcFile: srcPath, + destFile: dstTestDir, + expErr: nil, + }, + "Verify the content of Engine log line based on date": { + logStartDate: "04-01-2023", + logEndDate: "08-08-2023", + srcFile: MockEngineLogFile, + destFile: dstTestDir, + logCmd: "EngineLog", + 
expErr: nil, + verifyLog: "08/08-16:16:16.90 system-02 LOG LINE 8", + }, + "Verify the content of Engine log line based on date and time": { + logStartDate: "09-09-2023", + logEndDate: "11-11-2023", + logStartTime: "12:00:00", + logEndTime: "23:23:23", + srcFile: MockEngineLogFile, + destFile: dstTestDir, + logCmd: "EngineLog", + expErr: nil, + verifyLog: "11/11-22:22:22.90 system-02 LOG LINE 11", + }, + "Verify the content of Control log line based on date": { + logStartDate: "04-01-2023", + logEndDate: "08-08-2023", + srcFile: MockControlLogFile, + destFile: dstTestDir, + logCmd: "ControlLog", + expErr: nil, + verifyLog: "hostname INFO 2023/08/08 16:16:16 LOG LINE 8", + }, + "Verify the content of Control log line based on date and time": { + logStartDate: "09-09-2023", + logEndDate: "11-11-2023", + logStartTime: "12:00:00", + logEndTime: "23:23:23", + srcFile: MockControlLogFile, + destFile: dstTestDir, + logCmd: "ControlLog", + expErr: nil, + verifyLog: "hostname INFO 2023/11/11 22:22:22 LOG LINE 11", + }, + "Verify the content of Admin log line based on date": { + logStartDate: "04-01-2023", + logEndDate: "08-08-2023", + srcFile: MockAdminLogFile, + destFile: dstTestDir, + logCmd: "HelperLog", + expErr: nil, + verifyLog: "INFO 2023/08/08 16:16:16.441237 LOG LINE 8", + }, + "Verify the content of Admin log line based on date and time": { + logStartDate: "09-09-2023", + logEndDate: "11-11-2023", + logStartTime: "12:00:00", + logEndTime: "23:23:23", + srcFile: MockAdminLogFile, + destFile: dstTestDir, + logCmd: "HelperLog", + expErr: nil, + verifyLog: "INFO 2023/11/11 22:22:22.441240 LOG LINE 11", + }, + } { + t.Run(name, func(t *testing.T) { + collLogParams.LogStartDate = tc.logStartDate + collLogParams.LogEndDate = tc.logEndDate + collLogParams.LogStartTime = tc.logStartTime + collLogParams.LogEndTime = tc.logEndTime + collLogParams.LogCmd = tc.logCmd + gotErr := cpLinesFromLog(log, tc.srcFile, tc.destFile, collLogParams) + test.CmpErr(t, tc.expErr, gotErr) + + if tc.verifyLog != "" { + readFile := filepath.Join(tc.destFile, filepath.Base(tc.srcFile)) + b, err := ioutil.ReadFile(readFile) + if err != nil { + t.Fatalf(err.Error()) + } + + if strings.Contains(string(b), tc.verifyLog) == false { + t.Fatalf("Expected log line:=%s can not be found in File:=%s", tc.verifyLog, readFile) + } + + } + }) + } +} + +func TestSupport_getDateTime(t *testing.T) { + log, buf := logging.NewTestLogger(t.Name()) + defer test.ShowBufferOnFailure(t, buf) + + collLogParams := CollectLogsParams{} + + for name, tc := range map[string]struct { + logStartDate string + logEndDate string + logStartTime string + logEndTime string + expStartTime string + expEndTime string + expErr error + }{ + "No StartTime": { + logStartDate: "1-2-2023", + logEndDate: "1-3-2023", + expErr: nil, + }, + "No EndTime": { + logStartDate: "1-2-2023", + logEndDate: "1-3-2023", + logStartTime: "10:10:10", + expStartTime: "01-02-2023 10:10:10", + expEndTime: "01-03-2023 23:59:59", + expErr: nil, + }, + "Valid Date and Invalid Start Time": { + logStartDate: "1-2-2023", + logEndDate: "1-3-2023", + logStartTime: "99:99:99", + logEndTime: "12:12:12", + expErr: errors.New("parsing time \"1-2-2023 99:99:99\": hour out of range"), + }, + "Valid Date and Invalid End Time": { + logStartDate: "1-2-2023", + logEndDate: "1-3-2023", + logStartTime: "10:10:10", + logEndTime: "99:99:99", + expErr: errors.New("parsing time \"1-3-2023 99:99:99\": hour out of range"), + }, + "Valid Date and Time": { + logStartDate: "1-2-2023", + logEndDate: "1-3-2023", + 
logStartTime: "10:10:10", + logEndTime: "12:12:12", + expStartTime: "01-02-2023 10:10:10", + expEndTime: "01-03-2023 12:12:12", + expErr: nil, + }, + } { + t.Run(name, func(t *testing.T) { + collLogParams.LogStartDate = tc.logStartDate + collLogParams.LogEndDate = tc.logEndDate + collLogParams.LogStartTime = tc.logStartTime + collLogParams.LogEndTime = tc.logEndTime + startTime, endTime, gotErr := getDateTime(log, collLogParams) + test.CmpErr(t, tc.expErr, gotErr) + if tc.expStartTime != "" { + tmpStartTime, _ := time.Parse(MMDDYYYY_HHMMSS, tc.expStartTime) + if tmpStartTime.Equal(startTime) == false { + t.Fatalf("Expected StartTime:=%s But Got :=%s", tmpStartTime, startTime) + } + } + if tc.expEndTime != "" { + tmpEndTime, _ := time.Parse(MMDDYYYY_HHMMSS, tc.expEndTime) + if tmpEndTime.Equal(endTime) == false { + t.Fatalf("Expected EndTime:=%s But Got :=%s", tmpEndTime, endTime) + } + } + }) + } +} diff --git a/src/control/logging/syslog_test.go b/src/control/logging/syslog_test.go index d57ad1a8a8bf..7c1199716eb5 100644 --- a/src/control/logging/syslog_test.go +++ b/src/control/logging/syslog_test.go @@ -27,7 +27,7 @@ func TestSyslogOutput(t *testing.T) { t.Log("unable to locate journalctl -- not running this test") return } - cmd := exec.Command(journalctl, "--system") + cmd := exec.Command(journalctl, "--system", "--since", "1 minute ago") if err := cmd.Run(); err != nil { t.Log("current user does not have permissions to view system log") return diff --git a/src/control/security/grpc_authorization.go b/src/control/security/grpc_authorization.go index f14c486b4603..47eb7a8df635 100644 --- a/src/control/security/grpc_authorization.go +++ b/src/control/security/grpc_authorization.go @@ -33,6 +33,7 @@ var methodAuthorizations = map[string][]Component{ "/ctl.CtlSvc/StorageNvmeRebind": {ComponentAdmin}, "/ctl.CtlSvc/StorageNvmeAddDevice": {ComponentAdmin}, "/ctl.CtlSvc/NetworkScan": {ComponentAdmin}, + "/ctl.CtlSvc/CollectLog": {ComponentAdmin}, "/ctl.CtlSvc/FirmwareQuery": {ComponentAdmin}, "/ctl.CtlSvc/FirmwareUpdate": {ComponentAdmin}, "/ctl.CtlSvc/SmdQuery": {ComponentAdmin}, diff --git a/src/control/security/grpc_authorization_test.go b/src/control/security/grpc_authorization_test.go index a2411970d71f..6f0c85a5fd04 100644 --- a/src/control/security/grpc_authorization_test.go +++ b/src/control/security/grpc_authorization_test.go @@ -58,6 +58,7 @@ func TestSecurity_ComponentHasAccess(t *testing.T) { "/ctl.CtlSvc/StorageNvmeRebind": {ComponentAdmin}, "/ctl.CtlSvc/StorageNvmeAddDevice": {ComponentAdmin}, "/ctl.CtlSvc/NetworkScan": {ComponentAdmin}, + "/ctl.CtlSvc/CollectLog": {ComponentAdmin}, "/ctl.CtlSvc/FirmwareQuery": {ComponentAdmin}, "/ctl.CtlSvc/FirmwareUpdate": {ComponentAdmin}, "/ctl.CtlSvc/SmdQuery": {ComponentAdmin}, diff --git a/src/control/server/config/server.go b/src/control/server/config/server.go index 87ae27bb101b..eabe96942b3f 100644 --- a/src/control/server/config/server.go +++ b/src/control/server/config/server.go @@ -33,6 +33,7 @@ const ( defaultRuntimeDir = "/var/run/daos_server" defaultConfigPath = "../etc/daos_server.yml" configOut = ".daos_server.active.yml" + ConfigOut = ".daos_server.active.yml" relConfExamplesPath = "../utils/config/examples/" ) diff --git a/src/control/server/ctl_support_rpc.go b/src/control/server/ctl_support_rpc.go new file mode 100644 index 000000000000..e9e7a72313ae --- /dev/null +++ b/src/control/server/ctl_support_rpc.go @@ -0,0 +1,40 @@ +// +// (C) Copyright 2022-2023 Intel Corporation. 
+// +// SPDX-License-Identifier: BSD-2-Clause-Patent +// + +package server + +import ( + "golang.org/x/net/context" + + ctlpb "github.com/daos-stack/daos/src/control/common/proto/ctl" + "github.com/daos-stack/daos/src/control/lib/support" +) + +// CollectLog collect the file for each server on given target location. +func (c *ControlService) CollectLog(ctx context.Context, req *ctlpb.CollectLogReq) (*ctlpb.CollectLogResp, error) { + c.log.Infof("Support CollectLog: Calling Log Function Enum: %d, And Cmd/Log: %s", req.LogFunction, req.LogCmd) + + params := support.CollectLogsParams{} + params.TargetFolder = req.TargetFolder + params.ExtraLogsDir = req.ExtraLogsDir + params.AdminNode = req.AdminNode + params.JsonOutput = req.JsonOutput + params.LogFunction = req.LogFunction + params.LogCmd = req.LogCmd + params.LogStartDate = req.LogStartDate + params.LogEndDate = req.LogEndDate + params.LogStartTime = req.LogStartTime + params.LogEndTime = req.LogEndTime + params.StopOnError = req.StopOnError + + err := support.CollectSupportLog(c.log, params) + if err != nil { + return nil, err + } + + resp := new(ctlpb.CollectLogResp) + return resp, nil +} diff --git a/src/proto/Makefile b/src/proto/Makefile index 5ce3fcf15612..f1546bd068d8 100644 --- a/src/proto/Makefile +++ b/src/proto/Makefile @@ -45,6 +45,7 @@ GO_CONTROL_FILES = common/proto/shared/ranks.pb.go\ common/proto/ctl/storage_scm.pb.go\ common/proto/ctl/ctl.pb.go\ common/proto/ctl/network.pb.go\ + common/proto/ctl/support.pb.go\ common/proto/ctl/firmware.pb.go\ common/proto/ctl/ranks.pb.go\ common/proto/srv/srv.pb.go\ @@ -82,10 +83,11 @@ $(PROTOC_GEN_GO): $(PROTOC) [ -d $(GOPATH)/src/$(PROTOC_GEN_GO_IMP) ] &&\ cd $(GOPATH)/src/$(PROTOC_GEN_GO_IMP) &&\ git checkout master || true - go get -u $(PROTOC_GEN_GO_IMP) &&\ - cd $(GOPATH)/src/$(PROTOC_GEN_GO_IMP) &&\ - git checkout $(PROTOC_GEN_GO_TAG) &&\ - go install $(PROTOC_GEN_GO_IMP) +# go get -u $(PROTOC_GEN_GO_IMP) &&\ +# cd $(GOPATH)/src/$(PROTOC_GEN_GO_IMP) &&\ +# git checkout $(PROTOC_GEN_GO_TAG) &&\ + go install $(PROTOC_GEN_GO_IMP)@$(PROTOC_GEN_GO_TAG) + PROTOC_GEN_GO_GRPC_PKG := google.golang.org/grpc PROTOC_GEN_GO_GRPC_IMP := $(PROTOC_GEN_GO_GRPC_PKG)/cmd/protoc-gen-go-grpc @@ -185,6 +187,12 @@ $(DAOS_ROOT)/src/mgmt/server.pb-c.h: $(PROTO_SOURCE_DIR)/ctl/server.proto $(DAOS_ROOT)/src/mgmt/server.pb-c.c: $(PROTO_SOURCE_DIR)/ctl/server.proto protoc -I $(dir $<) --c_out=$(dir $@) $(notdir $<) +$(DAOS_ROOT)/src/mgmt/support.pb-c.h: $(PROTO_SOURCE_DIR)/ctl/support.proto + protoc -I $(dir $<) --c_out=$(dir $@) $(notdir $<) + +$(DAOS_ROOT)/src/mgmt/support.pb-c.c: $(PROTO_SOURCE_DIR)/ctl/support.proto + protoc -I $(dir $<) --c_out=$(dir $@) $(notdir $<) + $(DAOS_ROOT)/src/tests/drpc/%.pb-c.h: $(PROTO_SOURCE_DIR)/test/%.proto protoc -I $(dir $<) --c_out=$(dir $@) $(notdir $<) diff --git a/src/proto/ctl/ctl.proto b/src/proto/ctl/ctl.proto index 5cdfc38dfe3d..744122d497ac 100644 --- a/src/proto/ctl/ctl.proto +++ b/src/proto/ctl/ctl.proto @@ -15,6 +15,7 @@ import "ctl/firmware.proto"; import "ctl/smd.proto"; import "ctl/ranks.proto"; import "ctl/server.proto"; +import "ctl/support.proto"; // Service definitions for communications between gRPC management server and // client regarding tasks related to DAOS system and server hardware. @@ -52,4 +53,6 @@ service CtlSvc { rpc ResetFormatRanks(RanksReq) returns (RanksResp) {} // Start DAOS I/O Engines on a host. 
(gRPC fanout) rpc StartRanks(RanksReq) returns (RanksResp) {} +// Perform a Log collection on Servers for support/debug purpose + rpc CollectLog (CollectLogReq) returns (CollectLogResp) {}; } diff --git a/src/proto/ctl/support.proto b/src/proto/ctl/support.proto new file mode 100644 index 000000000000..1a7fe0d21e9c --- /dev/null +++ b/src/proto/ctl/support.proto @@ -0,0 +1,30 @@ +// +// (C) Copyright 2022-2023 Intel Corporation. +// +// SPDX-License-Identifier: BSD-2-Clause-Patent +// + +syntax = "proto3"; +package ctl; + +option go_package = "github.com/daos-stack/daos/src/control/common/proto/ctl"; + +// Management Service Protobuf Definitions related to collect-log for support purpose. + +message CollectLogReq { + string TargetFolder = 1; + string ExtraLogsDir = 2; + string AdminNode = 3; + bool JsonOutput = 4; + int32 LogFunction = 5; + string LogCmd = 6; + string LogStartDate = 7; + string LogEndDate = 8; + string LogStartTime = 9; + string LogEndTime = 10; + bool StopOnError = 11; +} + +message CollectLogResp { + int32 status = 1; // DAOS error code +} diff --git a/src/tests/ftest/control/daos_agent_support_collect_log.py b/src/tests/ftest/control/daos_agent_support_collect_log.py new file mode 100644 index 000000000000..7e60e94b6ec1 --- /dev/null +++ b/src/tests/ftest/control/daos_agent_support_collect_log.py @@ -0,0 +1,51 @@ +""" + (C) Copyright 2023 Intel Corporation. + + SPDX-License-Identifier: BSD-2-Clause-Patent +""" +from support_test_base import SupportTestBase + + +class DaosAgentSupportCollectLogTest(SupportTestBase): + # pylint: disable=too-many-ancestors + """Test Class Description: + Verify the daos_server support collect-log command. + + :avocado: recursive + """ + + def test_daos_agent_support_collect_log(self): + """JIRA ID: DAOS-10625 + + Test Description: + Test daos_agent support collect-log command completes successfully. + + :avocado: tags=all,full_regression + :avocado: tags=hw,medium + :avocado: tags=control,basic,support,daos_server + :avocado: tags=DaosAgentSupportCollectLogTest,test_daos_agent_support_collect_log + """ + # Create the custom log data which will be collected via support collect-log, + # Later verify the data file is archived as part of collection. + self.log_hosts = self.hostlist_clients + self.create_custom_log("Client_Support_Logs") + + # Run daos_agent support collect-log with --extra-logs-dir, + # copy log to folder with command option --target-folder + # Enable archive mode. + result = self.agent_managers[0].support_collect_log( + extra_logs_dir=self.custom_log_dir, + target_folder=self.target_folder, + archive=True) + + # Add a tearDown method to cleanup the logs + self.register_cleanup(self.cleanup_support_log, log_dir=self.target_folder) + + if not result.passed: + self.fail("Failed to run daos_agent support collect-log command") + + # Extract the collected tar.gz file + self.extract_logs(self.target_folder + ".tar.gz") + + # Verify the custom log file collected on each clients. 
+ self.verify_custom_log_data() diff --git a/src/tests/ftest/control/daos_agent_support_collect_log.yaml b/src/tests/ftest/control/daos_agent_support_collect_log.yaml new file mode 100644 index 000000000000..1f6b4134142d --- /dev/null +++ b/src/tests/ftest/control/daos_agent_support_collect_log.yaml @@ -0,0 +1,20 @@ +hosts: + test_servers: 3 + test_clients: 1 +timeout: 120 +server_config: + name: daos_server + engines_per_host: 2 + engines: + 0: + pinned_numa_node: 0 + fabric_iface: ib0 + fabric_iface_port: 31317 + log_file: daos_server0.log + storage: auto + 1: + pinned_numa_node: 1 + fabric_iface: ib1 + fabric_iface_port: 31417 + log_file: daos_server1.log + storage: auto diff --git a/src/tests/ftest/control/daos_server_support_collect_log.py b/src/tests/ftest/control/daos_server_support_collect_log.py new file mode 100644 index 000000000000..2d99fbe6f77b --- /dev/null +++ b/src/tests/ftest/control/daos_server_support_collect_log.py @@ -0,0 +1,54 @@ +""" + (C) Copyright 2023 Intel Corporation. + + SPDX-License-Identifier: BSD-2-Clause-Patent +""" +from support_test_base import SupportTestBase + + +class DaosServerSupportCollectLogTest(SupportTestBase): + # pylint: disable=too-many-ancestors + """Test Class Description:Verify the daos_server support collect-log command. + + :avocado: recursive + """ + + def test_daos_server_support_collect_log(self): + """JIRA ID: DAOS-10625 + + Test Description: + Test daos_server support collect-log command completes successfully. + + :avocado: tags=all,daily_regression + :avocado: tags=hw,medium + :avocado: tags=control,basic,support,daos_server + :avocado: tags=DaosServerSupportCollectLogTest,test_daos_server_support_collect_log + """ + self.log_hosts = self.hostlist_servers + self.run_user = 'daos_server' + # Create the custom log data which will be collected via support collect-log, + # Later verify the data file is archived as part of collection. + self.create_custom_log("Server_Support_Logs") + + # Run daos_server support collect-log with --extra-logs-dir, + # copy log to folder with command option --target-folder + # Enable archive mode. + result = self.server_managers[0].support_collect_log( + extra_logs_dir=self.custom_log_dir, + target_folder=self.target_folder, + archive=True) + + # Add a tearDown method to cleanup the logs + self.register_cleanup(self.cleanup_support_log, log_dir=self.target_folder) + + if not result.passed: + self.fail("Failed to run daos_server support collect-log command") + + # Extract the collected tar.gz file + self.extract_logs(self.target_folder + ".tar.gz") + + # Verify server logs files collected for each servers. + self.validate_server_log_files() + + # Verify the custom log file collected for each servers. 
+        self.verify_custom_log_data()
diff --git a/src/tests/ftest/control/daos_server_support_collect_log.yaml b/src/tests/ftest/control/daos_server_support_collect_log.yaml
new file mode 100644
index 000000000000..4fc6f2244003
--- /dev/null
+++ b/src/tests/ftest/control/daos_server_support_collect_log.yaml
@@ -0,0 +1,19 @@
+hosts:
+  test_servers: 3
+timeout: 120
+server_config:
+  name: daos_server
+  engines_per_host: 2
+  engines:
+    0:
+      pinned_numa_node: 0
+      fabric_iface: ib0
+      fabric_iface_port: 31317
+      log_file: daos_server0.log
+      storage: auto
+    1:
+      pinned_numa_node: 1
+      fabric_iface: ib1
+      fabric_iface_port: 31417
+      log_file: daos_server1.log
+      storage: auto
diff --git a/src/tests/ftest/control/dmg_support_collect_log.py b/src/tests/ftest/control/dmg_support_collect_log.py
new file mode 100644
index 000000000000..24d537329cbc
--- /dev/null
+++ b/src/tests/ftest/control/dmg_support_collect_log.py
@@ -0,0 +1,50 @@
+"""
+  (C) Copyright 2023 Intel Corporation.
+
+  SPDX-License-Identifier: BSD-2-Clause-Patent
+"""
+from support_test_base import SupportTestBase
+
+
+class DmgSupportCollectLogTest(SupportTestBase):
+    # pylint: disable=too-many-ancestors
+    """Test Class Description: Verify the dmg support collect-log command.
+
+    :avocado: recursive
+    """
+
+    def test_dmg_support_collect_log(self):
+        """JIRA ID: DAOS-10625
+
+        Test Description:
+            Test that the dmg support collect-log command completes successfully.
+
+        :avocado: tags=all,daily_regression
+        :avocado: tags=hw,medium
+        :avocado: tags=basic,control,support,dmg
+        :avocado: tags=DmgSupportCollectLogTest,test_dmg_support_collect_log
+        """
+        self.log_hosts = self.hostlist_servers
+        self.run_user = 'daos_server'
+        # Create the custom log data which will be collected via support collect-log.
+        # Later, verify that the same data file is archived as part of the collection.
+        self.create_custom_log("Support_Custom_Dir")
+
+        # Run dmg support collect-log with --extra-logs-dir,
+        # copy the logs to the folder given by the --target-folder option,
+        # and enable archive mode.
+        self.dmg.support_collect_log(extra_logs_dir=self.custom_log_dir,
+                                     target_folder=self.target_folder,
+                                     archive=True)
+
+        # Add a tearDown method to clean up the logs
+        self.register_cleanup(self.cleanup_support_log, log_dir=self.target_folder)
+
+        # Extract the collected tar.gz file
+        self.extract_logs(self.target_folder + ".tar.gz")
+
+        # Verify the server log files were collected.
+        self.validate_server_log_files()
+
+        # Verify the custom log file was collected.
+ self.verify_custom_log_data() diff --git a/src/tests/ftest/control/dmg_support_collect_log.yaml b/src/tests/ftest/control/dmg_support_collect_log.yaml new file mode 100644 index 000000000000..794ca16cacca --- /dev/null +++ b/src/tests/ftest/control/dmg_support_collect_log.yaml @@ -0,0 +1,19 @@ +hosts: + test_servers: 3 +timeout: 200 +server_config: + name: daos_server + engines_per_host: 2 + engines: + 0: + pinned_numa_node: 0 + fabric_iface: ib0 + fabric_iface_port: 31317 + log_file: daos_server0.log + storage: auto + 1: + pinned_numa_node: 1 + fabric_iface: ib1 + fabric_iface_port: 31417 + log_file: daos_server1.log + storage: auto diff --git a/src/tests/ftest/util/server_utils.py b/src/tests/ftest/util/server_utils.py index 82981206ed53..ba9a7bba0b27 100644 --- a/src/tests/ftest/util/server_utils.py +++ b/src/tests/ftest/util/server_utils.py @@ -18,6 +18,7 @@ from dmg_utils import get_dmg_command from exception_utils import CommandFailure from general_utils import get_display_size, get_log_file, list_to_str, pcmd, run_pcmd +from general_utils import get_default_config_file from host_utils import get_local_host from run_utils import run_remote, stop_processes from server_utils_base import DaosServerCommand, DaosServerInformation, ServerFailed @@ -430,6 +431,27 @@ def nvme_prepare(self, **kwargs): return run_remote( self.log, self._hosts, cmd.with_exports, timeout=self.storage_prepare_timeout.value) + def support_collect_log(self, **kwargs): + """Run daos_server support collect-log on the server hosts. + + Args: + kwargs (dict, optional): named arguments and their values to use with the + DaosServerCommand.SupportSubCommand.CollectLogSubCommand object + + Returns: + RemoteCommandResult: a grouping of the command results from the same hosts with the same + return status + + """ + cmd = DaosServerCommand(self.manager.job.command_path) + cmd.run_user = "daos_server" + cmd.debug.value = False + cmd.config.value = get_default_config_file("server") + self.log.info("Support collect-log on servers: %s", str(cmd)) + cmd.set_command(("support", "collect-log"), **kwargs) + return run_remote( + self.log, self._hosts, cmd.with_exports, timeout=self.collect_log_timeout.value) + def detect_format_ready(self, reformat=False): """Detect when all the daos_servers are ready for storage format. 
diff --git a/src/tests/ftest/util/server_utils_base.py b/src/tests/ftest/util/server_utils_base.py
index 7637b0991eb1..5423c9f96bf0 100644
--- a/src/tests/ftest/util/server_utils_base.py
+++ b/src/tests/ftest/util/server_utils_base.py
@@ -510,6 +510,42 @@ def __init__(self):
             # --helper-log-file=  Log file location for debug from daos_admin binary
             self.helper_log_file = FormattedParameter("--helper-log-file={}")
 
+    class SupportSubCommand(CommandWithSubCommand):
+        """Defines an object for the daos_server support sub command."""
+
+        def __init__(self):
+            """Create a support subcommand object."""
+            super().__init__("/run/daos_server/support/*", "support")
+
+        def get_sub_command_class(self):
+            """Get the daos_server support sub command object."""
+            # Available sub-commands:
+            #   collect-log  Collect logs on servers
+            if self.sub_command.value == "collect-log":
+                self.sub_command_class = self.CollectLogSubCommand()
+            else:
+                self.sub_command_class = None
+
+        class CollectLogSubCommand(CommandWithSubCommand):
+            """Defines an object for the daos_server support collect-log command."""
+
+            def __init__(self):
+                """Create a support collect-log subcommand object."""
+                super().__init__(
+                    "/run/daos_server/support/collect-log/*", "collect-log")
+
+                # daos_server support collect-log command options:
+                #   --stop-on-error     Stop the collect-log command on the very first error
+                #   --target-folder=    Target folder location where the logs will be copied
+                #   --archive=          Archive the log/config files
+                #   --extra-logs-dir=   Collect the logs from the given custom directory
+                #   --target-host=      Rsync the logs to the target-host system
+                self.stop_on_error = FormattedParameter("--stop-on-error", False)
+                self.target_folder = FormattedParameter("--target-folder={}")
+                self.archive = FormattedParameter("--archive", False)
+                self.extra_logs_dir = FormattedParameter("--extra-logs-dir={}")
+                self.target_host = FormattedParameter("--target-host={}")
+
     class StorageSubCommand(CommandWithSubCommand):
         """Defines an object for the daos_server storage sub command."""
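
For reference, below is a minimal sketch of how the collection library added in src/control/lib/support/log.go might be driven directly from Go. The import paths, the CollectLogsParams fields, the step constants (CopyServerConfigEnum, CollectServerLogEnum, ArchiveLogsEnum) and CollectSupportLog() are taken from this patch; the logger choice, paths, dates and the particular sequence of steps are illustrative assumptions only and do not reproduce the exact dmg/daos_server "support collect-log" command flow.

package main

import (
	stdlog "log"

	"github.com/daos-stack/daos/src/control/lib/support"
	"github.com/daos-stack/daos/src/control/logging"
)

func main() {
	logger := logging.NewCommandLineLogger()

	// Hypothetical parameters: output directory, server config and a one-day
	// date window (MM-DD-YYYY, as validated by DateTimeValidate).
	params := support.CollectLogsParams{
		TargetFolder: "/tmp/daos_support_logs",
		Config:       "/etc/daos/daos_server.yml",
		LogStartDate: "12-01-2024",
		LogEndDate:   "12-01-2024",
	}

	// Copy the server config first.
	params.LogFunction = support.CopyServerConfigEnum
	if err := support.CollectSupportLog(logger, params); err != nil {
		stdlog.Fatalf("copy server config failed: %v", err)
	}

	// Collect the engine logs for the requested date range.
	params.LogFunction = support.CollectServerLogEnum
	params.LogCmd = "EngineLog" // "ControlLog" and "HelperLog" are also accepted
	if err := support.CollectSupportLog(logger, params); err != nil {
		stdlog.Fatalf("collect engine logs failed: %v", err)
	}

	// Archive the target folder into a tar.gz for transfer.
	params.LogFunction = support.ArchiveLogsEnum
	if err := support.CollectSupportLog(logger, params); err != nil {
		stdlog.Fatalf("archive failed: %v", err)
	}
}

This mirrors how the agent, server and dmg collect-log commands appear to use the library: one CollectSupportLog() call per enumerated step, each receiving the full parameter set.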