From 1dab463744e38dcd307ea6471308bb82252fa002 Mon Sep 17 00:00:00 2001 From: bohendo Date: Fri, 15 Mar 2024 13:48:39 -0400 Subject: [PATCH 01/15] init colorful logger --- cmd/cloudexec/launch.go | 2 +- cmd/cloudexec/main.go | 8 +++---- cmd/cloudexec/push.go | 8 +++---- pkg/digitalocean/digitalocean.go | 26 +++++++++++----------- pkg/log/log.go | 37 ++++++++++++++++++++++++++++++++ pkg/s3/s3.go | 6 +++--- 6 files changed, 61 insertions(+), 26 deletions(-) create mode 100644 pkg/log/log.go diff --git a/cmd/cloudexec/launch.go b/cmd/cloudexec/launch.go index ea48843..edacc20 100644 --- a/cmd/cloudexec/launch.go +++ b/cmd/cloudexec/launch.go @@ -114,8 +114,8 @@ func Launch(config config.Config, dropletSize string, dropletRegion string, lc L } newState.CreateJob(newJob) // sync state to bucket - fmt.Printf("Adding new job to the state...\n") err = state.MergeAndSave(config, newState) + fmt.Printf("Registered new job with id %v\n", thisJobId) if err != nil { return fmt.Errorf("Failed to update S3 state: %w", err) } diff --git a/cmd/cloudexec/main.go b/cmd/cloudexec/main.go index 9a16298..2e212fb 100644 --- a/cmd/cloudexec/main.go +++ b/cmd/cloudexec/main.go @@ -8,6 +8,7 @@ import ( "strconv" do "github.com/crytic/cloudexec/pkg/digitalocean" + "github.com/crytic/cloudexec/pkg/log" "github.com/crytic/cloudexec/pkg/ssh" "github.com/crytic/cloudexec/pkg/state" "github.com/urfave/cli/v2" @@ -32,7 +33,7 @@ func main() { Usage: "Gets the version of the app", Aliases: []string{"v"}, Action: func(*cli.Context) error { - fmt.Printf("cloudexec %s, commit %s, built at %s", Version, Commit, Date) + log.Info("cloudexec %s, commit %s, built at %s", Version, Commit, Date) return nil }, }, @@ -70,16 +71,15 @@ func main() { if configErr != nil { return configErr } - resp, err := do.CheckAuth(config) + err := do.CheckAuth(config) if err != nil { return err } - fmt.Println(resp) snap, err := do.GetLatestSnapshot(config) if err != nil { return err } - fmt.Printf("Using CloudExec image: %s\n", snap.Name) + log.Info("Using CloudExec image: %s\n", snap.Name) return nil }, }, diff --git a/cmd/cloudexec/push.go b/cmd/cloudexec/push.go index 47849a7..7e13b14 100644 --- a/cmd/cloudexec/push.go +++ b/cmd/cloudexec/push.go @@ -17,7 +17,7 @@ func UploadDirectoryToSpaces(config config.Config, sourcePath string, destPath s zipFilePath := filepath.Join(os.TempDir(), zipFileName) // Create a file where we will write the zipped archive - fmt.Printf("Creating zipped archive at %s\n", zipFilePath) + // fmt.Printf("Creating zipped archive at %s\n", zipFilePath) zipFile, err := os.Create(zipFilePath) if err != nil { return err @@ -38,7 +38,7 @@ func UploadDirectoryToSpaces(config config.Config, sourcePath string, destPath s // If it's a symbolic link, resolve the target if info.Mode()&os.ModeSymlink == os.ModeSymlink { target, err = os.Readlink(path) - fmt.Printf("Resolved link from %s to %s\n", path, target) + // fmt.Printf("Resolved link from %s to %s\n", path, target) if err != nil { return err } @@ -53,7 +53,7 @@ func UploadDirectoryToSpaces(config config.Config, sourcePath string, destPath s if targetInfo.IsDir() { cleanPath := filepath.Clean(path) + string(filepath.Separator) - fmt.Printf("Creating directory %s in the zipped archive\n", cleanPath) + // fmt.Printf("Creating directory %s in the zipped archive\n", cleanPath) _, err = zipWriter.Create(cleanPath) if err != nil { return err @@ -66,7 +66,7 @@ func UploadDirectoryToSpaces(config config.Config, sourcePath string, destPath s return nil } - fmt.Printf("Adding %s to the 
zipped archive\n", target) + // fmt.Printf("Adding %s to the zipped archive\n", target) // Create a new file entry in the zipped archive zipFileEntry, err := zipWriter.Create(path) diff --git a/pkg/digitalocean/digitalocean.go b/pkg/digitalocean/digitalocean.go index f9c3501..ca089a9 100644 --- a/pkg/digitalocean/digitalocean.go +++ b/pkg/digitalocean/digitalocean.go @@ -10,6 +10,7 @@ import ( "github.com/digitalocean/godo/util" "github.com/crytic/cloudexec/pkg/config" + "github.com/crytic/cloudexec/pkg/log" "github.com/crytic/cloudexec/pkg/s3" ) @@ -87,7 +88,7 @@ func findSSHKeyOnDigitalOcean(keyName string) (string, string, error) { } for _, key := range keys { if key.Name == keyName { - fmt.Printf("SSH key found. ID=%v | Name=%s | Fingerprint=%v\n", key.ID, key.Name, key.Fingerprint) + log.Good("SSH key found. ID=%v | Name=%s | Fingerprint=%v\n", key.ID, key.Name, key.Fingerprint) return key.Fingerprint, key.PublicKey, nil } } @@ -98,29 +99,26 @@ func findSSHKeyOnDigitalOcean(keyName string) (string, string, error) { // Exported Functions // Query the droplet and spaces APIs to check whether the config contains valid credentials -func CheckAuth(config config.Config) (string, error) { +func CheckAuth(config config.Config) error { // create a client doClient, err := initializeDOClient(config.DigitalOcean.ApiKey) if err != nil { - return "", err + return err } - greenCheck := "\u2705" - noEntry := "\U0001F6AB" - // Check Account authentication _, _, err = doClient.Account.Get(context.Background()) if err != nil { - return "", fmt.Errorf("%s Failed to authenticate with DigitalOcean API: %w", noEntry, err) + return fmt.Errorf("%s Failed to authenticate with DigitalOcean API: %w", noEntry, err) } - doResp := fmt.Sprintf("%s Successfully authenticated with DigitalOcean API", greenCheck) + log.Good("Successfully authenticated with DigitalOcean API") // Check Spaces authentication _, err = s3.ListBuckets(config) if err != nil { - return "", fmt.Errorf("%s Failed to authenticate with DigitalOcean Spaces API: %w", noEntry, err) + return fmt.Errorf("%s Failed to authenticate with DigitalOcean Spaces API: %w", noEntry, err) } - bucketResp := fmt.Sprintf("%s Successfully authenticated with DigitalOcean Spaces API", greenCheck) - return fmt.Sprintf("%s\n%s", doResp, bucketResp), nil + log.Good("%s Successfully authenticated with DigitalOcean Spaces API", greenCheck) + return nil } // Launch a new droplet @@ -143,13 +141,13 @@ func CreateDroplet(config config.Config, region string, size string, userData st } } else { // Create the SSH key on DigitalOcean - fmt.Println("Creating SSH key on DigitalOcean...") + log.Wait("Creating SSH key on DigitalOcean") keyName := fmt.Sprintf("cloudexec-%v", config.Username) sshKeyFingerprint, err = createSSHKeyOnDigitalOcean(keyName, publicKey) if err != nil { return droplet, fmt.Errorf("Failed to create SSH key on DigitalOcean: %w", err) } - fmt.Printf("SSH key created on DigitalOcean with fingerprint: %v\n", sshKeyFingerprint) + log.Good("SSH key created on DigitalOcean with fingerprint: %v\n", sshKeyFingerprint) } snap, err := GetLatestSnapshot(config) @@ -158,7 +156,7 @@ func CreateDroplet(config config.Config, region string, size string, userData st } // Create a new droplet - fmt.Println("Creating droplet...") + log.Wait("Creating droplet") createRequest := &godo.DropletCreateRequest{ Name: dropletName, Region: region, diff --git a/pkg/log/log.go b/pkg/log/log.go new file mode 100644 index 0000000..33c7b6e --- /dev/null +++ b/pkg/log/log.go @@ -0,0 +1,37 @@ 
+import ( + "fmt" +) + +const ( + ColorReset = "\033[0m" + ColorRed = "\033[31m" + ColorGreen = "\033[32m" + ColorBlue = "\033[34m" + ColorYellow = "\033[33m" + ColorWhite = "\033[37m" +) + +func Info(msg string, args ...interface{}) { + formatted := fmt.Sprintf(format, args...) + fmt.Println(ColorWhite, formatted, ColorReset) +} + +func Wait(format string, args ...interface{}) { + formatted := fmt.Sprintf(format, args...) + "..." + fmt.Println(ColorBlue, formatted, ColorReset) +} + +func Good(msg string, args ...interface{}) { + formatted := fmt.Sprintf(format, args...) + fmt.Println(ColorGreen, formatted, ColorReset) +} + +func Warn(msg string, args ...interface{}) { + formatted := fmt.Sprintf(format, args...) + fmt.Println(ColorYellow, formatted, ColorReset) +} + +func Error(msg string, args ...interface{}) { + formatted := fmt.Sprintf(format, args...) + fmt.Println(ColorRed, formatted, ColorReset) +} diff --git a/pkg/s3/s3.go b/pkg/s3/s3.go index 3bb8f30..2c5d463 100644 --- a/pkg/s3/s3.go +++ b/pkg/s3/s3.go @@ -127,7 +127,7 @@ func CreateBucket(config config.Config) error { if err != nil { return fmt.Errorf("Failed to wait for bucket '%s': %w", bucketName, err) } - fmt.Printf("Created bucket '%s'...\n", bucketName) + // fmt.Printf("Created bucket '%s'...\n", bucketName) return nil } @@ -148,7 +148,7 @@ func PutObject(config config.Config, key string, value []byte) error { if err != nil { return fmt.Errorf("Failed to create %s directory in bucket %s: %w", key, bucketName, err) } - fmt.Printf("Successfully created directory in s3 bucket: %s/%s\n", bucketName, key) + // fmt.Printf("Successfully created directory in s3 bucket: %s/%s\n", bucketName, key) return nil } // hash the input to ensure the integrity of file @@ -167,7 +167,7 @@ func PutObject(config config.Config, key string, value []byte) error { if err != nil { return fmt.Errorf("Failed to upload file %s to bucket %s: %w", key, bucketName, err) } - fmt.Printf("Successfully uploaded file to s3 bucket: %s/%s\n", bucketName, key) + // fmt.Printf("Successfully uploaded file to s3 bucket: %s/%s\n", bucketName, key) return nil } From 41f0f84336b91468b0108c80f67d1edbf6901e4e Mon Sep 17 00:00:00 2001 From: bohendo Date: Fri, 15 Mar 2024 14:28:08 -0400 Subject: [PATCH 02/15] fix logs bug --- cmd/cloudexec/logs.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/cloudexec/logs.go b/cmd/cloudexec/logs.go index 4487c7b..b34002e 100644 --- a/cmd/cloudexec/logs.go +++ b/cmd/cloudexec/logs.go @@ -11,7 +11,7 @@ import ( ) func GetLogsFromBucket(config config.Config, jobID int64) error { - itemKey := fmt.Sprintf("job-%d/logs/cloud-init-output.log", jobID) + itemKey := fmt.Sprintf("job-%d/cloudexec.log", jobID) log, err := s3.GetObject(config, itemKey) if err != nil { From 2acd7a5d279fffb5ea9aa3efd6bd85ef35cd1a22 Mon Sep 17 00:00:00 2001 From: bohendo Date: Fri, 15 Mar 2024 14:31:11 -0400 Subject: [PATCH 03/15] wire up colorful logs --- cmd/cloudexec/cancel.go | 19 +++++++------- cmd/cloudexec/clean.go | 22 +++++++++------- cmd/cloudexec/configure.go | 11 ++++---- cmd/cloudexec/init.go | 7 ++--- cmd/cloudexec/launch.go | 33 +++++++++-------------- cmd/cloudexec/logs.go | 2 +- cmd/cloudexec/main.go | 22 +++++++--------- cmd/cloudexec/pull.go | 11 ++++---- cmd/cloudexec/push.go | 13 +++++---- example/cloudexec.toml | 45 +------------------------------- pkg/config/config.go | 4 ++- pkg/digitalocean/digitalocean.go | 12 ++++----- pkg/log/log.go | 14 +++++----- pkg/s3/s3.go | 3 --- pkg/ssh/ssh.go | 10 +++---- pkg/state/state.go | 5 
++-- 16 files changed, 92 insertions(+), 141 deletions(-) diff --git a/cmd/cloudexec/cancel.go b/cmd/cloudexec/cancel.go index 66742b5..58f8f94 100644 --- a/cmd/cloudexec/cancel.go +++ b/cmd/cloudexec/cancel.go @@ -6,6 +6,7 @@ import ( "github.com/crytic/cloudexec/pkg/config" do "github.com/crytic/cloudexec/pkg/digitalocean" + "github.com/crytic/cloudexec/pkg/log" "github.com/crytic/cloudexec/pkg/state" ) @@ -13,26 +14,26 @@ func CancelJob(config config.Config, existingState *state.State, job *state.Job, if job.Status != state.Provisioning && job.Status != state.Running { return fmt.Errorf("Job %v is not running, it is %s", job.ID, job.Status) } - fmt.Printf("Destroying droplet %s associated with job %v: IP=%v | CreatedAt=%s\n", job.Droplet.Name, job.ID, job.Droplet.IP, job.Droplet.Created) + log.Info("Destroying droplet %s associated with job %v: IP=%v | CreatedAt=%s", job.Droplet.Name, job.ID, job.Droplet.IP, job.Droplet.Created) if !force { // Ask for confirmation before cleaning this job if no force flag - fmt.Println("Confirm? (y/n)") + log.Warn("Confirm? (y/n)") var response string fmt.Scanln(&response) if strings.ToLower(response) != "y" { - fmt.Printf("Droplet %s was not destroyed\n", job.Droplet.Name) + log.Info("Droplet %s was not destroyed", job.Droplet.Name) return nil } } - fmt.Printf("Destroying droplet %v...\n", job.Droplet.ID) err := do.DeleteDroplet(config, job.Droplet.ID) if err != nil { return fmt.Errorf("Failed to destroy droplet: %w", err) } - fmt.Printf("Marking job %v as cancelled...\n", job.Droplet.ID) + log.Good("Droplet %v destroyed", job.Droplet.ID) err = existingState.CancelRunningJob(config, job.ID) if err != nil { - return fmt.Errorf("Failed to mark job as cancelled: %w", err) + return fmt.Errorf("Failed to change job status to cancelled: %w", err) } + log.Good("Job %v status changed to cancelled", job.Droplet.ID) return nil } @@ -42,17 +43,17 @@ func CancelAll(config config.Config, existingState *state.State, force bool) err return fmt.Errorf("Failed to get all running servers: %w", err) } if len(droplets) == 0 { - fmt.Println("No running servers found") + log.Info("No running servers found") return nil } - fmt.Printf("Found %v running server(s):\n", len(droplets)) + log.Info("Found %v running server(s):", len(droplets)) for _, job := range existingState.Jobs { if job.Status != state.Provisioning && job.Status != state.Running { continue // skip jobs that aren't running } err = CancelJob(config, existingState, &job, force) if err != nil { - fmt.Printf("Failed to cancel job %v", job.ID) + log.Warn("Failed to cancel job %v", job.ID) } } return nil diff --git a/cmd/cloudexec/clean.go b/cmd/cloudexec/clean.go index 541931d..f5b7d46 100644 --- a/cmd/cloudexec/clean.go +++ b/cmd/cloudexec/clean.go @@ -5,6 +5,7 @@ import ( "strings" "github.com/crytic/cloudexec/pkg/config" + "github.com/crytic/cloudexec/pkg/log" "github.com/crytic/cloudexec/pkg/s3" "github.com/crytic/cloudexec/pkg/ssh" "github.com/crytic/cloudexec/pkg/state" @@ -19,29 +20,30 @@ func CleanBucketJob(config config.Config, existingState *state.State, jobID int6 // Confirm job data deletion var numToRm int = len(objects) if numToRm == 0 { - fmt.Printf("Bucket is already empty.\n") + log.Info("Bucket is already empty.") return nil } - fmt.Printf("Removing ALL input, output, and logs associated with %s...\n", prefix) + log.Info("Removing ALL input, output, and logs associated with %s", prefix) if !force { // Ask for confirmation before cleaning this job if no force flag - fmt.Println("Confirm? 
(y/n)") + log.Warn("Confirm? (y/n)") var response string fmt.Scanln(&response) if strings.ToLower(response) != "y" { - fmt.Printf("Job %v was not cleaned\n", jobID) + log.Info("Job %v was not cleaned", jobID) return nil } } - fmt.Printf("Deleting bucket contents...\n") + log.Wait("Deleting bucket contents") // Delete all objects in the bucket for _, object := range objects { - fmt.Println("Deleting object: ", object) + log.Info("Deleting object: ", object) err = s3.DeleteObject(config, object) if err != nil { return err } } - fmt.Printf("Deleted %d objects from bucket, removing job %v from state file..\n", numToRm, jobID) + log.Good("Deleted %d objects from bucket", numToRm) + log.Wait("Removing job %v from state file", numToRm, jobID) newState := &state.State{} deleteJob := state.Job{ ID: jobID, @@ -50,9 +52,9 @@ func CleanBucketJob(config config.Config, existingState *state.State, jobID int6 newState.CreateJob(deleteJob) err = state.MergeAndSave(config, newState) if err != nil { - return fmt.Errorf("Error removing %s from state file: %w\n", prefix, err) + return fmt.Errorf("Error removing %s from state file: %w", prefix, err) } - fmt.Printf("Removing ssh config for job %v...\n", jobID) + log.Wait("Removing ssh config for job %v", jobID) err = ssh.DeleteSSHConfig(jobID) if err != nil { return fmt.Errorf("Failed to delete ssh config: %w", err) @@ -62,7 +64,7 @@ func CleanBucketJob(config config.Config, existingState *state.State, jobID int6 func CleanBucketAll(config config.Config, existingState *state.State, force bool) error { if len(existingState.Jobs) == 0 { - fmt.Println("No jobs are available") + log.Info("No jobs are available") return nil } for _, job := range existingState.Jobs { diff --git a/cmd/cloudexec/configure.go b/cmd/cloudexec/configure.go index 6440693..76f34e9 100644 --- a/cmd/cloudexec/configure.go +++ b/cmd/cloudexec/configure.go @@ -9,15 +9,16 @@ import ( "os/user" "strings" - "github.com/crytic/cloudexec/pkg/config" "golang.org/x/term" + + "github.com/crytic/cloudexec/pkg/config" + "github.com/crytic/cloudexec/pkg/log" ) func Configure() error { user, err := user.Current() if err != nil { - fmt.Printf("Failed to get current user: %v", err) - os.Exit(1) + return fmt.Errorf("Failed to get current user: %v", err) } username, err := promptUserInput("Username", user.Username) if err != nil { @@ -63,7 +64,7 @@ func Configure() error { } func promptSecretInput(prompt, defaultValue string) (string, error) { - fmt.Printf("%s [%s]: ", prompt, defaultValue) + log.Info("%s [%s]: ", prompt, defaultValue) rawInput, err := term.ReadPassword(int(os.Stdin.Fd())) if err != nil { return "", fmt.Errorf("Failed to read input: %w", err) @@ -81,7 +82,7 @@ func promptSecretInput(prompt, defaultValue string) (string, error) { func promptUserInput(prompt, defaultValue string) (string, error) { reader := bufio.NewReader(os.Stdin) - fmt.Printf("%s [%s]: ", prompt, defaultValue) + log.Info("%s [%s]: ", prompt, defaultValue) input, err := reader.ReadString('\n') if err != nil { diff --git a/cmd/cloudexec/init.go b/cmd/cloudexec/init.go index fe29b07..c064079 100644 --- a/cmd/cloudexec/init.go +++ b/cmd/cloudexec/init.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/crytic/cloudexec/pkg/config" + "github.com/crytic/cloudexec/pkg/log" "github.com/crytic/cloudexec/pkg/s3" ) @@ -25,7 +26,7 @@ func Init(config config.Config) error { if !bucketExists { // Create a new bucket - fmt.Printf("Creating new %s bucket...\n", bucketName) + log.Wait("Creating new %s bucket", bucketName) err = 
s3.CreateBucket(config) if err != nil { return err @@ -56,7 +57,7 @@ func initState(config config.Config, bucketName string) error { } // Create the state directory if it does not already exist if !stateDirExists { - fmt.Printf("Creating new state directory at %s/%s\n", bucketName, stateDir) + log.Wait("Creating new state directory at %s/%s", bucketName, stateDir) err = s3.PutObject(config, stateDir, []byte{}) if err != nil { return fmt.Errorf("Failed to create state directory at %s/%s: %w", bucketName, stateDir, err) @@ -71,7 +72,7 @@ func initState(config config.Config, bucketName string) error { } // Create the initial state file if it does not already exist if !statePathExists { - fmt.Printf("Creating new state file at %s/%s\n", bucketName, statePath) + log.Wait("Creating new state file at %s/%s", bucketName, statePath) err = s3.PutObject(config, statePath, []byte("{}")) if err != nil { return fmt.Errorf("Failed to create state file in bucket %s: %w", bucketName, err) diff --git a/cmd/cloudexec/launch.go b/cmd/cloudexec/launch.go index edacc20..82991ef 100644 --- a/cmd/cloudexec/launch.go +++ b/cmd/cloudexec/launch.go @@ -9,6 +9,7 @@ import ( "github.com/BurntSushi/toml" "github.com/crytic/cloudexec/pkg/config" do "github.com/crytic/cloudexec/pkg/digitalocean" + "github.com/crytic/cloudexec/pkg/log" "github.com/crytic/cloudexec/pkg/ssh" "github.com/crytic/cloudexec/pkg/state" ) @@ -84,10 +85,7 @@ func LoadLaunchConfig(launchConfigPath string) (LaunchConfig, error) { } func Launch(config config.Config, dropletSize string, dropletRegion string, lc LaunchConfig) error { - username := config.Username - bucketName := fmt.Sprintf("cloudexec-%s", username) - - // get existing state from bucket + // get existing state existingState, err := state.GetState(config) if err != nil { return fmt.Errorf("Failed to get S3 state: %w", err) @@ -115,7 +113,7 @@ func Launch(config config.Config, dropletSize string, dropletRegion string, lc L newState.CreateJob(newJob) // sync state to bucket err = state.MergeAndSave(config, newState) - fmt.Printf("Registered new job with id %v\n", thisJobId) + log.Info("Registered new job with id %v", thisJobId) if err != nil { return fmt.Errorf("Failed to update S3 state: %w", err) } @@ -123,36 +121,31 @@ func Launch(config config.Config, dropletSize string, dropletRegion string, lc L // upload local files to the bucket sourcePath := lc.Input.Directory // TODO: verify that this path exists & throw informative error if not destPath := fmt.Sprintf("job-%v", thisJobId) - fmt.Printf("Compressing and uploading contents of directory %s to bucket %s/%s...\n", sourcePath, bucketName, destPath) err = UploadDirectoryToSpaces(config, sourcePath, destPath) if err != nil { return fmt.Errorf("Failed to upload files: %w", err) } // Get or create an SSH key - fmt.Println("Getting or creating SSH key pair...") publicKey, err := ssh.GetOrCreateSSHKeyPair() if err != nil { return fmt.Errorf("Failed to get or creating SSH key pair: %w", err) } // Prepare user data - fmt.Println("Generating user data...") userData, err := GenerateUserData(config, lc) if err != nil { return fmt.Errorf("Failed to generate user data: %w", err) } - fmt.Printf("Creating new %s droplet in %s for job %d...\n", dropletSize, config.DigitalOcean.SpacesRegion, thisJobId) + log.Wait("Creating new %s droplet in %s for job %d", dropletSize, config.DigitalOcean.SpacesRegion, thisJobId) droplet, err := do.CreateDroplet(config, config.DigitalOcean.SpacesRegion, dropletSize, userData, thisJobId, publicKey) if err != nil { 
return fmt.Errorf("Failed to create droplet: %w", err) } - - fmt.Printf("Droplet created with IP: %v\n", droplet.IP) + log.Good("Droplet created with IP: %v", droplet.IP) // Add the droplet info to state - fmt.Println("Adding new droplet info to state...") updatedAt := time.Now().Unix() for i, job := range newState.Jobs { if job.ID == thisJobId { @@ -160,30 +153,30 @@ func Launch(config config.Config, dropletSize string, dropletRegion string, lc L newState.Jobs[i].UpdatedAt = updatedAt } } - fmt.Printf("Uploading new state to %s\n", bucketName) err = state.MergeAndSave(config, newState) if err != nil { return fmt.Errorf("Failed to update S3 state: %w", err) } + log.Info("Saved new droplet info to state") // Add the droplet to the SSH config file - fmt.Println("Adding droplet to SSH config file...") err = ssh.AddSSHConfig(thisJobId, droplet.IP) if err != nil { return fmt.Errorf("Failed to add droplet to SSH config file: %w", err) } + log.Info("Saved droplet info to SSH config") // Ensure we can SSH into the droplet - fmt.Println("Ensuring we can SSH into the droplet...") + log.Wait("Waiting until our new droplet wakes up") err = ssh.WaitForSSHConnection(thisJobId) if err != nil { return fmt.Errorf("Failed to SSH into the droplet: %w", err) } - fmt.Println("SSH connection established!") - fmt.Println("Launch complete") - fmt.Println("You can now attach to the running job with: cloudexec attach") - fmt.Println("Stream logs from the droplet with: cloudexec logs") - fmt.Println("SSH to your droplet with: ssh cloudexec") + log.Good("SSH connection established!") + log.Good("Launch complete") + log.Info("You can now attach to the running job with: cloudexec attach") + log.Info("Stream logs from the droplet with: cloudexec logs") + log.Info("SSH to your droplet with: ssh cloudexec-%v", thisJobId) return nil } diff --git a/cmd/cloudexec/logs.go b/cmd/cloudexec/logs.go index b34002e..037b039 100644 --- a/cmd/cloudexec/logs.go +++ b/cmd/cloudexec/logs.go @@ -16,7 +16,7 @@ func GetLogsFromBucket(config config.Config, jobID int64) error { log, err := s3.GetObject(config, itemKey) if err != nil { if err.Error() == "The specified key does not exist." { - return fmt.Errorf("The specified job logs do not exist. Please check the job ID and try again.\n") + return fmt.Errorf("The specified job logs do not exist. 
Please check the job ID and try again.") } return fmt.Errorf("Failed to read log data: %w", err) } diff --git a/cmd/cloudexec/main.go b/cmd/cloudexec/main.go index 2e212fb..09c2ed7 100644 --- a/cmd/cloudexec/main.go +++ b/cmd/cloudexec/main.go @@ -3,7 +3,6 @@ package main import ( "encoding/json" "fmt" - "log" "os" "strconv" @@ -79,7 +78,7 @@ func main() { if err != nil { return err } - log.Info("Using CloudExec image: %s\n", snap.Name) + log.Info("Using CloudExec image: %s", snap.Name) return nil }, }, @@ -130,10 +129,7 @@ func main() { return err } err = Launch(config, dropletSize, dropletRegion, lc) - if err != nil { - log.Fatal(err) - } - return nil + return err }, }, @@ -297,8 +293,8 @@ func main() { } return nil } else { - fmt.Println("error: Can't attach, no running job found") - fmt.Println("Check the status of the job with cloudexec status") + log.Error("Can't attach, no running job found") + log.Info("Check the status of the job with cloudexec status") return nil } }, @@ -508,7 +504,7 @@ func main() { } // Print the jobs from the state for _, job := range existingState.Jobs { - fmt.Printf("Job ID: %d, Status: %s\n", job.ID, job.Status) + log.Info("Job ID: %d, Status: %s", job.ID, job.Status) } return nil }, @@ -528,13 +524,13 @@ func main() { } jobID := c.Args().First() // Get the job ID from the arguments if jobID == "" { - fmt.Println("Please provide a job ID to remove") + log.Warn("Please provide a job ID to remove") return nil } // Convert jobID string to int64 id, err := strconv.ParseInt(jobID, 10, 64) if err != nil { - fmt.Printf("Invalid job ID: %s\n", jobID) + log.Error("Invalid job ID: %s", jobID) return nil } newState := &state.State{} @@ -573,7 +569,7 @@ func main() { if err != nil { return err } - fmt.Println(string(json)) + log.Info(string(json)) return nil }, }, @@ -583,7 +579,7 @@ func main() { } if err := app.Run(os.Args); err != nil { - fmt.Printf("%v\n", err) + log.Error("%v\n", err) os.Exit(1) } diff --git a/cmd/cloudexec/pull.go b/cmd/cloudexec/pull.go index c3540fd..802ad30 100644 --- a/cmd/cloudexec/pull.go +++ b/cmd/cloudexec/pull.go @@ -7,6 +7,7 @@ import ( "strings" "github.com/crytic/cloudexec/pkg/config" + "github.com/crytic/cloudexec/pkg/log" "github.com/crytic/cloudexec/pkg/s3" ) @@ -49,7 +50,7 @@ func DownloadJobOutput(config config.Config, jobID int64, localPath string) erro return fmt.Errorf("Failed to write object content to file: %w", err) } - fmt.Printf("Downloaded %s to %s \n", objectKey, localFilePath) + log.Good("Downloaded %s to %s", objectKey, localFilePath) } } return nil @@ -59,12 +60,12 @@ func DownloadJobOutput(config config.Config, jobID int64, localPath string) erro body, logErr := s3.GetObject(config, fmt.Sprintf("job-%v/cloudexec.log", jobID)) if len(objectKeys) == 0 && logErr != nil { - fmt.Printf("No output or logs are available for job %v\n", jobID) + log.Info("No output or logs are available for job %v", jobID) return nil } else if len(objectKeys) == 0 { - fmt.Printf("No output is available for job %v\n", jobID) + log.Info("No output is available for job %v", jobID) } else if logErr != nil { - fmt.Printf("No logs are available for job %v\n", jobID) + log.Info("No logs are available for job %v", jobID) } // Check if the local path is a directory, if not, create it @@ -90,7 +91,7 @@ func DownloadJobOutput(config config.Config, jobID int64, localPath string) erro if err != nil { return fmt.Errorf("Failed to write object content to file: %w", err) } - fmt.Printf("Downloaded job %v logs to %s \n", jobID, localFilePath) + 
log.Good("Downloaded job %v logs to %s", jobID, localFilePath) } return nil diff --git a/cmd/cloudexec/push.go b/cmd/cloudexec/push.go index 7e13b14..88297a9 100644 --- a/cmd/cloudexec/push.go +++ b/cmd/cloudexec/push.go @@ -8,16 +8,18 @@ import ( "path/filepath" "github.com/crytic/cloudexec/pkg/config" + "github.com/crytic/cloudexec/pkg/log" "github.com/crytic/cloudexec/pkg/s3" ) func UploadDirectoryToSpaces(config config.Config, sourcePath string, destPath string) error { + log.Wait("Compressing and uploading contents of directory %s to bucket at %s", sourcePath, destPath) + // Compute the path for the zipped archive of sourcePath zipFileName := "input.zip" zipFilePath := filepath.Join(os.TempDir(), zipFileName) // Create a file where we will write the zipped archive - // fmt.Printf("Creating zipped archive at %s\n", zipFilePath) zipFile, err := os.Create(zipFilePath) if err != nil { return err @@ -38,7 +40,6 @@ func UploadDirectoryToSpaces(config config.Config, sourcePath string, destPath s // If it's a symbolic link, resolve the target if info.Mode()&os.ModeSymlink == os.ModeSymlink { target, err = os.Readlink(path) - // fmt.Printf("Resolved link from %s to %s\n", path, target) if err != nil { return err } @@ -53,7 +54,6 @@ func UploadDirectoryToSpaces(config config.Config, sourcePath string, destPath s if targetInfo.IsDir() { cleanPath := filepath.Clean(path) + string(filepath.Separator) - // fmt.Printf("Creating directory %s in the zipped archive\n", cleanPath) _, err = zipWriter.Create(cleanPath) if err != nil { return err @@ -66,8 +66,6 @@ func UploadDirectoryToSpaces(config config.Config, sourcePath string, destPath s return nil } - // fmt.Printf("Adding %s to the zipped archive\n", target) - // Create a new file entry in the zipped archive zipFileEntry, err := zipWriter.Create(path) if err != nil { @@ -97,7 +95,6 @@ func UploadDirectoryToSpaces(config config.Config, sourcePath string, destPath s if err != nil { return err } - fmt.Printf("Successfully added all files from %s to zipped archive at %s\n", sourcePath, zipFilePath) // Make sure all prior writes are sync'd to the filesystem // This is necessary bc we're going to read the file right after writing it @@ -128,14 +125,16 @@ func UploadDirectoryToSpaces(config config.Config, sourcePath string, destPath s if len(fileBytes) == 0 { return fmt.Errorf("Failed to read zipped archive at %s: read zero bytes of data", zipFilePath) } + log.Good("Successfully added all files from %s to zipped archive at %s", sourcePath, zipFilePath) // Upload the zipped archive destKey := filepath.Join(destPath, "input.zip") - fmt.Printf("Uploading archive (%v bytes) to %s\n", len(fileBytes), destKey) + log.Wait("Uploading zipped archive (%v bytes) to %s", len(fileBytes), destKey) err = s3.PutObject(config, destKey, fileBytes) if err != nil { return err } + log.Good("Zipped archive uploaded successfully") return nil } diff --git a/example/cloudexec.toml b/example/cloudexec.toml index 9b35773..26cb183 100644 --- a/example/cloudexec.toml +++ b/example/cloudexec.toml @@ -5,49 +5,6 @@ timeout = "48h" [commands] setup = ''' -if ! command -v slither >/dev/null 2>&1; then - echo "Installing solc and slither..." - python3 -m venv ~/venv - source ~/venv/bin/activate - pip3 install solc-select slither-analyzer crytic-compile - solc-select install 0.8.6 - solc-select use 0.8.6 -fi - -if ! command -v echidna >/dev/null 2>&1; then - echo "Downloading echidna..." 
- curl -fsSL -o /tmp/echidna.zip https://github.com/crytic/echidna/releases/download/v2.2.1/echidna-2.2.1-Linux.zip - echo "Extracting echidna..." - unzip /tmp/echidna.zip -d /tmp - tar -xzf /tmp/echidna.tar.gz -C /tmp - echo "Installing echidna..." - mv /tmp/echidna /usr/local/bin - rm /tmp/echidna.tar.gz -fi - -if ! command -v medusa >/dev/null 2>&1; then - echo "Downloading medusa..." - sudo apt-get update; sudo apt-get install -y unzip - curl -fsSL https://github.com/crytic/medusa/releases/download/v0.1.0/medusa-linux-x64.zip -o medusa.zip - unzip medusa.zip - chmod +x medusa - sudo mv medusa /usr/local/bin -fi - -if ! command -v docker >/dev/null 2>&1; then - echo "Installing docker and its dependencies..." - apt-get install -y apt-transport-https ca-certificates curl gnupg-agent software-properties-common - docker_key="$(curl -fsSL https://download.docker.com/linux/ubuntu/gpg)" - echo "${docker_key}" | apt-key add - - release="$(lsb_release -cs)" - add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu ${release} stable" - apt-get update -y - apt-get install -y docker-ce docker-ce-cli containerd.io - user="$(whoami)" - usermod -aG docker "${user}" - systemctl enable docker -fi - if ! command -v forge >/dev/null 2>&1; then echo "Installing foundry..." curl -L https://foundry.paradigm.xyz | bash @@ -57,4 +14,4 @@ fi ''' # This command is run after the setup script completes. -run = "medusa fuzz --target archive.zip" +run = "medusa fuzz --target flags.sol" diff --git a/pkg/config/config.go b/pkg/config/config.go index d4c3d12..0c5f5bd 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -6,6 +6,8 @@ import ( "path/filepath" "github.com/BurntSushi/toml" + + "github.com/crytic/cloudexec/pkg/log" ) type Config struct { @@ -40,7 +42,7 @@ func Create(configValues Config) error { return fmt.Errorf("Failed to encode configuration values: %w", err) } - fmt.Printf("Configuration file created at: %s\n", configFile) + log.Good("Configuration file created at: %s", configFile) return nil } diff --git a/pkg/digitalocean/digitalocean.go b/pkg/digitalocean/digitalocean.go index ca089a9..54c0e14 100644 --- a/pkg/digitalocean/digitalocean.go +++ b/pkg/digitalocean/digitalocean.go @@ -88,7 +88,6 @@ func findSSHKeyOnDigitalOcean(keyName string) (string, string, error) { } for _, key := range keys { if key.Name == keyName { - log.Good("SSH key found. 
ID=%v | Name=%s | Fingerprint=%v\n", key.ID, key.Name, key.Fingerprint) return key.Fingerprint, key.PublicKey, nil } } @@ -108,16 +107,16 @@ func CheckAuth(config config.Config) error { // Check Account authentication _, _, err = doClient.Account.Get(context.Background()) if err != nil { - return fmt.Errorf("%s Failed to authenticate with DigitalOcean API: %w", noEntry, err) + return fmt.Errorf("Failed to authenticate with DigitalOcean API: %w", err) } log.Good("Successfully authenticated with DigitalOcean API") // Check Spaces authentication _, err = s3.ListBuckets(config) if err != nil { - return fmt.Errorf("%s Failed to authenticate with DigitalOcean Spaces API: %w", noEntry, err) + return fmt.Errorf("Failed to authenticate with DigitalOcean Spaces API: %w", err) } - log.Good("%s Successfully authenticated with DigitalOcean Spaces API", greenCheck) + log.Good("Successfully authenticated with DigitalOcean Spaces API") return nil } @@ -141,13 +140,13 @@ func CreateDroplet(config config.Config, region string, size string, userData st } } else { // Create the SSH key on DigitalOcean - log.Wait("Creating SSH key on DigitalOcean") + log.Wait("Saving SSH public key to DigitalOcean") keyName := fmt.Sprintf("cloudexec-%v", config.Username) sshKeyFingerprint, err = createSSHKeyOnDigitalOcean(keyName, publicKey) if err != nil { return droplet, fmt.Errorf("Failed to create SSH key on DigitalOcean: %w", err) } - log.Good("SSH key created on DigitalOcean with fingerprint: %v\n", sshKeyFingerprint) + log.Good("SSH key is available on DigitalOcean with fingerprint: %v", sshKeyFingerprint) } snap, err := GetLatestSnapshot(config) @@ -156,7 +155,6 @@ func CreateDroplet(config config.Config, region string, size string, userData st } // Create a new droplet - log.Wait("Creating droplet") createRequest := &godo.DropletCreateRequest{ Name: dropletName, Region: region, diff --git a/pkg/log/log.go b/pkg/log/log.go index 33c7b6e..2d5be6a 100644 --- a/pkg/log/log.go +++ b/pkg/log/log.go @@ -1,3 +1,5 @@ +package log + import ( "fmt" ) @@ -12,26 +14,26 @@ const ( ) func Info(msg string, args ...interface{}) { - formatted := fmt.Sprintf(format, args...) + formatted := fmt.Sprintf(msg, args...) fmt.Println(ColorWhite, formatted, ColorReset) } -func Wait(format string, args ...interface{}) { - formatted := fmt.Sprintf(format, args...) + "..." +func Wait(msg string, args ...interface{}) { + formatted := fmt.Sprintf(msg, args...) + "..." fmt.Println(ColorBlue, formatted, ColorReset) } func Good(msg string, args ...interface{}) { - formatted := fmt.Sprintf(format, args...) + formatted := fmt.Sprintf(msg, args...) fmt.Println(ColorGreen, formatted, ColorReset) } func Warn(msg string, args ...interface{}) { - formatted := fmt.Sprintf(format, args...) + formatted := fmt.Sprintf(msg, args...) fmt.Println(ColorYellow, formatted, ColorReset) } func Error(msg string, args ...interface{}) { - formatted := fmt.Sprintf(format, args...) + formatted := fmt.Sprintf(msg, args...) 
fmt.Println(ColorRed, formatted, ColorReset) } diff --git a/pkg/s3/s3.go b/pkg/s3/s3.go index 2c5d463..56db5f8 100644 --- a/pkg/s3/s3.go +++ b/pkg/s3/s3.go @@ -127,7 +127,6 @@ func CreateBucket(config config.Config) error { if err != nil { return fmt.Errorf("Failed to wait for bucket '%s': %w", bucketName, err) } - // fmt.Printf("Created bucket '%s'...\n", bucketName) return nil } @@ -148,7 +147,6 @@ func PutObject(config config.Config, key string, value []byte) error { if err != nil { return fmt.Errorf("Failed to create %s directory in bucket %s: %w", key, bucketName, err) } - // fmt.Printf("Successfully created directory in s3 bucket: %s/%s\n", bucketName, key) return nil } // hash the input to ensure the integrity of file @@ -167,7 +165,6 @@ func PutObject(config config.Config, key string, value []byte) error { if err != nil { return fmt.Errorf("Failed to upload file %s to bucket %s: %w", key, bucketName, err) } - // fmt.Printf("Successfully uploaded file to s3 bucket: %s/%s\n", bucketName, key) return nil } diff --git a/pkg/ssh/ssh.go b/pkg/ssh/ssh.go index 54dca56..d210ac3 100644 --- a/pkg/ssh/ssh.go +++ b/pkg/ssh/ssh.go @@ -17,6 +17,8 @@ import ( "github.com/kevinburke/ssh_config" "github.com/mikesmitty/edkey" "golang.org/x/crypto/ssh" + + "github.com/crytic/cloudexec/pkg/log" ) const HostConfigTemplate = ` @@ -112,11 +114,9 @@ func GetOrCreateSSHKeyPair() (string, error) { if err != nil { return "", fmt.Errorf("Failed to read SSH public key file: %w", err) } - fmt.Printf("Using existing ssh keypair at %s(.pub)\n", privateKeyPath) return string(publicKeyBytes), nil } - - fmt.Printf("Creating new ssh keypair at %s(.pub)\n", privateKeyPath) + log.Wait("Creating new ssh keypair") // Generate an ed25519 key pair edPubKey, privateKey, err := ed25519.GenerateKey(rand.Reader) @@ -148,6 +148,7 @@ func GetOrCreateSSHKeyPair() (string, error) { return "", fmt.Errorf("Failed to save SSH public key file: %w", err) } + log.Good("Created new ssh keypair at %s(.pub)", privateKeyPath) return string(publicKeySSHFormat), nil } @@ -211,7 +212,7 @@ func DeleteSSHConfig(jobID int64) error { } // If there's no error, the file was deleted successfully if err == nil { - fmt.Printf("Deleted SSH config for job-%v\n", jobID) + log.Info("Deleted SSH config for job-%v", jobID) } return nil } @@ -278,7 +279,6 @@ func WaitForSSHConnection(jobID int64) error { return fmt.Errorf("Timed out waiting for SSH connection: %w", err) } - fmt.Printf("Can't connect to %s yet, retrying in %v...\n", hostname, retryInterval) time.Sleep(retryInterval) } } diff --git a/pkg/state/state.go b/pkg/state/state.go index 73d2b5f..157c354 100644 --- a/pkg/state/state.go +++ b/pkg/state/state.go @@ -8,6 +8,7 @@ import ( "github.com/crytic/cloudexec/pkg/config" do "github.com/crytic/cloudexec/pkg/digitalocean" + "github.com/crytic/cloudexec/pkg/log" "github.com/crytic/cloudexec/pkg/s3" ) @@ -161,7 +162,7 @@ func (s *State) CancelRunningJob(config config.Config, jobID int64) error { for i, job := range s.Jobs { if job.ID == jobID { if job.Status == Running || job.Status == Provisioning { - fmt.Printf("Setting status of job %d to 'Cancelled'\n", job.ID) + log.Info("Setting status of job %d to 'Cancelled'", job.ID) s.Jobs[i].Status = Cancelled break } else { @@ -207,7 +208,7 @@ func GetJobIdsByInstance(config config.Config) (map[int64][]int64, error) { } for _, job := range existingState.Jobs { if job.Droplet.ID == 0 { - fmt.Printf("Warning: Uninitialized droplet id for job %d\n", job.ID) + log.Warn("Uninitialized droplet id for job %d", 
job.ID) } instanceToJobIds[job.Droplet.ID] = append(instanceToJobIds[job.Droplet.ID], job.ID) } From 31a8a8b11934d7a55cfa19af2d7de45132a03357 Mon Sep 17 00:00:00 2001 From: bohendo Date: Fri, 15 Mar 2024 14:31:21 -0400 Subject: [PATCH 04/15] tweak default output path --- cmd/cloudexec/main.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/cloudexec/main.go b/cmd/cloudexec/main.go index 09c2ed7..8176f31 100644 --- a/cmd/cloudexec/main.go +++ b/cmd/cloudexec/main.go @@ -199,7 +199,7 @@ func main() { } path := c.String("path") if path == "" { - path = fmt.Sprintf("cloudexec-%v", jobID) + path = fmt.Sprintf("cloudexec/job-%v", jobID) } err = DownloadJobOutput(config, jobID, path) return err @@ -459,7 +459,7 @@ func main() { } path := c.String("path") if path == "" { - path = fmt.Sprintf("cloudexec-%v", jobID) + path = fmt.Sprintf("cloudexec/job-%v", jobID) } // Pull all data err = DownloadJobOutput(config, jobID, path) From 783deb52acfe4e6d08b24158f0c5c0dc519bdb6a Mon Sep 17 00:00:00 2001 From: bohendo Date: Fri, 15 Mar 2024 14:47:53 -0400 Subject: [PATCH 05/15] rm obsolete fn & tidy logs --- cmd/cloudexec/cancel.go | 3 +- cmd/cloudexec/clean.go | 9 ++- cmd/cloudexec/main.go | 6 +- pkg/ssh/ssh.go | 2 +- pkg/state/state.go | 126 ++++++++++++++++++---------------------- 5 files changed, 65 insertions(+), 81 deletions(-) diff --git a/cmd/cloudexec/cancel.go b/cmd/cloudexec/cancel.go index 58f8f94..f2d3f2f 100644 --- a/cmd/cloudexec/cancel.go +++ b/cmd/cloudexec/cancel.go @@ -12,7 +12,8 @@ import ( func CancelJob(config config.Config, existingState *state.State, job *state.Job, force bool) error { if job.Status != state.Provisioning && job.Status != state.Running { - return fmt.Errorf("Job %v is not running, it is %s", job.ID, job.Status) + log.Info("Job %v is not running, it is %s", job.ID, job.Status) + return nil } log.Info("Destroying droplet %s associated with job %v: IP=%v | CreatedAt=%s", job.Droplet.Name, job.ID, job.Droplet.IP, job.Droplet.Created) if !force { // Ask for confirmation before cleaning this job if no force flag diff --git a/cmd/cloudexec/clean.go b/cmd/cloudexec/clean.go index f5b7d46..ecdd45b 100644 --- a/cmd/cloudexec/clean.go +++ b/cmd/cloudexec/clean.go @@ -23,7 +23,7 @@ func CleanBucketJob(config config.Config, existingState *state.State, jobID int6 log.Info("Bucket is already empty.") return nil } - log.Info("Removing ALL input, output, and logs associated with %s", prefix) + log.Warn("Removing all input, output, logs, and configuration associated with %s", prefix) if !force { // Ask for confirmation before cleaning this job if no force flag log.Warn("Confirm? 
(y/n)") var response string @@ -36,14 +36,13 @@ func CleanBucketJob(config config.Config, existingState *state.State, jobID int6 log.Wait("Deleting bucket contents") // Delete all objects in the bucket for _, object := range objects { - log.Info("Deleting object: ", object) + log.Info("Deleting object: %s", object) err = s3.DeleteObject(config, object) if err != nil { return err } } - log.Good("Deleted %d objects from bucket", numToRm) - log.Wait("Removing job %v from state file", numToRm, jobID) + log.Good("Bucket is clean") newState := &state.State{} deleteJob := state.Job{ ID: jobID, @@ -51,10 +50,10 @@ func CleanBucketJob(config config.Config, existingState *state.State, jobID int6 } newState.CreateJob(deleteJob) err = state.MergeAndSave(config, newState) + log.Good("Removed job %v from state file", jobID) if err != nil { return fmt.Errorf("Error removing %s from state file: %w", prefix, err) } - log.Wait("Removing ssh config for job %v", jobID) err = ssh.DeleteSSHConfig(jobID) if err != nil { return fmt.Errorf("Failed to delete ssh config: %w", err) diff --git a/cmd/cloudexec/main.go b/cmd/cloudexec/main.go index 8176f31..af99fce 100644 --- a/cmd/cloudexec/main.go +++ b/cmd/cloudexec/main.go @@ -191,7 +191,7 @@ func main() { } jobID := c.Int64("job") if jobID == 0 { - latestCompletedJob, err := state.GetLatestCompletedJob(existingState) + latestCompletedJob, err := existingState.GetLatestCompletedJob() if err != nil { return err } @@ -446,7 +446,7 @@ func main() { jobID := c.Int64("job") var targetJob *state.Job if c.Int("job") == 0 { - targetJob, err = state.GetLatestCompletedJob(existingState) + targetJob, err = existingState.GetLatestCompletedJob() if err != nil { return err } @@ -579,7 +579,7 @@ func main() { } if err := app.Run(os.Args); err != nil { - log.Error("%v\n", err) + log.Error("%v", err) os.Exit(1) } diff --git a/pkg/ssh/ssh.go b/pkg/ssh/ssh.go index d210ac3..e69bf21 100644 --- a/pkg/ssh/ssh.go +++ b/pkg/ssh/ssh.go @@ -212,7 +212,7 @@ func DeleteSSHConfig(jobID int64) error { } // If there's no error, the file was deleted successfully if err == nil { - log.Info("Deleted SSH config for job-%v", jobID) + log.Good("Deleted SSH config for cloudexec-%v", jobID) } return nil } diff --git a/pkg/state/state.go b/pkg/state/state.go index 157c354..536ff2a 100644 --- a/pkg/state/state.go +++ b/pkg/state/state.go @@ -40,6 +40,9 @@ type State struct { Jobs []Job `json:"jobs"` } +//////////////////////////////////////// +// Public Functions + func GetState(config config.Config) (*State, error) { stateKey := "state/state.json" var state State @@ -66,44 +69,6 @@ func GetState(config config.Config) (*State, error) { return &state, nil } -// GetJob returns a job with the specified ID or nil if not found. 
-func (s *State) GetJob(jobID int64) *Job { - for _, job := range s.Jobs { - if job.ID == jobID { - return &job - } - } - return nil -} - -func MergeAndSave(config config.Config, newState *State) error { - // TODO: Handle locking to prevent concurrent updates - stateKey := "state/state.json" - existingState, err := GetState(config) - if err != nil { - return err - } - // Merge the existing state and the new state - MergeStates(existingState, newState) - // Marshal the merged state struct to JSON - mergedStateJSON, err := json.Marshal(existingState) - if err != nil { - return fmt.Errorf("Failed to marshal merged state to JSON: %w", err) - } - for i := 1; i <= maxRetries; i++ { - err = s3.PutObject(config, stateKey, mergedStateJSON) - if err == nil { - break - } - if i < maxRetries { - time.Sleep(time.Duration(i) * time.Second) - } else { - return fmt.Errorf("Failed to update state after %d retries: %w", maxRetries, err) - } - } - return nil -} - func MergeStates(existingState, newState *State) { // Create a map to keep track of deleted jobs deletedJobs := make(map[int64]bool) @@ -144,19 +109,37 @@ func removeDeletedJobs(jobs []Job, deletedJobs map[int64]bool) []Job { return filteredJobs } -// CreateJob adds a new job to the state. -func (s *State) CreateJob(job Job) { - s.Jobs = append(s.Jobs, job) -} - -// GetLatestJob returns the latest job in the state. -func (s *State) GetLatestJob() *Job { - if len(s.Jobs) > 0 { - return &s.Jobs[len(s.Jobs)-1] +func MergeAndSave(config config.Config, newState *State) error { + // TODO: Handle locking to prevent concurrent updates + stateKey := "state/state.json" + existingState, err := GetState(config) + if err != nil { + return err + } + // Merge the existing state and the new state + MergeStates(existingState, newState) + // Marshal the merged state struct to JSON + mergedStateJSON, err := json.Marshal(existingState) + if err != nil { + return fmt.Errorf("Failed to marshal merged state to JSON: %w", err) + } + for i := 1; i <= maxRetries; i++ { + err = s3.PutObject(config, stateKey, mergedStateJSON) + if err == nil { + break + } + if i < maxRetries { + time.Sleep(time.Duration(i) * time.Second) + } else { + return fmt.Errorf("Failed to update state after %d retries: %w", maxRetries, err) + } } return nil } +//////////////////////////////////////// +// State Methods + func (s *State) CancelRunningJob(config config.Config, jobID int64) error { // Mark any running jobs as cancelled for i, job := range s.Jobs { @@ -174,43 +157,44 @@ func (s *State) CancelRunningJob(config config.Config, jobID int64) error { if err != nil { return err } + return nil +} +// GetJob returns a job with the specified ID or nil if not found. +func (s *State) GetJob(jobID int64) *Job { + for _, job := range s.Jobs { + if job.ID == jobID { + return &job + } + } return nil } -func GetLatestCompletedJob(state *State) (*Job, error) { - var latestCompletedJob *Job +// CreateJob adds a new job to the state. +func (s *State) CreateJob(job Job) { + s.Jobs = append(s.Jobs, job) +} + +// GetLatestJob returns the latest job in the state. 
+func (s *State) GetLatestJob() *Job { + if len(s.Jobs) > 0 { + return &s.Jobs[len(s.Jobs)-1] + } + return nil +} +func (s *State) GetLatestCompletedJob() (*Job, error) { + var latestCompletedJob *Job // Find the latest completed job - for i := len(state.Jobs) - 1; i >= 0; i-- { - job := state.Jobs[i] + for i := len(s.Jobs) - 1; i >= 0; i-- { + job := s.Jobs[i] if job.Status == Completed || job.Status == Failed { latestCompletedJob = &job break } } - if latestCompletedJob == nil { return nil, errors.New("No completed jobs available") } - return latestCompletedJob, nil } - -func GetJobIdsByInstance(config config.Config) (map[int64][]int64, error) { - existingState, err := GetState(config) - if err != nil { - return nil, fmt.Errorf("Failed to get state: %w", err) - } - instanceToJobIds := make(map[int64][]int64) - if existingState.Jobs == nil { - return instanceToJobIds, nil - } - for _, job := range existingState.Jobs { - if job.Droplet.ID == 0 { - log.Warn("Uninitialized droplet id for job %d", job.ID) - } - instanceToJobIds[job.Droplet.ID] = append(instanceToJobIds[job.Droplet.ID], job.ID) - } - return instanceToJobIds, nil -} From 168e8777155d60ca18509d459ed308bb8f029c29 Mon Sep 17 00:00:00 2001 From: bohendo Date: Fri, 15 Mar 2024 15:14:28 -0400 Subject: [PATCH 06/15] add manual debugging tools --- flake.nix | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/flake.nix b/flake.nix index 206a180..bfd238e 100644 --- a/flake.nix +++ b/flake.nix @@ -10,6 +10,12 @@ utils.lib.eachDefaultSystem (system: let pkgs = import nixpkgs { inherit system; config.allowUnfree = true; }; + pyCommon = { + format = "pyproject"; + nativeBuildInputs = with pkgs.python310Packages; [ pythonRelaxDepsHook ]; + pythonRelaxDeps = true; + doCheck = false; + }; in rec { @@ -58,6 +64,52 @@ ]; }; + solc-select = pkgs.python310Packages.buildPythonPackage (pyCommon // { + pname = "solc-select"; + version = "1.0.4"; + src = builtins.fetchGit { + url = "git+ssh://git@github.com/crytic/solc-select"; + rev = "8072a3394bdc960c0f652fb72e928a7eae3631da"; + }; + propagatedBuildInputs = with pkgs.python310Packages; [ + packaging + setuptools + pycryptodome + ]; + }); + + crytic-compile = pkgs.python310Packages.buildPythonPackage (pyCommon // rec { + pname = "crytic-compile"; + version = "0.3.5"; + src = builtins.fetchGit { + url = "git+ssh://git@github.com/crytic/crytic-compile"; + rev = "3a4b0de72ad418b60b9ef8c38d7de31ed39e3898"; + }; + propagatedBuildInputs = with pkgs.python310Packages; [ + cbor2 + packages.solc-select + pycryptodome + setuptools + toml + ]; + }); + + medusa = pkgs.buildGoModule { + pname = "medusa"; + version = "0.1.2"; # from cmd/root.go + src = builtins.fetchGit { + url = "git+ssh://git@github.com/trailofbits/medusa"; + rev = "ac99e78ee38df86a8afefb21f105be9e4eae46ee"; + }; + vendorSha256 = "sha256-k5DtmpNi1ynSWgJ6b9EIlqCM6OlCkQf3Cf/daP+I7mY="; + nativeBuildInputs = [ + packages.crytic-compile + pkgs.solc + pkgs.nodejs + ]; + doCheck = false; # tests require `npm install` which can't run in hermetic build env + }; + }; apps = { @@ -91,6 +143,9 @@ packer doctl curl + # manual testing + packages.medusa + packages.crytic-compile ]; }; }; From de28a159946965ca79762a025945c48cc7c8cc08 Mon Sep 17 00:00:00 2001 From: bohendo Date: Fri, 15 Mar 2024 15:14:55 -0400 Subject: [PATCH 07/15] more log cleanup --- cmd/cloudexec/cancel.go | 6 ++-- cmd/cloudexec/launch.go | 54 ++++++++++++++++----------------- cmd/cloudexec/user_data.sh.tmpl | 1 + pkg/state/state.go | 2 
-- 4 files changed, 31 insertions(+), 32 deletions(-) diff --git a/cmd/cloudexec/cancel.go b/cmd/cloudexec/cancel.go index f2d3f2f..a16f4f9 100644 --- a/cmd/cloudexec/cancel.go +++ b/cmd/cloudexec/cancel.go @@ -15,7 +15,7 @@ func CancelJob(config config.Config, existingState *state.State, job *state.Job, log.Info("Job %v is not running, it is %s", job.ID, job.Status) return nil } - log.Info("Destroying droplet %s associated with job %v: IP=%v | CreatedAt=%s", job.Droplet.Name, job.ID, job.Droplet.IP, job.Droplet.Created) + log.Warn("Destroying droplet %s associated with job %v: IP=%v | CreatedAt=%s", job.Droplet.Name, job.ID, job.Droplet.IP, job.Droplet.Created) if !force { // Ask for confirmation before cleaning this job if no force flag log.Warn("Confirm? (y/n)") var response string @@ -29,12 +29,12 @@ func CancelJob(config config.Config, existingState *state.State, job *state.Job, if err != nil { return fmt.Errorf("Failed to destroy droplet: %w", err) } - log.Good("Droplet %v destroyed", job.Droplet.ID) + log.Good("Droplet %v destroyed", job.Droplet.Name) err = existingState.CancelRunningJob(config, job.ID) if err != nil { return fmt.Errorf("Failed to change job status to cancelled: %w", err) } - log.Good("Job %v status changed to cancelled", job.Droplet.ID) + log.Good("Job %v status changed to cancelled", job.ID) return nil } diff --git a/cmd/cloudexec/launch.go b/cmd/cloudexec/launch.go index 82991ef..0ae6710 100644 --- a/cmd/cloudexec/launch.go +++ b/cmd/cloudexec/launch.go @@ -44,7 +44,7 @@ func InitLaunchConfig() error { // Write the default launch config to the file _, err = launchConfigFile.WriteString(` -# Set the directory to upload to the droplet. +# Set the directory to upload to the server. [input] directory = "" timeout = "48h" @@ -84,7 +84,7 @@ func LoadLaunchConfig(launchConfigPath string) (LaunchConfig, error) { return lc, nil } -func Launch(config config.Config, dropletSize string, dropletRegion string, lc LaunchConfig) error { +func Launch(config config.Config, serverSize string, serverRegion string, lc LaunchConfig) error { // get existing state existingState, err := state.GetState(config) if err != nil { @@ -98,7 +98,7 @@ func Launch(config config.Config, dropletSize string, dropletRegion string, lc L } else { latestJobId = latestJob.ID } - thisJobId := latestJobId + 1 + jobID := latestJobId + 1 // update state struct with a new job newState := &state.State{} @@ -106,21 +106,21 @@ func Launch(config config.Config, dropletSize string, dropletRegion string, lc L newJob := state.Job{ Name: lc.Input.JobName, - ID: thisJobId, + ID: jobID, Status: state.Provisioning, StartedAt: startedAt, } newState.CreateJob(newJob) // sync state to bucket err = state.MergeAndSave(config, newState) - log.Info("Registered new job with id %v", thisJobId) + log.Info("Registered new job with id %v", jobID) if err != nil { return fmt.Errorf("Failed to update S3 state: %w", err) } // upload local files to the bucket sourcePath := lc.Input.Directory // TODO: verify that this path exists & throw informative error if not - destPath := fmt.Sprintf("job-%v", thisJobId) + destPath := fmt.Sprintf("job-%v", jobID) err = UploadDirectoryToSpaces(config, sourcePath, destPath) if err != nil { return fmt.Errorf("Failed to upload files: %w", err) @@ -138,18 +138,18 @@ func Launch(config config.Config, dropletSize string, dropletRegion string, lc L return fmt.Errorf("Failed to generate user data: %w", err) } - log.Wait("Creating new %s droplet in %s for job %d", dropletSize, config.DigitalOcean.SpacesRegion, 
thisJobId) - droplet, err := do.CreateDroplet(config, config.DigitalOcean.SpacesRegion, dropletSize, userData, thisJobId, publicKey) + log.Wait("Creating new %s server in %s for job %d", serverSize, config.DigitalOcean.SpacesRegion, jobID) + server, err := do.CreateDroplet(config, config.DigitalOcean.SpacesRegion, serverSize, userData, jobID, publicKey) if err != nil { - return fmt.Errorf("Failed to create droplet: %w", err) + return fmt.Errorf("Failed to create server: %w", err) } - log.Good("Droplet created with IP: %v", droplet.IP) + log.Good("Server created with IP: %v", server.IP) - // Add the droplet info to state + // Add the server info to state updatedAt := time.Now().Unix() for i, job := range newState.Jobs { - if job.ID == thisJobId { - newState.Jobs[i].Droplet = droplet + if job.ID == jobID { + newState.Jobs[i].Droplet = server newState.Jobs[i].UpdatedAt = updatedAt } } @@ -157,26 +157,26 @@ func Launch(config config.Config, dropletSize string, dropletRegion string, lc L if err != nil { return fmt.Errorf("Failed to update S3 state: %w", err) } - log.Info("Saved new droplet info to state") + log.Info("Saved new server info to state") - // Add the droplet to the SSH config file - err = ssh.AddSSHConfig(thisJobId, droplet.IP) + // Add the server to the SSH config file + err = ssh.AddSSHConfig(jobID, server.IP) if err != nil { - return fmt.Errorf("Failed to add droplet to SSH config file: %w", err) + return fmt.Errorf("Failed to add server to SSH config file: %w", err) } - log.Info("Saved droplet info to SSH config") + log.Info("Added cloudexec-%v to SSH config", jobID) - // Ensure we can SSH into the droplet - log.Wait("Waiting until our new droplet wakes up") - err = ssh.WaitForSSHConnection(thisJobId) + // Ensure we can SSH into the server + log.Wait("Waiting for our new server to wake up") + err = ssh.WaitForSSHConnection(jobID) if err != nil { - return fmt.Errorf("Failed to SSH into the droplet: %w", err) + return fmt.Errorf("Failed to SSH into the server: %w", err) } - log.Good("SSH connection established!") - log.Good("Launch complete") - log.Info("You can now attach to the running job with: cloudexec attach") - log.Info("Stream logs from the droplet with: cloudexec logs") - log.Info("SSH to your droplet with: ssh cloudexec-%v", thisJobId) + log.Good("Good Morning!") + fmt.Println() + log.Info("Stream logs from the server with: cloudexec logs") + log.Info("SSH to your server with: ssh cloudexec-%v", jobID) + log.Info("Once setup is complete, you can attach to the running job with: cloudexec attach") return nil } diff --git a/cmd/cloudexec/user_data.sh.tmpl b/cmd/cloudexec/user_data.sh.tmpl index e1fe439..92df2ed 100644 --- a/cmd/cloudexec/user_data.sh.tmpl +++ b/cmd/cloudexec/user_data.sh.tmpl @@ -265,6 +265,7 @@ end_time=$(("${start_time}" + TIMEOUT)) pretty_end_time="$(fmtDate "${end_time}")" echo "Workload is running, timer started at ${pretty_start_time}, we'll time out at ${pretty_end_time}" echo "================================================================================================" +echo "${RUN_COMMAND}" echo ######################################## diff --git a/pkg/state/state.go b/pkg/state/state.go index 536ff2a..85287a4 100644 --- a/pkg/state/state.go +++ b/pkg/state/state.go @@ -8,7 +8,6 @@ import ( "github.com/crytic/cloudexec/pkg/config" do "github.com/crytic/cloudexec/pkg/digitalocean" - "github.com/crytic/cloudexec/pkg/log" "github.com/crytic/cloudexec/pkg/s3" ) @@ -145,7 +144,6 @@ func (s *State) CancelRunningJob(config config.Config, jobID int64) 
error { for i, job := range s.Jobs { if job.ID == jobID { if job.Status == Running || job.Status == Provisioning { - log.Info("Setting status of job %d to 'Cancelled'", job.ID) s.Jobs[i].Status = Cancelled break } else { From fd0fde1987c661c94219b909aabbbaafd3e8d710 Mon Sep 17 00:00:00 2001 From: bohendo Date: Fri, 15 Mar 2024 15:28:45 -0400 Subject: [PATCH 08/15] fix example --- example/input/flags.sol | 6 +++--- example/input/medusa.json | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/example/input/flags.sol b/example/input/flags.sol index 82c66a2..1fbdcb7 100644 --- a/example/input/flags.sol +++ b/example/input/flags.sol @@ -14,15 +14,15 @@ contract Test { flag1 = false; } - function echidna_alwaystrue() public returns (bool){ + function fuzz_alwaystrue() public returns (bool){ return(true); } - function echidna_revert_always() public returns (bool){ + function fuzz_revert_always() public returns (bool){ revert(); } - function echidna_sometimesfalse() public returns (bool){ + function fuzz_sometimesfalse() public returns (bool){ emit Flag(flag0); emit Flag(flag1); return(flag1); diff --git a/example/input/medusa.json b/example/input/medusa.json index 3c9a9e7..5a6c4c6 100644 --- a/example/input/medusa.json +++ b/example/input/medusa.json @@ -5,7 +5,7 @@ "timeout": 0, "testLimit": 50000, "callSequenceLength": 100, - "corpusDirectory": "/root/output", + "corpusDirectory": "output", "coverageEnabled": true, "deploymentOrder": [], "constructorArgs": {}, @@ -21,7 +21,7 @@ "testAllContracts": false, "traceAll": false, "assertionTesting": { - "enabled": false, + "enabled": true, "testViewMethods": false }, "propertyTesting": { From daf15f99fe0fb582aed35027bcb9d28dc04cf176 Mon Sep 17 00:00:00 2001 From: bohendo Date: Fri, 15 Mar 2024 15:28:53 -0400 Subject: [PATCH 09/15] update gitignore --- .gitignore | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index 297ef26..9aad16d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,15 @@ -*.backup +# Environment .direnv/ .env + +# Editors .vscode .vscode/ -bin/ + +# Output dist/ -example/input.zip result +cloudexec/ +example/input/crytic-export/ +example/input/output/ +example/input.zip From d86fcba6b41cf920ca2e2c348eb05b46af51911c Mon Sep 17 00:00:00 2001 From: bohendo Date: Fri, 15 Mar 2024 15:56:27 -0400 Subject: [PATCH 10/15] fix example for real --- example/cloudexec.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/example/cloudexec.toml b/example/cloudexec.toml index 26cb183..990940b 100644 --- a/example/cloudexec.toml +++ b/example/cloudexec.toml @@ -14,4 +14,4 @@ fi ''' # This command is run after the setup script completes. -run = "medusa fuzz --target flags.sol" +run = "medusa fuzz" From 335c98d4133b27bc24033bf2f20316868edea816 Mon Sep 17 00:00:00 2001 From: bohendo Date: Fri, 15 Mar 2024 15:56:44 -0400 Subject: [PATCH 11/15] quiet noisy server logs --- cmd/cloudexec/user_data.sh.tmpl | 32 +++++++++++++++----------------- flake.nix | 4 ++-- pkg/state/state.go | 26 +++++++++++--------------- 3 files changed, 28 insertions(+), 34 deletions(-) diff --git a/cmd/cloudexec/user_data.sh.tmpl b/cmd/cloudexec/user_data.sh.tmpl index 92df2ed..10c3878 100644 --- a/cmd/cloudexec/user_data.sh.tmpl +++ b/cmd/cloudexec/user_data.sh.tmpl @@ -27,15 +27,15 @@ stderr_log="/tmp/cloudexec-stderr.log" # Required setup # Wait for unattended-upgr to finish install/upgrading stuff in the background +echo "Waiting for unattended-upgr to finish..." 
while fuser /var/lib/dpkg/lock >/dev/null 2>&1; do - echo "Waiting for unattended-upgr to finish..." - sleep 3 + sleep 1 done echo "Installing prereqs..." export DEBIAN_FRONTEND=noninteractive -apt-get update -apt-get install -y jq s3cmd tmux python3-pip python3-venv unzip +apt-get update > /dev/null +apt-get install -y jq s3cmd tmux python3-pip python3-venv unzip > /dev/null # set hostname current_hostname="$(hostname)" @@ -76,6 +76,16 @@ for tag in ${TAGS}; do fi done +if [[ ${CLOUDEXEC} == false ]] || [[ ${USERNAME} == "" ]]; then + echo "Not a CloudExec droplet, exiting..." + # exit 1 +fi + +if [[ ${JOB_ID} == "" ]]; then + echo "No job ID, exiting..." + exit 1 +fi + export BUCKET_NAME="cloudexec-${USERNAME}" echo "Using bucket ${BUCKET_NAME}" @@ -97,16 +107,6 @@ for var in AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_DEFAULT_REGION; do fi done -if [[ ${CLOUDEXEC} == false ]] || [[ ${USERNAME} == "" ]]; then - echo "Not a CloudExec droplet, exiting..." - # exit 1 -fi - -if [[ ${JOB_ID} == "" ]]; then - echo "No job ID, exiting..." - exit 1 -fi - ######################################## # Define helper functions @@ -122,15 +122,12 @@ s3cmd() { } upload_output() { - if compgen -G "${output_dir}/*" >/dev/null; then echo "Uploading results..." s3cmd put -r "${output_dir}"/* "s3://${BUCKET_NAME}/job-${JOB_ID}/output/" - else echo "Skipping results upload, no files found in ${output_dir}" fi - } # Define a cleanup function that will be executed on signals or exit @@ -208,6 +205,7 @@ trap cleanup EXIT SIGHUP SIGINT SIGTERM ######################################## # Job-specific setup +echo "================================================================================================" echo "Running setup..." eval "${SETUP_COMMANDS}" diff --git a/flake.nix b/flake.nix index bfd238e..acda03e 100644 --- a/flake.nix +++ b/flake.nix @@ -99,9 +99,9 @@ version = "0.1.2"; # from cmd/root.go src = builtins.fetchGit { url = "git+ssh://git@github.com/trailofbits/medusa"; - rev = "ac99e78ee38df86a8afefb21f105be9e4eae46ee"; + rev = "72e9b8586ad93b37ff9063ccf3f5b471f934c264"; }; - vendorSha256 = "sha256-k5DtmpNi1ynSWgJ6b9EIlqCM6OlCkQf3Cf/daP+I7mY="; + vendorSha256 = "sha256-IKB8c6oxF5h88FdzUAmNA96BpNo/LIbwzuDCMFsdZNE="; nativeBuildInputs = [ packages.crytic-compile pkgs.solc diff --git a/pkg/state/state.go b/pkg/state/state.go index 85287a4..af93a42 100644 --- a/pkg/state/state.go +++ b/pkg/state/state.go @@ -39,32 +39,39 @@ type State struct { Jobs []Job `json:"jobs"` } +// Helper function to remove deleted jobs from the new state +func removeDeletedJobs(jobs []Job, deletedJobs map[int64]bool) []Job { + filteredJobs := jobs[:0] + for _, job := range jobs { + if !deletedJobs[job.ID] { + filteredJobs = append(filteredJobs, job) + } + } + return filteredJobs +} + //////////////////////////////////////// // Public Functions func GetState(config config.Config) (*State, error) { stateKey := "state/state.json" var state State - // Read the state.json object data stateData, err := s3.GetObject(config, stateKey) if err != nil { return nil, fmt.Errorf("Failed to read state data, make sure you've run 'cloudexec init': %w", err) } - // Unmarshal the state JSON data into a State struct err = json.Unmarshal(stateData, &state) if err != nil { return nil, fmt.Errorf("Failed to unmarshal state JSON: %w", err) } - // Replace empty names with a placeholder for i, job := range state.Jobs { if job.Name == "" { state.Jobs[i].Name = "no name" } } - return &state, nil } @@ -97,17 +104,6 @@ func MergeStates(existingState, 
newState *State) { newState.Jobs = removeDeletedJobs(newState.Jobs, deletedJobs) } -// Helper function to remove deleted jobs from the new state -func removeDeletedJobs(jobs []Job, deletedJobs map[int64]bool) []Job { - filteredJobs := jobs[:0] - for _, job := range jobs { - if !deletedJobs[job.ID] { - filteredJobs = append(filteredJobs, job) - } - } - return filteredJobs -} - func MergeAndSave(config config.Config, newState *State) error { // TODO: Handle locking to prevent concurrent updates stateKey := "state/state.json" From 6edb5422ea9e282148a4e62b627767ecea8f9171 Mon Sep 17 00:00:00 2001 From: bohendo Date: Fri, 15 Mar 2024 16:00:16 -0400 Subject: [PATCH 12/15] clean up example --- example/cloudexec.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/example/cloudexec.toml b/example/cloudexec.toml index 990940b..dfc65d3 100644 --- a/example/cloudexec.toml +++ b/example/cloudexec.toml @@ -14,4 +14,4 @@ fi ''' # This command is run after the setup script completes. -run = "medusa fuzz" +run = "medusa fuzz --no-color" From a406bb8c4553159dcbbaff8c95e57eb9d6f897af Mon Sep 17 00:00:00 2001 From: bohendo Date: Fri, 15 Mar 2024 17:08:04 -0400 Subject: [PATCH 13/15] s/CleanBucketJob/CleanJob/g --- cmd/cloudexec/clean.go | 6 +++--- cmd/cloudexec/main.go | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/cloudexec/clean.go b/cmd/cloudexec/clean.go index ecdd45b..fd7a772 100644 --- a/cmd/cloudexec/clean.go +++ b/cmd/cloudexec/clean.go @@ -11,7 +11,7 @@ import ( "github.com/crytic/cloudexec/pkg/state" ) -func CleanBucketJob(config config.Config, existingState *state.State, jobID int64, force bool) error { +func CleanJob(config config.Config, existingState *state.State, jobID int64, force bool) error { prefix := fmt.Sprintf("job-%v", jobID) objects, err := s3.ListObjects(config, prefix) if err != nil { @@ -61,13 +61,13 @@ func CleanBucketJob(config config.Config, existingState *state.State, jobID int6 return nil } -func CleanBucketAll(config config.Config, existingState *state.State, force bool) error { +func CleanAll(config config.Config, existingState *state.State, force bool) error { if len(existingState.Jobs) == 0 { log.Info("No jobs are available") return nil } for _, job := range existingState.Jobs { - err := CleanBucketJob(config, existingState, job.ID, force) + err := CleanJob(config, existingState, job.ID, force) if err != nil { return err } diff --git a/cmd/cloudexec/main.go b/cmd/cloudexec/main.go index af99fce..5947c13 100644 --- a/cmd/cloudexec/main.go +++ b/cmd/cloudexec/main.go @@ -386,7 +386,7 @@ func main() { return err } // Flag all job data for deletion - err = CleanBucketAll(config, existingState, force) + err = CleanAll(config, existingState, force) if err != nil { return err } @@ -402,7 +402,7 @@ func main() { return err } } - err = CleanBucketJob(config, existingState, jobID, force) + err = CleanJob(config, existingState, jobID, force) if err != nil { return err } @@ -475,7 +475,7 @@ func main() { } } // Clean this job's data out of the bucket - err = CleanBucketJob(config, existingState, jobID, force) + err = CleanJob(config, existingState, jobID, force) return err }, }, From 38a3e381620ea7b9bccf77026775fe21e72f721a Mon Sep 17 00:00:00 2001 From: bohendo Date: Fri, 15 Mar 2024 17:13:49 -0400 Subject: [PATCH 14/15] try to clean other jobs even if some fail --- cmd/cloudexec/cancel.go | 2 +- cmd/cloudexec/clean.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/cloudexec/cancel.go b/cmd/cloudexec/cancel.go 
index a16f4f9..b095219 100644 --- a/cmd/cloudexec/cancel.go +++ b/cmd/cloudexec/cancel.go @@ -54,7 +54,7 @@ func CancelAll(config config.Config, existingState *state.State, force bool) err } err = CancelJob(config, existingState, &job, force) if err != nil { - log.Warn("Failed to cancel job %v", job.ID) + log.Error("Failed to cancel job %v", job.ID) } } return nil diff --git a/cmd/cloudexec/clean.go b/cmd/cloudexec/clean.go index fd7a772..751cf41 100644 --- a/cmd/cloudexec/clean.go +++ b/cmd/cloudexec/clean.go @@ -69,7 +69,7 @@ func CleanAll(config config.Config, existingState *state.State, force bool) erro for _, job := range existingState.Jobs { err := CleanJob(config, existingState, job.ID, force) if err != nil { - return err + log.Error("Failed to clean job %v", job.ID) } } return nil From a68604810dbd6c47c013336c2bbedcc06a1899d2 Mon Sep 17 00:00:00 2001 From: bohendo Date: Fri, 15 Mar 2024 17:14:53 -0400 Subject: [PATCH 15/15] trunk fmt --- pkg/digitalocean/digitalocean.go | 2 +- pkg/log/log.go | 32 ++++++++++++++++---------------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/pkg/digitalocean/digitalocean.go b/pkg/digitalocean/digitalocean.go index 54c0e14..02f0abd 100644 --- a/pkg/digitalocean/digitalocean.go +++ b/pkg/digitalocean/digitalocean.go @@ -109,7 +109,7 @@ func CheckAuth(config config.Config) error { if err != nil { return fmt.Errorf("Failed to authenticate with DigitalOcean API: %w", err) } - log.Good("Successfully authenticated with DigitalOcean API") + log.Good("Successfully authenticated with DigitalOcean API") // Check Spaces authentication _, err = s3.ListBuckets(config) diff --git a/pkg/log/log.go b/pkg/log/log.go index 2d5be6a..c8d2db4 100644 --- a/pkg/log/log.go +++ b/pkg/log/log.go @@ -5,35 +5,35 @@ import ( ) const ( - ColorReset = "\033[0m" - ColorRed = "\033[31m" - ColorGreen = "\033[32m" - ColorBlue = "\033[34m" - ColorYellow = "\033[33m" - ColorWhite = "\033[37m" + ColorReset = "\033[0m" + ColorRed = "\033[31m" + ColorGreen = "\033[32m" + ColorBlue = "\033[34m" + ColorYellow = "\033[33m" + ColorWhite = "\033[37m" ) func Info(msg string, args ...interface{}) { - formatted := fmt.Sprintf(msg, args...) - fmt.Println(ColorWhite, formatted, ColorReset) + formatted := fmt.Sprintf(msg, args...) + fmt.Println(ColorWhite, formatted, ColorReset) } func Wait(msg string, args ...interface{}) { - formatted := fmt.Sprintf(msg, args...) + "..." - fmt.Println(ColorBlue, formatted, ColorReset) + formatted := fmt.Sprintf(msg, args...) + "..." + fmt.Println(ColorBlue, formatted, ColorReset) } func Good(msg string, args ...interface{}) { - formatted := fmt.Sprintf(msg, args...) - fmt.Println(ColorGreen, formatted, ColorReset) + formatted := fmt.Sprintf(msg, args...) + fmt.Println(ColorGreen, formatted, ColorReset) } func Warn(msg string, args ...interface{}) { - formatted := fmt.Sprintf(msg, args...) - fmt.Println(ColorYellow, formatted, ColorReset) + formatted := fmt.Sprintf(msg, args...) + fmt.Println(ColorYellow, formatted, ColorReset) } func Error(msg string, args ...interface{}) { - formatted := fmt.Sprintf(msg, args...) - fmt.Println(ColorRed, formatted, ColorReset) + formatted := fmt.Sprintf(msg, args...) + fmt.Println(ColorRed, formatted, ColorReset) }
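
For context, here is a minimal sketch of how a caller might use the pkg/log helpers added in this series. The function names, signatures, and colors come from pkg/log/log.go as shown in the final patch; the job ID, droplet size, region, IP, and message text below are illustrative placeholders rather than values taken from the patches.

package main

import (
	"github.com/crytic/cloudexec/pkg/log"
)

func main() {
	jobID := int64(42) // placeholder job ID, not from the patches

	// Wait prints in blue and appends "..." to mark an in-progress step.
	log.Wait("Creating new %s server in %s for job %d", "c-2", "nyc3", jobID)

	// Good prints in green once a step succeeds.
	log.Good("Server created with IP: %v", "203.0.113.10")

	// Info prints neutral follow-up instructions in white.
	log.Info("SSH to your server with: ssh cloudexec-%v", jobID)

	// Warn (yellow) and Error (red) only print; neither exits the program.
	log.Warn("Timer already running for job %v", jobID)
	log.Error("Failed to clean job %v", jobID)
}

Because none of these helpers terminate the process, loops such as CleanAll and CancelAll (patch 14) can report a per-job failure with log.Error and still continue with the remaining jobs.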