envbuilder.go
package envbuilder
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"io/fs"
"maps"
"net"
"net/http"
"os"
"os/exec"
"os/user"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/coder/envbuilder/buildinfo"
"github.com/coder/envbuilder/git"
"github.com/coder/envbuilder/options"
"github.com/go-git/go-billy/v5"
"github.com/GoogleContainerTools/kaniko/pkg/config"
"github.com/GoogleContainerTools/kaniko/pkg/creds"
"github.com/GoogleContainerTools/kaniko/pkg/executor"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/coder/envbuilder/devcontainer"
"github.com/coder/envbuilder/internal/ebutil"
"github.com/coder/envbuilder/internal/workingdir"
"github.com/coder/envbuilder/log"
"github.com/containerd/platforms"
"github.com/distribution/distribution/v3/configuration"
"github.com/distribution/distribution/v3/registry/handlers"
_ "github.com/distribution/distribution/v3/registry/storage/driver/filesystem"
dockerconfig "github.com/docker/cli/cli/config"
"github.com/docker/cli/cli/config/configfile"
"github.com/fatih/color"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/kballard/go-shellquote"
"github.com/mattn/go-isatty"
"github.com/sirupsen/logrus"
"github.com/tailscale/hujson"
"golang.org/x/xerrors"
)
// ErrNoFallbackImage is returned when no fallback image has been specified.
var ErrNoFallbackImage = errors.New("no fallback image has been specified")
// DockerConfig represents the Docker configuration file.
type DockerConfig = configfile.ConfigFile
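// runtimeDataStore collects state discovered or produced during the build.
// Fields tagged `json:"-"` live only for the lifetime of the process, while
// ContainerUser is persisted to the magic image file so it can be recovered
// when running a pre-built image.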
type runtimeDataStore struct {
// Runtime data.
Image bool `json:"-"`
Built bool `json:"-"`
SkippedRebuild bool `json:"-"`
Scripts devcontainer.LifecycleScripts `json:"-"`
ImageEnv []string `json:"-"`
ContainerEnv map[string]string `json:"-"`
RemoteEnv map[string]string `json:"-"`
DevcontainerPath string `json:"-"`
// Data stored in the magic image file.
ContainerUser string `json:"container_user"`
}
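// execArgsInfo holds everything Run needs to exec the init command after the
// build completes: the command, its arguments, the target user, and the final
// environment.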
type execArgsInfo struct {
InitCommand string
InitArgs []string
UserInfo userInfo
Environ []string
}
// Run runs the envbuilder.
// Logger is the log function to use for all operations.
// Filesystem is the filesystem to use for all operations.
// Defaults to the host filesystem.
// preExec are any functions that should be called before exec'ing the init
// command. This is useful for ensuring that defers get run.
func Run(ctx context.Context, opts options.Options, preExec ...func()) error {
var args execArgsInfo
// Run in a separate function to ensure all defers run before we
// setuid or exec.
err := run(ctx, opts, &args)
if err != nil {
return err
}
err = syscall.Setgid(args.UserInfo.gid)
if err != nil {
return fmt.Errorf("set gid: %w", err)
}
err = syscall.Setuid(args.UserInfo.uid)
if err != nil {
return fmt.Errorf("set uid: %w", err)
}
opts.Logger(log.LevelInfo, "=== Running init command as user %q: %q", args.UserInfo.user.Username, append([]string{opts.InitCommand}, args.InitArgs...))
for _, fn := range preExec {
fn()
}
err = syscall.Exec(args.InitCommand, append([]string{args.InitCommand}, args.InitArgs...), args.Environ)
if err != nil {
return fmt.Errorf("exec init script: %w", err)
}
return errors.New("exec failed")
}
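// run performs the actual build and fills in execArgs. It is kept separate
// from Run so that all deferred cleanup completes before Run calls setgid,
// setuid, and exec.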
func run(ctx context.Context, opts options.Options, execArgs *execArgsInfo) error {
defer options.UnsetEnv()
workingDir := workingdir.At(opts.WorkingDirBase)
stageNumber := 0
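// startStage logs the start of a numbered stage and returns a function that
// logs its completion along with the elapsed time.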
startStage := func(format string, args ...any) func(format string, args ...any) {
now := time.Now()
stageNumber++
stageNum := stageNumber
opts.Logger(log.LevelInfo, "#%d: %s", stageNum, fmt.Sprintf(format, args...))
return func(format string, args ...any) {
opts.Logger(log.LevelInfo, "#%d: %s [%s]", stageNum, fmt.Sprintf(format, args...), time.Since(now))
}
}
if opts.GetCachedImage {
return fmt.Errorf("developer error: use RunCacheProbe instead")
}
if opts.CacheRepo == "" && opts.PushImage {
return fmt.Errorf("--cache-repo must be set when using --push-image")
}
// Default to the shell.
execArgs.InitCommand = opts.InitCommand
execArgs.InitArgs = []string{"-c", opts.InitScript}
if opts.InitArgs != "" {
var err error
execArgs.InitArgs, err = shellquote.Split(opts.InitArgs)
if err != nil {
return fmt.Errorf("parse init args: %w", err)
}
}
opts.Logger(log.LevelInfo, "%s %s - Build development environments from repositories in a container", newColor(color.Bold).Sprintf("envbuilder"), buildinfo.Version())
cleanupDockerConfigOverride, err := initDockerConfigOverride(opts.Filesystem, opts.Logger, workingDir, opts.DockerConfigBase64)
if err != nil {
return err
}
defer func() {
if err := cleanupDockerConfigOverride(); err != nil {
opts.Logger(log.LevelError, "failed to cleanup docker config override: %w", err)
}
}() // best effort
runtimeData := runtimeDataStore{
ContainerEnv: make(map[string]string),
RemoteEnv: make(map[string]string),
}
if fileExists(opts.Filesystem, workingDir.Image()) {
opts.Logger(log.LevelInfo, "Found magic image file at %s", workingDir.Image())
if err = parseMagicImageFile(opts.Filesystem, workingDir.Image(), &runtimeData); err != nil {
return fmt.Errorf("parse magic image file: %w", err)
}
runtimeData.Image = true
// Some options are only applicable for builds.
if opts.RemoteRepoBuildMode {
opts.Logger(log.LevelDebug, "Ignoring %s option, it is not supported when using a pre-built image.", options.WithEnvPrefix("REMOTE_REPO_BUILD_MODE"))
opts.RemoteRepoBuildMode = false
}
if opts.ExportEnvFile != "" {
// Currently we can't support this as we don't have access to the
// post-build computed env vars to know which ones to export.
opts.Logger(log.LevelWarn, "Ignoring %s option, it is not supported when using a pre-built image.", options.WithEnvPrefix("EXPORT_ENV_FILE"))
opts.ExportEnvFile = ""
}
}
runtimeData.Built = fileExists(opts.Filesystem, workingDir.Built())
buildTimeWorkspaceFolder := opts.WorkspaceFolder
var fallbackErr error
var cloned bool
if opts.GitURL != "" {
endStage := startStage("📦 Cloning %s to %s...",
newColor(color.FgCyan).Sprintf(opts.GitURL),
newColor(color.FgCyan).Sprintf(opts.WorkspaceFolder),
)
stageNum := stageNumber
logStage := func(format string, args ...any) {
opts.Logger(log.LevelInfo, "#%d: %s", stageNum, fmt.Sprintf(format, args...))
}
cloneOpts, err := git.CloneOptionsFromOptions(logStage, opts)
if err != nil {
return fmt.Errorf("git clone options: %w", err)
}
w := git.ProgressWriter(logStage)
defer w.Close()
cloneOpts.Progress = w
cloned, fallbackErr = git.CloneRepo(ctx, logStage, cloneOpts)
if fallbackErr == nil {
if cloned {
endStage("📦 Cloned repository!")
} else {
endStage("📦 The repository already exists!")
}
} else {
opts.Logger(log.LevelError, "Failed to clone repository: %s", fallbackErr.Error())
if !runtimeData.Image {
opts.Logger(log.LevelError, "Falling back to the default image...")
}
}
_ = w.Close()
// In remote repo build mode, always clone the repo into a location that
// we control and that isn't affected by the user's changes.
if opts.RemoteRepoBuildMode {
cloneOpts, err := git.CloneOptionsFromOptions(logStage, opts)
if err != nil {
return fmt.Errorf("git clone options: %w", err)
}
cloneOpts.Path = workingDir.Join("repo")
endStage := startStage("📦 Remote repo build mode enabled, cloning %s to %s for build context...",
newColor(color.FgCyan).Sprintf(opts.GitURL),
newColor(color.FgCyan).Sprintf(cloneOpts.Path),
)
w := git.ProgressWriter(logStage)
defer w.Close()
cloneOpts.Progress = w
fallbackErr = git.ShallowCloneRepo(ctx, logStage, cloneOpts)
if fallbackErr == nil {
endStage("📦 Cloned repository!")
buildTimeWorkspaceFolder = cloneOpts.Path
} else {
opts.Logger(log.LevelError, "Failed to clone repository for remote repo mode: %s", fallbackErr.Error())
opts.Logger(log.LevelError, "Falling back to the default image...")
}
_ = w.Close()
}
}
if !runtimeData.Image {
defaultBuildParams := func() (*devcontainer.Compiled, error) {
dockerfile := workingDir.Join("Dockerfile")
file, err := opts.Filesystem.OpenFile(dockerfile, os.O_CREATE|os.O_WRONLY, 0o644)
if err != nil {
return nil, err
}
defer file.Close()
if opts.FallbackImage == "" {
if fallbackErr != nil {
return nil, xerrors.Errorf("%s: %w", fallbackErr.Error(), ErrNoFallbackImage)
}
// We can't use errors.Join here because our tests
// don't support parsing a multiline error.
return nil, ErrNoFallbackImage
}
content := "FROM " + opts.FallbackImage
_, err = file.Write([]byte(content))
if err != nil {
return nil, err
}
return &devcontainer.Compiled{
DockerfilePath: dockerfile,
DockerfileContent: content,
BuildContext: workingDir.Path(),
}, nil
}
var buildParams *devcontainer.Compiled
if opts.DockerfilePath == "" {
opts.Logger(log.LevelInfo, "No Dockerfile specified, looking for a devcontainer.json...")
// Only look for a devcontainer if a Dockerfile wasn't specified.
// devcontainer is a standard, so it's a reasonable default.
var devcontainerDir string
var err error
runtimeData.DevcontainerPath, devcontainerDir, err = findDevcontainerJSON(buildTimeWorkspaceFolder, opts)
if err != nil {
opts.Logger(log.LevelError, "Failed to locate devcontainer.json: %s", err.Error())
opts.Logger(log.LevelError, "Falling back to the default image...")
} else {
opts.Logger(log.LevelInfo, "Building in Devcontainer mode using %s", strings.TrimPrefix(runtimeData.DevcontainerPath, buildTimeWorkspaceFolder))
// We know a devcontainer exists.
// Let's parse it and use it!
file, err := opts.Filesystem.Open(runtimeData.DevcontainerPath)
if err != nil {
return fmt.Errorf("open devcontainer.json: %w", err)
}
defer file.Close()
content, err := io.ReadAll(file)
if err != nil {
return fmt.Errorf("read devcontainer.json: %w", err)
}
devContainer, err := devcontainer.Parse(content)
if err == nil {
var fallbackDockerfile string
if !devContainer.HasImage() && !devContainer.HasDockerfile() {
defaultParams, err := defaultBuildParams()
if err != nil {
return fmt.Errorf("no Dockerfile or image found: %w", err)
}
opts.Logger(log.LevelInfo, "No Dockerfile or image specified; falling back to the default image...")
fallbackDockerfile = defaultParams.DockerfilePath
}
buildParams, err = devContainer.Compile(opts.Filesystem, devcontainerDir, workingDir.Path(), fallbackDockerfile, opts.WorkspaceFolder, false, os.LookupEnv)
if err != nil {
return fmt.Errorf("compile devcontainer.json: %w", err)
}
if buildParams.User != "" {
runtimeData.ContainerUser = buildParams.User
}
runtimeData.Scripts = devContainer.LifecycleScripts
} else {
opts.Logger(log.LevelError, "Failed to parse devcontainer.json: %s", err.Error())
opts.Logger(log.LevelError, "Falling back to the default image...")
}
}
} else {
// If a Dockerfile was specified, we use that.
dockerfilePath := filepath.Join(buildTimeWorkspaceFolder, opts.DockerfilePath)
opts.Logger(log.LevelInfo, "Building in Dockerfile-only mode using %s", opts.DockerfilePath)
// If the dockerfilePath is specified and deeper than the base of WorkspaceFolder AND the BuildContextPath is
// not defined, show a warning
dockerfileDir := filepath.Dir(dockerfilePath)
if dockerfileDir != filepath.Clean(buildTimeWorkspaceFolder) && opts.BuildContextPath == "" {
opts.Logger(log.LevelWarn, "given dockerfile %q is below %q and no custom build context has been defined", dockerfilePath, buildTimeWorkspaceFolder)
opts.Logger(log.LevelWarn, "\t-> set BUILD_CONTEXT_PATH to %q to fix", dockerfileDir)
}
dockerfile, err := opts.Filesystem.Open(dockerfilePath)
if err == nil {
content, err := io.ReadAll(dockerfile)
if err != nil {
return fmt.Errorf("read Dockerfile: %w", err)
}
buildParams = &devcontainer.Compiled{
DockerfilePath: dockerfilePath,
DockerfileContent: string(content),
BuildContext: filepath.Join(buildTimeWorkspaceFolder, opts.BuildContextPath),
}
}
}
if buildParams == nil {
// If there isn't a devcontainer.json file in the repository,
// we fall back to whatever `FallbackImage` is set to.
var err error
buildParams, err = defaultBuildParams()
if err != nil {
return fmt.Errorf("no Dockerfile or devcontainer.json found: %w", err)
}
}
lvl := log.LevelInfo
if opts.Verbose {
lvl = log.LevelDebug
}
log.HijackLogrus(lvl, func(entry *logrus.Entry) {
for _, line := range strings.Split(entry.Message, "\r") {
opts.Logger(log.FromLogrus(entry.Level), "#%d: %s", stageNumber, color.HiBlackString(line))
}
})
if opts.LayerCacheDir != "" {
if opts.CacheRepo != "" {
opts.Logger(log.LevelWarn, "Overriding cache repo with local registry...")
}
localRegistry, closeLocalRegistry, err := serveLocalRegistry(ctx, opts.Logger, opts.LayerCacheDir)
if err != nil {
return err
}
defer closeLocalRegistry()
opts.CacheRepo = localRegistry
}
// IgnorePaths in the Kaniko opts doesn't properly ignore paths.
// So we add them to the default ignore list. See:
// https://github.com/GoogleContainerTools/kaniko/blob/63be4990ca5a60bdf06ddc4d10aa4eca0c0bc714/cmd/executor/cmd/root.go#L136
ignorePaths := append([]string{
workingDir.Path(),
opts.WorkspaceFolder,
// See: https://github.com/coder/envbuilder/issues/37
"/etc/resolv.conf",
}, opts.IgnorePaths...)
if opts.LayerCacheDir != "" {
ignorePaths = append(ignorePaths, opts.LayerCacheDir)
}
for _, ignorePath := range ignorePaths {
util.AddToDefaultIgnoreList(util.IgnoreListEntry{
Path: ignorePath,
PrefixMatchOnly: false,
AllowedPaths: nil,
})
}
// In order to allow 'resuming' envbuilder, embed the binary into the image
// if it is being pushed.
// As these files will be owned by root, it is considerate to clean up
// after we're done!
cleanupBuildContext := func() {}
if opts.PushImage {
// Add exceptions in Kaniko's ignorelist for these magic files we add.
if err := util.AddAllowedPathToDefaultIgnoreList(opts.BinaryPath); err != nil {
return fmt.Errorf("add envbuilder binary to ignore list: %w", err)
}
if err := util.AddAllowedPathToDefaultIgnoreList(workingDir.Image()); err != nil {
return fmt.Errorf("add magic image file to ignore list: %w", err)
}
if err := util.AddAllowedPathToDefaultIgnoreList(workingDir.Features()); err != nil {
return fmt.Errorf("add features to ignore list: %w", err)
}
magicTempDir := workingdir.At(buildParams.BuildContext, workingdir.TempDir)
if err := opts.Filesystem.MkdirAll(magicTempDir.Path(), 0o755); err != nil {
return fmt.Errorf("create magic temp dir in build context: %w", err)
}
// Add the magic directives that embed the binary into the built image.
buildParams.DockerfileContent += workingdir.Directives
envbuilderBinDest := filepath.Join(magicTempDir.Path(), "envbuilder")
magicImageDest := magicTempDir.Image()
// Clean up after build!
var cleanupOnce sync.Once
cleanupBuildContext = func() {
cleanupOnce.Do(func() {
for _, path := range []string{magicImageDest, envbuilderBinDest, magicTempDir.Path()} {
if err := opts.Filesystem.Remove(path); err != nil {
opts.Logger(log.LevelWarn, "failed to clean up magic temp dir from build context: %w", err)
}
}
})
}
defer cleanupBuildContext()
// Copy the envbuilder binary into the build context. External callers
// will need to specify the path to the desired envbuilder binary.
opts.Logger(log.LevelDebug, "copying envbuilder binary at %q to build context %q", opts.BinaryPath, envbuilderBinDest)
if err := copyFile(opts.Filesystem, opts.BinaryPath, envbuilderBinDest, 0o755); err != nil {
return fmt.Errorf("copy envbuilder binary to build context: %w", err)
}
// Also write the magic file that signifies the image has been built.
// Since the user in the image is set to root, we also store the user
// in the magic file to be used by envbuilder when the image is run.
opts.Logger(log.LevelDebug, "writing magic image file at %q in build context %q", magicImageDest, magicTempDir)
if err := writeMagicImageFile(opts.Filesystem, magicImageDest, runtimeData); err != nil {
return fmt.Errorf("write magic image file in build context: %w", err)
}
}
// Temporarily move all read-only mounts out of the way; they are restored after the build.
tempRemountDest := workingDir.Join("mnt")
// ignorePrefixes is a superset of ignorePaths that we pass to kaniko's
// IgnoreList.
ignorePrefixes := append([]string{"/dev", "/proc", "/sys"}, ignorePaths...)
restoreMounts, err := ebutil.TempRemount(opts.Logger, tempRemountDest, ignorePrefixes...)
defer func() { // restoreMounts should never be nil
if err := restoreMounts(); err != nil {
opts.Logger(log.LevelError, "restore mounts: %s", err.Error())
}
}()
if err != nil {
return fmt.Errorf("temp remount: %w", err)
}
stdoutWriter, closeStdout := log.Writer(opts.Logger)
defer closeStdout()
stderrWriter, closeStderr := log.Writer(opts.Logger)
defer closeStderr()
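// build invokes Kaniko with the compiled build parameters. If a previous
// build is recorded and SkipRebuild is set, it resolves the image from the
// Dockerfile instead of rebuilding, and it pushes the result when PushImage
// is enabled.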
build := func() (v1.Image, error) {
defer cleanupBuildContext()
if runtimeData.Built && opts.SkipRebuild {
endStage := startStage("🏗️ Skipping build because of cache...")
imageRef, err := devcontainer.ImageFromDockerfile(buildParams.DockerfileContent)
if err != nil {
return nil, fmt.Errorf("image from dockerfile: %w", err)
}
image, err := remote.Image(imageRef, remote.WithAuthFromKeychain(creds.GetKeychain()))
if err != nil {
return nil, fmt.Errorf("image from remote: %w", err)
}
endStage("🏗️ Found image from remote!")
runtimeData.Built = false
runtimeData.SkippedRebuild = true
return image, nil
}
// This is required for deleting the filesystem prior to build!
err = util.InitIgnoreList()
if err != nil {
return nil, fmt.Errorf("init ignore list: %w", err)
}
// It's possible that the container will already have files in it, and
// we don't want to merge a new container with the old one.
if err := maybeDeleteFilesystem(opts.Logger, opts.ForceSafe); err != nil {
return nil, fmt.Errorf("delete filesystem: %w", err)
}
cacheTTL := time.Hour * 24 * 7
if opts.CacheTTLDays != 0 {
cacheTTL = time.Hour * 24 * time.Duration(opts.CacheTTLDays)
}
// At this point we have all the context, we can now build!
registryMirror := []string{}
if val, ok := os.LookupEnv("KANIKO_REGISTRY_MIRROR"); ok {
registryMirror = strings.Split(val, ";")
}
var destinations []string
if opts.CacheRepo != "" {
destinations = append(destinations, opts.CacheRepo)
}
kOpts := &config.KanikoOptions{
// Boilerplate!
CustomPlatform: platforms.Format(platforms.Normalize(platforms.DefaultSpec())),
SnapshotMode: "redo",
RunV2: true,
RunStdout: stdoutWriter,
RunStderr: stderrWriter,
Destinations: destinations,
NoPush: !opts.PushImage || len(destinations) == 0,
CacheRunLayers: true,
CacheCopyLayers: true,
ForceBuildMetadata: opts.PushImage, // Force layers with no changes to be cached, required for cache probing.
CompressedCaching: true,
Compression: config.ZStd,
// Maps to "default" level, ~100-300 MB/sec according to
// benchmarks in klauspost/compress README
// https://github.com/klauspost/compress/blob/67a538e2b4df11f8ec7139388838a13bce84b5d5/zstd/encoder_options.go#L188
CompressionLevel: 3,
CacheOptions: config.CacheOptions{
CacheTTL: cacheTTL,
CacheDir: opts.BaseImageCacheDir,
},
ForceUnpack: true,
BuildArgs: buildParams.BuildArgs,
BuildSecrets: opts.BuildSecrets,
CacheRepo: opts.CacheRepo,
Cache: opts.CacheRepo != "" || opts.BaseImageCacheDir != "",
DockerfilePath: buildParams.DockerfilePath,
DockerfileContent: buildParams.DockerfileContent,
RegistryOptions: config.RegistryOptions{
Insecure: opts.Insecure,
InsecurePull: opts.Insecure,
SkipTLSVerify: opts.Insecure,
// Enables registry mirror features in Kaniko, see more in link below
// https://github.com/GoogleContainerTools/kaniko?tab=readme-ov-file#flag---registry-mirror
// Related to PR #114
// https://github.com/coder/envbuilder/pull/114
RegistryMirrors: registryMirror,
},
SrcContext: buildParams.BuildContext,
// For cached image utilization, produce reproducible builds.
Reproducible: opts.PushImage,
}
endStage := startStage("🏗️ Building image...")
image, err := executor.DoBuild(kOpts)
if err != nil {
return nil, xerrors.Errorf("do build: %w", err)
}
endStage("🏗️ Built image!")
if opts.PushImage {
endStage = startStage("🏗️ Pushing image...")
// To debug registry issues, enable logging:
//
// import (
// stdlog "log"
// reglogs "github.com/google/go-containerregistry/pkg/logs"
// )
// reglogs.Debug = stdlog.New(os.Stderr, "", 0)
// reglogs.Warn = stdlog.New(os.Stderr, "", 0)
// reglogs.Progress = stdlog.New(os.Stderr, "", 0)
if err := executor.DoPush(image, kOpts); err == nil {
endStage("🏗️ Pushed image!")
} else if !opts.ExitOnPushFailure {
endStage("⚠️️ Failed to push image!")
} else {
return nil, xerrors.Errorf("do push: %w", err)
}
}
return image, err
}
// At this point we have all the context, we can now build!
image, err := build()
if err != nil {
fallback := false
switch {
case strings.Contains(err.Error(), "parsing dockerfile"):
fallback = true
fallbackErr = err
case strings.Contains(err.Error(), "error building stage"):
fallback = true
fallbackErr = err
// This occurs when the image cannot be found!
case strings.Contains(err.Error(), "authentication required"):
fallback = true
fallbackErr = err
// This occurs from Docker Hub when the image cannot be found!
case strings.Contains(err.Error(), "manifest unknown"):
fallback = true
fallbackErr = err
case strings.Contains(err.Error(), "unexpected status code 401 Unauthorized"):
opts.Logger(log.LevelError, "Unable to pull the provided image. Ensure your registry credentials are correct!")
}
if !fallback || opts.ExitOnBuildFailure {
return err
}
opts.Logger(log.LevelError, "Failed to build: %s", err)
opts.Logger(log.LevelError, "Falling back to the default image...")
buildParams, err = defaultBuildParams()
if err != nil {
return err
}
image, err = build()
}
if err != nil {
return fmt.Errorf("build with kaniko: %w", err)
}
if err := restoreMounts(); err != nil {
return fmt.Errorf("restore mounts: %w", err)
}
configFile, err := image.ConfigFile()
if err != nil {
return fmt.Errorf("get image config: %w", err)
}
runtimeData.ImageEnv = configFile.Config.Env
// Dev Container metadata can be persisted through a standard label.
// Note that this currently only works when we're building the image,
// not when we're using a pre-built image as we don't have access to
// labels.
devContainerMetadata, exists := configFile.Config.Labels["devcontainer.metadata"]
if exists {
var devContainer []*devcontainer.Spec
devContainerMetadataBytes, err := hujson.Standardize([]byte(devContainerMetadata))
if err != nil {
return fmt.Errorf("humanize json for dev container metadata: %w", err)
}
err = json.Unmarshal(devContainerMetadataBytes, &devContainer)
if err != nil {
return fmt.Errorf("unmarshal metadata: %w", err)
}
opts.Logger(log.LevelInfo, "#%d: 👀 Found devcontainer.json label metadata in image...", stageNumber)
for _, container := range devContainer {
if container.ContainerUser != "" {
opts.Logger(log.LevelInfo, "#%d: 🧑 Updating the user to %q!", stageNumber, container.ContainerUser)
configFile.Config.User = container.ContainerUser
}
maps.Copy(runtimeData.ContainerEnv, container.ContainerEnv)
maps.Copy(runtimeData.RemoteEnv, container.RemoteEnv)
if !container.OnCreateCommand.IsEmpty() {
runtimeData.Scripts.OnCreateCommand = container.OnCreateCommand
}
if !container.UpdateContentCommand.IsEmpty() {
runtimeData.Scripts.UpdateContentCommand = container.UpdateContentCommand
}
if !container.PostCreateCommand.IsEmpty() {
runtimeData.Scripts.PostCreateCommand = container.PostCreateCommand
}
if !container.PostStartCommand.IsEmpty() {
runtimeData.Scripts.PostStartCommand = container.PostStartCommand
}
}
}
maps.Copy(runtimeData.ContainerEnv, buildParams.ContainerEnv)
maps.Copy(runtimeData.RemoteEnv, buildParams.RemoteEnv)
if runtimeData.ContainerUser == "" && configFile.Config.User != "" {
runtimeData.ContainerUser = configFile.Config.User
}
} else {
runtimeData.DevcontainerPath, _, err = findDevcontainerJSON(opts.WorkspaceFolder, opts)
if err == nil {
file, err := opts.Filesystem.Open(runtimeData.DevcontainerPath)
if err != nil {
return fmt.Errorf("open devcontainer.json: %w", err)
}
defer file.Close()
content, err := io.ReadAll(file)
if err != nil {
return fmt.Errorf("read devcontainer.json: %w", err)
}
devContainer, err := devcontainer.Parse(content)
if err == nil {
maps.Copy(runtimeData.ContainerEnv, devContainer.ContainerEnv)
maps.Copy(runtimeData.RemoteEnv, devContainer.RemoteEnv)
if devContainer.ContainerUser != "" {
runtimeData.ContainerUser = devContainer.ContainerUser
}
runtimeData.Scripts = devContainer.LifecycleScripts
} else {
opts.Logger(log.LevelError, "Failed to parse devcontainer.json: %s", err.Error())
}
}
}
// Sanitize the environment of any opts!
options.UnsetEnv()
// Remove the Docker config secret file!
if err := cleanupDockerConfigOverride(); err != nil {
return err
}
// Set the environment from /etc/environment first, so it can be
// overridden by the image and devcontainer settings.
err = setEnvFromEtcEnvironment(opts.Logger)
if err != nil {
return fmt.Errorf("set env from /etc/environment: %w", err)
}
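// Track every environment variable key we set so the full set can be written
// to the export env file later.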
allEnvKeys := make(map[string]struct{})
// It must be set in this parent process otherwise nothing will be found!
for _, env := range runtimeData.ImageEnv {
pair := strings.SplitN(env, "=", 2)
os.Setenv(pair[0], pair[1])
allEnvKeys[pair[0]] = struct{}{}
}
// Set Envbuilder runtime markers
runtimeData.ContainerEnv["ENVBUILDER"] = "true"
if runtimeData.DevcontainerPath != "" {
runtimeData.ContainerEnv["DEVCONTAINER"] = "true"
runtimeData.ContainerEnv["DEVCONTAINER_CONFIG"] = runtimeData.DevcontainerPath
}
for _, env := range []map[string]string{runtimeData.ContainerEnv, runtimeData.RemoteEnv} {
envKeys := make([]string, 0, len(env))
for key := range env {
envKeys = append(envKeys, key)
allEnvKeys[key] = struct{}{}
}
sort.Strings(envKeys)
for _, envVar := range envKeys {
value := devcontainer.SubstituteVars(env[envVar], opts.WorkspaceFolder, os.LookupEnv)
os.Setenv(envVar, value)
}
}
// Do not export env if we skipped a rebuild, because ENV directives
// from the Dockerfile would not have been processed and we'd miss these
// in the export. We should have generated a complete set of environment
// on the initial build, so exporting environment variables a second time
// isn't useful anyway.
if opts.ExportEnvFile != "" && !runtimeData.SkippedRebuild {
exportEnvFile, err := opts.Filesystem.Create(opts.ExportEnvFile)
if err != nil {
return fmt.Errorf("failed to open %s %q: %w", options.WithEnvPrefix("EXPORT_ENV_FILE"), opts.ExportEnvFile, err)
}
envKeys := make([]string, 0, len(allEnvKeys))
for key := range allEnvKeys {
envKeys = append(envKeys, key)
}
sort.Strings(envKeys)
for _, key := range envKeys {
fmt.Fprintf(exportEnvFile, "%s=%s\n", key, os.Getenv(key))
}
exportEnvFile.Close()
}
if runtimeData.ContainerUser == "" {
opts.Logger(log.LevelWarn, "#%d: no user specified, using root", stageNumber)
}
execArgs.UserInfo, err = getUser(runtimeData.ContainerUser)
if err != nil {
return fmt.Errorf("update user: %w", err)
}
// We only need to do this if we cloned!
// Git doesn't store file permissions as part of the repository.
if cloned {
endStage := startStage("🔄 Updating the ownership of the workspace...")
// By default, we clone the Git repository into the workspace folder.
// It will have root permissions, because that's the user that built it.
//
// We need to change the ownership of the files to the user that will
// be running the init script.
if chownErr := filepath.Walk(opts.WorkspaceFolder, func(path string, _ os.FileInfo, err error) error {
if err != nil {
return err
}
return os.Chown(path, execArgs.UserInfo.uid, execArgs.UserInfo.gid)
}); chownErr != nil {
opts.Logger(log.LevelError, "chown %q: %s", execArgs.UserInfo.user.HomeDir, chownErr.Error())
endStage("⚠️ Failed to the ownership of the workspace, you may need to fix this manually!")
} else {
endStage("👤 Updated the ownership of the workspace!")
}
}
// We may also need to update the ownership of the user homedir.
// Skip this step if the user is root.
if execArgs.UserInfo.uid != 0 {
endStage := startStage("🔄 Updating ownership of %s...", execArgs.UserInfo.user.HomeDir)
if chownErr := filepath.Walk(execArgs.UserInfo.user.HomeDir, func(path string, _ fs.FileInfo, err error) error {
if err != nil {
return err
}
return os.Chown(path, execArgs.UserInfo.uid, execArgs.UserInfo.gid)
}); chownErr != nil {
opts.Logger(log.LevelError, "chown %q: %s", execArgs.UserInfo.user.HomeDir, chownErr.Error())
endStage("⚠️ Failed to update ownership of %s, you may need to fix this manually!", execArgs.UserInfo.user.HomeDir)
} else {
endStage("🏡 Updated ownership of %s!", execArgs.UserInfo.user.HomeDir)
}
}
err = opts.Filesystem.MkdirAll(opts.WorkspaceFolder, 0o755)
if err != nil {
return fmt.Errorf("create workspace folder: %w", err)
}
err = os.Chdir(opts.WorkspaceFolder)
if err != nil {
return fmt.Errorf("change directory: %w", err)
}
// This is called before the Setuid to TARGET_USER because we want the
// lifecycle scripts to run using the default user for the container,
// rather than the user specified for running the init command. For
// example, TARGET_USER may be set to root in the case where we will
// exec systemd as the init command, but that doesn't mean we should
// run the lifecycle scripts as root.
os.Setenv("HOME", execArgs.UserInfo.user.HomeDir)
if err := execLifecycleScripts(ctx, opts, runtimeData.Scripts, !runtimeData.Built, execArgs.UserInfo); err != nil {
return err
}
// Create the magic file to indicate that this build
// has already been run!
if !runtimeData.Built {
file, err := opts.Filesystem.Create(workingDir.Built())
if err != nil {
return fmt.Errorf("create magic file: %w", err)
}
_ = file.Close()
}
// The setup script can specify a custom initialization command
// and arguments to run instead of the default shell.
//
// This is useful for hooking a specific init process into the
// environment to run as PID 1.
if opts.SetupScript != "" {
// We execute the initialize script as the root user!
os.Setenv("HOME", "/root")
opts.Logger(log.LevelInfo, "=== Running the setup command %q as the root user...", opts.SetupScript)
envKey := "ENVBUILDER_ENV"
envFile := workingDir.Join("environ")
file, err := opts.Filesystem.Create(envFile)
if err != nil {
return fmt.Errorf("create environ file: %w", err)
}
_ = file.Close()
cmd := exec.CommandContext(ctx, "/bin/sh", "-c", opts.SetupScript)
cmd.Env = append(os.Environ(),
fmt.Sprintf("%s=%s", envKey, envFile),
fmt.Sprintf("TARGET_USER=%s", execArgs.UserInfo.user.Username),
)
cmd.Dir = opts.WorkspaceFolder
// This allows for a really nice and clean experience to experiment with!
// e.g. docker run -it --rm -e INIT_SCRIPT bash ...
if isatty.IsTerminal(os.Stdout.Fd()) && isatty.IsTerminal(os.Stdin.Fd()) {
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Stdin = os.Stdin
} else {
cmd.Stdout = newWriteLogger(opts.Logger, log.LevelInfo)
cmd.Stderr = newWriteLogger(opts.Logger, log.LevelError)
}
err = cmd.Run()
if err != nil {
return fmt.Errorf("run setup script: %w", err)
}
environ, err := os.ReadFile(envFile)
if errors.Is(err, os.ErrNotExist) {
err = nil
environ = []byte{}
}
if err != nil {
return fmt.Errorf("read environ: %w", err)
}
updatedCommand := false
updatedArgs := false
for _, env := range strings.Split(string(environ), "\n") {
pair := strings.SplitN(env, "=", 2)
if len(pair) != 2 {
continue
}
key := pair[0]
switch key {
case "INIT_COMMAND":
execArgs.InitCommand = pair[1]
updatedCommand = true
case "INIT_ARGS":
execArgs.InitArgs, err = shellquote.Split(pair[1])
if err != nil {
return fmt.Errorf("split init args: %w", err)
}
updatedArgs = true
case "TARGET_USER":
execArgs.UserInfo, err = getUser(pair[1])
if err != nil {
return fmt.Errorf("update user: %w", err)
}
default:
return fmt.Errorf("unknown environ key %q", key)
}
}
if updatedCommand && !updatedArgs {
// Because our default is a shell, we need to empty the args
// if the command was updated. This is a tragic hack, but it works.
execArgs.InitArgs = []string{}
}
}
// Hop into the user that should execute the initialize script!
os.Setenv("HOME", execArgs.UserInfo.user.HomeDir)
// Set last to ensure all environment changes are complete.
execArgs.Environ = os.Environ()
return nil
}
// RunCacheProbe performs a 'dry-run' build of the image and checks that
// all of the resulting layers are present in options.CacheRepo.
func RunCacheProbe(ctx context.Context, opts options.Options) (v1.Image, error) {
defer options.UnsetEnv()
if !opts.GetCachedImage {
return nil, fmt.Errorf("developer error: RunCacheProbe must be run with --get-cached-image")
}
if opts.CacheRepo == "" {
return nil, fmt.Errorf("--cache-repo must be set when using --get-cached-image")
}
workingDir := workingdir.At(opts.WorkingDirBase)
stageNumber := 0
startStage := func(format string, args ...any) func(format string, args ...any) {
now := time.Now()
stageNumber++
stageNum := stageNumber
opts.Logger(log.LevelInfo, "#%d: %s", stageNum, fmt.Sprintf(format, args...))
return func(format string, args ...any) {
opts.Logger(log.LevelInfo, "#%d: %s [%s]", stageNum, fmt.Sprintf(format, args...), time.Since(now))
}
}
opts.Logger(log.LevelInfo, "%s %s - Build development environments from repositories in a container", newColor(color.Bold).Sprintf("envbuilder"), buildinfo.Version())
cleanupDockerConfigOverride, err := initDockerConfigOverride(opts.Filesystem, opts.Logger, workingDir, opts.DockerConfigBase64)
if err != nil {
return nil, err
}
defer func() {
if err := cleanupDockerConfigOverride(); err != nil {
opts.Logger(log.LevelError, "failed to cleanup docker config override: %w", err)
}
}() // best effort