diff --git a/.gitignore b/.gitignore
index 37757e17..a71f632c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,6 @@
+.minio/
 result*
 .decrypted~keys.yaml
 .storage
 .direnv
+zosVmDir**
diff --git a/flake.lock b/flake.lock
index 510d52a7..87c4a71b 100644
--- a/flake.lock
+++ b/flake.lock
@@ -374,6 +374,23 @@
         "type": "github"
       }
     },
+    "flake-utils_4": {
+      "inputs": {
+        "systems": "systems_4"
+      },
+      "locked": {
+        "lastModified": 1709126324,
+        "narHash": "sha256-q6EQdSeUZOG26WelxqkmR7kArjgWCdw5sfJVHPH/7j8=",
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "rev": "d465f4819400de7c8d874d50b982301f28a84605",
+        "type": "github"
+      },
+      "original": {
+        "id": "flake-utils",
+        "type": "indirect"
+      }
+    },
     "gitignore": {
       "inputs": {
         "nixpkgs": [
@@ -694,6 +711,21 @@
         "type": "github"
       }
     },
+    "nixlib": {
+      "locked": {
+        "lastModified": 1711241261,
+        "narHash": "sha256-knrTvpl81yGFHIpm1SsLDApe0thFkw1cl3ISAMPmP/0=",
+        "owner": "nix-community",
+        "repo": "nixpkgs.lib",
+        "rev": "b2a1eeef8c185f6bd27432b053ff09d773244cbc",
+        "type": "github"
+      },
+      "original": {
+        "owner": "nix-community",
+        "repo": "nixpkgs.lib",
+        "type": "github"
+      }
+    },
     "nixos-2305": {
       "locked": {
         "lastModified": 1686478675,
@@ -735,6 +767,27 @@
         "type": "github"
       }
     },
+    "nixos-generators": {
+      "inputs": {
+        "nixlib": "nixlib",
+        "nixpkgs": [
+          "nixpkgs"
+        ]
+      },
+      "locked": {
+        "lastModified": 1711327729,
+        "narHash": "sha256-RzOXI1kBlK7HkeZfRjUnsBUJEmlMYgLEG7CziZN0lgs=",
+        "owner": "nix-community",
+        "repo": "nixos-generators",
+        "rev": "d3e8145766dad6b47f6e37ce28731a05144dec26",
+        "type": "github"
+      },
+      "original": {
+        "owner": "nix-community",
+        "repo": "nixos-generators",
+        "type": "github"
+      }
+    },
     "nixos-images": {
       "inputs": {
         "nixos-2305": [
@@ -1031,6 +1084,7 @@
         "keys_zippy": "keys_zippy",
         "microvm": "microvm",
         "nixos-anywhere": "nixos-anywhere",
+        "nixos-generators": "nixos-generators",
         "nixpkgs": [
           "nixpkgs-23-11"
         ],
@@ -1040,6 +1094,7 @@
         "nixpkgsUnstable": "nixpkgsUnstable",
         "sops-nix": "sops-nix",
         "srvos": "srvos",
+        "threefold-rfs": "threefold-rfs",
         "tx5": "tx5"
       }
     },
@@ -1168,6 +1223,46 @@
         "type": "github"
       }
     },
+    "systems_4": {
+      "locked": {
+        "lastModified": 1681028828,
+        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+        "owner": "nix-systems",
+        "repo": "default",
+        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+        "type": "github"
+      },
+      "original": {
+        "owner": "nix-systems",
+        "repo": "default",
+        "type": "github"
+      }
+    },
+    "threefold-rfs": {
+      "inputs": {
+        "crane": [
+          "crane"
+        ],
+        "flake-utils": "flake-utils_4",
+        "nixpkgs": [
+          "nixpkgs"
+        ]
+      },
+      "locked": {
+        "lastModified": 1710422063,
+        "narHash": "sha256-Jd+W673itGqNzaa7Sbw+BzyWdYrZElDFXYkAjAiJQ/w=",
+        "owner": "steveej-forks",
+        "repo": "threefold-rfs",
+        "rev": "4ef140bb4d054d3b8298353b9f9fb68d571788b2",
+        "type": "github"
+      },
+      "original": {
+        "owner": "steveej-forks",
+        "ref": "sqlite-locking-issue",
+        "repo": "threefold-rfs",
+        "type": "github"
+      }
+    },
     "treefmt-nix": {
       "inputs": {
         "nixpkgs": [
diff --git a/flake.nix b/flake.nix
index 70799c88..bd452917 100644
--- a/flake.nix
+++ b/flake.nix
@@ -93,6 +93,17 @@
       flake = false;
       url = "github:steveej-forks/coturn/debug-cli-login";
     };
+
+    nixos-generators = {
+      url = "github:nix-community/nixos-generators";
+      inputs.nixpkgs.follows = "nixpkgs";
+    };
+
+    threefold-rfs = {
+      url = "github:steveej-forks/threefold-rfs/sqlite-locking-issue";
+      inputs.nixpkgs.follows = "nixpkgs";
+      inputs.crane.follows = "crane";
+    };
   };

   outputs = inputs @ {
@@ -113,6 +124,7 @@
         self',
         inputs',
         pkgs,
+        lib,
         ...
       }: {
         # Per-system attributes can be defined here. The self' and inputs'
@@ -124,40 +136,52 @@
           nomadClientCert = ./secrets/nomad/cli/global-cli-nomad.pem;
         in
           pkgs.mkShell {
-            packages = [
-              pkgs.yq-go
-
-              inputs'.nixos-anywhere.packages.default
-
-              inputs'.sops-nix.packages.default
-              pkgs.ssh-to-age
-              pkgs.age
-              pkgs.age-plugin-yubikey
-              pkgs.sops
-
-              self'.packages.nomad
-
-              (pkgs.writeShellScriptBin "nomad-ui-proxy" (let
-                caddyfile = pkgs.writeText "caddyfile" ''
-                  {
-                    auto_https off
-                    http_port 2016
-                  }
-
-                  localhost:2016 {
-                    reverse_proxy ${nomadAddr} {
-                      transport http {
-                        tls_trusted_ca_certs ${nomadCaCert}
-                        tls_client_auth ${nomadClientCert} {$NOMAD_CLIENT_KEY}
+            packages =
+              [
+                pkgs.yq-go
+
+                inputs'.nixos-anywhere.packages.default
+
+                inputs'.sops-nix.packages.default
+                pkgs.ssh-to-age
+                pkgs.age
+                pkgs.age-plugin-yubikey
+                pkgs.sops
+
+                # self'.packages.nomad
+
+                (pkgs.writeShellScriptBin "nomad-ui-proxy" (let
+                  caddyfile = pkgs.writeText "caddyfile" ''
+                    {
+                      auto_https off
+                      http_port 2016
+                    }
+
+                    localhost:2016 {
+                      reverse_proxy ${nomadAddr} {
+                        transport http {
+                          tls_trusted_ca_certs ${nomadCaCert}
+                          tls_client_auth ${nomadClientCert} {$NOMAD_CLIENT_KEY}
+                        }
                       }
                     }
-                  }
-                '';
-                in ''
-                  ${pkgs.caddy}/bin/caddy run --adapter caddyfile --config ${caddyfile}
-                ''))
-                pkgs.caddy
-              ];
+                  '';
+                  in ''
+                    ${pkgs.caddy}/bin/caddy run --adapter caddyfile --config ${caddyfile}
+                  ''))
+                pkgs.caddy
+
+                inputs'.threefold-rfs.packages.default
+              ]
+              ++ (
+                let
+                  zosCmds = builtins.filter (pkg: null != (builtins.match "^zos-.*" pkg.name)) (builtins.attrValues self'.packages);
+                in
+                  zosCmds
+                  ++ (lib.lists.flatten (builtins.map (cmd: cmd.nativeBuildInputs or []) zosCmds))
+                  ++ (lib.lists.flatten (builtins.map (cmd: cmd.buildInputs or []) zosCmds))
+                  ++ (lib.lists.flatten (builtins.map (cmd: cmd.runtimeInputs or []) zosCmds))
+              );

             NOMAD_ADDR = nomadAddr;
             NOMAD_CACERT = nomadCaCert;
diff --git a/lib/make-system-directory.nix b/lib/make-system-directory.nix
new file mode 100644
index 00000000..c87b095d
--- /dev/null
+++ b/lib/make-system-directory.nix
@@ -0,0 +1,47 @@
+{ stdenv
+, closureInfo
+, pixz
+
+, # The files and directories to be placed in the directory.
+  # This is a list of attribute sets {source, target} where `source'
+  # is the file system object (regular file or directory) to be
+  # grafted in the file system at path `target'.
+  contents
+
+, # In addition to `contents', the closure of the store paths listed
+  # in `storeContents' is also placed in the Nix store of the output
+  # directory. This is a list of attribute sets {object, symlink} where
+  # `object' is a store path whose closure will be copied, and `symlink'
+  # is a symlink to `object' that will be added to the directory.
+  storeContents ? [ ]
+
+  # Extra commands to be executed before the output directory is finalized
+, extraCommands ? ""
+
+  # extra inputs
+, extraInputs ? [ ]
+}:
+
+let
+  symlinks = map (x: x.symlink) storeContents;
+  objects = map (x: x.object) storeContents;
+in
+
+stdenv.mkDerivation {
+  name = "system-directory";
+  builder = ./make-system-directory.sh;
+  nativeBuildInputs = extraInputs;
+
+  inherit extraCommands;
+
+  # !!! should use XML.
+  sources = map (x: x.source) contents;
+  targets = map (x: x.target) contents;
+
+  # !!! should use XML.
+  inherit symlinks objects;
+
+  closureInfo = closureInfo {
+    rootPaths = objects;
+  };
+}
diff --git a/lib/make-system-directory.sh b/lib/make-system-directory.sh
new file mode 100644
index 00000000..a156fa14
--- /dev/null
+++ b/lib/make-system-directory.sh
@@ -0,0 +1,53 @@
+source $stdenv/setup
+
+sources_=($sources)
+targets_=($targets)
+
+objects=($objects)
+symlinks=($symlinks)
+
+# Remove the initial slash from a path so it can be copied to a relative path under the build root.
+stripSlash() {
+  res="$1"
+  if test "${res:0:1}" = /; then res=${res:1}; fi
+}
+
+# Add the individual files.
+for ((i = 0; i < ${#targets_[@]}; i++)); do
+  stripSlash "${targets_[$i]}"
+  mkdir -p "$(dirname "$res")"
+  cp -a "${sources_[$i]}" "$res"
+done
+
+# Add the closures of the top-level store objects.
+chmod +w .
+mkdir -p nix/store
+for i in $(<$closureInfo/store-paths); do
+  cp -a "$i" "${i:1}"
+done
+
+# TODO tar ruxo
+# Also include a manifest of the closures in a format suitable for
+# nix-store --load-db.
+cp $closureInfo/registration nix-path-registration
+
+# Add symlinks to the top-level store objects.
+for ((n = 0; n < ${#objects[*]}; n++)); do
+  object=${objects[$n]}
+  symlink=${symlinks[$n]}
+  if test "$symlink" != "none"; then
+    mkdir -p $(dirname ./$symlink)
+    ln -s $object ./$symlink
+  fi
+done
+
+$extraCommands
+
+rm env-vars
+
+mkdir $out
+cp -a --reflink=always * $out/
+
+mkdir -p $out/nix-support
+echo $system >$out/nix-support/system
+echo "file system-directory $out" >$out/nix-support/hydra-build-products
diff --git a/modules/flake-parts/nixosConfigurations.tfgrid-devnet-vm0/configuration.nix b/modules/flake-parts/nixosConfigurations.tfgrid-devnet-vm0/configuration.nix
new file mode 100644
index 00000000..60957771
--- /dev/null
+++ b/modules/flake-parts/nixosConfigurations.tfgrid-devnet-vm0/configuration.nix
@@ -0,0 +1,36 @@
+{
+  config,
+  inputs,
+  self,
+  pkgs,
+  ...
+}: let
+  hostName = "tfgrid-devnet-vm0";
+in {
+  imports = [
+    inputs.srvos.nixosModules.server
+    inputs.srvos.nixosModules.mixins-terminfo
+
+    inputs.sops-nix.nixosModules.sops
+
+    self.nixosModules.holo-users
+    ../../nixos/shared.nix
+    ../../nixos/shared-nix-settings.nix
+
+    self.nixosModules.zosVmDir
+  ];
+
+  networking.hostName = hostName;
+
+  nix.settings.max-jobs = 8;
+
+  nix.settings.substituters = [
+    "https://holochain-ci.cachix.org"
+  ];
+
+  nix.settings.trusted-public-keys = [
+    "holochain-ci.cachix.org-1:5IUSkZc0aoRS53rfkvH9Kid40NpyjwCMCzwRTXy+QN8="
+  ];
+
+  system.stateVersion = "23.11";
+}
diff --git a/modules/flake-parts/nixosConfigurations.tfgrid-devnet-vm0/default.nix b/modules/flake-parts/nixosConfigurations.tfgrid-devnet-vm0/default.nix
new file mode 100644
index 00000000..4bb10112
--- /dev/null
+++ b/modules/flake-parts/nixosConfigurations.tfgrid-devnet-vm0/default.nix
@@ -0,0 +1,12 @@
+{
+  self,
+  lib,
+  inputs,
+  ...
+}: {
+  flake.nixosConfigurations.tfgrid-devnet-vm0 = inputs.nixpkgs.lib.nixosSystem {
+    modules = [./configuration.nix];
+    system = "x86_64-linux";
+    specialArgs = self.specialArgs;
+  };
+}
diff --git a/modules/flake-parts/packages.zos-utils.nix b/modules/flake-parts/packages.zos-utils.nix
new file mode 100644
index 00000000..2e0a8e69
--- /dev/null
+++ b/modules/flake-parts/packages.zos-utils.nix
@@ -0,0 +1,271 @@
+{
+  # System independent arguments.
+  ...
+}: {
+  perSystem = {
+    # Arguments specific to the `perSystem` context.
+    pkgs,
+    inputs',
+    ...
+ }: { + packages = let + configName = "tfgrid-devnet-vm0"; + in + { + zos-vm-build = pkgs.writeShellApplication { + name = "zos-vm-build"; + text = '' + set -xueE -o pipefail + + ts="''${1:-"$(date +"%Y%m%d.%H%M%S")"}" + + resultName="${configName}.$ts" + + mkdir -p results + + nix build --out-link results/"$resultName" \ + .\#nixosConfigurations."${configName}".config.system.build.zosVmDir + ln -sf --no-target-directory "$resultName" results/"${configName}.latest" + + echo results/"$resultName" + ''; + }; + + # TODO: automate proper minio hosting. this is exemplary only and requires imperative setup of minio + zos-vm-serve-s3 = pkgs.writeShellApplication { + name = "zos-vm-serve-s3"; + runtimeInputs = [ + pkgs.minio + ]; + text = '' + set -ueE -o pipefail + + cd .minio + + env \ + MINIO_ROOT_USER=minioadmin \ + MINIO_ROOT_PASSWORD="$(cat minioadmin.key)" \ + minio server --console-address ":9001" storage + ''; + }; + + zos-vm-publish-s3 = let + s3BaseUrl = "sj-bm-hostkey0.dev.infra.holochain.org"; + s3ListenUrl = "${s3BaseUrl}:9000"; + s3HttpUrl = "https://${s3BaseUrl}/s3"; + s3Bucket = "tfgrid-eval"; + in + pkgs.writeShellApplication { + name = "zos-vm-publish-s3"; + runtimeInputs = [ + pkgs.minio-client + ]; + text = '' + set -xueE -o pipefail + + rootfsRel="$1" + rootfsBase="$(basename "$rootfsRel")" + rootfsDir="$(dirname "$rootfsRel")" + rootfs="$(realpath "$rootfsRel")" + + workDir="$rootfsDir"/"$rootfsBase".work + + mkdir -p "$workDir" + cd "$workDir" + + # mc rm --recursive --force localhost/${s3Bucket} || echo removal failed + env RUST_MIN_STACK=8388608 \ + rfs pack -m result.fl -s s3://minioadmin:"$(cat ../../.minio/minioadmin.key)"@${s3ListenUrl}/${s3Bucket}\?region=us-east-1 "$rootfs/" | tee rfs-pack.log + + # TODO: document or automate setting up the alias "localhost" + mc cp result.fl localhost/${s3Bucket}/"$rootfsBase".fl + echo ${s3HttpUrl}/${s3Bucket}/"$rootfsBase".fl > public-url + + touch published + + echo "$workDir"/result.fl + ''; + }; + } + // ( + let + macaddr = "12:34:56:78:90:ab"; + userData = pkgs.writeText "user-data" '' + #cloud-config + + ssh_pwauth: True + ''; + metaData = pkgs.writeText "meta-data" '' + instance-id: tfgrid + local-hostname: tfgrid + ''; + networkConfig = pkgs.writeText "network-config" '' + version: 2 + ethernets: + id0: + match: + macaddress: '${macaddr}' + dhcp4: false + addresses: [192.168.249.2/24] + gateway4: 192.168.249.1 + ''; + # see https://github.com/cloud-hypervisor/cloud-hypervisor/blob/main/scripts/create-cloud-init.sh + cloudinitImg = + pkgs.runCommand "cloudinit.img" + { + nativeBuildInputs = [pkgs.dosfstools pkgs.mtools]; + } '' + mkdosfs -n CIDATA -C "$out" 8192 + + # TODO: clarify whether the name needs to match + cp ${userData} user-data + mcopy -oi "$out" -s user-data :: + + cp ${metaData} meta-data + mcopy -oi "$out" -s meta-data :: + + cp ${networkConfig} network-config + mcopy -oi "$out" -s network-config :: + ''; + in { + zos-vm-boot-local = pkgs.writeShellApplication { + # see https://gist.github.com/muhamadazmy/a10bfb0cc77084c9b09dea5e49ec528e + name = "zos-vm-boot-local"; + runtimeInputs = [ + pkgs.virtiofsd + pkgs.cloud-hypervisor + ]; + text = '' + set -xeuE -o pipefail + + # path to root directory + rootfs="''${1}" + kernel="$rootfs/boot/vmlinuz" + initram="$rootfs/boot/initrd.img" + + workDir="$rootfs.work" + mkdir -p "$workDir" + + socket="$workDir/virtiofs.sock" + + fail() { + echo "$1" >&2 + exit 1 + } + + if [ ! -f "$kernel" ]; then + fail "kernel file not found" + fi + + if [ ! 
+                fail "initramfs file not found"
+              fi
+
+              # start virtiofsd in the background
+              sudo virtiofsd -d --socket-path="$socket" --shared-dir="$rootfs/" &>/dev/null &
+              fspid="$!"
+
+              sleep 1
+
+              cleanup() {
+                (
+                  set +eEu
+
+                  sudo kill "$fspid"
+                  rm -rf "$socket"
+                )
+              }
+
+              trap cleanup EXIT
+
+              sudo cloud-hypervisor \
+                --memory size=2048M,shared=on \
+                --disk path=${cloudinitImg},readonly=true \
+                --net "tap=,mac=${macaddr},ip=,mask=" \
+                --kernel "$kernel" \
+                --initramfs "$initram" \
+                --fs tag=vroot,socket="$socket" \
+                --cmdline "rw console=ttyS0 boot.shell_on_fail" \
+                --serial tty \
+                --console off
+            '';
+
+            # --cmdline "rw console=ttyS0 init=$init boot.shell_on_fail boot.debug1mounts" \
+          };
+          zos-vm-boot-s3 = pkgs.writeShellApplication {
+            # see https://gist.github.com/muhamadazmy/a10bfb0cc77084c9b09dea5e49ec528e
+            name = "zos-vm-boot-s3";
+            runtimeInputs = [
+              pkgs.virtiofsd
+              pkgs.cloud-hypervisor
+              inputs'.threefold-rfs.packages.default
+            ];
+            text = ''
+              set -xeuE -o pipefail
+
+              # path to root directory
+              rootfs="''${1}"
+              kernel="$rootfs/boot/vmlinuz"
+              initram="$rootfs/boot/initrd.img"
+
+              workDir="$rootfs.work"
+              mountDir="$workDir/mnt"
+              mkdir -p "$mountDir"
+
+              socket="$workDir/virtiofs.sock"
+
+              fail() {
+                echo "$1" >&2
+                exit 1
+              }
+
+              rfs mount -m "$workDir"/result.fl "$mountDir" > "$workDir"/rfs_mount.log 2>&1 &
+              mountpid="$!"
+
+              sleep 3
+
+              if [ ! -f "$kernel" ]; then
+                fail "kernel file not found"
+              fi
+
+              if [ ! -f "$initram" ]; then
+                fail "initramfs file not found"
+              fi
+
+              # start virtiofsd in the background
+              sudo virtiofsd -d --socket-path="$socket" --shared-dir="$mountDir" &>/dev/null &
+              fspid="$!"
+
+              cleanup() {
+                (
+                  set +eEu
+
+                  sudo kill "$fspid"
+                  rm -rf "$socket"
+
+                  kill "$mountpid"
+                  umount --lazy "$mountDir"
+                  rmdir "$mountDir"
+                )
+              }
+
+              trap cleanup EXIT
+
+              sudo cloud-hypervisor \
+                --memory size=2048M,shared=on \
+                --disk path=${cloudinitImg},readonly=true \
+                --net "tap=,mac=${macaddr},ip=,mask=" \
+                --kernel "$kernel" \
+                --initramfs "$initram" \
+                --fs tag=vroot,socket="$socket" \
+                --cmdline "rw console=ttyS0 boot.shell_on_fail" \
+                --serial tty \
+                --console off
+            '';
+
+            # --cmdline "rw console=ttyS0 init=$init boot.shell_on_fail boot.debug1mounts" \
+          };
+        }
+      );
+  };
+}
diff --git a/modules/flake-parts/tfgrid-microvm/default.nix b/modules/flake-parts/tfgrid-microvm/default.nix
new file mode 100644
index 00000000..946017b1
--- /dev/null
+++ b/modules/flake-parts/tfgrid-microvm/default.nix
@@ -0,0 +1,99 @@
+# TODO: make sure new kernels/initrds also get copied to `/boot/vmlinuz` and `/boot/initrd.img`
+{self, ...}: let
+  mkZosVmDir = import ./mk-zos-vm-dir.nix;
+in {
+  flake.nixosModules = {
+    zosVmDir = {
+      config,
+      lib,
+      modulesPath,
+      pkgs,
+      ...
+ }: { + imports = [ + (modulesPath + "/profiles/qemu-guest.nix") + self.nixosModules.zosVmDirOverlayAutodetect + ]; + # can be built with + # nix build -v .\#nixosConfigurations..config.system.build.zosVmDir + system.build.zosVmDir = mkZosVmDir {inherit self pkgs config;}; + + fileSystems."/" = { + device = "vroot"; + fsType = "virtiofs"; + }; + + boot.initrd.kernelModules = [ + "virtiofs" + "virtio_blk" + "virtio_pmem" + "virtio_console" + "virtio_pci" + "virtio_mmio" + ]; + + boot.loader.grub.enable = true; + boot.initrd.systemd.enable = false; + boot.loader.grub.device = "nodev"; + + services.cloud-init.enable = true; + services.cloud-init.ext4.enable = true; + services.cloud-init.network.enable = true; + + boot.kernelParams = ["nomodeset"]; + networking.useDHCP = false; + + # force SSH to start + services.openssh.enable = true; + systemd.services.sshd.wantedBy = lib.mkForce ["multi-user.target"]; + # systemd.services.sshd.after = lib.mkForce [ ]; + + # changes for format.docker + networking.useHostResolvConf = false; + }; + + zosVmDirOverlayAutodetect = {lib, ...}: { + boot.initrd.kernelModules = [ + "overlay" + ]; + + # use an overlay on a tmpfs because the rfs mount is read-only + boot.initrd.postMountCommands = let + target = "/mnt-root"; + targetRo = "${target}-ro"; + + # TODO: make this these are sane and work + overlay = rec { + base = "/overlay"; + upper = "${base}/rw/upper"; + work = "${base}/rw/work"; + lower = "${base}/ro"; + }; + in '' + set -x + if ! touch ${target}/.read-write; then + # move the rootfs mount out of the way for the tmpfs + mkdir -p ${targetRo} + mount --move ${target} ${targetRo} + + # create a new tmpfs for the overlay + mount -t tmpfs none -o size=4G,mode=755 ${target} + + # assemble and the overlay + mkdir -p ${overlay.upper} ${overlay.work} ${overlay.lower} + mount --move ${targetRo} ${overlay.lower} + mount -t overlay overlay -o upperdir=${overlay.upper},workdir=${overlay.work},lowerdir=${overlay.lower} ${target} + + # TODO: make the overlay internals visible underneath its own mountpoint + # currently the mount fails with: 'mount: mounting /overlay on /mnt-root/overlay failed: Invalid argument' + # mkdir ${target}/overlay + # mount --move ${overlay.base} ${target}/overlay + fi + set +x + ''; + + services.getty.autologinUser = "root"; + users.users.root.password = "root"; + }; + }; +} diff --git a/modules/flake-parts/tfgrid-microvm/mk-zos-vm-dir.nix b/modules/flake-parts/tfgrid-microvm/mk-zos-vm-dir.nix new file mode 100644 index 00000000..62d00c5d --- /dev/null +++ b/modules/flake-parts/tfgrid-microvm/mk-zos-vm-dir.nix @@ -0,0 +1,34 @@ +{ + self, + config, + pkgs, +}: let + pkgs2storeContents = map (x: { + object = x; + symlink = "none"; + }); + # trying to produce something that is compatible with + # https://github.com/threefoldtech/zos/blob/main/docs/manual/zmachine/zmachine.md#vm +in + pkgs.callPackage (self + "/lib/make-system-directory.nix") { + contents = [ + { + source = let + cmd = pkgs.runCommandNoCC "rootfs" {} '' + mkdir -p $out/boot + + ln -s ${config.system.build.toplevel}/init $out/init + ${pkgs.gcc}/bin/strip ${config.system.build.kernel.dev}/vmlinux -o $out/boot/vmlinuz + cp ${config.system.build.initialRamdisk}/initrd $out/boot/initrd.img + ''; + in "${cmd}/."; + target = "./"; + } + ]; + + # Add init script to image + storeContents = pkgs2storeContents [ + config.system.build.toplevel + pkgs.stdenvNoCC + ]; + }