diff --git a/.envrc b/.envrc
new file mode 100644
index 0000000..3550a30
--- /dev/null
+++ b/.envrc
@@ -0,0 +1 @@
+use flake
diff --git a/.github/workflows/flake.yaml b/.github/workflows/flake.yaml
new file mode 100644
index 0000000..c501064
--- /dev/null
+++ b/.github/workflows/flake.yaml
@@ -0,0 +1,44 @@
+name: flake
+
+on:
+  pull_request:
+  push:
+    branches:
+      - main
+
+# env:
+#   CACHIX_BINARY_CACHE: altf4llc-os
+
+jobs:
+  check:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: cachix/install-nix-action@v25
+        with:
+          enable_kvm: true
+      # - uses: cachix/cachix-action@v14
+      #   with:
+      #     authToken: ${{ secrets.ALTF4LLC_CACHIX_AUTH_TOKEN }}
+      #     name: ${{ env.CACHIX_BINARY_CACHE }}
+      - uses: actions/checkout@v4
+      - run: nix develop -c just check
+
+  build:
+    needs:
+      - check
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        profile:
+          - gc-fwd
+          - ecs-node
+    steps:
+      - uses: cachix/install-nix-action@v25
+        with:
+          enable_kvm: true
+      # - uses: cachix/cachix-action@v14
+      #   with:
+      #     authToken: ${{ secrets.ALTF4LLC_CACHIX_AUTH_TOKEN }}
+      #     name: ${{ env.CACHIX_BINARY_CACHE }}
+      - uses: actions/checkout@v4
+      - run: nix develop -c just build "${{ matrix.profile }}"
diff --git a/LICENSE b/LICENSE
index 261eeb9..57bc88a 100644
--- a/LICENSE
+++ b/LICENSE
@@ -199,3 +199,4 @@
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
+
diff --git a/README.md b/README.md
index 02f0c92..7d36c59 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,91 @@
-# vms-nix
-NixOS virtual machine images for ALT-F4 LLC
+# `vms.nix`
+
+[![License: Apache-2.0](https://img.shields.io/github/license/ALT-F4-LLC/vms.nix)](./LICENSE)
+
+NixOS-based VM images for ALT-F4 LLC. These images are built using
+[nixos-generators](https://github.com/nix-community/nixos-generators) and
+Nix flakes.
+
+## Image Details
+
+Every image from this repository is built with an immutable main disk. This
+means that while 'state' directories (`/home`, `/var/lib`, etc.) are writable,
+the rest of the configuration is static and immutable, packaged as part of the
+Nix store.
+
+There is also an `altf4` user baked into every image, with a list of trusted
+SSH keys attached. This user is for administrative purposes.
+
+> [!NOTE]
+> On AMIs, the SSH keypair for `altf4` will not be overridden by setting the
+> SSH Key Pair option when provisioning the AMI. That option only applies to
+> the `root` user.
+
+## Layout
+
+Service configuration modules land in `modules/mixins`, and generic (global)
+configuration modules land in `modules/profiles`, as they are not tied to any
+specific kind or role of image.
+
+```
+vms.nix
+├── flake.lock
+├── flake.nix
+├── justfile
+├── LICENSE
+├── modules
+│   ├── mixins
+│   │   └── Service configuration modules
+│   │       └── default.nix
+│   └── profiles
+│       └── "Base" configuration modules (EC2 extras, base config, etc)
+└── README.md
+```
+
+## Building an Image
+
+To build an image, find its package name in [`flake.nix`](./flake.nix), then
+use `just build` to build it:
+
+```
+$ just build ecs-node
+```
+
+### Publishing an AMI to EC2
+
+> [!NOTE]
+> Using this when you're not a member of ALT-F4 requires some extra steps. See
+> [`aws/README.md`](./aws/README.md) for more info.
+
+There is a `just` task for this called `publish-ami`. It takes the name of the
+image you want to publish as an input, and then carries out the following
+tasks:
+
+- Builds the image with `just build`
+- Uploads the output `.vhd` image to S3
+- Kicks off a snapshot import using the EC2 VM Import/Export service
+- Waits for the snapshot to be fully imported and available
+- Registers an AMI using the snapshot and outputs its ID
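+
+For example, to build and publish the `ecs-node` image (assuming your shell
+has AWS credentials that can write to the S3 bucket and call EC2):
+
+```
+$ just publish-ami ecs-node
+```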
+
+NixOS VMs use `/dev/sda1` as their root device name, and this is configured at
+the point the AMI is registered. By default, images are built on a 4GB disk,
+but the size can be increased if an image does not fit.
+
+All VMs are also configured with `cachix-agent` installed, and all Amazon
+AMIs are configured with `amazon-ssm-agent` and `amazon-init` to ensure full
+feature compatibility with EC2.
+
+## Contributing
+
+While this is an internal project at ALT-F4, we still welcome contributions
+from the community if you spot an improvement or have a suggestion!
+
+Feel free to raise PRs and issues against this repository, but also understand
+that as this is an internal piece of tooling, some opinionated choices in the
+configs and/or logic will be present and we may be stubborn about them!
+
+## License
+
+`vms.nix` is licensed under the Apache License Version 2.0. For full license
+terms, see [`LICENSE`](./LICENSE).
diff --git a/aws/README.md b/aws/README.md
new file mode 100644
index 0000000..98209be
--- /dev/null
+++ b/aws/README.md
@@ -0,0 +1,13 @@
+# AWS
+
+To use this repository with AWS, you need the following:
+
+- An S3 bucket you have write access to
+- A role called `vmimport` (exactly), using the included
+  [trust policy](./vmimport_trust_policy.json) and
+  [permissions](./vmimport_role_policy.json).
+
+See the links above for what those policies should be; there is also a CLI
+sketch at the end of this file for creating the role.
+
+Once done, you'll need to fork this repo and change the `ami_bucket` variable
+in the [`justfile`](../justfile) to the name of your bucket.
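+
+If you'd rather script the role setup, something like the following should
+work (a sketch, not a tested recipe; it assumes the AWS CLI is authenticated
+against your account, is run from this directory, and that you've replaced
+the bucket ARNs in the role policy with your own bucket; the inline policy
+name is arbitrary):
+
+```
+aws iam create-role \
+  --role-name vmimport \
+  --assume-role-policy-document file://vmimport_trust_policy.json
+
+aws iam put-role-policy \
+  --role-name vmimport \
+  --policy-name vmimport \
+  --policy-document file://vmimport_role_policy.json
+```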
diff --git a/aws/vmimport_role_policy.json b/aws/vmimport_role_policy.json
new file mode 100644
index 0000000..d2da54b
--- /dev/null
+++ b/aws/vmimport_role_policy.json
@@ -0,0 +1,27 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetBucketLocation",
+        "s3:GetObject",
+        "s3:ListBucket"
+      ],
+      "Resource": [
+        "arn:aws:s3:::altf4llc-hayden-test-nix-amis",
+        "arn:aws:s3:::altf4llc-hayden-test-nix-amis/*"
+      ]
+    },
+    {
+      "Effect": "Allow",
+      "Action": [
+        "ec2:ModifySnapshotAttribute",
+        "ec2:CopySnapshot",
+        "ec2:RegisterImage",
+        "ec2:Describe*"
+      ],
+      "Resource": "*"
+    }
+  ]
+}
diff --git a/aws/vmimport_trust_policy.json b/aws/vmimport_trust_policy.json
new file mode 100644
index 0000000..2bfbbde
--- /dev/null
+++ b/aws/vmimport_trust_policy.json
@@ -0,0 +1,17 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Service": "vmie.amazonaws.com"
+      },
+      "Action": "sts:AssumeRole",
+      "Condition": {
+        "StringEquals": {
+          "sts:Externalid": "vmimport"
+        }
+      }
+    }
+  ]
+}
diff --git a/flake.lock b/flake.lock
new file mode 100644
index 0000000..b50ddcc
--- /dev/null
+++ b/flake.lock
@@ -0,0 +1,121 @@
+{
+  "nodes": {
+    "flake-parts": {
+      "inputs": {
+        "nixpkgs-lib": "nixpkgs-lib"
+      },
+      "locked": {
+        "lastModified": 1712014858,
+        "narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=",
+        "owner": "hercules-ci",
+        "repo": "flake-parts",
+        "rev": "9126214d0a59633752a136528f5f3b9aa8565b7d",
+        "type": "github"
+      },
+      "original": {
+        "id": "flake-parts",
+        "type": "indirect"
+      }
+    },
+    "nixlib": {
+      "locked": {
+        "lastModified": 1712450863,
+        "narHash": "sha256-K6IkdtMtq9xktmYPj0uaYc8NsIqHuaAoRBaMgu9Fvrw=",
+        "owner": "nix-community",
+        "repo": "nixpkgs.lib",
+        "rev": "3c62b6a12571c9a7f65ab037173ee153d539905f",
+        "type": "github"
+      },
+      "original": {
+        "owner": "nix-community",
+        "repo": "nixpkgs.lib",
+        "type": "github"
+      }
+    },
+    "nixos-generators": {
+      "inputs": {
+        "nixlib": "nixlib",
+        "nixpkgs": [
+          "nixpkgs"
+        ]
+      },
+      "locked": {
+        "lastModified": 1713783234,
+        "narHash": "sha256-3yh0nqI1avYUmmtqqTW3EVfwaLE+9ytRWxsA5aWtmyI=",
+        "owner": "nix-community",
+        "repo": "nixos-generators",
+        "rev": "722b512eb7e6915882f39fff0e4c9dd44f42b77e",
+        "type": "github"
+      },
+      "original": {
+        "owner": "nix-community",
+        "repo": "nixos-generators",
+        "type": "github"
+      }
+    },
+    "nixpkgs": {
+      "locked": {
+        "lastModified": 1714076141,
+        "narHash": "sha256-Drmja/f5MRHZCskS6mvzFqxEaZMeciScCTFxWVLqWEY=",
+        "owner": "nixos",
+        "repo": "nixpkgs",
+        "rev": "7bb2ccd8cdc44c91edba16c48d2c8f331fb3d856",
+        "type": "github"
+      },
+      "original": {
+        "owner": "nixos",
+        "ref": "nixos-unstable",
+        "repo": "nixpkgs",
+        "type": "github"
+      }
+    },
+    "nixpkgs-lib": {
+      "locked": {
+        "dir": "lib",
+        "lastModified": 1711703276,
+        "narHash": "sha256-iMUFArF0WCatKK6RzfUJknjem0H9m4KgorO/p3Dopkk=",
+        "owner": "NixOS",
+        "repo": "nixpkgs",
+        "rev": "d8fe5e6c92d0d190646fb9f1056741a229980089",
+        "type": "github"
+      },
+      "original": {
+        "dir": "lib",
+        "owner": "NixOS",
+        "ref": "nixos-unstable",
+        "repo": "nixpkgs",
+        "type": "github"
+      }
+    },
+    "root": {
+      "inputs": {
+        "flake-parts": "flake-parts",
+        "nixos-generators": "nixos-generators",
+        "nixpkgs": "nixpkgs",
+        "srvos": "srvos"
+      }
+    },
+    "srvos": {
+      "inputs": {
+        "nixpkgs": [
+          "nixpkgs"
+        ]
+      },
+      "locked": {
+        "lastModified": 1714143163,
+        "narHash": "sha256-WMAziIBkwX//WUGxH49ZSm0yaPS6/PvNWUMMut8unm0=",
+        "owner": "numtide",
+        "repo": "srvos",
+        "rev": "71a8e8ab6e4763714d20c22f42ba8860369a1508",
+        "type": "github"
+      },
+      "original": {
+        "owner": "numtide",
+        "repo": "srvos",
+        "type": "github"
+      }
+    }
+  },
+  "root": "root",
+  "version": 7
+}
"71a8e8ab6e4763714d20c22f42ba8860369a1508", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "srvos", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 0000000..366b424 --- /dev/null +++ b/flake.nix @@ -0,0 +1,50 @@ +{ + inputs = { + nixpkgs.url = "github:nixos/nixpkgs?ref=nixos-unstable"; + + nixos-generators.url = "github:nix-community/nixos-generators"; + nixos-generators.inputs.nixpkgs.follows = "nixpkgs"; + + srvos.url = "github:numtide/srvos"; + srvos.inputs.nixpkgs.follows = "nixpkgs"; + }; + + outputs = inputs@{ flake-parts, ... }: + flake-parts.lib.mkFlake { inherit inputs; } { + systems = [ "x86_64-linux" "aarch64-linux" ]; + + perSystem = { pkgs, system, ... }: + let + inherit (pkgs) awscli2 just; + in + { + devShells.default = pkgs.mkShell { + buildInputs = [ awscli2 just ]; + }; + + packages = { + gc-fwd = inputs.nixos-generators.nixosGenerate { + inherit system; + modules = [ + inputs.srvos.nixosModules.server + inputs.srvos.nixosModules.hardware-amazon + ./modules/profiles/common.nix + ./modules/mixins/gc-fwd + ]; + format = "amazon"; # ami + }; + + ecs-node = inputs.nixos-generators.nixosGenerate { + inherit system; + modules = [ + inputs.srvos.nixosModules.server + inputs.srvos.nixosModules.hardware-amazon + ./modules/profiles/common.nix + ./modules/mixins/ecs-agent + ]; + format = "amazon"; # ami + }; + }; + }; + }; +} diff --git a/justfile b/justfile new file mode 100644 index 0000000..5aafe5f --- /dev/null +++ b/justfile @@ -0,0 +1,37 @@ +ami_bucket := "altf4llc-hayden-test-nix-amis" + +build profile: + nix build --json --print-build-logs --no-link '.#{{profile}}' + +publish-ami profile: + #!/usr/bin/env bash + set -euo pipefail + BUILD_TIME=$(date +%s) + IMAGE_NAME="altf4llc-{{profile}}-$BUILD_TIME" + + DERIVATION=$(just build {{profile}}) + OUTPUT=$(echo "$DERIVATION" | jq -r '.[].outputs.out') + IMAGE_PATH=$(cd "$OUTPUT" && ls *.vhd) + + echo "Uploading VHD to S3." + aws s3 cp "$OUTPUT/$IMAGE_PATH" "s3://{{ami_bucket}}/$IMAGE_NAME.vhd" + + echo "Starting snapshot import." + TASK_ID=$(aws ec2 import-snapshot --disk-container "Format=VHD,UserBucket={S3Bucket=altf4llc-hayden-test-nix-amis,S3Key=$IMAGE_NAME.vhd}" --output json | jq -r ".ImportTaskId") + + echo "Waiting for snapshot import to complete." + until [[ $(aws ec2 describe-import-snapshot-tasks --import-task-ids "$TASK_ID" --output json | jq -r '.ImportSnapshotTasks[].SnapshotTaskDetail.Status') == "completed" ]]; do + echo "Snapshot is not imported yet, waiting..." + sleep 5 + done + + SNAPSHOT_ID=$(aws ec2 describe-import-snapshot-tasks --import-task-ids "$TASK_ID" --output json | jq -r '.ImportSnapshotTasks[].SnapshotTaskDetail.SnapshotId') + + echo "New snapshot is $SNAPSHOT_ID." 
+    AMI_ID=$(aws ec2 register-image --architecture x86_64 --ena-support --name "$IMAGE_NAME" --description "A NixOS AMI: {{profile}}" --block-device-mappings "DeviceName=/dev/sda1,Ebs={SnapshotId=$SNAPSHOT_ID}" --root-device-name /dev/sda1 --output json | jq -r '.ImageId')
+
+    echo "AMI is registered: $AMI_ID"
+
+    echo "Cleaning up image VHD from bucket"
+    aws s3 rm "s3://{{ami_bucket}}/$IMAGE_NAME.vhd"
diff --git a/modules/mixins/alloy-forwarder/config.alloy b/modules/mixins/alloy-forwarder/config.alloy
new file mode 100644
index 0000000..c37d37d
--- /dev/null
+++ b/modules/mixins/alloy-forwarder/config.alloy
@@ -0,0 +1,100 @@
+import.git "gcloud" {
+  repository = "https://github.com/grafana/alloy-modules.git"
+  path = "modules/cloud/grafana/cloud/module.river"
+  revision = "main"
+  pull_frequency = "0s"
+}
+
+gcloud.stack "default" {
+  stack_name = env("GRAFANA_CLOUD_STACK")
+  token = env("GRAFANA_CLOUD_TOKEN")
+}
+
+// Metrics pushed here from other hosts are forwarded straight to Grafana Cloud.
+prometheus.receive_http "forward" {
+  http {
+    listen_address = "0.0.0.0"
+    listen_port = 9090
+  }
+  forward_to = [
+    grafana_cloud.stack.receivers.metrics,
+  ]
+}
+
+// The forwarder also scrapes and ships its own host metrics.
+prometheus.scrape "linux_node" {
+  targets = prometheus.exporter.unix.node.targets
+  forward_to = [
+    grafana_cloud.stack.receivers.metrics,
+  ]
+}
+
+prometheus.exporter.unix "node" {
+}
+
+// Logs pushed here from other hosts are forwarded straight to Grafana Cloud.
+loki.source.api "receive" {
+  http {
+    listen_address = "0.0.0.0"
+    listen_port = 3100
+  }
+  forward_to = [
+    grafana_cloud.stack.receivers.logs,
+  ]
+}
+
+loki.relabel "journal" {
+  forward_to = []
+
+  rule {
+    source_labels = ["__journal__systemd_unit"]
+    target_label = "unit"
+  }
+  rule {
+    source_labels = ["__journal__boot_id"]
+    target_label = "boot_id"
+  }
+  rule {
+    source_labels = ["__journal__transport"]
+    target_label = "transport"
+  }
+  rule {
+    source_labels = ["__journal_priority_keyword"]
+    target_label = "level"
+  }
+  rule {
+    source_labels = ["__journal__hostname"]
+    target_label = "instance"
+  }
+}
+
+loki.source.journal "read" {
+  forward_to = [
+    grafana_cloud.stack.receivers.logs,
+  ]
+  relabel_rules = loki.relabel.journal.rules
+  labels = {
+    "job" = "integrations/node_exporter",
+  }
+}
+
+otelcol.exporter.prometheus "to_prometheus" {
+  forward_to = [
+    grafana_cloud.stack.receivers.metrics,
+  ]
+}
+
+otelcol.exporter.loki "to_loki" {
+  forward_to = [
+    grafana_cloud.stack.receivers.logs,
+  ]
+}
+
+// OTLP traffic from other hosts is split back out into metrics, logs and traces.
+otelcol.receiver.otlp "default" {
+  grpc {}
+  http {}
+  output {
+    metrics = [otelcol.exporter.prometheus.to_prometheus.input]
+    logs = [otelcol.exporter.loki.to_loki.input]
+    traces = [grafana_cloud.stack.receivers.traces]
+  }
+}
+
+// vim:ft=hcl
diff --git a/modules/mixins/alloy-forwarder/default.nix b/modules/mixins/alloy-forwarder/default.nix
new file mode 100644
index 0000000..741a3e6
--- /dev/null
+++ b/modules/mixins/alloy-forwarder/default.nix
@@ -0,0 +1,15 @@
+{ lib, ... }:
+{
+  imports = [ ../alloy ];
+
+  # Only change from normal Alloy mixin is an overridden config file
+  environment.etc."alloy/config.alloy".source = lib.mkForce ./config.alloy;
+
+  virtualisation.oci-containers.containers.alloy = {
+    environmentFiles = [ "/run/keys/grafana-cloud" ];
+
+    environment = {
+      GRAFANA_CLOUD_STACK = "altf4llc";
+    };
+  };
+}
diff --git a/modules/mixins/alloy/config.alloy b/modules/mixins/alloy/config.alloy
new file mode 100644
index 0000000..cc7c5ee
--- /dev/null
+++ b/modules/mixins/alloy/config.alloy
@@ -0,0 +1,69 @@
+# We ship everything over OTLP
+otelcol.exporter.otlp "gc-fwd" {
+  client {
+    endpoint = "gc-fwd.altf4.internal:4317"
+    tls {
+      insecure = true
+    }
+  }
+}
+
+# Convert Prometheus data for OTLP
+otelcol.receiver.prometheus "default" {
+  output {
+    metrics = [otelcol.exporter.otlp.gc-fwd.input]
+  }
+}
+
+# Convert Loki data for OTLP
+otelcol.receiver.loki "default" {
+  output {
+    logs = [otelcol.exporter.otlp.gc-fwd.input]
+  }
+}
+
+# Extract Systemd unit from journal entry
+loki.relabel "journal" {
+  forward_to = []
+
+  rule {
+    source_labels = ["__journal__systemd_unit"]
+    target_label = "unit"
+  }
+}
+
+# Fetch journal entries
+loki.source.journal "journal" {
+  forward_to = [otelcol.receiver.loki.default.receiver]
+  relabel_rules = loki.relabel.journal.rules
+  labels = {component = "loki.source.journal"}
+}
+
+# Set instance label to the hostname
+prometheus.relabel "instance" {
+  forward_to = [otelcol.receiver.prometheus.default.receiver]
+  rule {
+    target_label = "instance"
+    replacement = env("HOSTNAME")
+  }
+}
+
+# Export system metrics
+prometheus.exporter.unix "host" {
+  procfs_path = "/host/proc"
+  sysfs_path = "/host/sys"
+  rootfs_path = "/rootfs"
+  udev_data_path = "/host/run/udev/data"
+
+  filesystem {
+    mount_points_exclude = "^/(sys|proc|dev|host|etc)($$|/)"
+  }
+}
+
+# Scrape system metrics
+prometheus.scrape "host" {
+  targets = prometheus.exporter.unix.host.targets
+  forward_to = [prometheus.relabel.instance.receiver]
+}
+
+// vim:ft=hcl
diff --git a/modules/mixins/alloy/default.nix b/modules/mixins/alloy/default.nix
new file mode 100644
index 0000000..3db6491
--- /dev/null
+++ b/modules/mixins/alloy/default.nix
@@ -0,0 +1,50 @@
+{ config, ... }:
+{
+  # see TODO further down
+  imports = [ ../docker ];
+
+  environment.etc."alloy/config.alloy" = {
+    source = ./config.alloy;
+    mode = "0440";
+    user = "root";
+  };
+
+  # TODO: Replace this once there's an Alloy package merged into Nixpkgs
+  # https://github.com/NixOS/nixpkgs/pull/306048
+  virtualisation.oci-containers.containers.alloy = {
+    autoStart = true;
+    image = "grafana/alloy:v1.0.0";
+
+    user = "root";
+
+    ports = [
+      "12345:12345"
+    ];
+
+    cmd = [
+      "run"
+      "--server.http.listen-addr=0.0.0.0:12345"
+      "--storage.path=/var/lib/alloy/data"
+      "--stability.level=public-preview"
+
+      # we give a path to the directory so it loads every file, instead of
+      # one config file. this allows us to add extra configuration in other
+      # mixins.
+      "/etc/alloy"
+    ];
+
+    volumes = [
+      # Alloy
+      "/var/log:/var/log:ro"
+      "/etc/alloy:/etc/alloy:ro"
+
+      "/var/lib/alloy/data"
+
+      # Node Exporter
+      "/proc:/host/proc:ro"
+      "/sys:/host/sys:ro"
+      "/run/udev/data:/host/run/udev/data:ro"
+      "/:/rootfs:ro"
+    ];
+  };
+}
diff --git a/modules/mixins/docker/default.nix b/modules/mixins/docker/default.nix
new file mode 100644
index 0000000..39e1f1d
--- /dev/null
+++ b/modules/mixins/docker/default.nix
@@ -0,0 +1,5 @@
+{ ... }:
+{
+  virtualisation.docker.enable = true;
+  virtualisation.oci-containers.backend = "docker";
+}
diff --git a/modules/mixins/ecs-agent/config.alloy b/modules/mixins/ecs-agent/config.alloy
new file mode 100644
index 0000000..c5f81fc
--- /dev/null
+++ b/modules/mixins/ecs-agent/config.alloy
@@ -0,0 +1,33 @@
+prometheus.exporter.cadvisor "cadvisor" {
+  docker_host = "unix:///var/run/docker.sock"
+  storage_duration = "5m"
+}
+
+prometheus.scrape "cadvisor" {
+  targets = prometheus.exporter.cadvisor.cadvisor.targets
+  forward_to = [prometheus.relabel.instance.receiver]
+  scrape_interval = "30s"
+}
+
+prometheus.scrape "ecs-agent" {
+  targets = [
+    {"__address__" = "127.0.0.1:51680", instance = env("HOSTNAME")},
+  ]
+
+  forward_to = [prometheus.relabel.instance.receiver]
+  scrape_interval = "30s"
+}
+
+// Run a local OpenTelemetry collector
+otelcol.receiver.otlp "otlp" {
+  grpc {}
+  http {}
+
+  output {
+    metrics = [prometheus.relabel.instance.receiver]
+    logs = [otelcol.receiver.loki.default.receiver]
+    traces = [otelcol.exporter.otlp.gc-fwd.input]
+  }
+}
+
+// vim:ft=hcl
diff --git a/modules/mixins/ecs-agent/default.nix b/modules/mixins/ecs-agent/default.nix
new file mode 100644
index 0000000..6deb12a
--- /dev/null
+++ b/modules/mixins/ecs-agent/default.nix
@@ -0,0 +1,56 @@
+{ pkgs, ... }:
+{
+  imports = [
+    ../docker
+    ../alloy
+  ];
+
+  boot.kernel.sysctl."net.ipv4.conf.all.route_localnet" = 1;
+
+  networking.firewall.logRefusedConnections = true;
+  networking.useDHCP = true;
+
+  networking.firewall.extraCommands = ''
+    iptables -t nat -A PREROUTING -p tcp -d 169.254.170.2 --dport 80 -j DNAT --to-destination 127.0.0.1:51679
+    iptables -t nat -A OUTPUT -d 169.254.170.2 -p tcp -m tcp --dport 80 -j REDIRECT --to-ports 51679
+  '';
+
+  virtualisation.oci-containers.containers.ecs-agent = {
+    autoStart = true;
+    image = "public.ecr.aws/ecs/amazon-ecs-agent:v1.82.3";
+
+    ports = [
+      "127.0.0.1:51678:51678" # ecs metadata service
+      "127.0.0.1:51680:51680" # prometheus metrics
+    ];
+
+    extraOptions = [
+      "--net=host"
+    ];
+
+    environmentFiles = [ "/run/keys/ecs.config" ];
+    environment = {
+      ECS_LOGFILE = "/log/ecs-agent.log";
+      ECS_LOGLEVEL = "info";
+      ECS_DATADIR = "/data";
+      ECS_UPDATES_ENABLED = "false";
+      ECS_AVAILABLE_LOGGING_DRIVERS = "[\"journald\"]";
+      ECS_ENABLE_TASK_IAM_ROLE = "true";
+      ECS_ENABLE_SPOT_INSTANCE_DRAINING = "true";
+    };
+
+    volumes = [
+      "/var/run/docker.sock:/var/run/docker.sock"
+
+      "/var/log/ecs/:/log"
+      "/var/lib/ecs/data:/data"
+    ];
+  };
+
+  # Monitoring
+  environment.etc."alloy/ecs-agent.alloy" = {
+    source = ./config.alloy;
+    mode = "0440";
+    user = "root";
+  };
+}
diff --git a/modules/profiles/common.nix b/modules/profiles/common.nix
new file mode 100644
index 0000000..33fcd4d
--- /dev/null
+++ b/modules/profiles/common.nix
@@ -0,0 +1,26 @@
+{ ... }:
+{
+  services.cachix-agent.enable = true;
+
+  boot.loader.efi.canTouchEfiVariables = true;
+
+  services.openssh.enable = true;
+
+  security.sudo.wheelNeedsPassword = false;
+
+  security.auditd.enable = true;
+  security.audit.enable = true;
+  security.audit.rules = [
+    "-a exit,always -F arch=b64 -S execve"
+  ];
+
+  users.users.altf4 = {
+    isNormalUser = true;
+    extraGroups = [ "wheel" ];
+    openssh.authorizedKeys.keys = [
+      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDkhuhfzyg7R+O62XSktHufGmmhy6FNDi/NuPPJt7bI+"
+    ];
+  };
+
+  system.stateVersion = "24.05";
+}