diff --git a/flake.lock b/flake.lock
index 29aa4e82..1a71cb63 100644
--- a/flake.lock
+++ b/flake.lock
@@ -1688,6 +1688,7 @@
         "nixpkgsNix": "nixpkgsNix",
         "nixpkgsPulumi": "nixpkgsPulumi",
         "nixpkgsUnstable": "nixpkgsUnstable",
+        "sbd": "sbd",
         "sops-nix": "sops-nix",
         "srvos": "srvos",
         "threefold-rfs": "threefold-rfs",
@@ -1766,6 +1767,23 @@
         "type": "github"
       }
     },
+    "sbd": {
+      "flake": false,
+      "locked": {
+        "lastModified": 1716568215,
+        "narHash": "sha256-ROvhoxRovsm3KCKTVLap1NigCXbjo0HikToO7raHnAU=",
+        "owner": "holochain",
+        "repo": "sbd",
+        "rev": "466fcebc0227526597295e054455db7c82748b96",
+        "type": "github"
+      },
+      "original": {
+        "owner": "holochain",
+        "ref": "sbd-server-v0.0.4-alpha",
+        "repo": "sbd",
+        "type": "github"
+      }
+    },
     "scaffolding": {
       "flake": false,
       "locked": {
diff --git a/flake.nix b/flake.nix
index 89dc9d2d..cd0aadf6 100644
--- a/flake.nix
+++ b/flake.nix
@@ -86,6 +86,10 @@
     tx5.url = "github:holochain/tx5/tx5-signal-srv-v0.0.8-alpha";
     tx5.flake = false;
 
+    sbd.url =
+      "github:holochain/sbd/sbd-server-v0.0.4-alpha"
+      ;
+    sbd.flake = false;
 
     holochain-versions.url = "github:holochain/holochain?dir=versions/weekly";
     holochain = {
@@ -151,6 +155,8 @@
       pkgs.mkShell {
         packages = [
+          self'.formatter
+          pkgs.yq-go
           inputs'.nixos-anywhere.packages.default
diff --git a/lib/make-system-directory.nix b/lib/make-system-directory.nix
index c87b095d..8d0a494f 100644
--- a/lib/make-system-directory.nix
+++ b/lib/make-system-directory.nix
@@ -1,47 +1,41 @@
-{ stdenv
-, closureInfo
-, pixz
-
-, # The files and directories to be placed in the directory.
+{
+  stdenv,
+  closureInfo,
+  pixz,
+  # The files and directories to be placed in the directory.
   # This is a list of attribute sets {source, target} where `source'
   # is the file system object (regular file or directory) to be
   # grafted in the file system at path `target'.
-  contents
-
-, # In addition to `contents', the closure of the store paths listed
+  contents,
+  # In addition to `contents', the closure of the store paths listed
   # in `packages' are also placed in the Nix store of the tarball. This is
   # a list of attribute sets {object, symlink} where `object' if a
   # store path whose closure will be copied, and `symlink' is a
   # symlink to `object' that will be added to the tarball.
-  storeContents ? [ ]
-
+  storeContents ? [],
   # Extra commands to be executed before archiving files
-, extraCommands ? ""
-
+  extraCommands ? "",
   # extra inputs
-, extraInputs ? [ ]
-}:
-
-let
+  extraInputs ? [],
+}: let
   symlinks = map (x: x.symlink) storeContents;
   objects = map (x: x.object) storeContents;
 in
+  stdenv.mkDerivation {
+    name = "system-directory";
+    builder = ./make-system-directory.sh;
+    nativeBuildInputs = extraInputs;
-stdenv.mkDerivation {
-  name = "system-directory";
-  builder = ./make-system-directory.sh;
-  nativeBuildInputs = extraInputs;
-
-  inherit extraCommands;
+    inherit extraCommands;
 
-  # !!! should use XML.
-  sources = map (x: x.source) contents;
-  targets = map (x: x.target) contents;
+    # !!! should use XML.
+    sources = map (x: x.source) contents;
+    targets = map (x: x.target) contents;
 
-  # !!! should use XML.
-  inherit symlinks objects;
+    # !!! should use XML.
+    inherit symlinks objects;
 
-  closureInfo = closureInfo {
-    rootPaths = objects;
-  };
-}
+    closureInfo = closureInfo {
+      rootPaths = objects;
+    };
+  }
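Review note: the reformatted lib/make-system-directory.nix keeps its original calling convention (contents as {source, target} pairs, storeContents as {object, symlink} pairs). A minimal sketch of a call site, for orientation only — the callPackage wiring, the hello package, and the target paths are illustrative assumptions, not taken from this diff:

  # hypothetical call site, assuming the usual pkgs.callPackage wiring
  systemDirectory = pkgs.callPackage ./lib/make-system-directory.nix {
    # files and directories grafted into the resulting tree
    contents = [
      {
        source = ./motd.txt;
        target = "/etc/motd";
      }
    ];
    # store paths whose closures are copied, each with a convenience symlink
    storeContents = [
      {
        object = pkgs.hello;
        symlink = "/run/current-system/sw";
      }
    ];
    # runs inside the builder before the directory is finalized
    extraCommands = "mkdir -p var/empty";
  };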
diff --git a/modules/flake-parts/holochain-turn-server.nix b/modules/flake-parts/holochain-turn-server.nix
index a31da6bb..d36c5af4 100644
--- a/modules/flake-parts/holochain-turn-server.nix
+++ b/modules/flake-parts/holochain-turn-server.nix
@@ -22,6 +22,7 @@
   nixosModules.holochain-turn-server = {
     config,
     lib,
+    options,
     ...
   }: let
     cfg = config.services.holochain-turn-server;
@@ -49,6 +50,12 @@
         default = 82;
       };
 
+      listening-port = lib.mkOption {
+        description = options.services.coturn.listening-port.description;
+        type = lib.types.nullOr lib.types.int;
+        default = 80;
+      };
+
       coturn-min-port = lib.mkOption {
         description = "lower port for coturn's range";
         type = lib.types.int;
@@ -71,14 +78,14 @@
 
       username = lib.mkOption {
         description = "user for establishing turn connections to coturn";
-        type = lib.types.str;
-        default = "test";
+        type = lib.types.nullOr lib.types.str;
+        default = null;
       };
 
       credential = lib.mkOption {
         description = "credential for establishing turn connections to coturn";
-        type = lib.types.str;
-        default = "test";
+        type = lib.types.nullOr lib.types.str;
+        default = null;
       };
 
       extraCoturnAttrs = lib.mkOption {
@@ -86,23 +93,41 @@
         description = "extra attrs passed to coturn";
         type = lib.types.attrs;
         default = {};
       };
+
+      extraCoturnConfig = lib.mkOption {
+        description = "extra config passed to coturn";
+        type = lib.types.str;
+        default = "";
+      };
+
+      acme-staging = lib.mkEnableOption "use ACME's staging server, which has higher retry limits; useful when debugging ACME challenges.";
     };
 
     config = lib.mkIf cfg.enable {
       nixpkgs.overlays = [self.overlays.coturn];
-      networking.firewall.allowedTCPPorts = [
-        80
-        443
-        9641 # prometheus
-
-        cfg.nginx-http-port
-      ];
-      networking.firewall.allowedUDPPorts = [
-        80
-        443
-        9641 # prometheus
-      ];
+      networking.firewall.allowedTCPPorts =
+        (
+          lib.lists.optionals (cfg.listening-port != null) [
+            cfg.listening-port
+          ]
+        )
+        ++ [
+          443
+          9641 # prometheus
+
+          cfg.nginx-http-port
+        ];
+      networking.firewall.allowedUDPPorts =
+        (
+          lib.lists.optionals (cfg.listening-port != null) [
+            cfg.listening-port
+          ]
+        )
+        ++ [
+          443
+          9641 # prometheus
+        ];
       networking.firewall.allowedUDPPortRanges = [
         {
           from = cfg.coturn-min-port;
@@ -113,10 +138,9 @@
       services.coturn =
         {
           enable = true;
-          listening-port = 80;
           tls-listening-port = 443;
           listening-ips = [cfg.address];
-          lt-cred-mech = true; # Use long-term credential mechanism.
+          lt-cred-mech = cfg.username != null && cfg.credential != null; # Use long-term credential mechanism.
           realm = cfg.url;
           cert = "${cfg.turn-cert-dir}/fullchain.pem";
           pkey = "${cfg.turn-cert-dir}/key.pem";
@@ -129,15 +153,22 @@
             no-multicast-peers
             no-tlsv1
             no-tlsv1_1
-            user=${cfg.username}:${cfg.credential}
             prometheus
           ''
+          + lib.strings.optionalString config.services.coturn.lt-cred-mech ''
+            user=${cfg.username}:${cfg.credential}
+          ''
           + lib.strings.optionalString cfg.verbose ''
             verbose
           ''
           + lib.strings.optionalString (cfg.acme-redirect != null) ''
             acme-redirect=${cfg.acme-redirect}
-          '';
+          ''
+          + cfg.extraCoturnConfig;
+        }
+        // lib.attrsets.optionalAttrs (cfg.listening-port
+          != null) {
+          inherit (cfg) listening-port;
         }
         // cfg.extraCoturnAttrs;
@@ -167,19 +198,22 @@
         };
       };
 
-      security.acme = {
-        acceptTerms = true;
-        defaults = {
-          email = "acme@holo.host";
-        };
-
-        # after certificate renewal by acme coturn.service needs to reload this new cert, too
-        # see https://github.com/NixOS/nixpkgs/blob/nixos-23.05/nixos/modules/security/acme/default.nix#L322
-        certs."${cfg.url}".reloadServices = ["coturn"];
-
-        # staging server has higher retry limits. uncomment the following when debugging ACME challenges.
-        # certs."${cfg.url}".server = "https://acme-staging-v02.api.letsencrypt.org/directory";
-      };
+      security.acme =
+        lib.attrsets.recursiveUpdate
+        {
+          acceptTerms = true;
+          defaults = {
+            email = "acme@holo.host";
+          };
+
+          # after certificate renewal by acme coturn.service needs to reload this new cert, too
+          # see https://github.com/NixOS/nixpkgs/blob/nixos-23.05/nixos/modules/security/acme/default.nix#L322
+          certs."${cfg.url}".reloadServices = ["coturn"];
+        } (
+          lib.attrsets.optionalAttrs cfg.acme-staging {
+            certs."${cfg.url}".server = "https://acme-staging-v02.api.letsencrypt.org/directory";
+          }
+        );
     };
   };
 };
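Review note: username and credential now default to null, so lt-cred-mech is only enabled when both are set, and the user= line is emitted only in that case. A sketch of a host that wants the previous full-TURN behaviour under the new options — the hostname, address, and credentials are placeholders, not taken from this diff:

  services.holochain-turn-server = {
    enable = true;
    url = "turn.example.holo.host"; # placeholder
    address = "192.0.2.10"; # placeholder
    # 80 is still the default; set this to null to drop the plain listener entirely
    listening-port = 80;
    # setting both of these switches lt-cred-mech back on
    username = "turnuser"; # placeholder
    credential = "turnpass"; # placeholder
  };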
diff --git a/modules/flake-parts/nixosConfigurations.sbd-0.main.infra.holo.host/configuration.nix b/modules/flake-parts/nixosConfigurations.sbd-0.main.infra.holo.host/configuration.nix
new file mode 100644
index 00000000..64726c38
--- /dev/null
+++ b/modules/flake-parts/nixosConfigurations.sbd-0.main.infra.holo.host/configuration.nix
@@ -0,0 +1,58 @@
+{
+  config,
+  inputs,
+  self,
+  pkgs,
+  ...
+}: let
+  # https://console.hetzner.cloud/projects/1982619/servers/47746862/overview
+  hostName = "sbd-0";
+  domain = "main.infra.holo.host";
+  ipv4 = "65.108.241.120";
+  fqdn = "${config.networking.hostName}.${config.networking.domain}";
+in {
+  imports = [
+    inputs.disko.nixosModules.disko
+    inputs.srvos.nixosModules.server
+    inputs.srvos.nixosModules.mixins-terminfo
+    inputs.srvos.nixosModules.hardware-hetzner-cloud
+    self.nixosModules.hardware-hetzner-cloud-ccx
+
+    inputs.sops-nix.nixosModules.sops
+
+    self.nixosModules.holo-users
+    ../../nixos/shared.nix
+    ../../nixos/shared-nix-settings.nix
+    self.nixosModules.ps1
+
+    self.nixosModules.sbd-server
+  ];
+
+  networking = {inherit hostName domain;};
+
+  hostName = ipv4;
+
+  nix.settings.max-jobs = 8;
+
+  nix.settings.substituters = [
+    "https://holochain-ci.cachix.org"
+  ];
+
+  nix.settings.trusted-public-keys = [
+    "holochain-ci.cachix.org-3:5IUSkZc0aoRS53rfkvH9Kid40NpyjwCMCzwRTXy+QN8="
+  ];
+
+  system.stateVersion = "23.11";
+
+  services.sbd-server = {
+    enable = true;
+    url = fqdn;
+    address = ipv4;
+    tls-port = 443;
+    trusted-ip-header = "cf-connecting-ip";
+
+    # unlike the tx5-signal-server, the sbd-server doesn't know about the STUN servers.
+    # going forward it's going to be part of the conductor client config
+    # "stun:${config.services.holochain-turn-server.url}:80"
+  };
+}
diff --git a/modules/flake-parts/nixosConfigurations.sbd-0.main.infra.holo.host/default.nix b/modules/flake-parts/nixosConfigurations.sbd-0.main.infra.holo.host/default.nix
new file mode 100644
index 00000000..f415640d
--- /dev/null
+++ b/modules/flake-parts/nixosConfigurations.sbd-0.main.infra.holo.host/default.nix
@@ -0,0 +1,12 @@
+{
+  self,
+  lib,
+  inputs,
+  ...
+}: {
+  flake.nixosConfigurations.sbd-0_main_infra_holo_host = inputs.nixpkgs.lib.nixosSystem {
+    modules = [./configuration.nix];
+    system = "x86_64-linux";
+    specialArgs = self.specialArgs;
+  };
+}
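Review note: configuration.nix takes self and inputs as module arguments, which relies on default.nix forwarding self.specialArgs into nixosSystem. That attribute is defined elsewhere in the repo and is not part of this diff; presumably it is shaped roughly like the following sketch:

  # assumed shape of the flake-level specialArgs, for illustration only
  flake.specialArgs = {
    inherit self inputs;
  };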
diff --git a/modules/flake-parts/nixosConfigurations.stun-0.main.infra.holo.host/configuration.nix b/modules/flake-parts/nixosConfigurations.stun-0.main.infra.holo.host/configuration.nix
new file mode 100644
index 00000000..cd4c8dfb
--- /dev/null
+++ b/modules/flake-parts/nixosConfigurations.stun-0.main.infra.holo.host/configuration.nix
@@ -0,0 +1,62 @@
+{
+  config,
+  inputs,
+  self,
+  pkgs,
+  ...
+}: let
+  # https://console.hetzner.cloud/projects/1982619/servers/47741841/overview
+  hostName = "stun-0";
+  domain = "main.infra.holo.host";
+  ipv4 = "37.27.39.142";
+  fqdn = "${config.networking.hostName}.${config.networking.domain}";
+in {
+  imports = [
+    inputs.disko.nixosModules.disko
+    inputs.srvos.nixosModules.server
+    inputs.srvos.nixosModules.mixins-terminfo
+    inputs.srvos.nixosModules.hardware-hetzner-cloud
+    self.nixosModules.hardware-hetzner-cloud-ccx
+
+    inputs.sops-nix.nixosModules.sops
+
+    self.nixosModules.holo-users
+    ../../nixos/shared.nix
+    ../../nixos/shared-nix-settings.nix
+
+    self.nixosModules.holochain-turn-server
+  ];
+
+  networking = {inherit hostName domain;};
+
+  hostName = ipv4;
+
+  nix.settings.max-jobs = 8;
+
+  nix.settings.substituters = [
+    "https://holochain-ci.cachix.org"
+  ];
+
+  nix.settings.trusted-public-keys = [
+    "holochain-ci.cachix.org-3:5IUSkZc0aoRS53rfkvH9Kid40NpyjwCMCzwRTXy+QN8="
+  ];
+
+  system.stateVersion = "23.11";
+
+  services.holochain-turn-server = {
+    enable = true;
+    url = fqdn;
+    address = ipv4;
+    listening-port = null;
+    nginx-http-port = 80;
+    verbose = false;
+    extraCoturnAttrs = {
+      cli-ip = "127.0.0.1";
+      cli-password = "$5$4c2b9a49c5e013ae$14f901c5f36d4c8d5cf0c7383ecb0f26b052134293152bd1191412641a20ddf5";
+    };
+    extraCoturnConfig = ''
+      stun-only
+    '';
+    acme-staging = false;
+  };
+}
diff --git a/modules/flake-parts/nixosConfigurations.stun-0.main.infra.holo.host/default.nix b/modules/flake-parts/nixosConfigurations.stun-0.main.infra.holo.host/default.nix
new file mode 100644
index 00000000..896030f9
--- /dev/null
+++ b/modules/flake-parts/nixosConfigurations.stun-0.main.infra.holo.host/default.nix
@@ -0,0 +1,12 @@
+{
+  self,
+  lib,
+  inputs,
+  ...
+}: {
+  flake.nixosConfigurations.stun-0_main_infra_holo_host = inputs.nixpkgs.lib.nixosSystem {
+    modules = [./configuration.nix];
+    system = "x86_64-linux";
+    specialArgs = self.specialArgs;
+  };
+}
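Review note: stun-0 sets listening-port = null, so the lib.lists.optionals branch in the firewall expression contributes nothing and only 443, 9641, and the nginx-http-port are opened for TCP. The same pattern in isolation, as a standalone sketch (bringing lib into scope this way is an assumption):

  # evaluates to [ 443 9641 80 ] because listening-port is null
  let
    inherit (inputs.nixpkgs) lib; # assumes the flake's nixpkgs input is in scope
    listening-port = null;
  in
    lib.lists.optionals (listening-port != null) [listening-port]
    ++ [
      443
      9641 # prometheus
      80 # nginx-http-port
    ]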
diff --git a/modules/flake-parts/packages.holochain-sbd.nix b/modules/flake-parts/packages.holochain-sbd.nix
new file mode 100644
index 00000000..2d50625b
--- /dev/null
+++ b/modules/flake-parts/packages.holochain-sbd.nix
@@ -0,0 +1,53 @@
+{
+  # System independent arguments.
+  lib,
+  inputs,
+  ...
+}: {
+  perSystem = {
+    # Arguments specific to the `perSystem` context.
+    self',
+    pkgs,
+    ...
+  }: {
+    # system specific outputs like apps, checks, packages
+
+    packages = let
+      system = pkgs.system;
+      craneLib = inputs.crane.lib.${system};
+      cranePkgs = inputs.crane.inputs.nixpkgs.legacyPackages.${system};
+
+      sbdArgs = {
+        pname = "sbd";
+        src = inputs.sbd;
+        version = inputs.sbd.rev;
+        cargoExtraArgs = "--examples --bins";
+        nativeBuildInputs = [
+          pkgs.pkg-config
+        ];
+        buildInputs = [
+          pkgs.openssl
+        ];
+
+        doCheck = false;
+      };
+      sbdDeps = lib.makeOverridable craneLib.buildDepsOnly sbdArgs;
+    in {
+      sbd = lib.makeOverridable craneLib.buildPackage (sbdArgs
+        // {
+          cargoArtifacts = sbdDeps;
+        });
+
+      sbd-serverd = self'.packages.sbd.override {
+        name = "sbd-serverd";
+        cargoExtraArgs = "--bin sbd-serverd";
+        meta.mainProgram = "sbd-serverd";
+      };
+    };
+  };
+  flake = {
+    # system independent outputs like nixosModules, nixosConfigurations, etc.
+
+    # nixosConfigurations.example-host = ...
+  };
+}
diff --git a/modules/nixos/ps1.nix b/modules/nixos/ps1.nix
new file mode 100644
index 00000000..bb7debc7
--- /dev/null
+++ b/modules/nixos/ps1.nix
@@ -0,0 +1,20 @@
+{config, ...}: let
+  fqdn = "${config.networking.hostName}.${config.networking.domain}";
+in {
+  programs.bash.promptInit = ''
+    # Provide a nice prompt if the terminal supports it.
+    if [ "$TERM" != "dumb" ] || [ -n "$INSIDE_EMACS" ]; then
+      PROMPT_COLOR="1;31m"
+      ((UID)) && PROMPT_COLOR="1;32m"
+      if [ -n "$INSIDE_EMACS" ]; then
+        # Emacs term mode doesn't support xterm title escape sequence (\e]0;)
+        PS1="\n\[\033[$PROMPT_COLOR\][\u@${fqdn}:\w]\n\\$\[\033[0m\] "
+      else
+        PS1="\n\[\033[$PROMPT_COLOR\][\[\e]0;\u@${fqdn}: \w\a\]\u@${fqdn}:\w]\n\\$\[\033[0m\] "
+      fi
+      if test "$TERM" = "xterm"; then
+        PS1="\[\033]2;${fqdn}:\u:\w\007\]$PS1"
+      fi
+    fi
+  '';
+}
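Review note: wrapping craneLib.buildPackage in lib.makeOverridable is what lets sbd-serverd above narrow the build to a single binary via .override. The same pattern would yield other narrowed outputs, e.g. the following sketch — the example binary name is hypothetical and not taken from the sbd crate:

  # hypothetical additional package following the sbd-serverd pattern above
  sbd-echo-client = self'.packages.sbd.override {
    name = "sbd-echo-client";
    cargoExtraArgs = "--example sbd-echo-client"; # assumes such a cargo example exists
    meta.mainProgram = "sbd-echo-client";
  };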
diff --git a/modules/nixos/sbd-server.nix b/modules/nixos/sbd-server.nix
new file mode 100644
index 00000000..31706e27
--- /dev/null
+++ b/modules/nixos/sbd-server.nix
@@ -0,0 +1,128 @@
+{
+  self,
+  config,
+  lib,
+  pkgs,
+  ...
+}: let
+  cfg = config.services.sbd-server;
+  types = lib.types;
+in {
+  options.services.sbd-server = {
+    enable = lib.mkEnableOption "sbd-server";
+
+    package = lib.mkOption {
+      default = self.packages.${pkgs.system}.sbd-serverd;
+      type = types.package;
+    };
+
+    address = lib.mkOption {
+      description = "address to bind";
+      type = types.str;
+    };
+
+    tls-port = lib.mkOption {
+      description = "port to bind for incoming TLS connections";
+      type = types.int;
+    };
+
+    url = lib.mkOption {
+      description = "url for incoming TLS connections to the signal server";
+      type = types.str;
+    };
+
+    trusted-ip-header = lib.mkOption {
+      description = "request header key to extract the trusted IP from";
+      type = types.nullOr types.str;
+      default = null;
+    };
+  };
+
+  config = lib.mkIf (cfg.enable) {
+    # TODO: can be tested with check-services tool on the sbd integration branch
+
+    systemd.services.sbd-server = {
+      after = ["network.target"];
+      wantedBy = ["multi-user.target"];
+
+      environment = {
+        TMPDIR = "%T";
+      };
+
+      serviceConfig = {
+        DynamicUser = true;
+        PrivateTmp = true;
+
+        # use this mechanism to let systemd take care of file permissions for the dynamic user it creates
+        LoadCredential = [
+          "cert.pem:${config.security.acme.certs."${cfg.url}".directory}/cert.pem"
+          "key.pem:${config.security.acme.certs."${cfg.url}".directory}/key.pem"
+        ];
+        Restart = "always";
+
+        AmbientCapabilities =
+          # needed for binding to ports <1024
+          lib.lists.optionals (cfg.tls-port
+            < 1024) [
+            "CAP_NET_BIND_SERVICE"
+          ];
+
+        ExecStart = builtins.concatStringsSep " " (
+          [
+            (lib.meta.getExe cfg.package)
+
+            # bind to the public interface
+            "--bind=${cfg.address}:${builtins.toString cfg.tls-port}"
+
+            # configure TLS certificates
+            ''--cert-pem-file="''${CREDENTIALS_DIRECTORY}/cert.pem"''
+            ''--priv-key-pem-file="''${CREDENTIALS_DIRECTORY}/key.pem"''
+          ]
+          ++ lib.lists.optionals (cfg.trusted-ip-header != null) [
+            ''--trusted-ip-header=${cfg.trusted-ip-header}''
+          ]
+        );
+      };
+    };
+
+    networking.firewall.allowedTCPPorts = [
+      80
+
+      cfg.tls-port
+    ];
+
+    services.nginx = {
+      enable = true;
+      virtualHosts."${cfg.url}" = {
+        serverName = cfg.url;
+        enableACME = true;
+        addSSL = true;
+
+        locations."/".root = "/var/www/${cfg.url}";
+
+        listen = [
+          {
+            addr = "${cfg.address}";
+            port = 80;
+            ssl = false;
+          }
+        ];
+      };
+    };
+
+    security.acme = {
+      acceptTerms = true;
+      defaults = {
+        email = "acme@holo.host";
+      };
+
+      # note: watching the cert directory and reloading TLS on renewal has not been implemented yet; when the TLS certs are renewed, the service must be restarted
+      certs."${cfg.url}" = {
+        reloadServices = ["sbd-server"];
+
+        # staging server has higher retry limits. uncomment the following when debugging ACME challenges.
+        # server = "https://acme-staging-v02.api.letsencrypt.org/directory";
+      };
+    };
+  };
+}
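Review note: for the sbd-0 host above, the ExecStart list renders to roughly the following single command line (store path abbreviated; ${CREDENTIALS_DIRECTORY} is expanded by systemd at runtime from the LoadCredential entries):

  /nix/store/…-sbd-serverd/bin/sbd-serverd --bind=65.108.241.120:443 --cert-pem-file="${CREDENTIALS_DIRECTORY}/cert.pem" --priv-key-pem-file="${CREDENTIALS_DIRECTORY}/key.pem" --trusted-ip-header=cf-connecting-ip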