change nixos -> nix

Pim Kunis 2024-03-02 14:03:27 +01:00
parent e80a3d65ac
commit 79669b27f8
50 changed files with 5 additions and 5 deletions

102
nix/default.nix Normal file

@ -0,0 +1,102 @@
{ pkgs, lib, machine, disko, agenix, ... }: {
imports = [
./modules
./globals.nix
machine.nixosModule
disko.nixosModules.disko
agenix.nixosModules.default
]
++ lib.lists.optional machine.isPhysical ./physical.nix
++ lib.lists.optional machine.isVirtual ./virtual;
config = {
time.timeZone = "Europe/Amsterdam";
i18n = {
defaultLocale = "en_US.UTF-8";
extraLocaleSettings = let extraLocale = "nl_NL.UTF-8"; in {
LC_ADDRESS = extraLocale;
LC_IDENTIFICATION = extraLocale;
LC_MEASUREMENT = extraLocale;
LC_MONETARY = extraLocale;
LC_NAME = extraLocale;
LC_NUMERIC = extraLocale;
LC_PAPER = extraLocale;
LC_TELEPHONE = extraLocale;
LC_TIME = extraLocale;
};
};
services = {
openssh = {
enable = true;
openFirewall = true;
settings = {
PasswordAuthentication = false;
KbdInteractiveAuthentication = false;
};
};
xserver = {
layout = "us";
xkbVariant = "";
};
};
users.users.root.openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOodpLr+FDRyKyHjucHizNLVFHZ5AQmE9GmxMnOsSoaw pimkunis@thinkpadpim"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINUZp4BCxf7uLa1QWonx/Crf8tYZ5MKIZ+EuaBa82LrV user@user-laptop"
];
programs = {
ssh = {
knownHosts = {
dmz = {
hostNames = [ "*.dmz" ];
publicKey =
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAX2IhgHNxC6JTvLu9cej+iWuG+uJFMXn4AiRro9533x";
certAuthority = true;
};
hypervisors = {
hostNames = [ "*.hyp" ];
publicKey =
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFzRkH3d/KVJQouswY/DMpenWbDFVOnI3Vut0xR0e1tb";
certAuthority = true;
};
};
};
neovim = {
enable = true;
vimAlias = true;
viAlias = true;
};
};
environment.systemPackages = with pkgs; [
neofetch
wget
git
btop
htop
ripgrep
dig
tree
file
tcpdump
lsof
parted
radvd
minicom
socat
];
nixpkgs.overlays = [
(final: prev: { lib = prev.lib // (import ./net.nix prev); })
];
};
}


@ -0,0 +1,46 @@
#!/usr/bin/env bash
set -euo pipefail
IFS=$'\n\t'
servername="${1-}"
hostname="${2-}"
if [ -z "$servername" ] || [ -z "$hostname" ]
then
echo "Usage: $0 SERVERNAME HOSTNAME"
exit 1
fi
confirmation="Yes, wipe ${servername}."
echo "⚠️ This will wipe ${servername} completely! ⚠️"
echo "Confirm by typing: \"${confirmation}\""
read -r response
if [ "$response" != "$confirmation" ]; then
echo "Aborting."
exit 1
fi
# Create a temporary directory
temp=$(mktemp -d)
# Function to cleanup temporary directory on exit
cleanup() {
rm -rf "$temp"
}
trap cleanup EXIT
# Create directory where age key will go.
# nixos-anywhere creates a kind of overlay and retains this directory structure on the final file system.
mkdir "$temp/etc"
secret-tool lookup age-identity "$servername" > "$temp/etc/age_ed25519"
# Set the correct permissions
chmod 600 "$temp/etc/age_ed25519"
# Install NixOS to the host system with our age identity
nixos-anywhere --extra-files "$temp" --flake ".#${servername}" "root@${hostname}"
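For reference, a minimal usage sketch for this script, assuming the age identity sits in a local file named `age_ed25519`, the flake exposes the wrapped script as `packages.bootstrap` (see the next file), and `jefke` is reachable at `jefke.hyp`:

```bash
# Store the age identity under the "age-identity" attribute so the lookup above finds it;
# secret-tool reads the secret itself from stdin.
secret-tool store --label="age identity for jefke" age-identity jefke < ./age_ed25519

# Run the wrapped bootstrap script from the flake; this wipes the target and installs NixOS over SSH.
nix run .#bootstrap -- jefke jefke.hyp
```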


@ -0,0 +1,16 @@
{ flake-utils, hostPkgs, ... }: flake-utils.lib.eachDefaultSystem (system: {
packages.bootstrap =
let
name = "bootstrap";
buildInputs = with hostPkgs; [ libsecret coreutils nixos-anywhere ];
script = (hostPkgs.writeScriptBin name (builtins.readFile ./bootstrap.sh)).overrideAttrs (old: {
buildCommand = "${old.buildCommand}\n patchShebangs $out";
});
in
hostPkgs.symlinkJoin {
inherit name;
paths = [ script ] ++ buildInputs;
buildInputs = [ hostPkgs.makeWrapper ];
postBuild = "wrapProgram $out/bin/${name} --set PATH $out/bin";
};
})

11
nix/flake/checks.nix Normal file

@ -0,0 +1,11 @@
{ self, hostPkgs, machines, flake-utils, deploy-rs, ... }: flake-utils.lib.eachDefaultSystem (system: {
# Deploy-rs' flake checks seem broken for architectures other than the deployment machine's.
# We skip those checks here.
checks = deploy-rs.lib.${system}.deployChecks (self.deploy // {
nodes = (hostPkgs.lib.attrsets.filterAttrs
(name: node:
machines.${name}.arch == system
)
self.deploy.nodes);
});
})
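With that filter in place, the remaining deploy-rs checks run as part of the ordinary flake checks; a minimal usage sketch:

```bash
# Evaluate and run all flake checks, including the filtered deploy-rs checks for the current system.
nix flake check
```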

23
nix/flake/deploy.nix Normal file

@ -0,0 +1,23 @@
{ self, hostPkgs, physicalMachines, deploy-rs, ... }:
let
mkDeployNodes = nodeDef:
builtins.mapAttrs
(name: machine: nodeDef name machine)
physicalMachines;
in
{
deploy = {
sshUser = "root";
user = "root";
nodes = mkDeployNodes (name: machine: {
hostname = self.nixosConfigurations.${name}.config.networking.fqdn;
profiles.system = {
remoteBuild = machine.arch != hostPkgs.stdenv.hostPlatform.system;
path = deploy-rs.lib.${machine.arch}.activate.nixos
self.nixosConfigurations.${name};
};
});
};
}
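A node defined here could then be deployed with the deploy-rs CLI; a minimal sketch, assuming the `deploy` binary is available and `lewis` is the target node:

```bash
# Build the system profile (remotely if the architectures differ) and activate it on lewis.
deploy '.#lewis'
```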

20
nix/flake/nixos.nix Normal file

@ -0,0 +1,20 @@
{ nixpkgs, machines, physicalMachines, dns, microvm, disko, agenix, nixos-hardware, ... }:
let
mkNixosSystems = systemDef:
builtins.mapAttrs
(name: machine:
nixpkgs.lib.nixosSystem (systemDef name machine)
)
physicalMachines;
in
{
nixosConfigurations = mkNixosSystems (name: machine: {
system = machine.arch;
specialArgs = { inherit machines machine dns microvm disko agenix nixos-hardware; };
modules = [
../.
{ networking.hostName = name; }
];
});
}

24
nix/globals.nix Normal file

@ -0,0 +1,24 @@
{
lab.networking = {
public = {
ipv4.router = "192.145.57.90";
ipv6.router = "2a0d:6e00:1a77::1";
};
dmz = {
ipv4 = {
prefixLength = "24";
dockerSwarm = "192.168.30.8";
router = "192.168.30.1";
services = "192.168.30.7";
};
ipv6 = {
prefixLength = "64";
dockerSwarm = "2a0d:6e00:1a77:30:c8fe:c0ff:feff:ee08";
router = "2a0d:6e00:1a77:30::1";
services = "2a0d:6e00:1a77:30::7";
};
};
};
}

21
nix/machines/atlas.nix Normal file

@ -0,0 +1,21 @@
{
machines.atlas = {
kind = "physical";
arch = "x86_64-linux";
isHypervisor = true;
nixosModule.lab = {
storage = {
osDisk = "/dev/sda";
dataPartition = "/dev/nvme0n1p1";
};
ssh = {
useCertificates = true;
hostCert = builtins.readFile ./certificates/atlas/host_ed25519.crt;
userCert = builtins.readFile ./certificates/atlas/user_ed25519.crt;
};
};
};
}


@ -0,0 +1,15 @@
{
machines.bancomart = {
kind = "virtual";
hypervisorName = "jefke";
nixosModule = {
microvm.balloonMem = 7680;
lab = {
dockerSwarm.enable = true;
vm.id = 2;
};
};
};
}


@ -0,0 +1 @@
ssh-ed25519-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIH4CQGHwWytKnkn7lYjT6G1NyPzINvfroZgwCLoOLO74AAAAIOMoSSEqM4VUBWUeFweJbqK9z7Ygp7fkX22hyWmgCNg8AAAAAAAAAAAAAAACAAAACWF0bGFzLmh5cAAAAA0AAAAJYXRsYXMuaHlwAAAAAAAAAAD//////////wAAAAAAAAAAAAAAAAAAADMAAAALc3NoLWVkMjU1MTkAAAAgXNGQfd38pUlCi6zBj8Myl6dZsMVU6cjdW63TFHR7W1sAAABTAAAAC3NzaC1lZDI1NTE5AAAAQAYModSEVNG06xvAcRn8XFeCp/iXFeqVcbtfT1NmmMkyIgybkXhJyHjp89BPg0zeAaoScFx8Xpsdd8CsxTeP+QU= root@atlas


@ -0,0 +1 @@
ssh-ed25519-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIItpNkjaH8o51VKydwHYbbLxXMtf4euzojFKPxz+XqdwAAAAIG1vJNH1p8l8HlmYMT/vHGTjEnIul7ORQhutNnKiXlgqAAAAAAAAAAAAAAABAAAACWF0bGFzLmh5cAAAABsAAAAJYXRsYXMuaHlwAAAACmh5cGVydmlzb3IAAAAAAAAAAP//////////AAAAAAAAAIIAAAAVcGVybWl0LVgxMS1mb3J3YXJkaW5nAAAAAAAAABdwZXJtaXQtYWdlbnQtZm9yd2FyZGluZwAAAAAAAAAWcGVybWl0LXBvcnQtZm9yd2FyZGluZwAAAAAAAAAKcGVybWl0LXB0eQAAAAAAAAAOcGVybWl0LXVzZXItcmMAAAAAAAAAAAAAADMAAAALc3NoLWVkMjU1MTkAAAAgdmt4SFL+swd8kHsh6cQR+TfzMKObJx75fYBbHNT83zUAAABTAAAAC3NzaC1lZDI1NTE5AAAAQIW4tC+FJA6bKFUfRVcHLWz1u3ZL/GRTWD2WCW4ApHq7no6ODeMwE10noNt/42mwYjFmjwR+cd9EuMyUErXmaw8= root@atlas


@ -0,0 +1 @@
ssh-ed25519-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIHzQMMRr2vNtTW3joxPzQYjFFu3iI/WyIRVD18YKY61CAAAAIKTzrsjwRmKg3JbRLY/RrWnIBfCupfFdMWZ/8AQAXg9uAAAAAAAAAAAAAAACAAAACWplZmtlLmh5cAAAAA0AAAAJamVma2UuaHlwAAAAAAAAAAD//////////wAAAAAAAAAAAAAAAAAAADMAAAALc3NoLWVkMjU1MTkAAAAgXNGQfd38pUlCi6zBj8Myl6dZsMVU6cjdW63TFHR7W1sAAABTAAAAC3NzaC1lZDI1NTE5AAAAQPNDgNAOmp5Gl//mjEHF2H5Yi8GIFfyiRm8nJ2UkGXzpNr3+bQvQhPigziuXO0+8910yY9QzXTfvc4mgAT1gpgU= root@jefke


@ -0,0 +1 @@
ssh-ed25519-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIKdTRygvLfapNY6umK+TdoqWDIq4ZzXLZlUJ/lVvkuqtAAAAINZ3aw6gjrOt561j1Mh7kINqlavorKeujN1Q8mn/Fy69AAAAAAAAAAAAAAABAAAACWplZmtlLmh5cAAAABsAAAAJamVma2UuaHlwAAAACmh5cGVydmlzb3IAAAAAAAAAAP//////////AAAAAAAAAIIAAAAVcGVybWl0LVgxMS1mb3J3YXJkaW5nAAAAAAAAABdwZXJtaXQtYWdlbnQtZm9yd2FyZGluZwAAAAAAAAAWcGVybWl0LXBvcnQtZm9yd2FyZGluZwAAAAAAAAAKcGVybWl0LXB0eQAAAAAAAAAOcGVybWl0LXVzZXItcmMAAAAAAAAAAAAAADMAAAALc3NoLWVkMjU1MTkAAAAgdmt4SFL+swd8kHsh6cQR+TfzMKObJx75fYBbHNT83zUAAABTAAAAC3NzaC1lZDI1NTE5AAAAQI36zBw4Epr1ijXBk7T5JENgisn4SbVTLkhYBWCquHcAv3nFFJOEZ1kdC/SfYaDwmXb/rNybpr3942wF0xD3/ws= root@jefke


@ -0,0 +1 @@
ssh-ed25519-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIAP9Xu3G75HcVIVhrgiCKSM+YTkaCbTqI18NBdWikIlHAAAAIKfbZauF+7q3s7VxhvxdPT7XDapch0P3tD//U4/70D6cAAAAAAAAAAAAAAACAAAACWxld2lzLmh5cAAAAA0AAAAJbGV3aXMuaHlwAAAAAAAAAAD//////////wAAAAAAAAAAAAAAAAAAADMAAAALc3NoLWVkMjU1MTkAAAAgXNGQfd38pUlCi6zBj8Myl6dZsMVU6cjdW63TFHR7W1sAAABTAAAAC3NzaC1lZDI1NTE5AAAAQGHtz4FNkj0LuplU+12A/sx0bE4QeHLYhctXag9DSMGJz9yOpyMpK3PPKkm6leLdGYs7RUjxwXvcj+f4k16VXA0= root@atlas


@ -0,0 +1 @@
ssh-ed25519-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQtdjAxQG9wZW5zc2guY29tAAAAIGqYC+tRPZ24WMroezrFgxtm8YObweMCTpz/y+dbGrzKAAAAIEuhHYB6zdSsfvLm4zXfuUbUCkUgPRu6rdt1rninA7PwAAAAAAAAAAAAAAABAAAACWxld2lzLmh5cAAAABsAAAAJbGV3aXMuaHlwAAAACmh5cGVydmlzb3IAAAAAAAAAAP//////////AAAAAAAAAIIAAAAVcGVybWl0LVgxMS1mb3J3YXJkaW5nAAAAAAAAABdwZXJtaXQtYWdlbnQtZm9yd2FyZGluZwAAAAAAAAAWcGVybWl0LXBvcnQtZm9yd2FyZGluZwAAAAAAAAAKcGVybWl0LXB0eQAAAAAAAAAOcGVybWl0LXVzZXItcmMAAAAAAAAAAAAAADMAAAALc3NoLWVkMjU1MTkAAAAgdmt4SFL+swd8kHsh6cQR+TfzMKObJx75fYBbHNT83zUAAABTAAAAC3NzaC1lZDI1NTE5AAAAQGV0nCPl4HDo1Q24NnFcPc1/FPYxwkWg864eUp5hdbttL4f8h7YLtZw6k8hHIn50wVdHEJkUwYrXgR1dwYhfEwA= root@atlas

77
nix/machines/default.nix Normal file

@ -0,0 +1,77 @@
{ lib, ... }:
let
machineOpts = { config, ... }: {
options = {
kind = lib.mkOption {
type = lib.types.enum [ "physical" "virtual" ];
description = ''
Whether this machine is physical or virtual.
'';
};
hypervisorName = lib.mkOption {
default = null;
type = with lib.types; nullOr str;
description = ''
The host name of the hypervisor hosting this virtual machine.
'';
};
arch = lib.mkOption {
default = null;
type = with lib.types; nullOr str;
description = ''
CPU architecture of this machine.
'';
};
isRaspberryPi = lib.mkOption {
default = false;
type = lib.types.bool;
};
isHypervisor = lib.mkOption {
default = false;
type = lib.types.bool;
};
# Derived value
isPhysical = lib.mkOption {
default = config.kind == "physical";
type = lib.types.bool;
};
# Derived value
isVirtual = lib.mkOption {
default = config.kind == "virtual";
type = lib.types.bool;
};
nixosModule = lib.mkOption {
default = { ... }: { };
type = lib.types.anything;
description = ''
Customized configuration for this machine in the form of a NixOS module.
'';
};
};
};
in
{
imports = [
./warwick.nix
./atlas.nix
./jefke.nix
./lewis.nix
./hermes.nix
./maestro.nix
./bancomart.nix
./vpay.nix
];
options = {
machines = lib.mkOption {
type = with lib.types; attrsOf (submodule machineOpts);
};
};
}

29
nix/machines/hermes.nix Normal file

@ -0,0 +1,29 @@
{
machines.hermes = {
kind = "virtual";
hypervisorName = "lewis";
nixosModule = { hypervisorConfig, ... }: {
lab = {
networking = {
dmz.services.enable = true;
staticNetworking = true;
staticIPv4 = hypervisorConfig.lab.networking.dmz.ipv4.services;
staticIPv6 = hypervisorConfig.lab.networking.dmz.ipv6.services;
};
vm = {
# TODO: It would be nice to have a check that a MAC address is only ever assigned to one VM.
# TODO: Idea: what if we generated these IDs by hashing the host name and reducing that to the number of possible hosts?
id = 7;
shares = [{
name = "dnsmasq";
mountPoint = "/var/lib/dnsmasq";
}];
};
};
};
};
}

20
nix/machines/jefke.nix Normal file

@ -0,0 +1,20 @@
{
machines.jefke = {
kind = "physical";
arch = "x86_64-linux";
isHypervisor = true;
nixosModule.lab = {
storage = {
osDisk = "/dev/sda";
dataPartition = "/dev/nvme0n1p1";
};
ssh = {
useCertificates = true;
hostCert = builtins.readFile ./certificates/jefke/host_ed25519.crt;
userCert = builtins.readFile ./certificates/jefke/user_ed25519.crt;
};
};
};
}

24
nix/machines/lewis.nix Normal file

@ -0,0 +1,24 @@
{
machines.lewis = {
kind = "physical";
arch = "x86_64-linux";
isHypervisor = true;
nixosModule.lab = {
backups.enable = true;
data-sharing.enable = true;
networking.dmz.allowConnectivity = true;
storage = {
osDisk = "/dev/sda";
dataPartition = "/dev/nvme0n1p1";
};
ssh = {
useCertificates = true;
hostCert = builtins.readFile ./certificates/lewis/host_ed25519.crt;
userCert = builtins.readFile ./certificates/lewis/user_ed25519.crt;
};
};
};
}

18
nix/machines/maestro.nix Normal file

@ -0,0 +1,18 @@
{
machines.maestro = {
kind = "virtual";
hypervisorName = "atlas";
nixosModule = { config, ... }: {
microvm.balloonMem = 7680;
lab = {
dockerSwarm.enable = true;
vm = {
id = 1;
};
};
};
};
}

15
nix/machines/vpay.nix Normal file

@ -0,0 +1,15 @@
{
machines.vpay = {
kind = "virtual";
hypervisorName = "lewis";
nixosModule = {
microvm.balloonMem = 5120;
lab = {
dockerSwarm.enable = true;
vm.id = 3;
};
};
};
}

7
nix/machines/warwick.nix Normal file

@ -0,0 +1,7 @@
{
machines.warwick = {
kind = "physical";
arch = "aarch64-linux";
isRaspberryPi = true;
};
}

129
nix/modules/backups.nix Normal file

@ -0,0 +1,129 @@
{ pkgs, lib, config, ... }:
let
cfg = config.lab.backups;
beforeEverything = pkgs.writeShellScriptBin "beforeEverything" ''
if [ -d "${cfg.snapshotLocation}" ]; then
${pkgs.btrfs-progs}/bin/btrfs subvolume delete ${cfg.snapshotLocation}
fi
${pkgs.btrfs-progs}/bin/btrfs subvolume snapshot -r ${cfg.subvolumeLocation} ${cfg.snapshotLocation}
'';
borgmaticConfig = pkgs.writeTextFile {
name = "borgmatic-config";
text = ''
source_directories:
- ${cfg.snapshotLocation}
repositories:
- path: ${cfg.repoLocation}
label: nfs
- path: ssh://s6969ym3@s6969ym3.repo.borgbase.com/./repo
label: ec2
exclude_patterns:
- ${cfg.snapshotLocation}/media
ssh_command: "${pkgs.openssh}/bin/ssh -i ${config.age.secrets."ec2_borg_server.pem".path} -o StrictHostKeyChecking=no"
keep_daily: 7
keep_weekly: 4
keep_monthly: 6
encryption_passcommand: "${pkgs.coreutils}/bin/cat ''${BORG_PASSPHRASE_FILE}"
before_everything:
- ${beforeEverything}/bin/beforeEverything
postgresql_databases:
- name: nextcloud
hostname: lewis.dmz
username: nextcloud
password: ''${NEXTCLOUD_DATABASE_PASSWORD}
format: tar
- name: hedgedoc
hostname: lewis.dmz
username: hedgedoc
password: ''${HEDGEDOC_DATABASE_PASSWORD}
format: tar
- name: paperless
hostname: lewis.dmz
username: paperless
password: ''${PAPERLESS_DATABASE_PASSWORD}
format: tar
'';
};
in
{
options.lab.backups = {
enable = lib.mkOption {
default = false;
type = lib.types.bool;
description = ''
Whether to enable backups of persistent data on this machine.
'';
};
repoLocation = lib.mkOption {
default = "${config.lab.storage.dataMountPoint}/backups/nfs.borg";
type = lib.types.str;
description = ''
Location of the Borg repository to back up to.
'';
};
subvolumeLocation = lib.mkOption {
default = "${config.lab.storage.dataMountPoint}/nfs";
type = lib.types.str;
description = ''
Location of the btrfs subvolume holding the data.
'';
};
snapshotLocation = lib.mkOption {
default = "${config.lab.storage.dataMountPoint}/snapshot-nfs";
type = lib.types.str;
description = ''
Location to temporarily create a snapshot of the subvolume.
'';
};
};
config = lib.mkIf cfg.enable {
environment.systemPackages = with pkgs; [ borgbackup postgresql ];
# Converted from:
# https://github.com/borgmatic-collective/borgmatic/tree/84823dfb912db650936e3492f6ead7e0e0d32a0f/sample/systemd
systemd.services.borgmatic = {
description = "borgmatic backup";
wants = [ "network-online.target" ];
after = [ "network-online.target" ];
unitConfig.ConditionACPower = true;
preStart = "${pkgs.coreutils}/bin/sleep 10s";
path = with pkgs; [ postgresql ];
serviceConfig = {
Type = "oneshot";
Nice = 19;
CPUSchedulingPolicy = "batch";
IOSchedulingClass = "best-effort";
IOSchedulingPriority = 7;
IOWeight = 100;
Restart = "no";
LogRateLimitIntervalSec = 0;
EnvironmentFile = config.age.secrets."database_passwords.env".path;
Environment = "BORG_PASSPHRASE_FILE=${config.age.secrets."borg_passphrase".path}";
};
script = "${pkgs.systemd}/bin/systemd-inhibit --who=\"borgmatic\" --what=\"sleep:shutdown\" --why=\"Prevent interrupting scheduled backup\" ${pkgs.borgmatic}/bin/borgmatic --verbosity -2 --syslog-verbosity 1 -c ${borgmaticConfig}";
};
systemd.timers.borgmatic = {
description = "Run borgmatic backup";
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = "*-*-* 3:00:00";
Persistent = true;
RandomizedDelaySec = "3h";
};
};
age.secrets = {
"database_passwords.env".file = ../secrets/database_passwords.env.age;
"borg_passphrase".file = ../secrets/borg_passphrase.age;
"ec2_borg_server.pem".file = ../secrets/ec2_borg_server.pem.age;
};
};
}


@ -0,0 +1,97 @@
{ pkgs, lib, config, ... }:
let
cfg = config.lab.data-sharing;
nfsShares = [
"/nextcloud/data"
"/radicale"
"/freshrss/data"
"/freshrss/extensions"
"/pihole/data"
"/pihole/dnsmasq"
"/hedgedoc/uploads"
"/traefik/acme"
"/forgejo"
"/kitchenowl/data"
"/syncthing/config"
"/paperless-ngx/data"
"/paperless-ngx/redisdata"
"/media"
"/media/books"
"/media/movies"
"/media/music"
"/media/shows"
"/jellyfin/config"
"/transmission/config"
"/jellyseerr/config"
"/radarr/config"
"/prowlarr/config"
"/sonarr/config"
"/bazarr/config"
];
nfsExports = lib.strings.concatLines (
builtins.map
(share:
"${cfg.nfsRoot}${share} 192.168.30.0/${config.lab.networking.dmz.ipv4.prefixLength}(rw,sync,no_subtree_check,no_root_squash)"
)
nfsShares
);
in
{
options.lab.data-sharing = {
enable = lib.mkOption {
default = false;
type = lib.types.bool;
description = ''
Configure this server to serve our data using NFS and PostgreSQL.
'';
};
nfsRoot = lib.mkOption {
default = "/mnt/data/nfs";
type = lib.types.str;
description = ''
Root directory of NFS data.
'';
};
postgresDir = lib.mkOption {
default = "/mnt/data/postgresql/${config.services.postgresql.package.psqlSchema}";
type = lib.types.str;
description = ''
Postgresql data directory.
'';
};
};
config = lib.mkIf cfg.enable {
networking.firewall.interfaces.${config.lab.networking.dmz.bridgeName}.allowedTCPPorts = [
2049 # NFS
5432 # PostgreSQL
111 # NFS
20048 # NFS
];
services = {
nfs.server = {
enable = true;
exports = nfsExports;
};
postgresql = {
enable = true;
package = pkgs.postgresql_15;
enableTCPIP = true;
dataDir = cfg.postgresDir;
authentication = ''
host nextcloud nextcloud all md5
host hedgedoc hedgedoc all md5
host paperless paperless all md5
'';
};
};
};
}

10
nix/modules/default.nix Normal file

@ -0,0 +1,10 @@
{
imports = [
./storage.nix
./ssh-certificates.nix
./backups.nix
./networking
./data-sharing.nix
./globals.nix
];
}

89
nix/modules/globals.nix Normal file

@ -0,0 +1,89 @@
{ lib, ... }: {
options.lab = {
networking = {
public = {
ipv4 = {
router = lib.mkOption {
type = lib.types.str;
description = ''
Public IPv4 address of the router.
'';
};
};
ipv6 = {
router = lib.mkOption {
type = lib.types.str;
description = ''
Publicly routable IPv6 address of the router.
'';
};
};
};
dmz = {
ipv4 = {
prefixLength = lib.mkOption {
type = lib.types.str;
description = ''
IPv4 prefix length of DMZ network.
'';
};
dockerSwarm = lib.mkOption {
type = lib.types.str;
description = ''
IPv4 address of the Docker Swarm in the DMZ.
'';
};
router = lib.mkOption {
type = lib.types.str;
description = ''
The router's IPv4 address on the DMZ network.
'';
};
services = lib.mkOption {
type = lib.types.str;
description = ''
The IPv4 address of the interface serving DHCP and DNS on the DMZ network.
'';
};
};
ipv6 = {
prefixLength = lib.mkOption {
type = lib.types.str;
description = ''
IPv6 prefix length of DMZ network.
'';
};
dockerSwarm = lib.mkOption {
type = lib.types.str;
description = ''
Globally routable IPv6 address of the Docker Swarm.
'';
};
router = lib.mkOption {
type = lib.types.str;
description = ''
The router's IPv6 address on the DMZ network.
'';
};
services = lib.mkOption {
type = lib.types.str;
description = ''
The IPv6 address of the interface serving DHCP and DNS on the DMZ network.
'';
};
};
};
};
};
}


@ -0,0 +1,167 @@
{ lib, config, machine, ... }:
let cfg = config.lab.networking;
in {
imports = [ ./dmz_services ];
options.lab.networking = {
dmz = {
allowConnectivity = lib.mkOption {
default = false;
type = lib.types.bool;
description = ''
Whether to allow networking on the DMZ bridge interface.
'';
};
bridgeName = lib.mkOption {
default = "bridgedmz";
type = lib.types.str;
description = ''
The name of the DMZ bridge.
'';
};
};
staticNetworking = lib.mkOption {
default = false;
type = lib.types.bool;
description = ''
Whether this machine has static networking configuration applied.
Routing is prepopulated, but IP addresses have to be set.
'';
};
staticIPv4 = lib.mkOption {
type = lib.types.str;
description = ''
Static IPv4 address for the machine.
'';
};
staticIPv6 = lib.mkOption {
type = lib.types.str;
description = ''
Static IPv6 address for the machine.
'';
};
};
config = {
networking = {
domain = if machine.isPhysical then "hyp" else "dmz";
nftables.enable = true;
useDHCP = false;
firewall = {
enable = true;
checkReversePath = false;
};
};
systemd.network = {
enable = true;
netdevs = lib.mkIf machine.isHypervisor {
"20-vlandmz" = {
vlanConfig.Id = 30;
netdevConfig = {
Kind = "vlan";
Name = "vlandmz";
};
};
"20-bridgedmz" = {
netdevConfig = {
Kind = "bridge";
Name = cfg.dmz.bridgeName;
};
};
};
networks = lib.attrsets.mergeAttrsList [
(lib.optionalAttrs machine.isHypervisor {
"30-main-nic" = {
matchConfig.Name = "en*";
vlan = [ "vlandmz" ];
networkConfig = {
DHCP = "yes";
};
};
"40-vlandmz" = {
matchConfig.Name = "vlandmz";
linkConfig.RequiredForOnline = "enslaved";
networkConfig = {
IPv6AcceptRA = false;
LinkLocalAddressing = "no";
Bridge = cfg.dmz.bridgeName;
};
};
"40-bridgedmz" = {
matchConfig.Name = cfg.dmz.bridgeName;
linkConfig.RequiredForOnline = "carrier";
networkConfig = {
IPv6AcceptRA = cfg.dmz.allowConnectivity;
LinkLocalAddressing = if cfg.dmz.allowConnectivity then "ipv6" else "no";
DHCP = "yes";
};
};
"40-vms" = {
matchConfig.Name = "vm-*";
networkConfig.Bridge = cfg.dmz.bridgeName;
};
})
(lib.optionalAttrs machine.isVirtual {
"30-main-nic" = {
matchConfig.Name = "en*";
networkConfig = {
IPv6AcceptRA = ! cfg.staticNetworking;
DHCP = lib.mkIf (! cfg.staticNetworking) "yes";
Address = lib.mkIf cfg.staticNetworking [
"${cfg.staticIPv4}/${cfg.dmz.ipv4.prefixLength}"
"${cfg.staticIPv6}/${cfg.dmz.ipv6.prefixLength}"
];
DNS = lib.mkIf cfg.staticNetworking [
cfg.dmz.ipv4.router
cfg.dmz.ipv6.router
];
};
routes = lib.mkIf cfg.staticNetworking [
{
routeConfig = {
Gateway = cfg.dmz.ipv4.router;
Destination = "0.0.0.0/0";
};
}
{
routeConfig = {
Gateway = cfg.dmz.ipv6.router;
Destination = "::/0";
};
}
];
};
})
(lib.optionalAttrs machine.isRaspberryPi {
"30-main-nic" = {
matchConfig.Name = "end*";
networkConfig = {
IPv6AcceptRA = true;
DHCP = "yes";
};
};
})
];
};
};
}


@ -0,0 +1,73 @@
# TODO: we should split this into DHCP and DNS
# This decoupling makes it easier to put one service on another host.
{ pkgs, lib, config, dns, ... }@inputs:
let
cfg = config.lab.networking.dmz.services;
kunisZoneFile = pkgs.writeTextFile {
name = "kunis-zone-file";
text = (dns.lib.toString "kun.is" (import ./zones/kun.is.nix inputs));
};
geokunis2nlZoneFile = pkgs.writeTextFile {
name = "geokunis2nl-zone-file";
text = (dns.lib.toString "geokunis2.nl" (import ./zones/geokunis2.nl.nix inputs));
};
in
{
options.lab.networking.dmz.services.enable = lib.mkOption {
default = false;
type = lib.types.bool;
description = ''
Whether to enable an authoritative DNS server and DNSmasq for the DMZ network.
'';
};
config = lib.mkIf cfg.enable {
# TODO Remove this; make this explicit in the machine config.
lab.networking.dmz.allowConnectivity = true;
# TODO: listen only on dmz interface, make this portable between physical and VM.
networking.firewall = {
allowedTCPPorts = [ 53 5353 ];
allowedUDPPorts = [ 53 67 5353 ];
};
services = {
bind = {
enable = true;
forwarders = [ ];
extraOptions = ''
allow-transfer { none; };
allow-recursion { none; };
version none;
notify no;
'';
zones = {
"kun.is" = {
master = true;
file = kunisZoneFile;
allowQuery = [ "any" ];
};
"geokunis2.nl" = {
master = true;
file = geokunis2nlZoneFile;
allowQuery = [ "any" ];
slaves = [
"87.253.155.96/27"
"157.97.168.160/27"
];
};
};
};
dnsmasq = {
enable = true;
settings = import ./dnsmasq.nix inputs;
};
};
};
}


@ -0,0 +1,50 @@
{ config, ... }:
let
cfg = config.lab.networking;
in
{
no-resolv = true;
local = "/dmz/";
dhcp-fqdn = true;
no-hosts = true;
expand-hosts = true;
domain = "dmz";
dhcp-authoritative = true;
ra-param = "*,0,0";
alias = "${cfg.public.ipv4.router},${cfg.dmz.ipv4.dockerSwarm}";
log-dhcp = true;
log-queries = true;
port = "5353";
host-record = [
"hermes.dmz,${cfg.dmz.ipv4.services},${cfg.dmz.ipv6.services}"
"ipv4.hermes.dmz,${cfg.dmz.ipv4.services}" # TODO: Do we need these?
"ipv6.hermes.dmz,${cfg.dmz.ipv6.services}"
];
server = [
cfg.dmz.ipv4.router
"/geokunis2.nl/${cfg.dmz.ipv4.services}"
"/kun.is/${cfg.dmz.ipv4.services}"
];
dhcp-range = [
"192.168.30.50,192.168.30.127,15m"
"2a0d:6e00:1a77:30::,ra-stateless,ra-names"
];
dhcp-host = [
"b8:27:eb:b9:ab:e2,esrom"
"ba:db:ee:f0:00:01,maestro,${cfg.dmz.ipv4.dockerSwarm}"
];
dhcp-option = [
"3,${cfg.dmz.ipv4.router}"
"option:dns-server,${cfg.dmz.ipv4.router}"
"option6:dns-server,[2a02:58:19a:30::1]"
];
address = [
"/ns.pizzapim.nl/ns.geokunis2.nl/${cfg.dmz.ipv4.services}"
"/ns.pizzapim.nl/ns.geokunis2.nl/${cfg.dmz.ipv6.services}"
];
}


@ -0,0 +1,41 @@
{ config, dns, ... }:
with dns.lib.combinators;
let
cfg = config.lab.networking;
in
{
SOA = {
nameServer = "ns";
adminEmail = "hostmaster@geokunis2.nl";
serial = 2024020500;
};
NS = [
"ns.geokunis2.nl."
"ns0.transip.net."
"ns1.transip.nl."
"ns2.transip.eu."
];
MX = [ (mx.mx 10 "mail.geokunis2.nl.") ];
CAA = letsEncrypt "caa@geokunis2.nl";
subdomains = {
ns = {
A = [ cfg.public.ipv4.router ];
AAAA = [ cfg.dmz.ipv6.services ];
};
ns1 = {
A = [ cfg.public.ipv4.router ];
AAAA = [ cfg.dmz.ipv6.services ];
};
ns2 = {
A = [ cfg.public.ipv4.router ];
AAAA = [ cfg.dmz.ipv6.services ];
};
};
}


@ -0,0 +1,77 @@
{ config, dns, ... }:
with dns.lib.combinators;
let
cfg = config.lab.networking;
in
{
CAA = letsEncrypt "caa@kun.is";
SOA = {
nameServer = "ns1";
adminEmail = "webmaster@kun.is";
serial = 2024021702;
};
NS = [
"ns1.kun.is."
"ns2.kun.is."
];
MX = [
(mx.mx 10 "mail.kun.is.")
];
subdomains = {
"*" = {
A = [ cfg.public.ipv4.router ];
AAAA = [ cfg.dmz.ipv6.dockerSwarm ];
};
ns = {
A = [ cfg.public.ipv4.router ];
AAAA = [ cfg.dmz.ipv6.services ];
};
ns1 = {
A = [ cfg.public.ipv4.router ];
AAAA = [ cfg.dmz.ipv6.services ];
};
ns2 = {
A = [ cfg.public.ipv4.router ];
AAAA = [ cfg.dmz.ipv6.services ];
};
# Override because we don't support IPv6 for Git SSH.
git = {
A = [ cfg.public.ipv4.router ];
AAAA = [ ];
};
# Override because we don't support IPv6 for KMS.
kms = {
A = [ cfg.public.ipv4.router ];
AAAA = [ ];
};
# Override because WireGuard runs on OPNsense, so its IPv6 address differs from "cfg.dmz.ipv6.services".
wg = {
A = [ cfg.public.ipv4.router ];
AAAA = [ cfg.dmz.ipv6.router ];
};
# For SMTP2GO to be able to send emails from the kun.is domain.
em670271 = {
CNAME = ["return.smtp2go.net."];
};
"s670271._domainkey" = {
CNAME = ["dkim.smtp2go.net."];
};
link = {
CNAME = ["track.smtp2go.net."];
};
};
}


@ -0,0 +1,70 @@
{ lib, config, ... }:
let
cfg = config.lab.ssh;
hostCert = builtins.toFile "host_ed25519-cert.pub" cfg.hostCert;
userCert = builtins.toFile "user_ed25519-cert.pub" cfg.userCert;
in
{
options.lab.ssh = {
useCertificates = lib.mkOption {
type = lib.types.bool;
default = false;
description = ''
Whether to use certificates at all.
'';
};
hostCert = lib.mkOption {
type = lib.types.str;
description = ''
SSH host certificate
'';
};
userCert = lib.mkOption {
type = lib.types.str;
description = ''
SSH user certificate
'';
};
hostKey = lib.mkOption {
default =
../secrets/${config.networking.hostName}_host_ed25519.age;
type = lib.types.path;
description = ''
SSH host key
'';
};
userKey = lib.mkOption {
default =
../secrets/${config.networking.hostName}_user_ed25519.age;
type = lib.types.path;
description = ''
SSH user key
'';
};
};
config = lib.mkIf cfg.useCertificates {
services.openssh = {
extraConfig = ''
HostCertificate ${hostCert}
HostKey ${config.age.secrets.host_ed25519.path}
'';
};
programs.ssh = {
extraConfig = ''
CertificateFile ${userCert}
IdentityFile ${config.age.secrets.user_ed25519.path}
'';
};
age.secrets = {
"host_ed25519".file = cfg.hostKey;
"user_ed25519".file = cfg.userKey;
};
};
}

80
nix/modules/storage.nix Normal file

@ -0,0 +1,80 @@
{ lib, config, machine, ... }:
let cfg = config.lab.storage;
in {
options.lab.storage = {
osDisk = lib.mkOption {
type = with lib.types; nullOr str;
description = ''
The disk to be used for the machine's operating system.
'';
};
dataPartition = lib.mkOption {
default = null;
type = lib.types.nullOr lib.types.str;
description = ''
Partition to be used for data storage on this machine.
'';
};
dataMountPoint = lib.mkOption {
default = "/mnt/data";
type = lib.types.str;
description = ''
Mount point of the machine's data partition.
'';
};
};
config = {
fileSystems = lib.attrsets.mergeAttrsList [
(lib.optionalAttrs machine.isHypervisor {
"${cfg.dataMountPoint}".device = cfg.dataPartition;
})
(lib.optionalAttrs machine.isRaspberryPi {
"/" = {
device = "/dev/disk/by-label/NIXOS_SD";
fsType = "ext4";
options = [ "noatime" ];
};
})
];
disko = lib.mkIf machine.isHypervisor {
# TODO: Rename this to 'osDisk'. Unfortunately, we would then need to run nixos-anywhere again.
devices.disk.vdb = {
device = cfg.osDisk;
type = "disk";
content = {
type = "gpt";
partitions = {
swap.size = "100%";
ESP = {
type = "EF00";
size = "500M";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
};
};
root = {
end = "-4G";
content = {
type = "filesystem";
format = "btrfs";
mountpoint = "/";
};
};
};
};
};
};
};
}


@ -0,0 +1,67 @@
Certificate:
Data:
Version: 1 (0x0)
Serial Number:
ef:2f:4d:d4:26:7e:33:1b
Signature Algorithm: sha256WithRSAEncryption
Issuer: CN=jefke.hyp
Validity
Not Before: Nov 22 19:12:03 2023 GMT
Not After : Oct 29 19:12:03 2123 GMT
Subject: CN=jefke.hyp
Subject Public Key Info:
Public Key Algorithm: rsaEncryption
RSA Public-Key: (2048 bit)
Modulus:
00:c7:ab:eb:9c:d0:7f:4f:f1:ba:65:0a:8b:07:7b:
2e:5b:f0:26:82:33:c9:73:e6:91:cc:11:94:05:1c:
8d:67:29:cb:5e:67:35:02:80:54:af:99:4b:aa:ce:
e8:56:62:be:63:cb:b2:4a:b0:a9:28:12:e2:77:50:
7d:d5:d2:3b:48:d8:32:59:25:26:ff:a6:5c:f6:eb:
ae:5b:3d:7a:14:10:ba:90:9c:6f:1f:b9:d8:99:0e:
b7:09:5e:62:69:c4:c0:c6:27:b0:d3:60:0d:47:4c:
a5:11:53:f2:f1:4a:f9:a6:bc:d6:a3:35:a2:e8:e5:
a9:d1:60:e8:e5:18:ce:d2:60:80:4e:dc:48:ae:7f:
b7:ea:76:51:28:39:a4:b0:95:82:95:93:98:b2:9f:
23:c9:81:69:59:a3:e4:f7:5a:1c:01:31:96:c1:4b:
59:21:f8:a2:e6:9e:21:78:0e:6b:c1:68:c7:5c:16:
9a:06:54:df:b6:77:1d:2d:89:d0:c8:9e:db:b5:d4:
8c:fb:b9:4f:b7:6e:39:5f:39:8e:48:73:76:7d:46:
6e:1f:8d:14:cb:40:b5:ff:c6:f0:c0:44:3c:ed:52:
3f:4f:7b:69:63:93:c6:41:e6:5e:ed:33:50:20:46:
db:93:bf:e8:52:51:95:f1:81:73:58:da:67:21:7b:
12:bd
Exponent: 65537 (0x10001)
Signature Algorithm: sha256WithRSAEncryption
aa:5c:89:41:a6:b7:3d:65:87:ca:50:c4:f3:58:aa:d3:b4:55:
b1:a7:8d:18:26:17:e5:8a:21:24:a1:49:53:77:31:5b:55:63:
be:01:d8:fe:b7:06:7c:da:07:1f:94:6a:de:96:ad:ca:3b:20:
2a:e1:35:90:19:83:6d:37:d1:15:12:de:3c:0e:46:be:66:a1:
6a:1d:ec:72:dc:46:79:69:e4:af:77:c8:ff:cd:d6:7d:16:88:
ab:44:fd:70:fc:40:47:ff:43:95:11:5a:9a:56:0c:d2:dd:7c:
3b:87:aa:10:26:fa:25:a3:a0:43:8a:1b:ec:54:11:7e:65:67:
d2:06:e1:3e:3b:e1:0e:b0:80:ef:4b:35:3f:fc:34:1d:95:2e:
ee:c1:67:38:da:b3:74:86:4b:95:8c:0c:1d:51:28:c1:42:e9:
77:68:d7:ec:3b:66:30:c6:e5:2a:62:ea:15:fb:24:56:cf:02:
d0:25:54:a7:58:15:b5:2a:71:93:56:c0:69:7a:36:18:6c:31:
b1:8e:3c:77:d7:77:ac:fc:e1:94:c5:08:bb:35:ac:48:5f:6b:
8b:c8:c8:78:f4:a9:ca:4f:9d:51:54:89:97:c9:af:a1:fa:71:
df:58:f6:ff:04:7c:c8:1c:95:6b:1a:e3:a7:f6:43:1c:27:94:
10:03:ce:ec
-----BEGIN CERTIFICATE-----
MIICpjCCAY4CCQDvL03UJn4zGzANBgkqhkiG9w0BAQsFADAUMRIwEAYDVQQDDAlq
ZWZrZS5oeXAwIBcNMjMxMTIyMTkxMjAzWhgPMjEyMzEwMjkxOTEyMDNaMBQxEjAQ
BgNVBAMMCWplZmtlLmh5cDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AMer65zQf0/xumUKiwd7LlvwJoIzyXPmkcwRlAUcjWcpy15nNQKAVK+ZS6rO6FZi
vmPLskqwqSgS4ndQfdXSO0jYMlklJv+mXPbrrls9ehQQupCcbx+52JkOtwleYmnE
wMYnsNNgDUdMpRFT8vFK+aa81qM1oujlqdFg6OUYztJggE7cSK5/t+p2USg5pLCV
gpWTmLKfI8mBaVmj5PdaHAExlsFLWSH4ouaeIXgOa8Fox1wWmgZU37Z3HS2J0Mie
27XUjPu5T7duOV85jkhzdn1Gbh+NFMtAtf/G8MBEPO1SP097aWOTxkHmXu0zUCBG
25O/6FJRlfGBc1jaZyF7Er0CAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAqlyJQaa3
PWWHylDE81iq07RVsaeNGCYX5YohJKFJU3cxW1VjvgHY/rcGfNoHH5Rq3patyjsg
KuE1kBmDbTfRFRLePA5Gvmahah3sctxGeWnkr3fI/83WfRaIq0T9cPxAR/9DlRFa
mlYM0t18O4eqECb6JaOgQ4ob7FQRfmVn0gbhPjvhDrCA70s1P/w0HZUu7sFnONqz
dIZLlYwMHVEowULpd2jX7DtmMMblKmLqFfskVs8C0CVUp1gVtSpxk1bAaXo2GGwx
sY48d9d3rPzhlMUIuzWsSF9ri8jIePSpyk+dUVSJl8mvofpx31j2/wR8yByVaxrj
p/ZDHCeUEAPO7A==
-----END CERTIFICATE-----

1326
nix/net.nix Normal file

File diff suppressed because it is too large.

85
nix/physical.nix Normal file

@ -0,0 +1,85 @@
{ pkgs, config, lib, microvm, disko, agenix, machine, machines, dns, nixos-hardware, ... }: {
imports = [
microvm.nixosModules.host
]
++ lib.lists.optional (machine.isRaspberryPi) nixos-hardware.nixosModules.raspberry-pi-4;
config = {
boot = lib.mkIf (machine.isHypervisor) {
kernelModules = [ "kvm-intel" ];
extraModulePackages = [ ];
initrd = {
availableKernelModules = [
"ahci"
"xhci_pci"
"nvme"
"usbhid"
"usb_storage"
"sd_mod"
"sdhci_pci"
];
kernelModules = [ ];
};
loader = {
systemd-boot.enable = true;
efi.canTouchEfiVariables = true;
};
};
nixpkgs = {
config.allowUnfree = true;
# TODO: do we need this?
# hostPlatform = machine.arch;
};
hardware.cpu.intel.updateMicrocode = lib.mkIf (machine.isHypervisor) config.hardware.enableRedistributableFirmware;
age.identityPaths = [ "/etc/age_ed25519" ];
nix = {
package = pkgs.nixFlakes;
extraOptions = ''
experimental-features = nix-command flakes
'';
};
system = {
stateVersion = "23.05";
activationScripts.diff = ''
if [[ -e /run/current-system ]]; then
${pkgs.nix}/bin/nix store diff-closures /run/current-system "$systemConfig"
fi
'';
};
microvm.vms =
let
vmsForHypervisor = lib.filterAttrs (n: v: v.isVirtual && v.hypervisorName == config.networking.hostName) machines;
in
builtins.mapAttrs
(name: vm:
{
# Do not restart virtual machines to apply configuration changes.
# While conceptually this seems useful, it could result in annoying situations.
# For example, changing the default VM configuration will restart ALL VMs simultaneously, causing a lot of stress on the servers.
# Downside of not restarting, is that we may need to do this manually now to apply changes.
restartIfChanged = false;
specialArgs = {
inherit agenix disko pkgs lib microvm dns;
machine = vm;
hypervisorConfig = config;
};
config.imports = [
./.
{ networking.hostName = name; }
];
}
)
vmsForHypervisor;
};
}

5
nix/secrets/README.md Normal file

@ -0,0 +1,5 @@
To create a secret:
```bash
nix run github:ryantm/agenix -- -e secret.age
```
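After changing the recipient keys in secrets.nix, existing secrets likely need to be re-encrypted; a sketch of how that could look using agenix's rekey flag:

```bash
# Re-encrypt every secret listed in secrets.nix for the current set of public keys.
nix run github:ryantm/agenix -- -r
```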

Binary file not shown.

Binary file not shown.


@ -0,0 +1,15 @@
age-encryption.org/v1
-> ssh-ed25519 UwNSRQ Lr6HfHB1pQVAVESUkR1a1ie8o9cTtCa0LA4y20UvfRU
8X+VZUfk2oRrM+A4pZC/6yyexo2Kr8MO7isiXPsnOJk
-> ssh-ed25519 JJ7S4A fngT1OkV0pfig7UZ4vA8CWFDWc//xn2KWRsk1+EI0Ac
9J+I87tFasCug4rVaXJKNKzxr450YtZUypSTmwf/r7g
-> ssh-ed25519 aqswPA I/RtBp+6CgMOPs41nbd8CqBgpgch8ixRGbzacXSDKRE
adBD/lskyXK/QU+v/OlQ1wQK7PkhALpdxgHUc1i+jcU
-> ssh-ed25519 LAPUww JtDnT4+NqLMBc+LpQSh0eQnSyXzJOHHbaZFNQmxIdC0
/DjWq9XUAH3xZvU1PlB7Q70LQ0x9SRMmaSYQ+DyQZEM
-> ssh-ed25519 vBZj5g 4YBFh5e32ZHr8byvd4vbZ9zljHO4FTrJGhsZiH//KVw
iA+foYHtgt2PjBG9yfBWNLeygiIbW3MsbUQdVWgyrno
-> ssh-ed25519 QP0PgA urlidySF5ZG9ILjdPuJPX6V/aDIAYzwBVd+XopDF5UA
NL/RxiKPRn+uZW37jJKLOHCaktuvzm0SIwcMmBgF5CY
--- aeaUWpBxSTjrcDDQa6Zk2dcdvhsdqs22JlvkduILpqE
(binary payload not shown)


@ -0,0 +1,16 @@
age-encryption.org/v1
-> ssh-ed25519 UwNSRQ XKuX/onJklTJ1ws0svIwJy1PZN1MHsf5+N3z7XGvCyY
JkyemSdV/ZcbjWLrwYLhKCE4Ln2seLR0WyYXGMepgBw
-> ssh-ed25519 JJ7S4A 9wzkTABOPcmTG7LNWvZa7dKG0Ingf+KDckZ1tL2c3QQ
IkxcStI4kwXkWj+j3PWl7FdyoVMVsiH9SZBnyffbcYQ
-> ssh-ed25519 aqswPA 3i/v1qWLseD+FrPrnAXtSoK98a6Nrb3XrHinp2QPTn0
RxuPM1oICEoF5oZAyQlCm+fOivI9sfZenZSlOGBIZK8
-> ssh-ed25519 LAPUww MkvAMN/fZiV66+ub4Q/CDTIxJ3N3cMWBT0SQajespR0
uh6SGtxR3BvsU/fTTTOnsNXD+bHNYMhTAFoc3QUtMr8
-> ssh-ed25519 vBZj5g Jiu1sEmlws4eFPriuL2oS99Q9tFCyf4Zkv/khLONvT0
cLLHcvmIb1Nb7eVmKJyYdvfulgbcZ73N0x6GWyKeJPs
-> ssh-ed25519 QP0PgA A1Raf1CiVJ5tnJXRIeS0VpCUNX/iYNzGozQxApY9KGM
998c6IZfPNW8uMttkK8xGp1hgKXBcrwuBOgOpXWPCu8
--- /Qv6sfhphlYb9WtWdmPt6RZJPHxBO4jCSgauazsHIt8
(binary payload not shown)

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

42
nix/secrets/secrets.nix Normal file

@ -0,0 +1,42 @@
let
pkgs = import <nixpkgs> { };
lib = pkgs.lib;
publicKeyURLs = [
"https://github.com/pizzapim.keys"
"https://github.com/pizzaniels.keys"
];
encryptedFileNames = [
"jefke_host_ed25519.age"
"jefke_user_ed25519.age"
"atlas_host_ed25519.age"
"atlas_user_ed25519.age"
"lewis_host_ed25519.age"
"lewis_user_ed25519.age"
"database_passwords.env.age"
"borg_passphrase.age"
"ec2_borg_server.pem.age"
];
machinePublicKeys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIJUSH2IQg8Y/CCcej7J6oe4co++6HlDo1MYDCR3gV3a root@jefke.hyp"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKZ1OGe8jLyc+72SFUnW4FOKbpqHs7Mym85ESBN4HWV7 root@atlas.hyp"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIL5lZjsqS6C50WO8p08TY7Fg8rqQH04EkpDTxCRGtR7a root@lewis.hyp"
];
fetchPublicKeys = url:
let
publicKeysFile = builtins.fetchurl { inherit url; };
publicKeysFileContents = lib.strings.fileContents publicKeysFile;
in
lib.strings.splitString "\n" publicKeysFileContents;
adminPublicKeys = lib.flatten (builtins.map fetchPublicKeys publicKeyURLs);
allPublicKeys = lib.flatten [ machinePublicKeys adminPublicKeys ];
publicKeysForEncryptedFileName = encryptedFileName:
{ "${encryptedFileName}".publicKeys = allPublicKeys; };
in
lib.attrsets.mergeAttrsList (builtins.map publicKeysForEncryptedFileName encryptedFileNames)

94
nix/virtual/default.nix Normal file

@ -0,0 +1,94 @@
{ pkgs, lib, config, hypervisorConfig, ... }: {
imports = [ ./docker_swarm.nix ];
options.lab.vm = {
# TODO: make global.
baseMACAddress = lib.mkOption {
default = "BA:DB:EE:F0:00:00";
type = lib.types.str;
description = ''
Base MAC address for VMs in the DMZ.
'';
};
id = lib.mkOption {
type = lib.types.int;
description = ''
Unique identifier of this VM from which the MAC address is derived.
'';
};
shares = lib.mkOption {
default = [ ];
description = ''
Directories mounted on the VM using VirtioFS.
'';
type = lib.types.listOf (lib.types.submodule ({ config, ... }: {
options = {
name = lib.mkOption {
type = lib.types.str;
description = ''
The name of the directory share.
'';
};
mountPoint = lib.mkOption {
type = lib.types.str;
description = ''
The mount point of the directory share inside the virtual machine.
'';
};
};
}));
};
};
config = {
system.stateVersion = hypervisorConfig.system.stateVersion;
lab.vm.shares = [{
name = "host_keys";
mountPoint = "/etc/ssh/host_keys";
}];
services.openssh =
let
hostKeyPath = "/etc/ssh/host_keys/ssh_host_ed25519_key";
in
{
hostKeys = [{
path = hostKeyPath;
type = "ed25519";
}];
extraConfig = ''
HostKey ${hostKeyPath}
'';
};
microvm = {
# TODO: make this dependent on the host CPU
vcpu = 4;
shares = [{
source = "/nix/store";
mountPoint = "/nix/.ro-store";
tag = "ro-store";
proto = "virtiofs";
}] ++ map
(share: {
source = "/var/lib/microvms/${config.networking.hostName}/shares/${share.name}";
mountPoint = share.mountPoint;
tag = share.name;
proto = "virtiofs";
})
config.lab.vm.shares;
interfaces = [{
type = "tap";
id = "vm-${config.networking.hostName}";
mac = pkgs.lib.net.mac.add config.lab.vm.id config.lab.vm.baseMACAddress;
}];
};
};
}


@ -0,0 +1,39 @@
{ pkgs, lib, config, machine, ... }:
let
cfg = config.lab.dockerSwarm;
in
{
options.lab.dockerSwarm.enable = lib.mkOption {
default = false;
type = lib.types.bool;
description = ''
Whether to enable Docker Swarm on this host.
'';
};
config = lib.mkIf cfg.enable {
lab.vm.shares = lib.mkIf machine.isVirtual [{
name = "docker";
mountPoint = "/var/lib/docker";
}];
networking = {
nftables.enable = lib.mkForce false;
firewall.enable = lib.mkForce false;
};
virtualisation.docker = {
enable = true;
liveRestore = false;
};
environment.systemPackages = [
(pkgs.python311.withPackages (python-pkgs: with python-pkgs; [
docker
requests
jsondiff
pyyaml
]))
];
};
}