{ lib, ... }:
let
  # Submodule schema describing a single machine (physical host or microVM)
  # in the lab. Consumed below via `attrsOf (submodule ...)`.
  machineSubmodule = { ... }: {
    options = {
      # TODO: rename to kind?
      type = lib.mkOption {
        type = lib.types.enum [ "physical" "virtual" ];
        description = ''
          Whether this machine is physical or virtual.
        '';
      };

      # Only meaningful for virtual machines; stays null for physical hosts.
      hypervisorName = lib.mkOption {
        default = null;
        type = with lib.types; nullOr str;
        description = ''
          The host name of the hypervisor hosting this virtual machine.
        '';
      };

      arch = lib.mkOption {
        default = null;
        type = with lib.types; nullOr str;
        description = ''
          CPU architecture of this machine.
        '';
      };

      # Flag for ARM boards that need Raspberry-Pi-specific handling.
      isRaspberryPi = lib.mkOption {
        default = false;
        type = lib.types.bool;
      };

      # Set on physical hosts that run virtual machines.
      isHypervisor = lib.mkOption {
        default = false;
        type = lib.types.bool;
      };

      nixosModule = lib.mkOption {
        default = { ... }: { };
        type = lib.types.anything;
        description = ''
          Customized configuration for this machine in the form of a NixOS module.
        '';
      };
    };
  };
in
{
  options = {
    # The inventory of all lab machines, keyed by host name.
    machines = lib.mkOption {
      type = with lib.types; attrsOf (submodule machineSubmodule);
    };
  };

  config = {
    machines = {
      # Raspberry Pi board.
      warwick = {
        type = "physical";
        arch = "aarch64-linux";
        isRaspberryPi = true;
        nixosModule.lab = {
          storage = {
            osDisk = "/dev/sda";
          };
        };
      };

      atlas = {
        type = "physical";
        arch = "x86_64-linux";
        isHypervisor = true;
        nixosModule.lab = {
          storage = {
            osDisk = "/dev/sda";
            dataPartition = "/dev/nvme0n1p1";
          };
          ssh = {
            useCertificates = true;
            hostCert = builtins.readFile ./certificates/atlas/host_ed25519.crt;
            userCert = builtins.readFile ./certificates/atlas/user_ed25519.crt;
          };
        };
      };

      jefke = {
        type = "physical";
        arch = "x86_64-linux";
        isHypervisor = true;
        nixosModule.lab = {
          storage = {
            osDisk = "/dev/sda";
            dataPartition = "/dev/nvme0n1p1";
          };
          ssh = {
            useCertificates = true;
            hostCert = builtins.readFile ./certificates/jefke/host_ed25519.crt;
            userCert = builtins.readFile ./certificates/jefke/user_ed25519.crt;
          };
        };
      };

      lewis = {
        type = "physical";
        arch = "x86_64-linux";
        isHypervisor = true;
        nixosModule.lab = {
          backups.enable = true;
          data-sharing.enable = true;
          networking.dmz.allowConnectivity = true;
          storage = {
            osDisk = "/dev/sda";
            dataPartition = "/dev/nvme0n1p1";
          };
          ssh = {
            useCertificates = true;
            hostCert = builtins.readFile ./certificates/lewis/host_ed25519.crt;
            userCert = builtins.readFile ./certificates/lewis/user_ed25519.crt;
          };
        };
      };

      # MicroVM serving DMZ network services (dnsmasq).
      hermes = {
        type = "virtual";
        hypervisorName = "lewis";
        nixosModule = { config, ... }: {
          lab = {
            networking = {
              dmz.services.enable = true;
              staticNetworking = true;
              # TODO: This seems to cause infinite recursion? Really weird.
              # staticIPv4 = config.lab.networking.dmz.ipv4.services;
              # staticIPv6 = config.lab.networking.dmz.ipv6.services;
              staticIPv4 = "192.168.30.7";
              staticIPv6 = "2a0d:6e00:1a77:30::7";
            };
            vm = {
              # # TODO: would be cool to create a check that a mac address is only ever assigned to one VM.
              # # TODO: idea: what if we generated these IDs by hashing the host name and reducing that to the amount of hosts possible?
              id = 7;
              shares = [{
                name = "dnsmasq";
                mountPoint = "/var/lib/dnsmasq";
              }];
            };
          };
        };
      };

      # Docker Swarm worker VMs, one per hypervisor.
      maestro = {
        type = "virtual";
        hypervisorName = "atlas";
        nixosModule = { config, ... }: {
          microvm.balloonMem = 7680;
          lab = {
            dockerSwarm.enable = true;
            vm = { id = 1; };
          };
        };
      };

      bancomart = {
        type = "virtual";
        hypervisorName = "jefke";
        nixosModule = {
          microvm.balloonMem = 7680;
          lab = {
            dockerSwarm.enable = true;
            vm.id = 2;
          };
        };
      };

      vpay = {
        type = "virtual";
        hypervisorName = "lewis";
        nixosModule = {
          microvm.balloonMem = 5120;
          lab = {
            dockerSwarm.enable = true;
            vm.id = 3;
          };
        };
      };
    };
  };
}