Merge branch 'master' of ssh://git.kun.is:56287/home/nixos-servers

commit fcf6864b77
20 changed files with 400 additions and 150 deletions
flake.lock (37 changes)
@@ -84,6 +84,27 @@
       "type": "github"
     }
   },
+  "dns": {
+    "inputs": {
+      "flake-utils": "flake-utils",
+      "nixpkgs": [
+        "nixpkgs"
+      ]
+    },
+    "locked": {
+      "lastModified": 1635273082,
+      "narHash": "sha256-EHiDP2jEa7Ai5ZwIf5uld9RVFcV77+2SUxjQXwJsJa0=",
+      "owner": "kirelagin",
+      "repo": "dns.nix",
+      "rev": "c7b9645da9c0ddce4f9de4ef27ec01bb8108039a",
+      "type": "github"
+    },
+    "original": {
+      "owner": "kirelagin",
+      "repo": "dns.nix",
+      "type": "github"
+    }
+  },
   "flake-compat": {
     "flake": false,
     "locked": {
@@ -116,6 +137,21 @@
       "type": "github"
     }
   },
+  "flake-utils": {
+    "locked": {
+      "lastModified": 1614513358,
+      "narHash": "sha256-LakhOx3S1dRjnh0b5Dg3mbZyH0ToC9I8Y2wKSkBaTzU=",
+      "owner": "numtide",
+      "repo": "flake-utils",
+      "rev": "5466c5bbece17adaab2d82fae80b46e807611bf3",
+      "type": "github"
+    },
+    "original": {
+      "owner": "numtide",
+      "repo": "flake-utils",
+      "type": "github"
+    }
+  },
   "home-manager": {
     "inputs": {
       "nixpkgs": [
@@ -213,6 +249,7 @@
       "agenix": "agenix",
       "deploy-rs": "deploy-rs",
       "disko": "disko",
+      "dns": "dns",
       "kubenix": "kubenix",
       "nixpkgs": "nixpkgs_2",
       "nixpkgs-unstable": "nixpkgs-unstable"
flake.nix

@@ -19,10 +19,15 @@
       url = "github:ryantm/agenix";
       inputs.nixpkgs.follows = "nixpkgs";
     };
+
+    dns = {
+      url = "github:kirelagin/dns.nix";
+      inputs.nixpkgs.follows = "nixpkgs";
+    };
   };

   outputs =
-    { self, nixpkgs, deploy-rs, disko, agenix, kubenix, nixpkgs-unstable, ... }:
+    { self, nixpkgs, deploy-rs, disko, agenix, kubenix, nixpkgs-unstable, dns, ... }:
     let
       system = "x86_64-linux";
       pkgs = nixpkgs.legacyPackages.${system};
@@ -65,7 +70,7 @@

       nixosConfigurations = mkNixosSystems (machine: {
         inherit system;
-        specialArgs = { inherit kubenix; };
+        specialArgs = { inherit kubenix dns; };
         modules = [
           machine.nixosModule
           disko.nixosModules.disko
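Since dns is now forwarded through specialArgs, any NixOS module in the repository can accept it as an ordinary function argument. A minimal sketch of a consuming module (hypothetical file; it assumes only the specialArgs wiring above):

    { lib, config, dns, ... }: # dns arrives via specialArgs, not via an import
    {
      # The kirelagin/dns.nix library is now in scope, e.g.:
      #   dns.lib.toString "example.com" zoneAttrs
    }

The new nixos/modules/dns module later in this commit uses exactly this mechanism.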
@@ -7,5 +7,5 @@ all:
   hosts:
     bancomart:
       ansible_host: bancomart.dmz
-    vpay:
-      ansible_host: vpay.dmz
+    # vpay:
+    #   ansible_host: vpay.dmz
@@ -23,13 +23,6 @@ provider "libvirt" {
   uri = "qemu+ssh://root@atlas.hyp/system?known_hosts=/etc/ssh/ssh_known_hosts"
 }

-module "setup_jefke" {
-  source = "../../../terraform_modules/setup"
-  providers = {
-    libvirt = libvirt.jefke
-  }
-}
-
 module "bancomart" {
   source = "../../../terraform_modules/debian"
   name = "bancomart"
@@ -40,13 +33,6 @@ module "bancomart" {
   }
 }

-module "setup_atlas" {
-  source = "../../../terraform_modules/setup"
-  providers = {
-    libvirt = libvirt.atlas
-  }
-}
-
 module "maestro" {
   source = "../../../terraform_modules/debian"
   name = "maestro"
legacy/projects/libvirt_setup/main.tf (new file, 38 lines)

@@ -0,0 +1,38 @@
+terraform {
+  backend "pg" {
+    schema_name = "libvirtsetup"
+    conn_str = "postgresql://terraform@jefke.hyp/terraformstates"
+  }
+
+  required_providers {
+    libvirt = {
+      source = "dmacvicar/libvirt"
+      version = "0.7.1" # https://github.com/dmacvicar/terraform-provider-libvirt/issues/1040
+    }
+  }
+}
+
+# https://libvirt.org/uri.html#libssh-and-libssh2-transport
+provider "libvirt" {
+  alias = "jefke"
+  uri = "qemu+ssh://root@jefke.hyp/system?known_hosts=/etc/ssh/ssh_known_hosts"
+}
+
+provider "libvirt" {
+  alias = "atlas"
+  uri = "qemu+ssh://root@atlas.hyp/system?known_hosts=/etc/ssh/ssh_known_hosts"
+}
+
+module "setup_jefke" {
+  source = "../../terraform_modules/setup"
+  providers = {
+    libvirt = libvirt.jefke
+  }
+}
+
+module "setup_atlas" {
+  source = "../../terraform_modules/setup"
+  providers = {
+    libvirt = libvirt.atlas
+  }
+}
@@ -4,10 +4,12 @@
     hostName = "jefke.hyp";

     nixosModule.lab = {
-      dataDisk.enable = true;
       terraformDatabase.enable = true;
       # k3s.enable = true;
-      disko.osDiskDevice = "/dev/nvme0n1";
+      storage = {
+        osDisk = "/dev/sda";
+        dataPartition = "/dev/nvme0n1p1";
+      };

       ssh = {
         useCertificates = true;
@@ -22,7 +24,10 @@
     hostName = "atlas.hyp";

     nixosModule.lab = {
-      disko.osDiskDevice = "/dev/nvme0n1";
+      storage = {
+        osDisk = "/dev/sda";
+        dataPartition = "/dev/nvme0n1p1";
+      };

       ssh = {
         useCertificates = true;
@@ -37,14 +42,12 @@
     hostName = "lewis.hyp";

     nixosModule.lab = {
-      disko.osDiskDevice = "/dev/sda";
-      backups.enable = true;
-      networking.allowDMZConnectivity = true;
-      data-sharing.enable = true;
+      dataHost.enable = true;
+      dns.enable = true;

-      dataDisk = {
-        enable = true;
-        devicePath = "/dev/nvme0n1p1";
+      storage = {
+        osDisk = "/dev/sda";
+        dataPartition = "/dev/nvme0n1p1";
       };

       ssh = {
nixos/modules/backups.nix

@@ -1,39 +1,42 @@
 { pkgs, lib, config, ... }:
 let
   cfg = config.lab.backups;
-  snapshotFile = "/tmp/snapshot.qcow2";
-  snapshotMount = "/tmp/snapshot";
   beforeEverything = pkgs.writeShellScriptBin "beforeEverything" ''
-    ${pkgs.libvirt}/bin/virsh snapshot-create-as --domain ${cfg.domainName} --name backup-${cfg.domainName} --disk-only --quiesce --no-metadata --diskspec vda,snapshot=no --diskspec vdb,file=${snapshotFile} && ${pkgs.coreutils}/bin/sleep 1
-    ${pkgs.coreutils}/bin/mkdir -p ${snapshotMount}
-    ${pkgs.libguestfs-with-appliance}/bin/guestmount -a ${snapshotFile} -m /dev/sda1 --ro ${snapshotMount}
+    if [ -d "${cfg.snapshotLocation}" ]; then
+      ${pkgs.btrfs-progs}/bin/btrfs subvolume delete ${cfg.snapshotLocation}
+    fi
   '';

   afterEverything = pkgs.writeShellScriptBin "afterEverything" ''
-    set +e
-    ${pkgs.coreutils}/bin/sleep 10
-    ${pkgs.libguestfs-with-appliance}/bin/guestunmount ${snapshotMount} && ${pkgs.coreutils}/bin/sleep 1
-    ${pkgs.coreutils}/bin/rm -rf ${snapshotMount}
-    ${pkgs.libvirt}/bin/virsh blockcommit ${cfg.domainName} vdb --active --verbose --pivot
-    ${pkgs.coreutils}/bin/rm -f ${snapshotFile}
+    ${pkgs.btrfs-progs}/bin/btrfs subvolume snapshot -r ${cfg.subvolumeLocation} ${cfg.snapshotLocation}
   '';

   borgmaticConfig = pkgs.writeTextFile {
     name = "borgmatic-config";
     text = ''
       source_directories:
-        - ${snapshotMount}
+        - ${cfg.snapshotLocation}
       repositories:
         - path: ${cfg.repoLocation}
-          label: ${cfg.domainName}
+          label: nfs
+        - path: ssh://admin@ec2-3-254-121-39.eu-west-1.compute.amazonaws.com/mnt/data/nfs.borg
+          label: ec2
+          ssh_command: "${pkgs.openssh}/bin/ssh -i ${config.age.secrets."ec2_borg_server.pem".path} -o StrictHostKeychecking=no -o ConnectTimeout=10 -o ConnectionAttempts=3"
       keep_daily: 7
       keep_weekly: 4
       keep_monthly: 6
       unknown_unencrypted_repo_access_is_ok: true
       encryption_passcommand: "${pkgs.coreutils}/bin/cat ''${BORG_PASSPHRASE_FILE}"
       before_everything:
         - ${beforeEverything}/bin/beforeEverything
       after_everything:
         - ${afterEverything}/bin/afterEverything
+      postgresql_databases:
+        - name: nextcloud
+          hostname: lewis.dmz
+          username: nextcloud
+          password: ''${NEXTCLOUD_DATABASE_PASSWORD}
+          format: tar
+        - name: hedgedoc
+          hostname: lewis.dmz
+          username: hedgedoc
+          password: ''${HEDGEDOC_DATABASE_PASSWORD}
+          format: tar
     '';
   };
 in
@@ -48,33 +51,42 @@ in
     };

     repoLocation = lib.mkOption {
-      default = "${config.lab.dataDisk.mountPoint}/backups/thecloud-data.borg";
+      default = "${config.lab.storage.dataMountPoint}/backups/nfs.borg";
       type = lib.types.str;
       description = ''
         Location of the Borg repository to back up to.
       '';
     };

-    domainName = lib.mkOption {
-      default = "thecloud";
+    subvolumeLocation = lib.mkOption {
+      default = "${config.lab.storage.dataMountPoint}/nfs";
       type = lib.types.str;
       description = ''
-        The name of the Libvirt domain with the data disk attached.
+        Location of the btrfs subvolume holding the data.
       '';
     };
+
+    snapshotLocation = lib.mkOption {
+      default = "${config.lab.storage.dataMountPoint}/snapshot-nfs";
+      type = lib.types.str;
+      description = ''
+        Location to (temporarily) create a snapshot of the subvolume.
+      '';
+    };
   };

   config = lib.mkIf cfg.enable {
-    environment.systemPackages = with pkgs; [ libguestfs-with-appliance borgbackup ];
+    environment.systemPackages = with pkgs; [ postgresql ];
     # Converted from:
     # https://github.com/borgmatic-collective/borgmatic/tree/84823dfb912db650936e3492f6ead7e0e0d32a0f/sample/systemd
     systemd.services.borgmatic = {
       description = "borgmatic backup";
       wants = [ "network-online.target" ];
       after = [ "network-online.target" ];
-      unitConfig = {
-        ConditionACPower = true;
-      };
+      unitConfig.ConditionACPower = true;
+      preStart = "${pkgs.coreutils}/bin/sleep 10s";
+      path = with pkgs; [ postgresql ];

       serviceConfig = {
         Type = "oneshot";
         Nice = 19;
@@ -84,23 +96,27 @@ in
         IOWeight = 100;
         Restart = "no";
         LogRateLimitIntervalSec = 0;
+        EnvironmentFile = config.age.secrets."database_passwords.env".path;
+        Environment = "BORG_PASSPHRASE_FILE=${config.age.secrets."borg_passphrase".path}";
       };
-      preStart = "${pkgs.coreutils}/bin/sleep 1m";
-      script = "${pkgs.systemd}/bin/systemd-inhibit --who=\"borgmatic\" --what=\"sleep:shutdown\" --why=\"Prevent interrupting scheduled backup\" ${pkgs.borgmatic}/bin/borgmatic --verbosity -2 --syslog-verbosity 1";
+      script = "${pkgs.systemd}/bin/systemd-inhibit --who=\"borgmatic\" --what=\"sleep:shutdown\" --why=\"Prevent interrupting scheduled backup\" ${pkgs.borgmatic}/bin/borgmatic --verbosity -2 --syslog-verbosity 1 -c ${borgmaticConfig}";
     };

-    environment.etc."borgmatic/config.yaml" = {
-      source = borgmaticConfig;
-    };
+    systemd.timers.borgmatic = {
+      description = "Run borgmatic backup";
+      wantedBy = [ "timers.target" ];
+      timerConfig = {
+        OnCalendar = "*-*-* 3:00:00";
+        Persistent = true;
+        RandomizedDelaySec = "3h";
+      };
+    };

-    # systemd.timers.borgmatic = {
-    #   description = "Run borgmatic backup";
-    #   wantedBy = [ "timers.target" ];
-    #   timerConfig = {
-    #     OnCalendar = "*-*-* 3:00:00";
-    #     Persistent = true;
-    #     RandomizedDelaySec = "3h";
-    #   };
-    # };
+    age.secrets = {
+      "database_passwords.env".file = ../secrets/database_passwords.env.age;
+      "borg_passphrase".file = ../secrets/borg_passphrase.age;
+      "ec2_borg_server.pem".file = ../secrets/ec2_borg_server.pem.age;
+    };
   };
 }
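For reference, the reworked module drives everything from its options: borgmatic now reads the btrfs snapshot at the configured snapshotLocation (plus the PostgreSQL dumps) instead of a guest-mounted libvirt disk image. A host opts in with something like the following sketch (values illustrative, mirroring the lewis machine earlier in this commit):

    lab = {
      backups.enable = true;                    # borgmatic service plus the new timer
      storage.dataPartition = "/dev/nvme0n1p1"; # btrfs device backing the data mount
    };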
nixos/modules/data-disk.nix (deleted, 33 lines)

@@ -1,33 +0,0 @@
-{ lib, config, ... }:
-let cfg = config.lab.dataDisk;
-in {
-  options.lab.dataDisk = {
-    enable = lib.mkOption {
-      default = false;
-      type = lib.types.bool;
-      description = ''
-        Whether to automatically mount a disk to be used as a data disk.
-      '';
-    };
-
-    mountPoint = lib.mkOption {
-      default = "/mnt/data";
-      type = lib.types.str;
-      description = ''
-        Mount point of the data disk (if enabled).
-      '';
-    };
-
-    devicePath = lib.mkOption {
-      default = "/dev/sda1";
-      type = lib.types.str;
-      description = ''
-        Path of the device to be used as a data disk.
-      '';
-    };
-  };
-
-  config = lib.mkIf cfg.enable {
-    fileSystems.${cfg.mountPoint} = { device = cfg.devicePath; };
-  };
-}
nixos/modules/data-sharing.nix

@@ -71,8 +71,8 @@ in
       dataDir = cfg.postgresDir;

       authentication = ''
-        host nextcloud nextcloud all md5
-        host hedgedoc hedgedoc all md5
+        host nextcloud nextcloud all md5
+        host hedgedoc hedgedoc all md5
       '';
     };
   };
nixos/modules/default.nix

@@ -1,12 +1,32 @@
 { lib, config, ... }:

+let cfg = config.lab.dataHost;
+in
 {
   imports = [
+    ./storage.nix
     ./terraform-database
-    ./data-disk.nix
     ./ssh-certificates.nix
     ./k3s
-    ./disko.nix
     ./backups.nix
     ./networking.nix
     ./data-sharing.nix
+    ./dns
   ];
+
+  options.lab.dataHost.enable = lib.mkOption {
+    default = false;
+    type = lib.types.bool;
+    description = ''
+      Whether this machine holds application data.
+      This enables NFS and PostgreSQL to serve this data, and sets up backups.
+      Also enables networking on the DMZ to enable serving data.
+    '';
+  };
+
+  config.lab = lib.mkIf cfg.enable {
+    backups.enable = true;
+    data-sharing.enable = true;
+    networking.allowDMZConnectivity = true;
+  };
 }
nixos/modules/disko.nix (deleted, 39 lines)

@@ -1,39 +0,0 @@
-{ lib, config, ... }:
-let cfg = config.lab.disko;
-in {
-  options.lab.disko.osDiskDevice = lib.mkOption {
-    type = lib.types.str;
-    description = ''
-      The disk device to be used for the operating system.
-    '';
-  };
-
-  # TODO: rename this to 'osDisk'. Unfortunately, we would need to run nixos-anywhere again then
-  config.disko.devices.disk.vdb = {
-    device = cfg.osDiskDevice;
-    type = "disk";
-    content = {
-      type = "gpt";
-      partitions = {
-        ESP = {
-          type = "EF00";
-          size = "500M";
-          content = {
-            type = "filesystem";
-            format = "vfat";
-            mountpoint = "/boot";
-          };
-        };
-        root = {
-          end = "-4G";
-          content = {
-            type = "filesystem";
-            format = "btrfs";
-            mountpoint = "/";
-          };
-        };
-        swap = { size = "100%"; };
-      };
-    };
-  };
-}
nixos/modules/dns/default.nix (new file, 65 lines)

@@ -0,0 +1,65 @@
+{ pkgs, lib, config, dns, ... }:
+let
+  cfg = config.lab.dns;
+  publicIpv4 = "192.145.57.90";
+  kunisZoneFile = pkgs.writeTextFile {
+    name = "kunis-zone-file";
+    text = (dns.lib.toString "kun.is" (import ./zones/kun.is.nix { inherit dns publicIpv4; }));
+  };
+
+  geokunis2nlZoneFile = pkgs.writeTextFile {
+    name = "geokunis2nl-zone-file";
+    text = (dns.lib.toString "geokunis2.nl" (import ./zones/geokunis2.nl.nix { inherit dns publicIpv4; }));
+  };
+in
+{
+  options.lab.dns.enable = lib.mkOption {
+    default = false;
+    type = lib.types.bool;
+    description = ''
+      Whether to enable an authoritative DNS server and DNSmasq for DMZ network.
+    '';
+  };
+
+  config = lib.mkIf cfg.enable {
+    networking.firewall = {
+      allowedTCPPorts = [ 53 ];
+      allowedUDPPorts = [ 53 ];
+    };
+
+    services.bind = {
+      enable = true;
+      forwarders = [ ];
+      # TODO: disable ipv6 for now, as the hosts themselves lack routes it seems.
+      ipv4Only = true;
+
+      extraOptions = ''
+        allow-transfer { none; };
+        allow-recursion { none; };
+        version "No dice.";
+      '';
+
+      zones = {
+        "kun.is" = {
+          master = true;
+          file = kunisZoneFile;
+          allowQuery = [ "any" ];
+          extraConfig = ''
+            notify yes;
+            allow-update { none; };
+          '';
+        };
+
+        "geokunis2.nl" = {
+          master = true;
+          file = geokunis2nlZoneFile;
+          allowQuery = [ "any" ];
+          extraConfig = ''
+            notify yes;
+            allow-update { none; };
+          '';
+        };
+      };
+    };
+  };
+}
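The zone files are plain Nix values rendered to text at build time via dns.lib.toString. One way to inspect the generated zone text without deploying — an impure-eval sketch run from the repository root (the paths and the --impure flag are assumptions of this example, not part of the commit):

    # nix eval --impure --raw --expr '<the expression below>'
    let
      flake = builtins.getFlake (toString ./.);
      dns = flake.inputs.dns;
      zone = import ./nixos/modules/dns/zones/kun.is.nix {
        inherit dns;
        publicIpv4 = "192.145.57.90";
      };
    in dns.lib.toString "kun.is" zone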
nixos/modules/dns/zones/geokunis2.nl.nix (new file, 47 lines)

@@ -0,0 +1,47 @@
+{ publicIpv4, dns }:
+with dns.lib.combinators;
+
+{
+  SOA = {
+    nameServer = "ns";
+    adminEmail = "hostmaster@geokunis2.nl";
+    serial = 1704580936;
+  };
+
+  NS = [
+    "ns.geokunis2.nl."
+    "ns0.transip.net."
+    "ns1.transip.nl."
+    "ns2.transip.eu."
+  ];
+
+  MX = [ (mx.mx 10 "mail.geokunis2.nl.") ];
+
+  A = [ publicIpv4 ];
+  AAAA = [ "2a0d:6e00:1a77:30:b62e:99ff:fe77:1bda" ];
+  CAA = letsEncrypt "caa@geokunis2.nl";
+
+  subdomains = {
+    mail.A = [ publicIpv4 ];
+    wg4.A = [ publicIpv4 ];
+    wg6.AAAA = [ "2a0d:6e00:1a77::1" ];
+    tuindersweijde.A = [ publicIpv4 ];
+    inbucket.A = [ publicIpv4 ];
+    kms.A = [ publicIpv4 ];
+
+    wg = {
+      A = [ publicIpv4 ];
+      AAAA = [ "2a0d:6e00:1a77::1" ];
+    };
+
+    ns = {
+      A = [ publicIpv4 ];
+      AAAA = [ "2a0d:6e00:1a77:30:c8fe:c0ff:feff:ee07" ];
+    };
+
+    cyberchef = {
+      A = [ publicIpv4 ];
+      AAAA = [ "2a0d:6e00:1a77:30:c8fe:c0ff:feff:ee03" ];
+    };
+  };
+}
nixos/modules/dns/zones/kun.is.nix (new file, 28 lines)

@@ -0,0 +1,28 @@
+{ publicIpv4, dns }:
+with dns.lib.combinators;
+
+{
+  CAA = letsEncrypt "caa@kun.is";
+
+  SOA = {
+    nameServer = "ns1";
+    adminEmail = "webmaster@kun.is";
+    serial = 1704580936;
+  };
+
+  NS = [
+    "ns1.kun.is."
+    "ns2.kun.is."
+  ];
+
+  MX = [
+    (mx.mx 10 "mail.kun.is.")
+  ];
+
+  subdomains = {
+    ns.A = [ publicIpv4 ];
+    ns1.A = [ publicIpv4 ];
+    ns2.A = [ publicIpv4 ];
+    "*".A = [ publicIpv4 ];
+  };
+}
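The records lean on dns.nix combinators: mx.mx builds an MX record from a priority and a target, and letsEncrypt emits CAA records restricting certificate issuance to Let's Encrypt. Extending a zone is then a one-line change; a hypothetical extra host (not part of this commit) would look like:

    subdomains = {
      ns1.A = [ publicIpv4 ];
      # hypothetical addition for illustration only:
      git.A = [ publicIpv4 ];
    };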
nixos/modules/storage.nix (new file, 66 lines)

@@ -0,0 +1,66 @@
+{ lib, config, ... }:
+let cfg = config.lab.storage;
+in {
+  options.lab.storage = {
+    osDisk = lib.mkOption {
+      type = lib.types.str;
+      description = ''
+        The disk to be used for the machine's operating system.
+      '';
+    };
+
+    dataPartition = lib.mkOption {
+      type = lib.types.str;
+      description = ''
+        Partition to be used for data storage on this machine.
+      '';
+    };
+
+    dataMountPoint = lib.mkOption {
+      default = "/mnt/data";
+      type = lib.types.str;
+      description = ''
+        Mount point of the machine's data partition.
+      '';
+    };
+  };
+
+  config = {
+    fileSystems.${cfg.dataMountPoint}.device = cfg.dataPartition;
+
+    # TODO: Rename this to 'osDisk'. Unfortunately, we would need to run nixos-anywhere again then.
+    disko.devices.disk.vdb = {
+      device = cfg.osDisk;
+      type = "disk";
+
+      content = {
+        type = "gpt";
+
+        partitions = {
+          swap.size = "100%";
+
+          ESP = {
+            type = "EF00";
+            size = "500M";
+
+            content = {
+              type = "filesystem";
+              format = "vfat";
+              mountpoint = "/boot";
+            };
+          };
+
+          root = {
+            end = "-4G";
+
+            content = {
+              type = "filesystem";
+              format = "btrfs";
+              mountpoint = "/";
+            };
+          };
+        };
+      };
+    };
+  };
+}
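Each machine now feeds its disk layout through these two options, as the machine definitions earlier in this commit do: disko partitions the OS disk, and the data partition is mounted at dataMountPoint. A sketch:

    lab.storage = {
      osDisk = "/dev/sda";              # partitioned by disko (ESP / btrfs root / swap)
      dataPartition = "/dev/nvme0n1p1"; # mounted at /mnt/data by default
    };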
nixos/modules/terraform-database/default.nix

@@ -18,8 +18,7 @@ in {
     package = pkgs.postgresql_15;
     enableTCPIP = true;

-    dataDir = lib.mkIf config.lab.dataDisk.enable
-      "${config.lab.dataDisk.mountPoint}/postgresql/${config.services.postgresql.package.psqlSchema}";
+    dataDir = "${config.lab.storage.dataMountPoint}/postgresql/${config.services.postgresql.package.psqlSchema}";

     authentication = ''
       hostssl terraformstates terraform all cert
@@ -37,10 +36,7 @@ in {
       ssl_ca_file = serverCert;
     };

-    ensureUsers = [{
-      name = "terraform";
-      ensurePermissions = { "DATABASE terraformstates" = "ALL PRIVILEGES"; };
-    }];
+    ensureUsers = [{ name = "terraform"; }];
   };

   age.secrets."postgresql_server.key" = {
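Note that dropping ensurePermissions means ensureUsers now only guarantees the role exists; the database grant has to come from somewhere else. One possible sketch using the stock NixOS initialScript option (an assumption of this note, not part of the commit; initialScript only runs when the cluster is first initialised):

    services.postgresql.initialScript = pkgs.writeText "terraform-grants.sql" ''
      GRANT ALL PRIVILEGES ON DATABASE terraformstates TO terraform;
    '';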
nixos/secrets/borg_passphrase.age (new file, 6 lines)

@@ -0,0 +1,6 @@
+age-encryption.org/v1
+-> ssh-ed25519 aqswPA BWfWJ0Detm+1l0tYnjR9n5rIUBfdHb/wTnZnGoYx6SU
+gp5vcIXtJpF6KJ0cHJ6GRpHQvxi7ij//1LH0afFoRuo
+--- exwOM8D5yMcDFp0uzRnbD6TWSgs12WmZo7sKlnHYOwY
+[2 lines of binary ciphertext not shown]
nixos/secrets/database_passwords.env.age (new file, 5 lines)

@@ -0,0 +1,5 @@
+age-encryption.org/v1
+-> ssh-ed25519 aqswPA nsjKPakYuFVxfbJkPKnhqPytMz07KIT32xgJpiuaRD0
+fv+HZdDb1Evy0LIA5sFMFx+KUbAF7jJojrQXMSSmNAo
+--- zJOYXheC2OupvfQNtDfcUCkVMg3TqJQEFjTfAwyi/Pw
+[1 line of binary ciphertext not shown]
nixos/secrets/ec2_borg_server.pem.age (new file, binary)
Binary file not shown.
@@ -1,3 +1,4 @@
+# TODO: Just encrypt each file with all hosts' public keys (plus our personal public keys) and deploy when demanded.
 let
   pkgs = import <nixpkgs> { };
   lib = pkgs.lib;
@@ -28,6 +29,9 @@ let
       encryptedFiles = [
         "lewis_host_ed25519.age"
         "lewis_user_ed25519.age"
+        "database_passwords.env.age"
+        "borg_passphrase.age"
+        "ec2_borg_server.pem.age"
       ];
     };
   };