Terraform & atlas #16

Merged
pim merged 2 commits from vms into master 2023-11-29 16:23:27 +00:00
16 changed files with 282 additions and 7 deletions
Showing only changes of commit 0bf113fa25 - Show all commits

.gitignore vendored
View file

@@ -1 +1,3 @@
.direnv
.terraform.lock.hcl
.terraform

View file

@@ -20,7 +20,7 @@ Additionally, it deploys an age identity, which is later used for decrypting sec
1. Make sure you have a [Secret service](https://www.gnu.org/software/emacs/manual/html_node/auth/Secret-Service-API.html) running (such as KeePassXC) that provides the age identity.
2. Ensure you have root SSH access to the server.
-3. Run nixos-anywhere: `./bootstrap.sh <servername>`
+3. Run nixos-anywhere: `./bootstrap.sh <servername> <hostname>`
## Deployment

View file

@@ -4,9 +4,11 @@ IFS=$'\n\t'
servername="${1-}"
hostname="${2-}"
-if [ -z "$servername" ]
+if [ -z "$servername" ] || [ -z "$hostname" ]
then
-echo "Usage: $0 SERVERNAME"
+echo "Usage: $0 SERVERNAME HOSTNAME"
exit 1
fi
@@ -40,4 +42,4 @@ secret-tool lookup age-identity "$servername" > "$temp/root/age_ed25519"
chmod 600 "$temp/root/age_ed25519"
# Install NixOS to the host system with our age identity
-nix run github:numtide/nixos-anywhere -- --extra-files "$temp" --flake ".#${servername}" "root@${servername}.hyp"
+nix run github:numtide/nixos-anywhere -- --extra-files "$temp" --flake ".#${servername}" "root@${hostname}"
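For illustration: assuming a machine that is defined in the flake as `jefke` and whose root account is reachable over SSH as `jefke.hyp` (names taken from elsewhere in this PR, not from this script), the call would be `./bootstrap.sh jefke jefke.hyp`. The first argument selects the flake configuration and the age identity in the Secret service; the second is the address that nixos-anywhere connects to.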

View file

@@ -1,7 +1,7 @@
{ pkgs, config, lib, modulesPath, ... }: {
imports = [
(modulesPath + "/installer/scan/not-detected.nix")
-./modules/disk-config.nix
+# ./modules/disk-config.nix
./modules/custom
./modules/uptimed.nix
];

View file

@@ -41,6 +41,8 @@
pkgs-unstable.deploy-rs
pkgs.openssl
pkgs.postgresql_15
pkgs-unstable.opentofu
pkgs.cdrtools
];
};

View file

@@ -8,12 +8,79 @@
dataDisk.enable = true;
ssh = {
useCertificates = true;
hostCert = builtins.readFile ./jefke_host_ed25519-cert.pub;
userCert = builtins.readFile ./jefke_user_ed25519-cert.pub;
};
terraformDatabase.enable = true;
};
disko.devices = {
disk = {
vdb = {
device = "/dev/nvme0n1";
type = "disk";
content = {
type = "gpt";
partitions = {
ESP = {
type = "EF00";
size = "500M";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
};
};
root = {
end = "-4G";
content = {
type = "filesystem";
format = "btrfs";
mountpoint = "/";
};
};
swap = { size = "100%"; };
};
};
};
};
};
};
};
bancomart = {
name = "bancomart";
hostname = "bancomart.dmz";
specificConfig = {
disko.devices = {
disk = {
vda = {
device = "/dev/vda";
type = "disk";
content = {
type = "gpt";
partitions = {
boot = {
size = "1M";
type = "EF02"; # for grub MBR
};
root = {
size = "100%";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
};
};
};
};
};
};
};
};
};
}

View file

@@ -7,6 +7,14 @@ in {
options = {
custom = {
ssh = {
useCertificates = lib.mkOption {
type = lib.types.bool;
default = false;
description = ''
Whether to use certificates at all.
'';
};
hostCert = lib.mkOption {
type = lib.types.str;
description = ''
@@ -42,7 +50,7 @@
};
};
-config = {
+config = lib.mkIf cfg.useCertificates {
services.openssh = {
extraConfig = ''
HostCertificate ${hostCert}

View file

@@ -31,4 +31,3 @@
};
};
}

terraform/main.tf Normal file
View file

@@ -0,0 +1,36 @@
terraform {
backend "pg" {
schema_name = "testje"
conn_str = "postgresql://terraform@jefke.hyp/terraformstates"
}
required_providers {
libvirt = {
source = "dmacvicar/libvirt"
version = "0.7.1" # https://github.com/dmacvicar/terraform-provider-libvirt/issues/1040
}
}
}
# https://libvirt.org/uri.html#libssh-and-libssh2-transport
provider "libvirt" {
# alias = "jefke"
uri = "qemu+ssh://root@jefke.hyp/system?known_hosts=/etc/ssh/ssh_known_hosts"
}
module "setup_jefke" {
source = "./modules/setup"
# providers = {
# libvirt = libvirt.jefke
# }
}
module "bancomart" {
source = "./modules/debian"
name = "bancomart"
ram = 2048
storage = 10
# providers = {
# libvirt = libvirt.jefke
# }
}
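The commented-out `alias` and `providers` lines above suggest a move towards one aliased libvirt provider per hypervisor. Below is a rough sketch of what that could look like if a second hypervisor is ever added; the wiring is an editorial assumption, not part of this commit:

```hcl
# Hypothetical sketch only: an aliased provider per hypervisor, passed
# explicitly to the modules that manage resources on it.
provider "libvirt" {
  alias = "jefke"
  uri   = "qemu+ssh://root@jefke.hyp/system?known_hosts=/etc/ssh/ssh_known_hosts"
}

module "setup_jefke" {
  source = "./modules/setup"
  providers = {
    libvirt = libvirt.jefke
  }
}
```

This kind of explicit passing works because both child modules already declare `libvirt` in their `required_providers` blocks.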

View file

@@ -0,0 +1,7 @@
# tf-modules
Terraform modules we use for the virtual machines in our home network.
These are all personalized and probably of little use outside our network.
The modules are currently:
- `debian`: Personalized Debian VM using the `libvirt` Terraform provider (example usage below)
- `invariants`: Invariants for our home network that we use in multiple places.
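For example, a new VM would be declared from `terraform/main.tf` roughly like the existing `bancomart` block; the name and sizes here are placeholders:

```hcl
module "example_vm" {
  source  = "./modules/debian"
  name    = "example" # becomes the libvirt domain name and the VM's hostname
  ram     = 1024      # MiB
  storage = 10        # GiB
}
```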

View file

@@ -0,0 +1,15 @@
#cloud-config
hostname: "${hostname}"
manage_etc_hosts: true
disable_root: false
ssh_authorized_keys:
- "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOodpLr+FDRyKyHjucHizNLVFHZ5AQmE9GmxMnOsSoaw pimkunis@thinkpadpim"
- "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINUZp4BCxf7uLa1QWonx/Crf8tYZ5MKIZ+EuaBa82LrV user@user-laptop"
ssh_pwauth: false
# TODO: Do we need this?
runcmd:
- dhclient -r
- dhclient

View file

@@ -0,0 +1,17 @@
#!/bin/bash
set -euo pipefail
IFS=$'\n\t'
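# Map the JSON query object on stdin (keys: pubkey, host, cahost, cascript, cakey) to shell
# variables; this matches the stdin/stdout protocol of Terraform's external data source.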
eval "$(jq -r '@sh "PUBKEY=\(.pubkey) HOST=\(.host) CAHOST=\(.cahost) CASCRIPT=\(.cascript) CAKEY=\(.cakey)"')"
# TODO: Can this be done more eye-pleasingly?
set +e
CERT=$(ssh -o ConnectTimeout=3 -o ConnectionAttempts=1 root@$CAHOST '"'"$CASCRIPT"'" host "'"$CAKEY"'" "'"$PUBKEY"'" "'"$HOST"'".dmz')
retval=$?
set -e
if [ "$retval" -ne 0 ]; then
CERT=""
fi
jq -n --arg cert "$CERT" '{"cert":$cert}'
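The script produces the `{"cert": ...}` result object that the `external` data source (hashicorp/external provider) expects, but this commit does not show where it is invoked. A hedged sketch of how it could be wired up; the data source name, script path, and query values below are invented for illustration:

```hcl
data "external" "host_cert" {
  # Hypothetical path; the actual filename is not shown in this commit.
  program = ["${path.module}/files/issue_cert.sh"]

  query = {
    pubkey   = var.host_public_key  # hypothetical input variable
    host     = var.name
    cahost   = "ca.example.dmz"     # hypothetical CA host
    cascript = "/root/sign_cert.sh" # hypothetical signing script on the CA host
    cakey    = "/root/ca_ed25519"   # hypothetical CA key path
  }
}

# The signed host certificate (or "" when the CA was unreachable) is then
# available as data.external.host_cert.result.cert
```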

View file

@@ -0,0 +1,9 @@
version: 2
ethernets:
  ens:
    match:
      name: ens*
    dhcp4: true
    routes:
      - to: 0.0.0.0/0
        via: 192.168.30.1

View file

@@ -0,0 +1,54 @@
terraform {
required_providers {
libvirt = {
source = "dmacvicar/libvirt"
}
}
}
resource "libvirt_volume" "os" {
name = "${var.name}.qcow2"
pool = "disks"
size = 1024 * 1024 * 1024 * var.storage
base_volume_name = "debian-bookworm.qcow2"
base_volume_pool = "images"
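# Recreate the OS volume from the base image whenever the cloud-init disk below is
# replaced, so configuration changes take effect on a fresh disk.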
lifecycle {
replace_triggered_by = [
libvirt_cloudinit_disk.main.id
]
}
}
resource "libvirt_cloudinit_disk" "main" {
name = "${var.name}.iso"
pool = "cloudinit"
user_data = templatefile("${path.module}/files/cloud_init.cfg.tftpl", {
hostname = var.name
})
network_config = file("${path.module}/files/network_config.cfg")
}
resource "libvirt_domain" "main" {
name = var.name
memory = var.ram
vcpu = 4
autostart = true
disk {
volume_id = libvirt_volume.os.id
}
network_interface {
bridge = "bridgedmz"
hostname = var.name
}
cloudinit = libvirt_cloudinit_disk.main.id
lifecycle {
replace_triggered_by = [
libvirt_cloudinit_disk.main.id
]
}
}

View file

@@ -0,0 +1,13 @@
variable "name" {
type = string
}
variable "ram" {
type = number
description = "In MiB"
}
variable "storage" {
type = number
description = "In GiB"
}

View file

@@ -0,0 +1,44 @@
terraform {
required_providers {
libvirt = {
source = "dmacvicar/libvirt"
}
}
}
resource "libvirt_pool" "images" {
name = "images"
type = "dir"
path = "/var/lib/libvirt/pools/images"
}
resource "libvirt_pool" "cloudinit" {
name = "cloudinit"
type = "dir"
path = "/var/lib/libvirt/pools/cloudinit"
}
resource "libvirt_pool" "disks" {
name = "disks"
type = "dir"
path = "/var/lib/libvirt/pools/disks"
}
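# Base Debian cloud image; the debian module builds per-VM OS volumes on top of it
# via base_volume_name = "debian-bookworm.qcow2" in the images pool.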
resource "libvirt_volume" "debian_bookworm" {
name = "debian-bookworm.qcow2"
pool = libvirt_pool.images.name
source = "https://cloud.debian.org/images/cloud/bookworm/daily/latest/debian-12-generic-amd64-daily.qcow2"
}
resource "libvirt_network" "bridgedmz" {
name = "bridgedmz"
mode = "bridge"
bridge = "bridgedmz"
dhcp {
enabled = false
}
dns {
enabled = false
}
autostart = true
}