Compare commits

...

39 commits

Author SHA1 Message Date
eb90e5d1bd Change location of dnsmasq image 2024-09-17 14:19:44 +02:00
85cba9a3ff Deploy NixNG-built dnsmasq container image 2024-09-17 13:22:34 +02:00
ad4d78ed2a Move more stuff to kubernetes-deployments
Remove kubernetes stuff from readme
2024-09-07 21:59:17 +02:00
8744db7f1f Rename pikvm ansible playbook 2024-09-07 14:01:00 +02:00
6dd363a2a8 Restructure and clean up code 2024-09-07 13:59:04 +02:00
660191ab42 Cleanup after kubernetes deployment migration 2024-09-07 13:06:37 +02:00
a335dd4120 WIP: remove kubernetes deployments 2024-09-07 12:39:30 +02:00
c55b0752e7 radicale: 3.2.2.0 -> 3.2.3.0
immich: v1.112.1 -> v1.114.0
freshrss: 1.24.2 -> 1.24.3
2024-09-06 19:36:31 +02:00
be17c95d86 Improve documentation of recovering Longhorn volumes 2024-09-01 16:11:20 +02:00
d6f3aadeaf Enable nix garbage collection service 2024-09-01 11:39:53 +02:00
b139f3d469 Add Ansible playbook to configure PiKVM
Add Nix shell to flake
Monitor PiKVM with Prometheus
Serve Prometheus on /
2024-08-30 17:53:04 +02:00
04439a9ee5 Build tooling around nix-snapshotter 2024-08-29 06:53:05 +02:00
e9899c0d0f Resolve cyberchef and radicale images using nix-snapshotter
Increase inotify max user instances to 256
Disable tailscale by default
2024-08-25 17:04:31 +02:00
52efd614fe chore(jellyfin): update to 10.9.9
chore(radarr): update to 5.9.1
chore(paperless): update to 2.11.6
chore(syncthing): update to 1.27.10
chore(immich): update to 1.112.1
chore(freshrss): update to 1.24.2
2024-08-24 23:01:52 +02:00
55b18ef450 Refactor storage module
Add talos and pikvm machines
2024-08-24 22:30:46 +02:00
0539d35678 chore(forgejo): update to 8.0.1 2024-08-24 22:28:00 +02:00
5432d93f85 refactor: Convert configmaps containing secrets to secrets
closes #85
2024-08-04 14:59:11 +02:00
2c0a60097a chore: Update Paperless 2.3 -> 2.11.2
chore: Update Nextcloud 28 -> 29.0.4
chore: Update Syncthing 1.23.6 -> 1.27.9
chore: Update Forgejo 7.0.5 -> 8.0.0
chore: Update Immich 1.108.0 -> 1.111.0
chore: Update Kitchenowl 0.5.1 -> 0.5.2
chore: Pin Pihole to 2024.07.0
fix: Disallow rolling updates for immich ML
fix: Use tailscale host as domain
2024-07-30 22:29:10 +02:00
5a6b9f203a refactor: Extract all image names 2024-07-30 21:28:35 +02:00
872f8fe89e chore: Update jellyfin, radarr, prowlarr and sonarr 2024-07-30 21:01:41 +02:00
e21e8694c1 docs: Add more deployment instructions 2024-07-30 20:34:37 +02:00
6794fce2a2 fix: Don't use tailscale DNS for physical servers
fix: Don't do rolling updates for pihole
chore: Update flake inputs
2024-07-30 20:33:07 +02:00
8067d9a301 docs: Update readme 2024-07-28 14:48:43 +02:00
9fe5ecbb8d refactor: Set image pull policy to IfNotPresent everywhere
closes #101
2024-07-28 14:32:28 +02:00
bf1facabd7 feat: Replace transmission with deluge 2024-07-28 14:14:38 +02:00
1f72d3463e feat: Deploy ntfy
closes #93
2024-07-27 22:32:23 +02:00
ea84627e59 feat: Use Attic as binary cache
fix: Improve flake outputs yet again
refactor: Delete dead code related to hamnet
2024-07-27 21:12:24 +02:00
6db856cfe9 feat: Put nextcloud and immich behind tailscale 2024-07-25 20:30:21 +02:00
5398db801c feat(traefik): Expose traefik dashboard on Tailscale 2024-07-24 22:21:47 +02:00
fd70b4d934 Fix: Fix Traefik HTTP redirect after update 2024-07-24 21:54:31 +02:00
ae655bba6a chore: Disable EK poule 2024-07-24 21:43:50 +02:00
f961fc24ea feat: Expose Radicale, Paperless and FreshRSS only on Tailscale
fix: Fix flake output names
2024-07-24 21:25:51 +02:00
4e619eb0c4 feat(tailscale): Enable warwick as exit node and subnet router 2024-07-23 22:50:11 +02:00
15e0dce041 feat: Enable tailscale on physical servers
fix: Fix Nix flake checks
2024-07-22 22:54:08 +02:00
c22d356191 Re-enable port 444 for inbucket @pizzaniels 2024-07-22 20:44:51 +02:00
92b096608f Revert "refactor: Remove support for port 444"
This reverts commit 052c75849d.
2024-07-22 20:36:28 +02:00
052c75849d refactor: Remove support for port 444 2024-07-21 21:26:20 +02:00
d3d6abdde8 feat: Rollout tailscale for media stack
fix: Add default for tailscale ingress option
2024-07-21 21:00:32 +02:00
6152ce4577 feat: Expose longhorn UI on tailnet
refactor: Merge inbucket services
2024-07-21 20:30:47 +02:00
93 changed files with 771 additions and 9435 deletions

View file

@ -5,9 +5,11 @@ keys:
- &server_jefke age1upnqu4rpxppdw9zmqu8x3rnaqq2r6m82y25zvry5cec63vjsd9gqtl9e02
- &server_lewis age108fn93z2c55g9dm9cv5v4w47pykf3khz7e3dmnpv5dhchwnaau0qs20stq
- &server_warwick age1th8rdw4fs3vmgy9gzc0k9xy88tddjj4vasepckfx9h4nlzsg3q3q4cjgwu
- &server_talos age1h5q9ul9f8vd7w7s2fvmpytaghgpv97a9r237agwzc52c76xsdegsugml73
- &server_pikvm age1smqas3tre2hptnyn72fdzghqcnej48066l4hp6y98n8lkpm3ds4s8t8s0w
creation_rules:
- path_regex: secrets/(kubernetes|serverKeys).yaml$
- path_regex: secrets/serverKeys.yaml$
key_groups:
- age:
- *admin_pim
@ -21,3 +23,5 @@ creation_rules:
- *server_jefke
- *server_lewis
- *server_warwick
- *server_talos
- *server_pikvm

View file

@ -9,18 +9,14 @@ Nix definitions to configure our servers at home.
- [dns.nix](https://github.com/kirelagin/dns.nix): A Nix DSL for defining DNS zones
- [flake-utils](https://github.com/numtide/flake-utils): Handy utilities to develop Nix flakes
- [nixos-hardware](https://github.com/NixOS/nixos-hardware): Hardware-specific NixOS modules. Doing the heavy lifting for our Raspberry Pi
- [kubenix](https://kubenix.org/): declare and deploy Kubernetes resources using Nix
- [nixhelm](https://github.com/farcaller/nixhelm): Nix-digestible Helm charts
- [sops-nix](https://github.com/Mic92/sops-nix): Sops secret management for Nix
## Installation
### Prerequisites
## Prerequisites
1. Install the Nix package manager or NixOS ([link](https://nixos.org/download))
2. Enable flake and nix commands ([link](https://nixos.wiki/wiki/Flakes#Enable_flakes_permanently_in_NixOS))
### Bootstrapping
## Bootstrapping
We bootstrap our servers using [nixos-anywhere](https://github.com/nix-community/nixos-anywhere).
This reformats the hard disk of the server and installs a fresh NixOS.
@ -28,34 +24,15 @@ Additionally, it deploys an age identity, which is later used for decrypting sec
⚠️ This will wipe your server completely ⚠️
1. Make sure you have a [Secret service](https://www.gnu.org/software/emacs/manual/html_node/auth/Secret-Service-API.html) running (such as KeePassXC) that provides the age identity.
1. Make sure you can decrypt the Sops-encrypted secrets in `secrets/`. You can test this by running `sops -d secrets/serverKeys.yaml`.
2. Ensure you have root SSH access to the server.
3. Run nixos-anywhere: `nix run '.#bootstrap' <servername> <hostname>`
### Deployment
## Deployment
To deploy all servers at once: `nix run 'nixpkgs#deploy-rs' -- '.#' -k`
To deploy only one server: `nix run 'nixpkgs#deploy-rs' -- -k --targets '.#<host>'`
## Deploying to Kubernetes
To deploy to the Kubernetes cluster, first make sure you have an admin account on the cluster.
You can generate this using `nix run '.#gen-k3s-cert' <username> <servername> ~/.kube`, assuming you have SSH access to the master node.
This puts a private key, a signed certificate and a kubeconfig in the kubeconfig directory.
We are now ready to deploy to the Kubernetes cluster.
Deployments are done through an experimental Kubernetes feature called [ApplySets](https://kubernetes.io/docs/tasks/manage-kubernetes-objects/declarative-config/#how-to-delete-objects).
Each applyset is responsible for a set number of resources within a namespace.
If the cluster has not been initialized yet, we must bootstrap it first.
Run these deployments:
- `nix run '.#bootstrap-default.deploy'`
- `nix run '.#bootstrap-kube-system.deploy'`
Now the cluster has been initialized and we can deploy applications.
To explore which applications we can deploy, run `nix flake show`.
Then, for each application, run `nix run '.#<application>.deploy'`.
## Known bugs
### Rsync not available during bootstrap

4
ansible/ansible.cfg Normal file
View file

@ -0,0 +1,4 @@
# Ansible configuration for the PiKVM playbooks.
[defaults]
# Inventory file lives next to this config file.
inventory = inventory
# PiKVM's root filesystem is read-only; keep Ansible's remote temp files in /tmp.
remote_tmp = /tmp/ansible
# NOTE(review): `ansible_python_interpreter` is an inventory/host variable and is
# not recognized in ansible.cfg; the corresponding [defaults] key is
# `interpreter_python` (config setting INTERPRETER_PYTHON).
interpreter_python = /usr/bin/python3.12

View file

@ -0,0 +1,5 @@
# Ansible inventory: the PiKVM appliance on the DMZ network.
all:
  hosts:
    pikvm:
      # DNS name of the PiKVM device — assumes pikvm.dmz resolves locally.
      ansible_host: pikvm.dmz
      # PiKVM images are administered as root.
      ansible_user: root

6
ansible/pikvm.yml Normal file
View file

@ -0,0 +1,6 @@
---
# Playbook that configures the PiKVM server by applying the `pikvm` role
# to every host in the inventory.
- name: Configure PiKVM server
  hosts: all
  roles:
    - pikvm

View file

@ -0,0 +1,20 @@
---
# Tasks for the pikvm role. PiKVM keeps its root filesystem mounted
# read-only; we temporarily remount it read-write, install and enable
# Tailscale, then restore the read-only state.

- name: Mount filesystem as read-write
  # `rw` is PiKVM's own helper command that remounts / read-write.
  ansible.builtin.command: rw
  vars:
    # Mount entry for the root filesystem, taken from gathered facts.
    root_mount: "{{ ansible_mounts | selectattr('mount', 'equalto', '/') | first }}"
  # Only remount when / is currently mounted read-only.
  when: "'ro' in root_mount.options.split(',')"

- name: Install Tailscale
  community.general.pacman:
    # PiKVM-specific Tailscale package — presumably from the PiKVM pacman
    # repositories; verify the package name against the target image.
    name: tailscale-pikvm
    state: latest

- name: Enable Tailscale
  ansible.builtin.systemd_service:
    name: tailscaled
    state: started
    enabled: true

- name: Mount filesystem as read-only
  # `ro` is PiKVM's helper that remounts / read-only, restoring the
  # appliance's default state. Runs unconditionally.
  ansible.builtin.command: ro

View file

@ -1,12 +1,15 @@
{ self, pkgs, machines, flake-utils, deploy-rs, ... }: flake-utils.lib.eachDefaultSystem (system: {
{ self, nixpkgs, flake-utils, deploy-rs, ... }: flake-utils.lib.eachDefaultSystem (system:
let
pkgs = nixpkgs.legacyPackages.${system};
in
{
# Deploy-rs' flake checks seem broken for architectures different from the deployment machine.
# We skip these here.
checks = deploy-rs.lib.${system}.deployChecks (
pkgs.lib.attrsets.updateManyAttrsByPath [{
path = [ "nodes" ];
update = pkgs.lib.attrsets.filterAttrs (name: node:
machines.${name}.arch == system
self.machines.${name}.arch == system
);
}]
self.deploy

File diff suppressed because one or more lines are too long

View file

@ -1,9 +1,10 @@
{ self, pkgs, machines, deploy-rs, ... }:
{ self, deploy-rs, ... }:
let
deployArch = "x86_64-linux";
mkDeployNodes = nodeDef:
builtins.mapAttrs
(name: machine: nodeDef name machine)
machines;
self.machines.${deployArch};
in
{
deploy = {
@ -17,7 +18,7 @@ in
{
hostname = nixosConfiguration.config.networking.fqdn;
profiles.system = {
remoteBuild = machine.arch != pkgs.stdenv.hostPlatform.system;
remoteBuild = machine.arch != deployArch;
path = deploy-rs.lib.${machine.arch}.activate.nixos nixosConfiguration;
};
});

View file

@ -1,64 +0,0 @@
# Longhorn notes
## Migration from NFS to Longhorn
1. Delete the workload, and delete the PV and PVC using NFS.
2. Create Longhorn volumes as described below.
3. Copy NFS data from lewis.dmz to local disk.
4. Spin up a temporary pod and mount the Longhorn volume(s) in it:
```nix
{
pods.testje.spec = {
containers.testje = {
image = "nginx";
volumeMounts = [
{
name = "uploads";
mountPath = "/hedgedoc/public/uploads";
}
];
};
volumes = {
uploads.persistentVolumeClaim.claimName = "hedgedoc-uploads";
};
};
}
```
5. Use `kubectl cp` to copy the data from the local disk to the pod.
6. Delete the temporary pod.
7. Be sure to set the group ownership of the mount to the correct GID.
8. Create the workload with updated volume mounts.
9. Delete the data from local disk.
## Creation of new Longhorn volumes
While it seems handy to use a K8s StorageClass for Longhorn, we do *not* want to use that.
If you use a StorageClass, a PV and Longhorn volume will be automatically provisioned.
These will have the name `pvc-<UID of PVC>`, where the UID of the PVC is random.
This makes it hard to restore a backup to a Longhorn volume with the correct name.
Instead, we want to manually create the Longhorn volumes via the web UI.
Then, we can create the PV and PVC as usual using our K8s provisioning tool (e.g. Kubectl/Kubenix).
Follow these actions to create a Volume:
1. Using the Longhorn web UI, create a new Longhorn volume, keeping the following in mind:
- The size can be somewhat more than what we expect to reasonably use. We use storage-overprovisioning, so the total size of volumes can exceed real disk size.
- The number of replicas should be 2.
2. Enable the "backup-nfs" recurring job for the Longhorn volume.
3. Disable the "default" recurring job group for the Longhorn volume.
4. Create the PV, PVC and workload as usual.
## Disaster recovery using Longhorn backups
Backing up Longhorn volumes is very easy, but restoring them is more tricky.
We consider here the case when all our machines are wiped, and all we have left is Longhorn backups.
To restore a backup, perform the following actions:
1. Restore the latest snapshot in the relevant Longhorn backup, keeping the following in mind:
- The name should remain the same (i.e. the one chosen at Longhorn volume creation).
- The number of replicas should be 2.
- Disable recurring jobs.
2. Enable the "backup-nfs" recurring job for the Longhorn volume.
3. Disable the "default" recurring job group for the Longhorn volume.
4. Create the PV, PVC and workload as usual.

View file

@ -1,11 +0,0 @@
# Media
[profilarr](https://github.com/Dictionarry-Hub/profilarr) was used to import the "1080p Transparent" quality profile to both Radarr and Sonarr.
Profilarr has some neat tools that magically applies custom formats and quality definitions.
As far as I understand, these are used to identify files that are high quality.
Profilarr can then also import a quality profile, which uses the aforementioned definitions to select torrents in my desired format.
In my case, I have chosen "1080p Transparent."
According to the [docs](https://selectarr.pages.dev/):
> Projected Size: 10 - 15gb
>
> Description: Prioritizes 1080p transparent releases. Lossy audio is allowed, and all upgrades are allowed. HDR is banned.

View file

@ -1,7 +0,0 @@
#!/usr/bin/env bash
# Render a Kubernetes manifest with vals (resolving ref+sops:// secret
# references) and apply it as an ApplySet, pruning resources that are no
# longer present in the manifest.
#
# Required environment variables (injected by the Nix wrapper):
#   MANIFEST  - path to the manifest file to render and apply
#   APPLYSET  - name of the ApplySet used for pruning
#   NAMESPACE - namespace to apply the resources in
set -euo pipefail
IFS=$'\n\t'

# ApplySet support is an alpha kubectl feature gated behind this variable.
export KUBECTL_APPLYSET=true

# Quote all expansions: under `set -u` an unset variable fails loudly, and
# paths containing special characters are handled correctly.
vals eval -fail-on-missing-key-in-map <"$MANIFEST" | kubectl apply -f - --prune --applyset "$APPLYSET" --namespace "$NAMESPACE"

View file

@ -1,189 +0,0 @@
# Flake part that turns each kubenix module into a deployable Kubernetes
# application: exposed as a flake app (`nix run '.#<name>.deploy'`) and as
# a package containing the rendered manifest.
{ self, pkgs, machines, dns, myLib, flake-utils, kubenix, nixhelm, blog-pim, ... }: flake-utils.lib.eachDefaultSystem
  (system:
  let
    # The applyset-deploy.sh script packaged for the Nix store, with its
    # shebang patched to a store path.
    deployScript = (pkgs.writeScriptBin "applyset-deploy.sh" (builtins.readFile ./applyset-deploy.sh)).overrideAttrs (old: {
      buildCommand = "${old.buildCommand}\npatchShebangs $out";
    });

    # Evaluate a kubenix module into Kubernetes resources for a namespace.
    mkKubernetes = name: module: namespace: (kubenix.evalModules.${system} {
      specialArgs = { inherit namespace myLib blog-pim dns nixhelm system machines; };
      module = { kubenix, ... }:
        {
          imports = [
            kubenix.modules.k8s
            kubenix.modules.helm
            "${self}/kubenix-modules/custom"
            "${self}/kubenix-modules/custom-types.nix"
            module
          ];

          config = {
            kubenix.project = name;
            kubernetes.namespace = namespace;
          };
        };
    }).config.kubernetes;

    # Package output: just the rendered manifest file.
    mkManifest = name: { module, namespace }:
      {
        manifest = (mkKubernetes name module namespace).result;
      };

    # App output: the deploy script wrapped with vals and kubectl on PATH
    # and the manifest/applyset/namespace baked in via environment variables.
    mkDeployApp = name: { module, namespace }:
      let
        kubernetes = mkKubernetes name module namespace;
        kubeconfig = kubernetes.kubeconfig or "";
        result = kubernetes.result or "";

        wrappedDeployScript = pkgs.symlinkJoin
          {
            name = "applyset-deploy.sh";
            paths = [ deployScript pkgs.vals pkgs.kubectl ];
            buildInputs = [ pkgs.makeWrapper ];
            passthru.manifest = result;
            meta.mainProgram = "applyset-deploy.sh";

            # KUBECONFIG defaults to the evaluated module's kubeconfig but can
            # be overridden by the caller's environment.
            postBuild = ''
              wrapProgram $out/bin/applyset-deploy.sh \
                --suffix PATH : "$out/bin" \
                --run 'export KUBECONFIG=''${KUBECONFIG:-${toString kubeconfig}}' \
                --set MANIFEST '${result}' \
                --set APPLYSET 'applyset-${name}' \
                --set NAMESPACE '${namespace}'
            '';
          };
      in
      {
        deploy = {
          type = "app";
          program = "${pkgs.lib.getExe wrappedDeployScript}";
        };
      };

    # All deployable applications: kubenix module plus target namespace.
    # The bootstrap-* entries must be deployed first on a fresh cluster.
    deployers = {
      bootstrap-default = {
        module = "${self}/kubenix-modules/bootstrap-default.nix";
        namespace = "default";
      };

      bootstrap-kube-system = {
        module = "${self}/kubenix-modules/bootstrap-kube-system.nix";
        namespace = "kube-system";
      };

      cyberchef = {
        module = "${self}/kubenix-modules/cyberchef.nix";
        namespace = "static-websites";
      };

      freshrss = {
        module = "${self}/kubenix-modules/freshrss.nix";
        namespace = "freshrss";
      };

      radicale = {
        module = "${self}/kubenix-modules/radicale.nix";
        namespace = "radicale";
      };

      kms = {
        module = "${self}/kubenix-modules/kms.nix";
        namespace = "kms";
      };

      atuin = {
        module = "${self}/kubenix-modules/atuin.nix";
        namespace = "atuin";
      };

      blog = {
        module = "${self}/kubenix-modules/blog.nix";
        namespace = "static-websites";
      };

      nextcloud = {
        module = "${self}/kubenix-modules/nextcloud.nix";
        namespace = "nextcloud";
      };

      hedgedoc = {
        module = "${self}/kubenix-modules/hedgedoc.nix";
        namespace = "hedgedoc";
      };

      kitchenowl = {
        module = "${self}/kubenix-modules/kitchenowl.nix";
        namespace = "kitchenowl";
      };

      forgejo = {
        module = "${self}/kubenix-modules/forgejo";
        namespace = "forgejo";
      };

      paperless = {
        module = "${self}/kubenix-modules/paperless.nix";
        namespace = "paperless";
      };

      syncthing = {
        module = "${self}/kubenix-modules/syncthing.nix";
        namespace = "syncthing";
      };

      pihole = {
        module = "${self}/kubenix-modules/pihole.nix";
        namespace = "dns";
      };

      immich = {
        module = "${self}/kubenix-modules/immich.nix";
        namespace = "immich";
      };

      attic = {
        module = "${self}/kubenix-modules/attic.nix";
        namespace = "attic";
      };

      inbucket = {
        module = "${self}/kubenix-modules/inbucket.nix";
        namespace = "inbucket";
      };

      dnsmasq = {
        module = "${self}/kubenix-modules/dnsmasq.nix";
        namespace = "dns";
      };

      bind9 = {
        module = "${self}/kubenix-modules/bind9";
        namespace = "dns";
      };

      media = {
        module = "${self}/kubenix-modules/media.nix";
        namespace = "media";
      };

      traefik = {
        module = "${self}/kubenix-modules/traefik.nix";
        namespace = "kube-system";
      };

      minecraft = {
        module = "${self}/kubenix-modules/minecraft.nix";
        namespace = "minecraft";
      };

      tailscale = {
        module = "${self}/kubenix-modules/tailscale.nix";
        namespace = "tailscale";
      };
    };
  in
  {
    apps = builtins.mapAttrs mkDeployApp deployers;
    packages = builtins.mapAttrs mkManifest deployers;
  })

View file

@ -1,88 +0,0 @@
#!/usr/bin/env bash
# Generate a client certificate and kubeconfig for a K3s cluster.
#
# Usage: gen-k3s-cert.sh USERNAME HOST [OUTPUTPATH]
#   USERNAME   - common name for the client certificate (K8s user name)
#   HOST       - K3s master node; requires root SSH access
#   OUTPUTPATH - directory for the key, certificate and kubeconfig (default: .)
#
# Writes OUTPUTPATH/USERNAME.key, OUTPUTPATH/USERNAME.crt and OUTPUTPATH/config.
set -euo pipefail
IFS=$'\n\t'

username="${1-}"
host="${2-}"
output_path="${3:-.}"

if [ -z "$username" ] || [ -z "$host" ]
then
  echo "Usage: $0 USERNAME HOST [OUTPUTPATH]"
  exit 1
fi

# Create a temporary directory for the key material.
temp=$(mktemp -d)

# Clean up the temporary directory on exit.
cleanup() {
  rm -rf "$temp"
}
trap cleanup EXIT

echo "Generating the private key"
openssl genpkey -algorithm ed25519 -out "$temp/key.pem"

echo "Generating the certificate request"
openssl req -new -key "$temp/key.pem" -out "$temp/req.csr" -subj "/CN=$username"

echo "Creating K8S CSR manifest"
# Base64-encode the CSR on a single line, as required by the CSR resource.
csr="$(base64 <"$temp/req.csr" | tr -d '\n')"

k8s_csr="apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: $username-csr
spec:
  request: $csr
  expirationSeconds: 307584000 # 10 years
  signerName: kubernetes.io/kube-apiserver-client
  usages:
  - digital signature
  - key encipherment
  - client auth
"

echo "Creating K8S CSR resource"
ssh "root@$host" "echo \"$k8s_csr\" | k3s kubectl apply -f -"

echo "Approving K8S CSR"
ssh "root@$host" "k3s kubectl certificate approve $username-csr"

echo "Retrieving approved certificate"
encoded_cert="$(ssh "root@$host" "k3s kubectl get csr $username-csr -o jsonpath='{.status.certificate}'")"

echo "Retrieving default K3S kubeconfig"
base_kubeconfig="$(ssh "root@$host" "cat /etc/rancher/k3s/k3s.yaml")"

echo "Getting certificate authority data from default kubeconfig"
cert_authority_data="$(echo -n "$base_kubeconfig" | yq -r '.clusters[0].cluster."certificate-authority-data"')"

echo "Generating final kubeconfig"
result_kubeconfig="apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: $cert_authority_data
    server: https://$host:6443
  name: default
contexts:
- context:
    cluster: default
    user: $username
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: $username
  user:
    client-certificate: $username.crt
    client-key: $username.key
"

echo "Writing resulting files to $output_path"
# Quote all path expansions so usernames/paths with special characters work.
echo -n "$encoded_cert" | base64 -d > "$output_path/$username.crt"
echo -n "$result_kubeconfig" > "$output_path/config"
cp "$temp/key.pem" "$output_path/$username.key"

View file

@ -1,28 +1,5 @@
{
"nodes": {
"blog-pim": {
"inputs": {
"flutils": "flutils",
"nginx": "nginx",
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1715503080,
"narHash": "sha256-/VnzHTpTq3u0z2Vgu/vKU0SHwOUIu8olHDORWT0IofM=",
"ref": "refs/heads/master",
"rev": "7296f7f5bf5f089a5137036dcbd8058cf3e4a9e5",
"revCount": 21,
"type": "git",
"url": "https://git.kun.is/home/blog-pim"
},
"original": {
"rev": "7296f7f5bf5f089a5137036dcbd8058cf3e4a9e5",
"type": "git",
"url": "https://git.kun.is/home/blog-pim"
}
},
"deploy-rs": {
"inputs": {
"flake-compat": "flake-compat",
@ -50,11 +27,11 @@
]
},
"locked": {
"lastModified": 1720661479,
"narHash": "sha256-nsGgA14vVn0GGiqEfomtVgviRJCuSR3UEopfP8ixW1I=",
"lastModified": 1722217815,
"narHash": "sha256-8r5AJ3n8WEDw3rsZLALSuFQ5kJyWOcssNZvPxYLr2yc=",
"owner": "nix-community",
"repo": "disko",
"rev": "786965e1b1ed3fd2018d78399984f461e2a44689",
"rev": "1e6f8a7b4634fc051cc9361959bf414fcf17e094",
"type": "github"
},
"original": {
@ -170,60 +147,7 @@
},
"flake-utils_2": {
"inputs": {
"systems": "systems_3"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_3": {
"inputs": {
"systems": "systems_5"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"id": "flake-utils",
"type": "indirect"
}
},
"flake-utils_4": {
"inputs": {
"systems": "systems_6"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flutils": {
"inputs": {
"systems": "systems"
"systems": "systems_2"
},
"locked": {
"lastModified": 1710146030,
@ -245,7 +169,7 @@
"nixpkgs": [
"nixpkgs-unstable"
],
"systems": "systems_4",
"systems": "systems_3",
"treefmt": "treefmt"
},
"locked": {
@ -262,59 +186,6 @@
"type": "github"
}
},
"nginx": {
"flake": false,
"locked": {
"lastModified": 1713277799,
"narHash": "sha256-VNDzQvUGeh54F3s6SIq6lBrp4RatURzJoJqVorexttA=",
"owner": "nginx",
"repo": "nginx",
"rev": "d8a849ae3c99ee5ca82c9a06074761e937dac6d6",
"type": "github"
},
"original": {
"owner": "nginx",
"repo": "nginx",
"type": "github"
}
},
"nix-github-actions": {
"inputs": {
"nixpkgs": [
"nixhelm",
"poetry2nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1703863825,
"narHash": "sha256-rXwqjtwiGKJheXB43ybM8NwWB8rO2dSRrEqes0S7F5Y=",
"owner": "nix-community",
"repo": "nix-github-actions",
"rev": "5163432afc817cf8bd1f031418d1869e4c9d5547",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nix-github-actions",
"type": "github"
}
},
"nix-kube-generators": {
"locked": {
"lastModified": 1708155396,
"narHash": "sha256-A/BIeJjiRS7sBYP6tFJa/WHDPHe7DGTCkSEKXttYeAQ=",
"owner": "farcaller",
"repo": "nix-kube-generators",
"rev": "14dbd5e5b40615937900f71d9a9851b59b4d9a88",
"type": "github"
},
"original": {
"owner": "farcaller",
"repo": "nix-kube-generators",
"type": "github"
}
},
"nix-snapshotter": {
"inputs": {
"flake-compat": "flake-compat_3",
@ -337,36 +208,34 @@
"type": "github"
}
},
"nixhelm": {
"nixng": {
"inputs": {
"flake-utils": "flake-utils_3",
"nix-kube-generators": "nix-kube-generators",
"nixpkgs": [
"nixpkgs"
],
"poetry2nix": "poetry2nix"
]
},
"locked": {
"lastModified": 1720746402,
"narHash": "sha256-+dGh0ruRbwZLymQQkvK1iqgg7J6gRp4wHxa8OqsNUlU=",
"owner": "farcaller",
"repo": "nixhelm",
"rev": "6fbf227d6b6b17e14a50c84ae66e9541306d4c98",
"lastModified": 1726571270,
"narHash": "sha256-LEug48WOL+mmFYtKM57e/oudgjBk2Km5zIP3p27hF8I=",
"owner": "pizzapim",
"repo": "NixNG",
"rev": "9538892da603608f0176d07d33b1265e038c0adf",
"type": "github"
},
"original": {
"owner": "farcaller",
"repo": "nixhelm",
"owner": "pizzapim",
"ref": "dnsmasq",
"repo": "NixNG",
"type": "github"
}
},
"nixos-hardware": {
"locked": {
"lastModified": 1720737798,
"narHash": "sha256-G/OtEAts7ZUvW5lrGMXSb8HqRp2Jr9I7reBuvCOL54w=",
"lastModified": 1722332872,
"narHash": "sha256-2xLM4sc5QBfi0U/AANJAW21Bj4ZX479MHPMPkB+eKBU=",
"owner": "NixOS",
"repo": "nixos-hardware",
"rev": "c5013aa7ce2c7ec90acee5d965d950c8348db751",
"rev": "14c333162ba53c02853add87a0000cbd7aa230c2",
"type": "github"
},
"original": {
@ -394,11 +263,11 @@
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1720282526,
"narHash": "sha256-dudRkHPRivMNOhd04YI+v4sWvn2SnN5ODSPIu5IVbco=",
"lastModified": 1721524707,
"narHash": "sha256-5NctRsoE54N86nWd0psae70YSLfrOek3Kv1e8KoXe/0=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "550ac3e955c30fe96dd8b2223e37e0f5d225c927",
"rev": "556533a23879fc7e5f98dd2e0b31a6911a213171",
"type": "github"
},
"original": {
@ -410,11 +279,11 @@
},
"nixpkgs-unstable": {
"locked": {
"lastModified": 1720687749,
"narHash": "sha256-nqJ+iK/zyqCJ/YShqCpZ2cJKE1UtjZIEUWLUFZqvxcA=",
"lastModified": 1722141560,
"narHash": "sha256-Ul3rIdesWaiW56PS/Ak3UlJdkwBrD4UcagCmXZR9Z7Y=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "6af55cb91ca2005516b9562f707bb99c8f79bf77",
"rev": "038fb464fcfa79b4f08131b07f2d8c9a6bcc4160",
"type": "github"
},
"original": {
@ -426,11 +295,11 @@
},
"nixpkgs_2": {
"locked": {
"lastModified": 1720691131,
"narHash": "sha256-CWT+KN8aTPyMIx8P303gsVxUnkinIz0a/Cmasz1jyIM=",
"lastModified": 1722221733,
"narHash": "sha256-sga9SrrPb+pQJxG1ttJfMPheZvDOxApFfwXCFO0H9xw=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "a046c1202e11b62cbede5385ba64908feb7bfac4",
"rev": "12bf09802d77264e441f48e25459c10c93eada2e",
"type": "github"
},
"original": {
@ -440,41 +309,15 @@
"type": "github"
}
},
"poetry2nix": {
"inputs": {
"flake-utils": "flake-utils_4",
"nix-github-actions": "nix-github-actions",
"nixpkgs": [
"nixhelm",
"nixpkgs"
],
"systems": "systems_7",
"treefmt-nix": "treefmt-nix"
},
"locked": {
"lastModified": 1718285706,
"narHash": "sha256-DScsBM+kZvxOva7QegfdtleebMXh30XPxDQr/1IGKYo=",
"owner": "nix-community",
"repo": "poetry2nix",
"rev": "a5be1bbbe0af0266147a88e0ec43b18c722f2bb9",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "poetry2nix",
"type": "github"
}
},
"root": {
"inputs": {
"blog-pim": "blog-pim",
"deploy-rs": "deploy-rs",
"disko": "disko",
"dns": "dns",
"flake-utils": "flake-utils_2",
"kubenix": "kubenix",
"nix-snapshotter": "nix-snapshotter",
"nixhelm": "nixhelm",
"nixng": "nixng",
"nixos-hardware": "nixos-hardware",
"nixpkgs": "nixpkgs_2",
"nixpkgs-unstable": "nixpkgs-unstable",
@ -489,11 +332,11 @@
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1720479166,
"narHash": "sha256-jqvhLDXzTLTHq9ZviFOpcTmXXmnbLfz7mWhgMNipMN4=",
"lastModified": 1722114803,
"narHash": "sha256-s6YhI8UHwQvO4cIFLwl1wZ1eS5Cuuw7ld2VzUchdFP0=",
"owner": "Mic92",
"repo": "sops-nix",
"rev": "67035a355b1d52d2d238501f8cc1a18706979760",
"rev": "eb34eb588132d653e4c4925d862f1e5a227cc2ab",
"type": "github"
},
"original": {
@ -533,65 +376,6 @@
}
},
"systems_3": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_4": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"id": "systems",
"type": "indirect"
}
},
"systems_5": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_6": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_7": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
@ -626,31 +410,9 @@
"type": "github"
}
},
"treefmt-nix": {
"inputs": {
"nixpkgs": [
"nixhelm",
"poetry2nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1717850719,
"narHash": "sha256-npYqVg+Wk4oxnWrnVG7416fpfrlRhp/lQ6wQ4DHI8YE=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "4fc1c45a5f50169f9f29f6a98a438fb910b834ed",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "treefmt-nix",
"type": "github"
}
},
"utils": {
"inputs": {
"systems": "systems_2"
"systems": "systems"
},
"locked": {
"lastModified": 1701680307,

View file

@ -1,6 +1,16 @@
{
description = "NixOS definitions for our home servers";
nixConfig = {
extra-substituters = [
"https://attic.kun.is/nixos-servers"
];
extra-trusted-public-keys = [
"nixos-servers:JThtPjQjDu3b3qXLgeXSJGgKL4OKQ4uLgTtoo1rg6Vw="
];
};
inputs = {
nixpkgs.url = "github:nixos/nixpkgs/nixos-24.05";
nixpkgs-unstable.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
@ -18,22 +28,6 @@
inputs.nixpkgs.follows = "nixpkgs";
};
nixhelm = {
url = "github:farcaller/nixhelm";
inputs.nixpkgs.follows = "nixpkgs";
};
blog-pim = {
# HACK: pinning this to a specific revision, as my automation is broken.
url = "git+https://git.kun.is/home/blog-pim?rev=7296f7f5bf5f089a5137036dcbd8058cf3e4a9e5";
inputs.nixpkgs.follows = "nixpkgs";
};
kubenix = {
url = "github:pizzapim/kubenix";
inputs.nixpkgs.follows = "nixpkgs-unstable";
};
sops-nix = {
url = "github:Mic92/sops-nix";
inputs.nixpkgs.follows = "nixpkgs";
@ -43,22 +37,28 @@
url = "github:pdtpartners/nix-snapshotter";
inputs.nixpkgs.follows = "nixpkgs-unstable";
};
nixng = {
url = "github:pizzapim/NixNG/dnsmasq";
inputs.nixpkgs.follows = "nixpkgs";
};
kubenix = {
url = "github:pizzapim/kubenix";
inputs.nixpkgs.follows = "nixpkgs-unstable";
};
};
outputs =
inputs@{ self, nixpkgs, flake-utils, ... }:
let
system = "x86_64-linux";
pkgs = import nixpkgs { inherit system; };
machines = (pkgs.lib.modules.evalModules { modules = [ (import ./machines) ]; }).config.machines;
myLib = import ./my-lib pkgs.lib;
in
flake-utils.lib.meld (inputs // { inherit pkgs machines myLib; }) [
./flake-parts/scripts
./flake-parts/checks.nix
./flake-parts/deploy.nix
./flake-parts/nixos.nix
./flake-parts/kubenix.nix
inputs@{ nixpkgs, flake-utils, ... }:
flake-utils.lib.meld inputs [
./scripts
./checks.nix
./deploy.nix
./nixos.nix
./shell.nix
./utils
./machines
] // (flake-utils.lib.eachDefaultSystem (system: {
formatter = nixpkgs.legacyPackages.${system}.nixfmt;
}));

View file

@ -1,55 +0,0 @@
{
  # Kubernetes resources supporting an Argo Workflows deployment.
  kubernetes.resources = {
    # Ingress exposing the Argo Workflows server at workflows.kun.is.
    ingresses.argo-workflows = {
      metadata.annotations = {
        # Obtain a TLS certificate via cert-manager's Let's Encrypt issuer.
        "cert-manager.io/cluster-issuer" = "letsencrypt";
        # Only reachable through Traefik's "localsecure" entrypoint.
        "traefik.ingress.kubernetes.io/router.entrypoints" = "localsecure";
      };

      spec = {
        ingressClassName = "traefik";

        rules = [{
          host = "workflows.kun.is";

          http.paths = [{
            path = "/";
            pathType = "Prefix";

            # Route to the argo-workflows-server service (default API port 2746).
            backend.service = {
              name = "argo-workflows-server";
              port.number = 2746;
            };
          }];
        }];

        tls = [{
          secretName = "argo-workflows-tls";
          hosts = [ "workflows.kun.is" ];
        }];
      };
    };

    # Cluster role granting full access to all Argo (argoproj.io) resources.
    clusterRoles.argo-admin.rules = [{
      apiGroups = [ "argoproj.io" ];
      verbs = [ "*" ];
      resources = [ "*" ];
    }];

    # Service account that workflows run under with admin rights.
    serviceAccounts.argo-admin = { };

    # Bind the admin cluster role to the service account in "default".
    clusterRoleBindings.argo-admin = {
      subjects = [{
        kind = "ServiceAccount";
        name = "argo-admin";
        namespace = "default";
      }];

      roleRef = {
        kind = "ClusterRole";
        name = "argo-admin";
        apiGroup = "rbac.authorization.k8s.io";
      };
    };
  };
}

View file

@ -1,190 +0,0 @@
# Attic Nix binary cache server plus its PostgreSQL database, exposed
# through the lab ingress at attic.kun.is. The atticd configuration is
# rendered to TOML at eval time and shipped via a ConfigMap; secrets are
# resolved at deploy time through Vals/sops references.
{ pkgs, ... }: {
kubernetes.resources =
let
atticSettings = {
# The '+' is to explicitly denote the end of the Vals expression.
# This is done because we quote the template for the INI file.
# See: https://github.com/helmfile/vals?tab=readme-ov-file#expression-syntax
# NOTE(review): this ref uses "#attic/..." while the database secret
# below uses "#/attic/..." — verify both fragment spellings resolve.
database.url = "ref+sops://secrets/kubernetes.yaml#attic/databaseURL+";
storage = {
type = "local";
path = "/var/lib/atticd/storage";
};
listen = "[::]:8080";
# Data chunking
#
# Warning: If you change any of the values here, it will be
# difficult to reuse existing chunks for newly-uploaded NARs
# since the cutpoints will be different. As a result, the
# deduplication ratio will suffer for a while after the change.
chunking = {
# The minimum NAR size to trigger chunking
#
# If 0, chunking is disabled entirely for newly-uploaded NARs.
# If 1, all NARs are chunked.
nar-size-threshold = 64 * 1024; # 64 KiB
# The preferred minimum size of a chunk, in bytes
min-size = 16 * 1024; # 16 KiB
# The preferred average size of a chunk, in bytes
avg-size = 64 * 1024; # 64 KiB
# The preferred maximum size of a chunk, in bytes
max-size = 256 * 1024; # 256 KiB
};
};
# Render the settings above to a TOML file in the Nix store.
generatedConfig = (pkgs.formats.toml { }).generate "attic.toml" atticSettings;
in
{
# The rendered TOML is embedded verbatim and mounted into the pod below.
configMaps.config.data.config = builtins.readFile generatedConfig;
secrets = {
server.stringData.token = "ref+sops://secrets/kubernetes.yaml#attic/jwtToken";
database.stringData.password = "ref+sops://secrets/kubernetes.yaml#/attic/databasePassword";
};
deployments = {
# The atticd server itself.
attic.spec = {
selector.matchLabels = {
app = "attic";
component = "website";
};
template = {
metadata.labels = {
app = "attic";
component = "website";
};
spec = {
containers.attic = {
image = "git.kun.is/home/atticd:fd910d91c2143295e959d2c903e9ea25cf94ba27";
ports.web.containerPort = 8080;
args = [ "-f" "/etc/atticd/config.toml" ];
env.ATTIC_SERVER_TOKEN_HS256_SECRET_BASE64.valueFrom.secretKeyRef = {
name = "server";
key = "token";
};
volumeMounts = [
{
name = "data";
mountPath = "/var/lib/atticd/storage";
}
{
name = "config";
mountPath = "/etc/atticd/config.toml";
subPath = "config";
}
];
};
volumes = {
data.persistentVolumeClaim.claimName = "data";
config.configMap.name = "config";
};
# NOTE(review): fsGroup 0 presumably makes the Longhorn volume
# writable by the container's group — confirm the image's user.
securityContext = {
fsGroup = 0;
fsGroupChangePolicy = "OnRootMismatch";
};
};
};
};
# PostgreSQL backing database for attic.
attic-db.spec = {
selector.matchLabels = {
app = "attic";
component = "database";
};
template = {
metadata.labels = {
app = "attic";
component = "database";
};
spec = {
containers.postgres = {
image = "postgres:15";
imagePullPolicy = "IfNotPresent";
ports.postgres.containerPort = 5432;
env = {
POSTGRES_DB.value = "attic";
POSTGRES_USER.value = "attic";
# Keep the data directory in a subdirectory of the mount so
# postgres does not complain about lost+found in the volume root.
PGDATA.value = "/pgdata/data";
POSTGRES_PASSWORD.valueFrom.secretKeyRef = {
name = "database";
key = "password";
};
};
volumeMounts = [{
name = "data";
mountPath = "/pgdata";
}];
};
volumes.data.persistentVolumeClaim.claimName = "database";
};
};
};
};
services = {
# Website service fronted by the lab ingress below.
attic.spec = {
selector = {
app = "attic";
component = "website";
};
ports.web = {
port = 80;
targetPort = "web";
};
};
database.spec = {
selector = {
app = "attic";
component = "database";
};
ports.postgres = {
port = 5432;
targetPort = "postgres";
};
};
};
};
# Lab-level conveniences: public ingress plus Longhorn-backed PVCs.
lab = {
ingresses.attic = {
host = "attic.kun.is";
service = {
name = "attic";
portName = "web";
};
};
longhorn.persistentVolumeClaim = {
data = {
volumeName = "attic";
storage = "15Gi";
};
database = {
volumeName = "attic-db";
storage = "150Mi";
};
};
};
}

View file

@ -1,109 +0,0 @@
# Atuin shell-history sync server with its PostgreSQL database running as
# a second container in the same pod, exposed at atuin.kun.is.
{
kubernetes.resources = {
secrets.database.stringData = {
# Resolved at deploy time via Vals from the sops-encrypted secrets file.
databasePassword = "ref+sops://secrets/kubernetes.yaml#/atuin/databasePassword";
databaseURL = "ref+sops://secrets/kubernetes.yaml#/atuin/databaseURL";
};
deployments.server.spec = {
selector.matchLabels.app = "atuin";
# maxSurge 0 / maxUnavailable 1: replace the pod in place, presumably so
# two postgres containers never mount the same volume — TODO confirm.
strategy = {
type = "RollingUpdate";
rollingUpdate = {
maxSurge = 0;
maxUnavailable = 1;
};
};
template = {
metadata.labels.app = "atuin";
spec = {
volumes = {
data.persistentVolumeClaim.claimName = "data";
database.persistentVolumeClaim.claimName = "database";
};
containers = {
atuin = {
image = "ghcr.io/atuinsh/atuin:18.3.0";
imagePullPolicy = "Always";
ports.web.containerPort = 8888;
args = [ "server" "start" ];
env = {
ATUIN_HOST.value = "0.0.0.0";
ATUIN_PORT.value = "8888";
# Registration is closed; accounts must be created manually.
ATUIN_OPEN_REGISTRATION.value = "false";
ATUIN_DB_URI.valueFrom.secretKeyRef = {
name = "database";
key = "databaseURL";
};
};
volumeMounts = [{
name = "data";
mountPath = "/config";
}];
};
# Sidecar database; reachable from the atuin container on localhost.
database = {
image = "postgres:14";
ports.web.containerPort = 5432;
env = {
POSTGRES_DB.value = "atuin";
POSTGRES_USER.value = "atuin";
POSTGRES_PASSWORD.valueFrom.secretKeyRef = {
name = "database";
key = "databasePassword";
};
};
volumeMounts = [{
name = "database";
mountPath = "/var/lib/postgresql/data";
}];
};
};
};
};
};
services.server.spec = {
selector.app = "atuin";
ports.web = {
port = 80;
targetPort = "web";
};
};
};
# Lab-level conveniences: public ingress plus Longhorn-backed PVCs.
lab = {
ingresses.server = {
host = "atuin.kun.is";
service = {
name = "server";
portName = "web";
};
};
longhorn.persistentVolumeClaim = {
data = {
volumeName = "atuin";
storage = "300Mi";
};
database = {
volumeName = "atuin-db";
storage = "300Mi";
};
};
};
}

View file

@ -1,149 +0,0 @@
# Authoritative BIND9 DNS server for the kun.is zone. Runs two containers
# (UDP and TCP listeners) behind a pair of MetalLB LoadBalancer services
# that share the same dual-stack IPs via "allow-shared-ip".
{ myLib, dns, ... }:
let
# Render the zone definition (kun.is.zone.nix) to zone-file text.
kunisZone = dns.lib.toString "kun.is" (import ./kun.is.zone.nix myLib dns);
in
{
kubernetes.resources = {
configMaps = {
bind9-env.data.TZ = "Europe/Amsterdam";
bind9-config.data = {
# TODO: this was copied from nix's generated bind config
# Is there a way to generate this without actually running the nixos module?
config = ''
acl cachenetworks { 127.0.0.0/24; };
acl badnetworks { };
options {
listen-on { any; };
listen-on-v6 { any; };
allow-query { cachenetworks; };
blackhole { badnetworks; };
forward first;
forwarders { };
directory "/run/named";
pid-file "/run/named/named.pid";
allow-transfer { none; };
allow-recursion { none; };
version none;
notify no;
};
zone "kun.is" {
type master;
file "/etc/bind/kun.is.zone";
allow-transfer { };
allow-query { any; };
};
'';
kunis-zone = kunisZone;
};
};
deployments.bind9.spec = {
selector.matchLabels.app = "bind9";
template = {
metadata.labels.app = "bind9";
spec = {
# Two identical containers; they differ only in which protocol's
# port is exposed, matching the two services below.
containers = {
bind9-udp = {
image = "ubuntu/bind9:9.18-22.04_beta";
envFrom = [{ configMapRef.name = "bind9-env"; }];
ports.dns-udp = {
containerPort = 53;
protocol = "UDP";
};
volumeMounts = [
{
name = "config";
mountPath = "/etc/bind/named.conf";
subPath = "config";
}
{
name = "config";
mountPath = "/etc/bind/kun.is.zone";
subPath = "kunis-zone";
}
];
};
bind9-tcp = {
image = "ubuntu/bind9:9.18-22.04_beta";
envFrom = [{ configMapRef.name = "bind9-env"; }];
ports.dns-tcp = {
containerPort = 53;
protocol = "TCP";
};
volumeMounts = [
{
name = "config";
mountPath = "/etc/bind/named.conf";
subPath = "config";
}
{
name = "config";
mountPath = "/etc/bind/kun.is.zone";
subPath = "kunis-zone";
}
];
};
};
volumes = [{
name = "config";
configMap.name = "bind9-config";
}];
};
};
};
# NOTE(review): mixed capitalization "bind9IPv4" vs "bind9Ipv6" below —
# verify both attribute names actually exist in myLib.globals.
services = {
bind9-udp = {
metadata.annotations = {
# Both services pin the same dual-stack IPs and opt into sharing
# them, so UDP and TCP port 53 land on one address pair.
"metallb.universe.tf/loadBalancerIPs" = "${myLib.globals.bind9IPv4},${myLib.globals.bind9Ipv6}";
"metallb.universe.tf/allow-shared-ip" = "dns";
};
spec = {
type = "LoadBalancer";
selector.app = "bind9";
ipFamilies = [ "IPv4" "IPv6" ];
ipFamilyPolicy = "RequireDualStack";
ports.dns = {
port = 53;
targetPort = "dns-udp";
protocol = "UDP";
};
};
};
bind9-tcp = {
metadata.annotations = {
"metallb.universe.tf/loadBalancerIPs" = "${myLib.globals.bind9IPv4},${myLib.globals.bind9Ipv6}";
"metallb.universe.tf/allow-shared-ip" = "dns";
};
spec = {
type = "LoadBalancer";
selector.app = "bind9";
ipFamilies = [ "IPv4" "IPv6" ];
ipFamilyPolicy = "RequireDualStack";
ports.dns = {
port = 53;
targetPort = "dns-tcp";
protocol = "TCP";
};
};
};
};
};
}

View file

@ -1,52 +0,0 @@
# DNS zone definition for kun.is, built with the dns.nix combinator
# library. Called as (import ./kun.is.zone.nix myLib dns) and rendered to
# zone-file text by dns.lib.toString.
myLib: dns: with dns.lib.combinators; {
CAA = letsEncrypt "caa@kun.is";
SOA = {
nameServer = "ns1";
adminEmail = "webmaster.kun.is";
serial = 2024041301; # NOTE(review): looks like YYYYMMDDNN; bump on change.
};
NS = [
"ns1.kun.is."
"ns2.kun.is."
];
MX = [
(mx.mx 10 "mail.kun.is.")
];
TXT = [
(with spf; soft [ "include:spf.glasnet.nl" ])
];
subdomains = rec {
# Wildcard: any name without an explicit record resolves to the router.
"*".A = [ myLib.globals.routerPublicIPv4 ];
ns = {
A = [ myLib.globals.routerPublicIPv4 ];
AAAA = [ ]; # Explicitly no IPv6 address published.
};
# Both nameservers share the ns record above (rec makes this possible).
ns1 = ns;
ns2 = ns;
wg = {
A = [ myLib.globals.routerPublicIPv4 ];
AAAA = [ ];
};
# For SMTP2GO to be able to send emails from the kun.is domain.
em670271 = {
CNAME = [ "return.smtp2go.net." ];
};
"s670271._domainkey" = {
CNAME = [ "dkim.smtp2go.net." ];
};
link = {
CNAME = [ "track.smtp2go.net." ];
};
};
}

View file

@ -1,37 +0,0 @@
# Deploys the blog-pim static website (3 replicas) and exposes it through
# the lab ingress at pim.kun.is.
{ blog-pim, ... }:
let
  # Shared pod label used by the deployment selector, the pod template,
  # and the service selector.
  appLabel = { app = "blog"; };
in
{
  kubernetes.resources = {
    deployments.blog.spec = {
      replicas = 3;
      selector.matchLabels = appLabel;
      template = {
        metadata.labels = appLabel;
        spec.containers.blog = {
          # Image tag is pinned to the blog-pim flake input's git revision.
          image = "git.kun.is/home/blog-pim:${blog-pim.rev}";
          ports.web.containerPort = 80;
        };
      };
    };
    services.blog.spec = {
      selector = appLabel;
      ports.web = {
        port = 80;
        targetPort = "web";
      };
    };
  };
  lab.ingresses.blog = {
    host = "pim.kun.is";
    service = {
      name = "blog";
      portName = "web";
    };
  };
}

View file

@ -1,159 +0,0 @@
# Cluster-wide base layer: Helm releases (MetalLB, Longhorn), one
# namespace per application, node labels derived from the machines
# config, the Longhorn UI ingress, a nightly backup job, MetalLB address
# pools, NFS persistent volumes and all Longhorn volume sizes.
{ lib, nixhelm, system, machines, ... }: {
kubernetes = {
helm.releases = {
metallb = {
chart = nixhelm.chartsDerivations.${system}.metallb.metallb;
includeCRDs = true;
};
# argo-workflows = {
# chart = nixhelm.chartsDerivations.${system}.argoproj.argo-workflows;
# includeCRDs = true;
# };
longhorn = {
chart = nixhelm.chartsDerivations.${system}.longhorn.longhorn;
includeCRDs = true;
values = {
persistence.defaultClassReplicaCount = 2;
defaultSettings = {
defaultDataPath = "/mnt/longhorn";
storageMinimalAvailablePercentage = 0;
allowRecurringJobWhileVolumeDetached = true;
# Longhorn backups land on the lewis NFS export.
backupTarget = "nfs://lewis.dmz:/mnt/longhorn/persistent/longhorn-backup";
};
};
};
};
resources = {
# One namespace per deployed application.
namespaces = {
static-websites = { };
freshrss = { };
radicale = { };
kms = { };
atuin = { };
nextcloud = { };
hedgedoc = { };
kitchenowl = { };
forgejo = { };
paperless = { };
syncthing = { };
immich = { };
attic = { };
inbucket = { };
dns = { };
media = { };
minecraft = { };
tailscale = { };
};
# Apply the configured node labels to every machine that defines them.
nodes =
let
machinesWithKubernetesLabels = lib.filterAttrs (name: machine: machine.kubernetesNodeLabels != null) machines;
in
builtins.mapAttrs
(name: machine: {
metadata.labels = machine.kubernetesNodeLabels;
})
machinesWithKubernetesLabels;
# Ingress to the Longhorn web UI, restricted to the internal
# ("localsecure") Traefik entrypoint.
ingresses.longhorn = {
metadata.annotations = {
"cert-manager.io/cluster-issuer" = "letsencrypt";
"traefik.ingress.kubernetes.io/router.entrypoints" = "localsecure";
};
spec = {
ingressClassName = "traefik";
rules = [{
host = "longhorn.kun.is";
http.paths = [{
path = "/";
pathType = "Prefix";
backend.service = {
name = "longhorn-frontend";
port.number = 80;
};
}];
}];
tls = [{
secretName = "longhorn-tls";
hosts = [ "longhorn.kun.is" ];
}];
};
};
# Nightly Longhorn backup of all volumes tagged with this job.
recurringJobs.backup-nfs.spec = {
cron = "0 1 * * *"; # One o'clock at night
task = "backup";
retain = 2; # We don't need many, as we also make Borg backups.
concurrency = 1;
};
# MetalLB address ranges handed out to LoadBalancer services (v4 + v6).
ipAddressPools.main.spec.addresses = [ "192.168.30.128-192.168.30.200" "2a0d:6e00:1a77:30::2-2a0d:6e00:1a77:30:ffff:ffff:ffff:fffe" ];
l2Advertisements.main.metadata = { };
# Static NFS-backed persistent volumes served by lewis.dmz.
persistentVolumes = {
music-syncthing.spec = {
capacity.storage = "1Gi";
accessModes = [ "ReadWriteMany" ];
nfs = {
server = "lewis.dmz";
path = "/mnt/longhorn/persistent/media/music";
};
};
media-media.spec = {
capacity.storage = "1Gi";
accessModes = [ "ReadWriteMany" ];
nfs = {
server = "lewis.dmz";
path = "/mnt/longhorn/persistent/media";
};
};
};
};
};
# Longhorn volume sizes for every application volume in the cluster.
lab = {
longhorn.persistentVolume = {
freshrss.storage = "1Gi";
radicale.storage = "200Mi";
atuin.storage = "300Mi";
atuin-db.storage = "300Mi";
nextcloud.storage = "50Gi";
nextcloud-db.storage = "400Mi";
hedgedoc-uploads.storage = "50Mi";
hedgedoc-db.storage = "100Mi";
kitchenowl.storage = "100Mi";
forgejo.storage = "20Gi";
paperless-data.storage = "10Gi";
paperless-redisdata.storage = "20Mi";
paperless-db.storage = "150Mi";
syncthing.storage = "400Mi";
pihole-data.storage = "750Mi";
pihole-dnsmasq.storage = "16Mi";
immich.storage = "50Gi";
immich-db.storage = "5Gi";
attic.storage = "15Gi";
attic-db.storage = "150Mi";
jellyfin.storage = "5Gi";
transmission.storage = "25Mi";
jellyseerr.storage = "75Mi";
radarr.storage = "300Mi";
prowlarr.storage = "150Mi";
sonarr.storage = "150Mi";
bazarr.storage = "25Mi";
minecraft.storage = "1Gi";
};
};
}

View file

@ -1,5 +0,0 @@
# Aggregator module: pulls in the submodules of this directory
# (currently only cert-manager).
{ imports = [ ./cert-manager ]; }

View file

@ -1,36 +0,0 @@
# Installs cert-manager via its Helm chart (CRDs imported separately as
# plain manifests) and defines the "letsencrypt" ClusterIssuer used by
# the ingresses in this repository.
{ nixhelm, system, ... }: {
kubernetes = {
# TODO: These were copied from https://github.com/cert-manager/cert-manager/releases/download/v1.14.4/cert-manager.crds.yaml
# See https://cert-manager.io/docs/installation/helm/
# Seems kubenix cannot import a list of resources, only individual resources.
# Might be good to create a PR for this.
imports = [
./manifests/certificaterequest.yaml
./manifests/certificate.yaml
./manifests/challenge.yaml
./manifests/clusterissuer.yaml
./manifests/issuer.yaml
./manifests/order.yaml
];
helm.releases = {
cert-manager = {
chart = nixhelm.chartsDerivations.${system}.jetstack.cert-manager;
# CRDs come from the manifest imports above, not the chart.
includeCRDs = false;
namespace = "kube-system";
};
};
# Cluster-wide ACME issuer answering HTTP-01 challenges via Traefik.
resources.clusterIssuers.letsencrypt = {
spec.acme = {
server = "https://acme-v02.api.letsencrypt.org/directory";
email = "pim@kunis.nl";
privateKeySecretRef.name = "letsencrypt-private-key";
solvers = [{
selector = { };
http01.ingress.class = "traefik";
}];
};
};
};
}

View file

@ -1,443 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: certificates.cert-manager.io
labels:
app: 'cert-manager'
app.kubernetes.io/name: 'cert-manager'
app.kubernetes.io/instance: 'cert-manager'
# Generated labels
app.kubernetes.io/version: "v1.14.4"
spec:
group: cert-manager.io
names:
kind: Certificate
listKind: CertificateList
plural: certificates
shortNames:
- cert
- certs
singular: certificate
categories:
- cert-manager
scope: Namespaced
versions:
- name: v1
subresources:
status: {}
additionalPrinterColumns:
- jsonPath: .status.conditions[?(@.type=="Ready")].status
name: Ready
type: string
- jsonPath: .spec.secretName
name: Secret
type: string
- jsonPath: .spec.issuerRef.name
name: Issuer
priority: 1
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
priority: 1
type: string
- jsonPath: .metadata.creationTimestamp
description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.
name: Age
type: date
schema:
openAPIV3Schema:
description: "A Certificate resource should be created to ensure an up to date and signed X.509 certificate is stored in the Kubernetes Secret resource named in `spec.secretName`. \n The stored certificate will be renewed before it expires (as configured by `spec.renewBefore`)."
type: object
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Specification of the desired state of the Certificate resource. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
type: object
required:
- issuerRef
- secretName
properties:
additionalOutputFormats:
description: "Defines extra output formats of the private key and signed certificate chain to be written to this Certificate's target Secret. \n This is an Alpha Feature and is only enabled with the `--feature-gates=AdditionalCertificateOutputFormats=true` option set on both the controller and webhook components."
type: array
items:
description: CertificateAdditionalOutputFormat defines an additional output format of a Certificate resource. These contain supplementary data formats of the signed certificate chain and paired private key.
type: object
required:
- type
properties:
type:
description: Type is the name of the format type that should be written to the Certificate's target Secret.
type: string
enum:
- DER
- CombinedPEM
commonName:
description: "Requested common name X509 certificate subject attribute. More info: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 NOTE: TLS clients will ignore this value when any subject alternative name is set (see https://tools.ietf.org/html/rfc6125#section-6.4.4). \n Should have a length of 64 characters or fewer to avoid generating invalid CSRs. Cannot be set if the `literalSubject` field is set."
type: string
dnsNames:
description: Requested DNS subject alternative names.
type: array
items:
type: string
duration:
description: "Requested 'duration' (i.e. lifetime) of the Certificate. Note that the issuer may choose to ignore the requested duration, just like any other requested attribute. \n If unset, this defaults to 90 days. Minimum accepted duration is 1 hour. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration."
type: string
emailAddresses:
description: Requested email subject alternative names.
type: array
items:
type: string
encodeUsagesInRequest:
description: "Whether the KeyUsage and ExtKeyUsage extensions should be set in the encoded CSR. \n This option defaults to true, and should only be disabled if the target issuer does not support CSRs with these X509 KeyUsage/ ExtKeyUsage extensions."
type: boolean
ipAddresses:
description: Requested IP address subject alternative names.
type: array
items:
type: string
isCA:
description: "Requested basic constraints isCA value. The isCA value is used to set the `isCA` field on the created CertificateRequest resources. Note that the issuer may choose to ignore the requested isCA value, just like any other requested attribute. \n If true, this will automatically add the `cert sign` usage to the list of requested `usages`."
type: boolean
issuerRef:
description: "Reference to the issuer responsible for issuing the certificate. If the issuer is namespace-scoped, it must be in the same namespace as the Certificate. If the issuer is cluster-scoped, it can be used from any namespace. \n The `name` field of the reference must always be specified."
type: object
required:
- name
properties:
group:
description: Group of the resource being referred to.
type: string
kind:
description: Kind of the resource being referred to.
type: string
name:
description: Name of the resource being referred to.
type: string
keystores:
description: Additional keystore output formats to be stored in the Certificate's Secret.
type: object
properties:
jks:
description: JKS configures options for storing a JKS keystore in the `spec.secretName` Secret resource.
type: object
required:
- create
- passwordSecretRef
properties:
create:
description: Create enables JKS keystore creation for the Certificate. If true, a file named `keystore.jks` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will be updated immediately. If the issuer provided a CA certificate, a file named `truststore.jks` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority
type: boolean
passwordSecretRef:
description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the JKS keystore.
type: object
required:
- name
properties:
key:
description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
type: string
name:
description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
pkcs12:
description: PKCS12 configures options for storing a PKCS12 keystore in the `spec.secretName` Secret resource.
type: object
required:
- create
- passwordSecretRef
properties:
create:
description: Create enables PKCS12 keystore creation for the Certificate. If true, a file named `keystore.p12` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will be updated immediately. If the issuer provided a CA certificate, a file named `truststore.p12` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority
type: boolean
passwordSecretRef:
description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the PKCS12 keystore.
type: object
required:
- name
properties:
key:
description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required.
type: string
name:
description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
profile:
description: "Profile specifies the key and certificate encryption algorithms and the HMAC algorithm used to create the PKCS12 keystore. Default value is `LegacyRC2` for backward compatibility. \n If provided, allowed values are: `LegacyRC2`: Deprecated. Not supported by default in OpenSSL 3 or Java 20. `LegacyDES`: Less secure algorithm. Use this option for maximal compatibility. `Modern2023`: Secure algorithm. Use this option in case you have to always use secure algorithms (eg. because of company policy). Please note that the security of the algorithm is not that important in reality, because the unencrypted certificate and private key are also stored in the Secret."
type: string
enum:
- LegacyRC2
- LegacyDES
- Modern2023
literalSubject:
description: "Requested X.509 certificate subject, represented using the LDAP \"String Representation of a Distinguished Name\" [1]. Important: the LDAP string format also specifies the order of the attributes in the subject, this is important when issuing certs for LDAP authentication. Example: `CN=foo,DC=corp,DC=example,DC=com` More info [1]: https://datatracker.ietf.org/doc/html/rfc4514 More info: https://github.com/cert-manager/cert-manager/issues/3203 More info: https://github.com/cert-manager/cert-manager/issues/4424 \n Cannot be set if the `subject` or `commonName` field is set. This is an Alpha Feature and is only enabled with the `--feature-gates=LiteralCertificateSubject=true` option set on both the controller and webhook components."
type: string
nameConstraints:
description: "x.509 certificate NameConstraint extension which MUST NOT be used in a non-CA certificate. More Info: https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.10 \n This is an Alpha Feature and is only enabled with the `--feature-gates=NameConstraints=true` option set on both the controller and webhook components."
type: object
properties:
critical:
description: if true then the name constraints are marked critical.
type: boolean
excluded:
description: Excluded contains the constraints which must be disallowed. Any name matching a restriction in the excluded field is invalid regardless of information appearing in the permitted
type: object
properties:
dnsDomains:
description: DNSDomains is a list of DNS domains that are permitted or excluded.
type: array
items:
type: string
emailAddresses:
description: EmailAddresses is a list of Email Addresses that are permitted or excluded.
type: array
items:
type: string
ipRanges:
description: IPRanges is a list of IP Ranges that are permitted or excluded. This should be a valid CIDR notation.
type: array
items:
type: string
uriDomains:
description: URIDomains is a list of URI domains that are permitted or excluded.
type: array
items:
type: string
permitted:
description: Permitted contains the constraints in which the names must be located.
type: object
properties:
dnsDomains:
description: DNSDomains is a list of DNS domains that are permitted or excluded.
type: array
items:
type: string
emailAddresses:
description: EmailAddresses is a list of Email Addresses that are permitted or excluded.
type: array
items:
type: string
ipRanges:
description: IPRanges is a list of IP Ranges that are permitted or excluded. This should be a valid CIDR notation.
type: array
items:
type: string
uriDomains:
description: URIDomains is a list of URI domains that are permitted or excluded.
type: array
items:
type: string
otherNames:
description: '`otherNames` is an escape hatch for SAN that allows any type. We currently restrict the support to string like otherNames, cf RFC 5280 p 37 Any UTF8 String valued otherName can be passed with by setting the keys oid: x.x.x.x and UTF8Value: somevalue for `otherName`. Most commonly this would be UPN set with oid: 1.3.6.1.4.1.311.20.2.3 You should ensure that any OID passed is valid for the UTF8String type as we do not explicitly validate this.'
type: array
items:
type: object
properties:
oid:
description: OID is the object identifier for the otherName SAN. The object identifier must be expressed as a dotted string, for example, "1.2.840.113556.1.4.221".
type: string
utf8Value:
description: utf8Value is the string value of the otherName SAN. The utf8Value accepts any valid UTF8 string to set as value for the otherName SAN.
type: string
privateKey:
description: Private key options. These include the key algorithm and size, the used encoding and the rotation policy.
type: object
properties:
algorithm:
description: "Algorithm is the private key algorithm of the corresponding private key for this certificate. \n If provided, allowed values are either `RSA`, `ECDSA` or `Ed25519`. If `algorithm` is specified and `size` is not provided, key size of 2048 will be used for `RSA` key algorithm and key size of 256 will be used for `ECDSA` key algorithm. key size is ignored when using the `Ed25519` key algorithm."
type: string
enum:
- RSA
- ECDSA
- Ed25519
encoding:
description: "The private key cryptography standards (PKCS) encoding for this certificate's private key to be encoded in. \n If provided, allowed values are `PKCS1` and `PKCS8` standing for PKCS#1 and PKCS#8, respectively. Defaults to `PKCS1` if not specified."
type: string
enum:
- PKCS1
- PKCS8
rotationPolicy:
description: "RotationPolicy controls how private keys should be regenerated when a re-issuance is being processed. \n If set to `Never`, a private key will only be generated if one does not already exist in the target `spec.secretName`. If one does exists but it does not have the correct algorithm or size, a warning will be raised to await user intervention. If set to `Always`, a private key matching the specified requirements will be generated whenever a re-issuance occurs. Default is `Never` for backward compatibility."
type: string
enum:
- Never
- Always
size:
description: "Size is the key bit size of the corresponding private key for this certificate. \n If `algorithm` is set to `RSA`, valid values are `2048`, `4096` or `8192`, and will default to `2048` if not specified. If `algorithm` is set to `ECDSA`, valid values are `256`, `384` or `521`, and will default to `256` if not specified. If `algorithm` is set to `Ed25519`, Size is ignored. No other values are allowed."
type: integer
renewBefore:
description: "How long before the currently issued certificate's expiry cert-manager should renew the certificate. For example, if a certificate is valid for 60 minutes, and `renewBefore=10m`, cert-manager will begin to attempt to renew the certificate 50 minutes after it was issued (i.e. when there are 10 minutes remaining until the certificate is no longer valid). \n NOTE: The actual lifetime of the issued certificate is used to determine the renewal time. If an issuer returns a certificate with a different lifetime than the one requested, cert-manager will use the lifetime of the issued certificate. \n If unset, this defaults to 1/3 of the issued certificate's lifetime. Minimum accepted value is 5 minutes. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration."
type: string
revisionHistoryLimit:
description: "The maximum number of CertificateRequest revisions that are maintained in the Certificate's history. Each revision represents a single `CertificateRequest` created by this Certificate, either when it was created, renewed, or Spec was changed. Revisions will be removed by oldest first if the number of revisions exceeds this number. \n If set, revisionHistoryLimit must be a value of `1` or greater. If unset (`nil`), revisions will not be garbage collected. Default value is `nil`."
type: integer
format: int32
secretName:
description: Name of the Secret resource that will be automatically created and managed by this Certificate resource. It will be populated with a private key and certificate, signed by the denoted issuer. The Secret resource lives in the same namespace as the Certificate resource.
type: string
secretTemplate:
description: Defines annotations and labels to be copied to the Certificate's Secret. Labels and annotations on the Secret will be changed as they appear on the SecretTemplate when added or removed. SecretTemplate annotations are added in conjunction with, and cannot overwrite, the base set of annotations cert-manager sets on the Certificate's Secret.
type: object
properties:
annotations:
description: Annotations is a key value map to be copied to the target Kubernetes Secret.
type: object
additionalProperties:
type: string
labels:
description: Labels is a key value map to be copied to the target Kubernetes Secret.
type: object
additionalProperties:
type: string
subject:
description: "Requested set of X509 certificate subject attributes. More info: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 \n The common name attribute is specified separately in the `commonName` field. Cannot be set if the `literalSubject` field is set."
type: object
properties:
countries:
description: Countries to be used on the Certificate.
type: array
items:
type: string
localities:
description: Cities to be used on the Certificate.
type: array
items:
type: string
organizationalUnits:
description: Organizational Units to be used on the Certificate.
type: array
items:
type: string
organizations:
description: Organizations to be used on the Certificate.
type: array
items:
type: string
postalCodes:
description: Postal codes to be used on the Certificate.
type: array
items:
type: string
provinces:
description: State/Provinces to be used on the Certificate.
type: array
items:
type: string
serialNumber:
description: Serial number to be used on the Certificate.
type: string
streetAddresses:
description: Street addresses to be used on the Certificate.
type: array
items:
type: string
uris:
description: Requested URI subject alternative names.
type: array
items:
type: string
usages:
description: "Requested key usages and extended key usages. These usages are used to set the `usages` field on the created CertificateRequest resources. If `encodeUsagesInRequest` is unset or set to `true`, the usages will additionally be encoded in the `request` field which contains the CSR blob. \n If unset, defaults to `digital signature` and `key encipherment`."
type: array
items:
description: "KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 \n Valid KeyUsage values are as follows: \"signing\", \"digital signature\", \"content commitment\", \"key encipherment\", \"key agreement\", \"data encipherment\", \"cert sign\", \"crl sign\", \"encipher only\", \"decipher only\", \"any\", \"server auth\", \"client auth\", \"code signing\", \"email protection\", \"s/mime\", \"ipsec end system\", \"ipsec tunnel\", \"ipsec user\", \"timestamping\", \"ocsp signing\", \"microsoft sgc\", \"netscape sgc\""
type: string
enum:
- signing
- digital signature
- content commitment
- key encipherment
- key agreement
- data encipherment
- cert sign
- crl sign
- encipher only
- decipher only
- any
- server auth
- client auth
- code signing
- email protection
- s/mime
- ipsec end system
- ipsec tunnel
- ipsec user
- timestamping
- ocsp signing
- microsoft sgc
- netscape sgc
status:
description: 'Status of the Certificate. This is set and managed automatically. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status'
type: object
properties:
conditions:
description: List of status conditions to indicate the status of certificates. Known condition types are `Ready` and `Issuing`.
type: array
items:
description: CertificateCondition contains condition information for an Certificate.
type: object
required:
- status
- type
properties:
lastTransitionTime:
description: LastTransitionTime is the timestamp corresponding to the last status change of this condition.
type: string
format: date-time
message:
description: Message is a human readable description of the details of the last transition, complementing reason.
type: string
observedGeneration:
description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Certificate.
type: integer
format: int64
reason:
description: Reason is a brief machine readable explanation for the condition's last transition.
type: string
status:
description: Status of the condition, one of (`True`, `False`, `Unknown`).
type: string
enum:
- "True"
- "False"
- Unknown
type:
description: Type of the condition, known values are (`Ready`, `Issuing`).
type: string
x-kubernetes-list-map-keys:
- type
x-kubernetes-list-type: map
failedIssuanceAttempts:
description: The number of continuous failed issuance attempts up till now. This field gets removed (if set) on a successful issuance and gets set to 1 if unset and an issuance has failed. If an issuance has failed, the delay till the next issuance will be calculated using formula time.Hour * 2 ^ (failedIssuanceAttempts - 1).
type: integer
lastFailureTime:
description: LastFailureTime is set only if the latest issuance for this Certificate failed and contains the time of the failure. If an issuance has failed, the delay till the next issuance will be calculated using formula time.Hour * 2 ^ (failedIssuanceAttempts - 1). If the latest issuance has succeeded this field will be unset.
type: string
format: date-time
nextPrivateKeySecretName:
description: The name of the Secret resource containing the private key to be used for the next certificate iteration. The keymanager controller will automatically set this field if the `Issuing` condition is set to `True`. It will automatically unset this field when the Issuing condition is not set or False.
type: string
notAfter:
description: The expiration time of the certificate stored in the secret named by this resource in `spec.secretName`.
type: string
format: date-time
notBefore:
description: The time after which the certificate stored in the secret named by this resource in `spec.secretName` is valid.
type: string
format: date-time
renewalTime:
description: RenewalTime is the time at which the certificate will be next renewed. If not set, no upcoming renewal is scheduled.
type: string
format: date-time
revision:
description: "The current 'revision' of the certificate as issued. \n When a CertificateRequest resource is created, it will have the `cert-manager.io/certificate-revision` set to one greater than the current value of this field. \n Upon issuance, this field will be set to the value of the annotation on the CertificateRequest resource used to issue the certificate. \n Persisting the value on the CertificateRequest resource allows the certificates controller to know whether a request is part of an old issuance or if it is part of the ongoing revision's issuance by checking if the revision value in the annotation is greater than this field."
type: integer
served: true
storage: true

View file

@ -1,196 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: certificaterequests.cert-manager.io
labels:
app: 'cert-manager'
app.kubernetes.io/name: 'cert-manager'
app.kubernetes.io/instance: 'cert-manager'
# Generated labels
app.kubernetes.io/version: "v1.14.4"
spec:
group: cert-manager.io
names:
kind: CertificateRequest
listKind: CertificateRequestList
plural: certificaterequests
shortNames:
- cr
- crs
singular: certificaterequest
categories:
- cert-manager
scope: Namespaced
versions:
- name: v1
subresources:
status: {}
additionalPrinterColumns:
- jsonPath: .status.conditions[?(@.type=="Approved")].status
name: Approved
type: string
- jsonPath: .status.conditions[?(@.type=="Denied")].status
name: Denied
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].status
name: Ready
type: string
- jsonPath: .spec.issuerRef.name
name: Issuer
type: string
- jsonPath: .spec.username
name: Requestor
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
priority: 1
type: string
- jsonPath: .metadata.creationTimestamp
description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.
name: Age
type: date
schema:
openAPIV3Schema:
description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `Ready` status condition and its `status.failureTime` field. \n A CertificateRequest is a one-shot resource, meaning it represents a single point in time request for a certificate and cannot be re-used."
type: object
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Specification of the desired state of the CertificateRequest resource. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
type: object
required:
- issuerRef
- request
properties:
duration:
description: Requested 'duration' (i.e. lifetime) of the Certificate. Note that the issuer may choose to ignore the requested duration, just like any other requested attribute.
type: string
extra:
description: Extra contains extra attributes of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable.
type: object
additionalProperties:
type: array
items:
type: string
groups:
description: Groups contains group membership of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable.
type: array
items:
type: string
x-kubernetes-list-type: atomic
isCA:
description: "Requested basic constraints isCA value. Note that the issuer may choose to ignore the requested isCA value, just like any other requested attribute. \n NOTE: If the CSR in the `Request` field has a BasicConstraints extension, it must have the same isCA value as specified here. \n If true, this will automatically add the `cert sign` usage to the list of requested `usages`."
type: boolean
issuerRef:
description: "Reference to the issuer responsible for issuing the certificate. If the issuer is namespace-scoped, it must be in the same namespace as the Certificate. If the issuer is cluster-scoped, it can be used from any namespace. \n The `name` field of the reference must always be specified."
type: object
required:
- name
properties:
group:
description: Group of the resource being referred to.
type: string
kind:
description: Kind of the resource being referred to.
type: string
name:
description: Name of the resource being referred to.
type: string
request:
description: "The PEM-encoded X.509 certificate signing request to be submitted to the issuer for signing. \n If the CSR has a BasicConstraints extension, its isCA attribute must match the `isCA` value of this CertificateRequest. If the CSR has a KeyUsage extension, its key usages must match the key usages in the `usages` field of this CertificateRequest. If the CSR has a ExtKeyUsage extension, its extended key usages must match the extended key usages in the `usages` field of this CertificateRequest."
type: string
format: byte
uid:
description: UID contains the uid of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable.
type: string
usages:
description: "Requested key usages and extended key usages. \n NOTE: If the CSR in the `Request` field has uses the KeyUsage or ExtKeyUsage extension, these extensions must have the same values as specified here without any additional values. \n If unset, defaults to `digital signature` and `key encipherment`."
type: array
items:
description: "KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 \n Valid KeyUsage values are as follows: \"signing\", \"digital signature\", \"content commitment\", \"key encipherment\", \"key agreement\", \"data encipherment\", \"cert sign\", \"crl sign\", \"encipher only\", \"decipher only\", \"any\", \"server auth\", \"client auth\", \"code signing\", \"email protection\", \"s/mime\", \"ipsec end system\", \"ipsec tunnel\", \"ipsec user\", \"timestamping\", \"ocsp signing\", \"microsoft sgc\", \"netscape sgc\""
type: string
enum:
- signing
- digital signature
- content commitment
- key encipherment
- key agreement
- data encipherment
- cert sign
- crl sign
- encipher only
- decipher only
- any
- server auth
- client auth
- code signing
- email protection
- s/mime
- ipsec end system
- ipsec tunnel
- ipsec user
- timestamping
- ocsp signing
- microsoft sgc
- netscape sgc
username:
description: Username contains the name of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable.
type: string
status:
description: 'Status of the CertificateRequest. This is set and managed automatically. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status'
type: object
properties:
ca:
description: The PEM encoded X.509 certificate of the signer, also known as the CA (Certificate Authority). This is set on a best-effort basis by different issuers. If not set, the CA is assumed to be unknown/not available.
type: string
format: byte
certificate:
description: The PEM encoded X.509 certificate resulting from the certificate signing request. If not set, the CertificateRequest has either not been completed or has failed. More information on failure can be found by checking the `conditions` field.
type: string
format: byte
conditions:
description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`, `InvalidRequest`, `Approved` and `Denied`.
type: array
items:
description: CertificateRequestCondition contains condition information for a CertificateRequest.
type: object
required:
- status
- type
properties:
lastTransitionTime:
description: LastTransitionTime is the timestamp corresponding to the last status change of this condition.
type: string
format: date-time
message:
description: Message is a human readable description of the details of the last transition, complementing reason.
type: string
reason:
description: Reason is a brief machine readable explanation for the condition's last transition.
type: string
status:
description: Status of the condition, one of (`True`, `False`, `Unknown`).
type: string
enum:
- "True"
- "False"
- Unknown
type:
description: Type of the condition, known values are (`Ready`, `InvalidRequest`, `Approved`, `Denied`).
type: string
x-kubernetes-list-map-keys:
- type
x-kubernetes-list-type: map
failureTime:
description: FailureTime stores the time that this CertificateRequest failed. This is used to influence garbage collection and back-off.
type: string
format: date-time
served: true
storage: true

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -1,180 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: orders.acme.cert-manager.io
labels:
app: 'cert-manager'
app.kubernetes.io/name: 'cert-manager'
app.kubernetes.io/instance: 'cert-manager'
# Generated labels
app.kubernetes.io/version: "v1.14.4"
spec:
group: acme.cert-manager.io
names:
kind: Order
listKind: OrderList
plural: orders
singular: order
categories:
- cert-manager
- cert-manager-acme
scope: Namespaced
versions:
- name: v1
subresources:
status: {}
additionalPrinterColumns:
- jsonPath: .status.state
name: State
type: string
- jsonPath: .spec.issuerRef.name
name: Issuer
priority: 1
type: string
- jsonPath: .status.reason
name: Reason
priority: 1
type: string
- jsonPath: .metadata.creationTimestamp
description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.
name: Age
type: date
schema:
openAPIV3Schema:
description: Order is a type to represent an Order with an ACME server
type: object
required:
- metadata
- spec
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
type: object
required:
- issuerRef
- request
properties:
commonName:
description: CommonName is the common name as specified on the DER encoded CSR. If specified, this value must also be present in `dnsNames` or `ipAddresses`. This field must match the corresponding field on the DER encoded CSR.
type: string
dnsNames:
description: DNSNames is a list of DNS names that should be included as part of the Order validation process. This field must match the corresponding field on the DER encoded CSR.
type: array
items:
type: string
duration:
description: Duration is the duration for the not after date for the requested certificate. This is set on order creation as per the ACME spec.
type: string
ipAddresses:
description: IPAddresses is a list of IP addresses that should be included as part of the Order validation process. This field must match the corresponding field on the DER encoded CSR.
type: array
items:
type: string
issuerRef:
description: IssuerRef references a properly configured ACME-type Issuer which should be used to create this Order. If the Issuer does not exist, processing will be retried. If the Issuer is not an 'ACME' Issuer, an error will be returned and the Order will be marked as failed.
type: object
required:
- name
properties:
group:
description: Group of the resource being referred to.
type: string
kind:
description: Kind of the resource being referred to.
type: string
name:
description: Name of the resource being referred to.
type: string
request:
description: Certificate signing request bytes in DER encoding. This will be used when finalizing the order. This field must be set on the order.
type: string
format: byte
status:
type: object
properties:
authorizations:
description: Authorizations contains data returned from the ACME server on what authorizations must be completed in order to validate the DNS names specified on the Order.
type: array
items:
description: ACMEAuthorization contains data returned from the ACME server on an authorization that must be completed in order to validate a DNS name on an ACME Order resource.
type: object
required:
- url
properties:
challenges:
description: Challenges specifies the challenge types offered by the ACME server. One of these challenge types will be selected when validating the DNS name and an appropriate Challenge resource will be created to perform the ACME challenge process.
type: array
items:
description: Challenge specifies a challenge offered by the ACME server for an Order. An appropriate Challenge resource can be created to perform the ACME challenge process.
type: object
required:
- token
- type
- url
properties:
token:
description: Token is the token that must be presented for this challenge. This is used to compute the 'key' that must also be presented.
type: string
type:
description: Type is the type of challenge being offered, e.g. 'http-01', 'dns-01', 'tls-sni-01', etc. This is the raw value retrieved from the ACME server. Only 'http-01' and 'dns-01' are supported by cert-manager, other values will be ignored.
type: string
url:
description: URL is the URL of this challenge. It can be used to retrieve additional metadata about the Challenge from the ACME server.
type: string
identifier:
description: Identifier is the DNS name to be validated as part of this authorization
type: string
initialState:
description: InitialState is the initial state of the ACME authorization when first fetched from the ACME server. If an Authorization is already 'valid', the Order controller will not create a Challenge resource for the authorization. This will occur when working with an ACME server that enables 'authz reuse' (such as Let's Encrypt's production endpoint). If not set and 'identifier' is set, the state is assumed to be pending and a Challenge will be created.
type: string
enum:
- valid
- ready
- pending
- processing
- invalid
- expired
- errored
url:
description: URL is the URL of the Authorization that must be completed
type: string
wildcard:
description: Wildcard will be true if this authorization is for a wildcard DNS name. If this is true, the identifier will be the *non-wildcard* version of the DNS name. For example, if '*.example.com' is the DNS name being validated, this field will be 'true' and the 'identifier' field will be 'example.com'.
type: boolean
certificate:
description: Certificate is a copy of the PEM encoded certificate for this Order. This field will be populated after the order has been successfully finalized with the ACME server, and the order has transitioned to the 'valid' state.
type: string
format: byte
failureTime:
description: FailureTime stores the time that this order failed. This is used to influence garbage collection and back-off.
type: string
format: date-time
finalizeURL:
description: FinalizeURL of the Order. This is used to obtain certificates for this order once it has been completed.
type: string
reason:
description: Reason optionally provides more information about why the order is in the current state.
type: string
state:
description: State contains the current state of this Order resource. States 'success' and 'expired' are 'final'
type: string
enum:
- valid
- ready
- pending
- processing
- invalid
- expired
- errored
url:
description: URL of the Order. This will initially be empty when the resource is first created. The Order controller will populate this field when the Order is first processed. This field will be immutable after it is initially set.
type: string
served: true
storage: true

View file

@ -1,42 +0,0 @@
# Registers dummy custom resource type definitions with kubenix so that
# resources of these kinds can be declared from Nix modules.
{
  kubernetes.customTypes = {
    # HACK: These are dummy custom types.
    # This is needed, because the CRDs imported as a chart are not available as Nix modules.
    # There is no nix-based validation on resources defined using these types!
    # See: https://github.com/hall/kubenix/issues/34

    # MetalLB pool of IP addresses handed out to LoadBalancer services.
    ipAddressPool = {
      attrName = "ipAddressPools";
      group = "metallb.io";
      version = "v1beta1";
      kind = "IPAddressPool";
    };
    # MetalLB layer-2 advertisement of the address pools.
    l2Advertisement = {
      attrName = "l2Advertisements";
      group = "metallb.io";
      version = "v1beta1";
      kind = "L2Advertisement";
    };
    # k3s Helm controller per-chart configuration override.
    helmChartConfig = {
      attrName = "helmChartConfigs";
      group = "helm.cattle.io";
      version = "v1";
      kind = "HelmChartConfig";
    };
    # cert-manager cluster-wide certificate issuer.
    clusterIssuer = {
      attrName = "clusterIssuers";
      group = "cert-manager.io";
      version = "v1";
      kind = "ClusterIssuer";
    };
    # Longhorn recurring snapshot/backup job.
    recurringJob = {
      attrName = "recurringJobs";
      group = "longhorn.io";
      version = "v1beta1";
      kind = "RecurringJob";
    };
  };
}

View file

@ -1,7 +0,0 @@
# Aggregator for the shared lab option modules: public ingresses,
# Longhorn volumes/claims, and Tailscale-only ingresses.
{
  imports = [
    ./ingress.nix
    ./longhorn-volume.nix
    ./tailscale.nix
  ];
}

View file

@ -1,67 +0,0 @@
# Declares the `lab.ingresses` option and generates one Traefik Kubernetes
# Ingress (with a cert-manager TLS certificate) per declared entry.
{ lib, config, ... }:
let
  # Option schema for a single publicly exposed ingress.
  ingressOpts = { name, ... }: {
    options = {
      host = lib.mkOption {
        type = lib.types.str;
        description = "Fully qualified domain name the ingress serves.";
      };
      entrypoint = lib.mkOption {
        type = lib.types.str;
        default = "websecure";
        description = "Traefik entrypoint to expose the ingress on.";
      };
      service = {
        name = lib.mkOption {
          type = lib.types.str;
          description = "Name of the Kubernetes service traffic is routed to.";
        };
        portName = lib.mkOption {
          type = lib.types.str;
          description = "Named port on the backend service.";
        };
      };
    };
  };
in
{
  options = {
    lab.ingresses = lib.mkOption {
      type = with lib.types; attrsOf (submodule ingressOpts);
      default = { };
      description = "Publicly exposed ingresses served by Traefik with Let's Encrypt TLS.";
    };
  };
  config = {
    # One Kubernetes Ingress resource per entry in lab.ingresses.
    kubernetes.resources.ingresses = builtins.mapAttrs
      (name: ingress: {
        metadata.annotations = {
          # Ask cert-manager to issue a certificate via the "letsencrypt" ClusterIssuer.
          "cert-manager.io/cluster-issuer" = "letsencrypt";
          "traefik.ingress.kubernetes.io/router.entrypoints" = ingress.entrypoint;
        };
        spec = {
          ingressClassName = "traefik";
          rules = [{
            host = ingress.host;
            http.paths = [{
              path = "/";
              pathType = "Prefix";
              backend.service = {
                name = ingress.service.name;
                port.name = ingress.service.portName;
              };
            }];
          }];
          tls = [{
            # cert-manager stores the issued certificate in this secret.
            secretName = "${name}-tls";
            hosts = [ ingress.host ];
          }];
        };
      })
      config.lab.ingresses;
  };
}

View file

@ -1,149 +0,0 @@
# Declares Longhorn-backed storage options and generates the matching
# PersistentVolume / PersistentVolumeClaim resources.
#
# Two styles are supported:
#   - lab.longhornVolumes: legacy combined PV+PVC with an explicit claimRef;
#   - lab.longhorn.persistentVolume / .persistentVolumeClaim: split declarations.
{ lib, config, ... }:
let
  longhornVolumeOpts = { name, ... }: {
    options = {
      storage = lib.mkOption {
        type = lib.types.str;
        description = "Requested capacity, e.g. \"20Gi\".";
      };
      namespace = lib.mkOption {
        type = lib.types.str;
        default = "default";
        description = "Namespace of the claim bound to this volume.";
      };
    };
  };
  longhornPVOpts = { name, ... }: {
    options = {
      storage = lib.mkOption {
        type = lib.types.str;
        description = "Requested capacity, e.g. \"20Gi\".";
      };
    };
  };
  longhornPVCOpts = { name, ... }: {
    options = {
      volumeName = lib.mkOption {
        type = lib.types.str;
        default = name;
        description = "Name of the PersistentVolume to bind to.";
      };
      # TODO: ideally we take this from the longhornPV so we don't duplicate this information.
      storage = lib.mkOption {
        type = lib.types.str;
        description = "Requested capacity; must match the PersistentVolume.";
      };
    };
  };

  # Longhorn CSI attachment shared by every generated PersistentVolume.
  # Previously duplicated verbatim for both option sets; factored out here.
  mkCsiSpec = name: {
    driver = "driver.longhorn.io";
    fsType = "ext4";
    volumeHandle = name;
    volumeAttributes = {
      dataLocality = "disabled";
      fromBackup = "";
      fsType = "ext4";
      numberOfReplicas = "2";
      staleReplicaTimeout = "30";
      unmapMarkSnapChainRemoved = "ignored";
      # Attach the "backup-nfs" recurring job to the volume.
      recurringJobSelector = lib.generators.toYAML { } [{
        name = "backup-nfs";
        isGroup = false;
      }];
    };
  };

  # Base PersistentVolume spec of the given capacity.
  mkPVSpec = name: storage: {
    accessModes = [ "ReadWriteOnce" ];
    capacity.storage = storage;
    persistentVolumeReclaimPolicy = "Delete";
    volumeMode = "Filesystem";
    csi = mkCsiSpec name;
  };

  # Base PersistentVolumeClaim spec of the given capacity.
  # storageClassName = "" opts out of dynamic provisioning (static binding).
  mkPVCSpec = storage: {
    accessModes = [ "ReadWriteOnce" ];
    resources.requests.storage = storage;
    storageClassName = "";
  };
in
{
  options = {
    lab.longhornVolumes = lib.mkOption {
      type = with lib.types; attrsOf (submodule longhornVolumeOpts);
      default = { };
    };
    lab.longhorn = {
      persistentVolume = lib.mkOption {
        type = with lib.types; attrsOf (submodule longhornPVOpts);
        default = { };
      };
      persistentVolumeClaim = lib.mkOption {
        type = with lib.types; attrsOf (submodule longhornPVCOpts);
        default = { };
      };
    };
  };
  config = {
    kubernetes.resources = {
      persistentVolumes = lib.mergeAttrs
        (builtins.mapAttrs
          (name: longhornVolume: {
            # Legacy style: pre-bind the volume to a claim of the same name.
            spec = mkPVSpec name longhornVolume.storage // {
              claimRef = {
                inherit name;
                namespace = longhornVolume.namespace;
              };
            };
          })
          config.lab.longhornVolumes)
        (builtins.mapAttrs
          (name: longhornPV: {
            spec = mkPVSpec name longhornPV.storage;
          })
          config.lab.longhorn.persistentVolume);
      persistentVolumeClaims = lib.mergeAttrs
        (builtins.mapAttrs
          (name: longhornVolume: {
            spec = mkPVCSpec longhornVolume.storage;
          })
          config.lab.longhornVolumes)
        (builtins.mapAttrs
          (name: longhornPVC: {
            spec = mkPVCSpec longhornPVC.storage // {
              volumeName = longhornPVC.volumeName;
            };
          })
          config.lab.longhorn.persistentVolumeClaim);
    };
  };
}

View file

@ -1,50 +0,0 @@
# Declares the `lab.tailscaleIngresses` option and generates one Kubernetes
# Ingress per entry, handled by the Tailscale ingress controller (tailnet-only).
{ lib, config, ... }:
let
  # Option schema for a single Tailscale-only ingress.
  tailscaleIngressOpts = {
    options = {
      host = lib.mkOption {
        type = lib.types.str;
        description = "Hostname to serve on the tailnet.";
      };
      service = {
        name = lib.mkOption {
          type = lib.types.str;
          description = "Name of the Kubernetes service traffic is routed to.";
        };
        portName = lib.mkOption {
          type = lib.types.str;
          default = "web";
          description = "Named port on the backend service.";
        };
      };
    };
  };

  # Build the Ingress resource for one declared entry.
  mkTailscaleIngress = name: { host, service }: {
    spec = {
      ingressClassName = "tailscale";
      rules = [{
        http.paths = [{
          path = "/";
          pathType = "Prefix";
          backend.service = {
            name = service.name;
            port.name = service.portName;
          };
        }];
      }];
      # The Tailscale operator provisions TLS for the listed host.
      tls = [{
        hosts = [ host ];
      }];
    };
  };
in
{
  options = {
    lab.tailscaleIngresses = lib.mkOption {
      type = with lib.types; attrsOf (submodule tailscaleIngressOpts);
      # Fix: the option previously had no default, making it effectively
      # required; default to no ingresses, consistent with lab.ingresses.
      default = { };
    };
  };
  config = {
    kubernetes.resources.ingresses =
      builtins.mapAttrs mkTailscaleIngress config.lab.tailscaleIngresses;
  };
}

View file

@ -1,35 +0,0 @@
# CyberChef: self-hosted, purely client-side data-manipulation web app,
# exposed publicly at cyberchef.kun.is.
{
  kubernetes.resources = {
    deployments.cyberchef.spec = {
      # Stateless static site; multiple replicas keep it available during node maintenance.
      replicas = 3;
      selector.matchLabels.app = "cyberchef";
      template = {
        metadata.labels.app = "cyberchef";
        spec.containers.cyberchef = {
          # NOTE(review): image has no tag, so it floats to :latest — consider pinning a version.
          image = "mpepping/cyberchef";
          ports.web.containerPort = 8000;
        };
      };
    };
    services.cyberchef.spec = {
      selector.app = "cyberchef";
      ports.web = {
        port = 80;
        targetPort = "web";
      };
    };
  };
  # Public ingress routing cyberchef.kun.is to the service's "web" port.
  lab.ingresses.cyberchef = {
    host = "cyberchef.kun.is";
    service = {
      name = "cyberchef";
      portName = "web";
    };
  };
}

View file

@ -1,57 +0,0 @@
# dnsmasq: DNS server for the DMZ, exposed on a fixed IP via a LoadBalancer
# service. Addresses come from the shared myLib.globals constants.
{ myLib, ... }: {
  kubernetes.resources = {
    # Raw dnsmasq configuration, mounted as /etc/dnsmasq.conf below.
    # NOTE: this string is runtime configuration — contents must stay as-is.
    configMaps.dnsmasq-config.data.config = ''
      address=/kms.kun.is/${myLib.globals.kmsIPv4}
      address=/ssh.git.kun.is/${myLib.globals.gitIPv4}
      alias=${myLib.globals.routerPublicIPv4},${myLib.globals.traefikIPv4}
      expand-hosts
      host-record=hermes.dmz,${myLib.globals.dnsmasqIPv4}
      local=/dmz/
      log-queries
      no-hosts
      no-resolv
      port=53
      server=192.168.30.1
      server=/kun.is/${myLib.globals.bind9IPv4}
    '';
    deployments.dnsmasq.spec = {
      selector.matchLabels.app = "dnsmasq";
      template = {
        metadata.labels.app = "dnsmasq";
        spec = {
          containers.dnsmasq = {
            image = "dockurr/dnsmasq:2.90";
            ports.dns = {
              containerPort = 53;
              protocol = "UDP";
            };
            # Mount only the single config file from the ConfigMap (subPath),
            # not a whole directory over /etc.
            volumeMounts = [{
              name = "config";
              mountPath = "/etc/dnsmasq.conf";
              subPath = "config";
            }];
          };
          volumes.config.configMap.name = "dnsmasq-config";
        };
      };
    };
    services.dnsmasq.spec = {
      # Fixed IP so clients can be pointed at this resolver directly.
      type = "LoadBalancer";
      loadBalancerIP = myLib.globals.dnsmasqIPv4;
      selector.app = "dnsmasq";
      ports.dns = {
        port = 53;
        targetPort = "dns";
        protocol = "UDP";
      };
    };
  };
}

View file

@ -1,104 +0,0 @@
# Forgejo app.ini settings as a Nix attrset; rendered to INI elsewhere
# (lib.generators.toINI). Secrets are vals references resolved via sops.
{
  # Paths for locally-copied and uploaded repository data.
  "repository.local".LOCAL_COPY_PATH = "/data/gitea/tmp/local-repo";
  "repository.upload".TEMP_PATH = "/data/gitea/uploads";
  attachment.PATH = "/data/gitea/attachments";
  lfs.PATH = "/data/git/lfs";
  mailer.ENABLED = false;
  "repository.pull-request".DEFAULT_MERGE_STYLE = "merge";
  "repository.signing".DEFAULT_TRUST_MODEL = "committer";
  ui.DEFAULT_THEME = "forgejo-light";
  oauth2 = {
    ENABLED = false;
    # Secret injected at deploy time from the sops-encrypted store.
    JWT_SECRET = "ref+sops://secrets/kubernetes.yaml#/forgejo/jwtSecret";
  };
  DEFAULT = {
    APP_NAME = "Forgejo: Beyond coding. We forge.";
    RUN_MODE = "prod";
    RUN_USER = "git";
    WORK_PATH = "/data/gitea";
  };
  repository = {
    ROOT = "/data/git/repositories";
    DEFAULT_BRANCH = "master";
  };
  server = {
    APP_DATA_PATH = "/data/gitea";
    DOMAIN = "git.kun.is";
    SSH_DOMAIN = "ssh.git.kun.is";
    HTTP_PORT = 3000;
    ROOT_URL = "https://git.kun.is";
    DISABLE_SSH = false;
    # Externally advertised SSH port differs from the in-container listen port.
    SSH_PORT = 56287;
    SSH_LISTEN_PORT = 22;
    LFS_START_SERVER = true;
    LFS_JWT_SECRET = "ref+sops://secrets/kubernetes.yaml#/forgejo/lfsJwtSecret";
    OFFLINE_MODE = false;
  };
  database = {
    # SQLite on the data volume; the HOST/NAME/USER fields below are unused
    # with sqlite3 but kept for completeness.
    PATH = "/data/gitea/gitea.db";
    DB_TYPE = "sqlite3";
    HOST = "localhost:3306";
    NAME = "gitea";
    USER = "root";
    PASSWD = "";
    LOG_SQL = false;
    SCHEMA = "";
    SSL_MODE = "disable";
    CHARSET = "utf8";
  };
  indexer = {
    ISSUE_INDEXER_PATH = "/data/gitea/indexers/issues.bleve";
    ISSUE_INDEXER_TYPE = "db";
  };
  session = {
    PROVIDER_CONFIG = "/data/gitea/sessions";
    PROVIDER = "file";
  };
  picture = {
    AVATAR_UPLOAD_PATH = "/data/gitea/avatars";
    REPOSITORY_AVATAR_UPLOAD_PATH = "/data/gitea/repo-avatars";
    ENABLE_FEDERATED_AVATAR = false;
  };
  log = {
    # Log to stdout so logs are collected by the container runtime.
    MODE = "console";
    LEVEL = "info";
    "logger.router.MODE" = "console";
    ROOT_PATH = "/data/gitea/log";
    "logger.access.MODE" = "console";
  };
  security = {
    INSTALL_LOCK = true;
    SECRET_KEY = "";
    # Trust exactly one reverse proxy hop (Traefik) for client IPs.
    REVERSE_PROXY_LIMIT = 1;
    REVERSE_PROXY_TRUSTED_PROXIES = "*";
    INTERNAL_TOKEN = "ref+sops://secrets/kubernetes.yaml#/forgejo/internalToken";
    PASSWORD_HASH_ALGO = "pbkdf2";
  };
  service = {
    # Closed registration: accounts are created by the admin only.
    DISABLE_REGISTRATION = true;
    REQUIRE_SIGNIN_VIEW = false;
    REGISTER_EMAIL_CONFIRM = false;
    ENABLE_NOTIFY_MAIL = false;
    ALLOW_ONLY_EXTERNAL_REGISTRATION = false;
    ENABLE_CAPTCHA = false;
    DEFAULT_KEEP_EMAIL_PRIVATE = true;
    DEFAULT_ALLOW_CREATE_ORGANIZATION = true;
    DEFAULT_ENABLE_TIMETRACKING = true;
    NO_REPLY_ADDRESS = "noreply.localhost";
  };
  openid = {
    ENABLE_OPENID_SIGNIN = true;
    ENABLE_OPENID_SIGNUP = false;
  };
}

View file

@ -1,102 +0,0 @@
# Forgejo git forge: deployment, web + SSH services, ingress and storage.
# The app.ini is rendered from ./config.nix into a ConfigMap.
{ lib, myLib, ... }: {
  kubernetes.resources = {
    configMaps = {
      config.data = {
        config = lib.generators.toINI { } (import ./config.nix);
      };
    };
    deployments.server.spec = {
      selector.matchLabels.app = "forgejo";
      strategy = {
        # Never run two replicas at once: the SQLite database and repository
        # volume cannot be shared safely.
        type = "RollingUpdate";
        rollingUpdate = {
          maxSurge = 0;
          maxUnavailable = 1;
        };
      };
      template = {
        metadata.labels.app = "forgejo";
        spec = {
          # This disables services from becoming environment variables
          # to prevent SSH_PORT clashing with Forgejo config.
          enableServiceLinks = false;
          containers.forgejo = {
            # NOTE(review): pinned to 7.0.5 while imagePullPolicy is "Always";
            # the pull policy only helps with mutable tags — verify intent.
            image = "codeberg.org/forgejo/forgejo:7.0.5";
            imagePullPolicy = "Always";
            env = {
              USER_UID.value = "1000";
              USER_GID.value = "1000";
            };
            ports = {
              web.containerPort = 3000;
              ssh.containerPort = 22;
            };
            volumeMounts = [
              {
                name = "data";
                mountPath = "/data";
              }
              {
                # Mount the rendered app.ini over the default config file.
                name = "config";
                mountPath = "/data/gitea/conf/app.ini";
                subPath = "config";
              }
            ];
          };
          volumes = {
            data.persistentVolumeClaim.claimName = "data";
            config.configMap.name = "config";
          };
        };
      };
    };
    services = {
      web.spec = {
        selector.app = "forgejo";
        ports.web = {
          port = 80;
          targetPort = "web";
        };
      };
      # SSH is exposed on a dedicated load-balancer IP; the external port
      # matches SSH_PORT in config.nix.
      ssh.spec = {
        type = "LoadBalancer";
        loadBalancerIP = myLib.globals.gitIPv4;
        selector.app = "forgejo";
        ports.ssh = {
          port = 56287;
          targetPort = "ssh";
        };
      };
    };
  };
  lab = {
    ingresses.web = {
      host = "git.kun.is";
      service = {
        name = "web";
        portName = "web";
      };
    };
    # Longhorn-backed claim for /data (repositories, SQLite DB, config).
    longhorn.persistentVolumeClaim.data = {
      volumeName = "forgejo";
      storage = "20Gi";
    };
  };
}

View file

@ -1,84 +0,0 @@
# FreshRSS feed reader: deployment, service, ingress and Longhorn storage.
{
  kubernetes.resources = {
    # Admin password pulled from the sops-encrypted secret store at deploy time.
    secrets.server.stringData.adminPassword = "ref+sops://secrets/kubernetes.yaml#/freshrss/password";
    deployments.server.spec = {
      selector.matchLabels.app = "freshrss";
      strategy = {
        # Single writable data volume — never run two replicas at once.
        type = "RollingUpdate";
        rollingUpdate = {
          maxSurge = 0;
          maxUnavailable = 1;
        };
      };
      template = {
        metadata.labels.app = "freshrss";
        spec = {
          containers.freshrss = {
            image = "freshrss/freshrss:1.24.1";
            imagePullPolicy = "Always";
            ports.web.containerPort = 80;
            env = {
              TZ.value = "Europe/Amsterdam";
              # Run the feed-refresh cron at :02 and :32 each hour.
              CRON_MIN.value = "2,32";
              ADMIN_EMAIL.value = "pim@kunis.nl";
              # External HTTPS port as seen through the ingress.
              PUBLISHED_PORT.value = "443";
              ADMIN_PASSWORD.valueFrom.secretKeyRef = {
                name = "server";
                key = "adminPassword";
              };
              ADMIN_API_PASSWORD.valueFrom.secretKeyRef = {
                name = "server";
                key = "adminPassword";
              };
            };
            volumeMounts = [{
              name = "data";
              mountPath = "/var/www/FreshRSS/data";
            }];
          };
          volumes.data.persistentVolumeClaim.claimName = "data";
          # Ensure the data volume is owned by www-data (gid 33) on mount.
          securityContext = {
            fsGroup = 33;
            fsGroupChangePolicy = "OnRootMismatch";
          };
        };
      };
    };
    services.server.spec = {
      selector.app = "freshrss";
      ports.web = {
        port = 80;
        targetPort = "web";
      };
    };
  };
  lab = {
    ingresses.web = {
      host = "rss.kun.is";
      service = {
        name = "server";
        portName = "web";
      };
    };
    longhorn.persistentVolumeClaim.data = {
      volumeName = "freshrss";
      storage = "1Gi";
    };
  };
}

View file

@ -1,162 +0,0 @@
{ lib, ... }: {
  # HedgeDoc (collaborative markdown editor) plus its PostgreSQL database.
  kubernetes.resources = {
    # Extra JSON config mounted over /hedgedoc/config.json; everything else
    # is configured through the CMD_* environment variables below.
    configMaps.hedgedoc-config.data.config = lib.generators.toJSON { } {
      useSSL = false;
    };
    # Secrets resolved from sops at deploy time (ref+sops:// scheme).
    secrets.hedgedoc.stringData = {
      databaseURL = "ref+sops://secrets/kubernetes.yaml#/hedgedoc/databaseURL";
      sessionSecret = "ref+sops://secrets/kubernetes.yaml#/hedgedoc/sessionSecret";
      # The postgres password now lives in this Secret instead of an inline
      # env value, so it is no longer written verbatim into the pod spec.
      # Same sops path as before, so the resolved value is unchanged.
      databasePassword = "ref+sops://secrets/kubernetes.yaml#/hedgedoc/databasePassword";
    };
    deployments = {
      server.spec = {
        selector.matchLabels = {
          app = "hedgedoc";
          component = "website";
        };
        template = {
          metadata.labels = {
            app = "hedgedoc";
            component = "website";
          };
          spec = {
            containers.hedgedoc = {
              image = "quay.io/hedgedoc/hedgedoc:1.9.9";
              ports.web.containerPort = 3000;
              env = {
                CMD_DOMAIN.value = "md.kun.is";
                CMD_PORT.value = "3000";
                CMD_URL_ADDPORT.value = "false";
                CMD_ALLOW_ANONYMOUS.value = "true";
                CMD_ALLOW_EMAIL_REGISTER.value = "false";
                # TLS is terminated upstream; this only affects generated URLs.
                CMD_PROTOCOL_USESSL.value = "true";
                CMD_CSP_ENABLE.value = "false";
                CMD_DB_URL.valueFrom.secretKeyRef = {
                  name = "hedgedoc";
                  key = "databaseURL";
                };
                CMD_SESSION_SECRET.valueFrom.secretKeyRef = {
                  name = "hedgedoc";
                  key = "sessionSecret";
                };
              };
              volumeMounts = [
                {
                  name = "uploads";
                  mountPath = "/hedgedoc/public/uploads";
                }
                {
                  name = "config";
                  mountPath = "/hedgedoc/config.json";
                  subPath = "config";
                }
              ];
            };
            volumes = {
              uploads.persistentVolumeClaim.claimName = "uploads";
              config.configMap.name = "hedgedoc-config";
            };
            # Uploads volume owned by nobody/nogroup (65534).
            securityContext = {
              fsGroup = 65534;
              fsGroupChangePolicy = "OnRootMismatch";
            };
          };
        };
      };
      database.spec = {
        selector.matchLabels = {
          app = "hedgedoc";
          component = "database";
        };
        template = {
          metadata.labels = {
            app = "hedgedoc";
            component = "database";
          };
          spec = {
            containers.postgres = {
              image = "postgres:15";
              imagePullPolicy = "IfNotPresent";
              ports.postgres.containerPort = 5432;
              env = {
                POSTGRES_DB.value = "hedgedoc";
                POSTGRES_USER.value = "hedgedoc";
                # Pull the password from the Secret, consistent with how every
                # other database in this repository is configured.
                POSTGRES_PASSWORD.valueFrom.secretKeyRef = {
                  name = "hedgedoc";
                  key = "databasePassword";
                };
                PGDATA.value = "/pgdata/data";
              };
              volumeMounts = [{
                name = "database";
                mountPath = "/pgdata";
              }];
            };
            volumes.database.persistentVolumeClaim.claimName = "database";
          };
        };
      };
    };
    services = {
      server.spec = {
        selector = {
          app = "hedgedoc";
          component = "website";
        };
        ports.web = {
          port = 80;
          targetPort = "web";
        };
      };
      database.spec = {
        selector = {
          app = "hedgedoc";
          component = "database";
        };
        ports.postgres = {
          port = 5432;
          targetPort = "postgres";
        };
      };
    };
  };
  lab = {
    ingresses.web = {
      host = "md.kun.is";
      service = {
        name = "server";
        portName = "web";
      };
    };
    longhorn.persistentVolumeClaim = {
      uploads = {
        volumeName = "hedgedoc-uploads";
        storage = "50Mi";
      };
      database = {
        volumeName = "hedgedoc-db";
        storage = "100Mi";
      };
    };
  };
}

View file

@ -1,244 +0,0 @@
{
kubernetes.resources = {
deployments = {
immich.spec = {
selector.matchLabels = {
app = "immich";
component = "server";
};
strategy = {
type = "RollingUpdate";
rollingUpdate = {
maxSurge = 0;
maxUnavailable = 1;
};
};
template = {
metadata.labels = {
app = "immich";
component = "server";
};
spec = {
volumes.data.persistentVolumeClaim.claimName = "data";
enableServiceLinks = false;
containers.immich = {
image = "ghcr.io/immich-app/immich-server:v1.108.0";
imagePullPolicy = "Always";
ports.web.containerPort = 3001;
env = {
TZ.value = "Europe/Amsterdam";
REDIS_HOSTNAME.value = "redis.immich.svc.cluster.local";
DB_HOSTNAME.value = "postgres.immich.svc.cluster.local";
DB_USERNAME.value = "postgres";
DB_PASSWORD.value = "ref+sops://secrets/kubernetes.yaml#/immich/databasePassword";
DB_DATABASE_NAME.value = "immich";
IMMICH_MACHINE_LEARNING_URL.value = "http://ml.immich.svc.cluster.local";
};
volumeMounts = [{
name = "data";
mountPath = "/usr/src/app/upload";
}];
};
};
};
};
ml.spec = {
selector.matchLabels = {
app = "immich";
component = "machine-learning";
};
template = {
metadata.labels = {
app = "immich";
component = "machine-learning";
};
spec = {
volumes.cache.persistentVolumeClaim.claimName = "cache";
containers.machine-learning = {
image = "ghcr.io/immich-app/immich-machine-learning:v1.108.0";
imagePullPolicy = "Always";
ports.ml.containerPort = 3003;
env.MACHINE_LEARNING_WORKER_TIMEOUT.value = "600";
volumeMounts = [{
name = "cache";
mountPath = "/cache";
}];
};
};
};
};
redis.spec = {
selector.matchLabels = {
app = "immich";
component = "redis";
};
strategy = {
type = "RollingUpdate";
rollingUpdate = {
maxSurge = 0;
maxUnavailable = 1;
};
};
template = {
metadata.labels = {
app = "immich";
component = "redis";
};
spec = {
containers.redis = {
image = "docker.io/redis:6.2-alpine@sha256:d6c2911ac51b289db208767581a5d154544f2b2fe4914ea5056443f62dc6e900";
ports.redis.containerPort = 6379;
imagePullPolicy = "Always";
};
};
};
};
database.spec = {
selector.matchLabels = {
app = "immich";
component = "database";
};
strategy = {
type = "RollingUpdate";
rollingUpdate = {
maxSurge = 0;
maxUnavailable = 1;
};
};
template = {
metadata.labels = {
app = "immich";
component = "database";
};
spec = {
volumes.data.persistentVolumeClaim.claimName = "database";
containers.postgres = {
# pgvecto-rs: postgres 14 with the vector-search extension Immich needs,
# pinned by digest for reproducibility.
image = "docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0";
imagePullPolicy = "Always";
# Explicit command/args to preload vectors.so and put the extension on
# the search path; also enables WAL compression and sizes buffers.
command = [ "postgres" ];
args = [ "-c" "shared_preload_libraries=vectors.so" "-c" "search_path=\"$$user\", public, vectors" "-c" "logging_collector=on" "-c" "max_wal_size=2GB" "-c" "shared_buffers=512MB" "-c" "wal_compression=on" ];
ports.postgres.containerPort = 5432;
securityContext.runAsUser = 999;
securityContext.runAsGroup = 999;
env = {
# NOTE(review): inline env value embeds the resolved secret in the pod
# spec; consider a Secret + secretKeyRef like the other databases here.
POSTGRES_PASSWORD.value = "ref+sops://secrets/kubernetes.yaml#/immich/databasePassword";
POSTGRES_USER.value = "postgres";
POSTGRES_DB.value = "immich";
POSTGRES_INITDB_ARGS.value = "--data-checksums";
PGDATA.value = "/pgdata/data";
};
volumeMounts = [{
name = "data";
mountPath = "/pgdata";
}];
};
};
};
};
};
services = {
server.spec = {
selector = {
app = "immich";
component = "server";
};
ports.web = {
port = 80;
targetPort = "web";
};
};
redis.spec = {
selector = {
app = "immich";
component = "redis";
};
ports.redis = {
port = 6379;
targetPort = "redis";
};
};
ml.spec = {
selector = {
app = "immich";
component = "machine-learning";
};
ports.ml = {
port = 80;
targetPort = "ml";
};
};
postgres.spec = {
selector = {
app = "immich";
component = "database";
};
ports.postgres = {
port = 5432;
targetPort = "postgres";
};
};
};
persistentVolumeClaims.cache.spec = {
accessModes = [ "ReadWriteOnce" ];
resources.requests.storage = "5Gi";
};
};
lab = {
ingresses.immich = {
host = "immich.kun.is";
service = {
name = "server";
portName = "web";
};
};
longhorn.persistentVolumeClaim = {
data = {
volumeName = "immich";
storage = "50Gi";
};
database = {
volumeName = "immich-db";
storage = "5Gi";
};
};
};
}

View file

@ -1,57 +0,0 @@
{ myLib, ... }: {
  # Inbucket disposable-email service: web UI and SMTP, each on its own
  # LoadBalancer IP, with the web UI also reachable over Tailscale.
  kubernetes.resources = {
    serviceAccounts.inbucket = { };
    deployments.inbucket.spec = {
      selector.matchLabels.app = "inbucket";
      template = {
        metadata.labels.app = "inbucket";
        spec = {
          serviceAccountName = "inbucket";
          containers = {
            inbucket = {
              # NOTE(review): "edge" is a moving tag; pin a release for
              # reproducible deploys.
              image = "inbucket/inbucket:edge";
              ports = {
                web.containerPort = 9000;
                smtp.containerPort = 2500;
              };
            };
          };
        };
      };
    };
    services = {
      web.spec = {
        type = "LoadBalancer";
        loadBalancerIP = myLib.globals.inbucketWebIPv4;
        selector.app = "inbucket";
        ports.web = {
          port = 80;
          targetPort = "web";
        };
      };
      email.spec = {
        type = "LoadBalancer";
        loadBalancerIP = myLib.globals.inbucketEmailIPv4;
        selector.app = "inbucket";
        # Named attrset form for consistency with every other service in this
        # configuration; the previous anonymous list form left the service
        # port without a name.
        ports.smtp = {
          port = 25;
          targetPort = "smtp";
        };
      };
    };
  };
  lab.tailscaleIngresses.tailscale = {
    host = "inbucket";
    service.name = "web";
  };
}

View file

@ -1,72 +0,0 @@
{
# KitchenOwl (shopping list / recipes): single pod with SQLite-style data
# directory on Longhorn, served at boodschappen.kun.is.
kubernetes.resources = {
# JWT signing key, resolved from sops at deploy time.
secrets.server.stringData.jwtSecretKey = "ref+sops://secrets/kubernetes.yaml#/kitchenowl/jwtSecretKey";
deployments.server.spec = {
selector.matchLabels.app = "kitchenowl";
# Replace-in-place rollout: no extra pod during updates.
strategy = {
type = "RollingUpdate";
rollingUpdate = {
maxSurge = 0;
maxUnavailable = 1;
};
};
template = {
metadata.labels.app = "kitchenowl";
spec = {
volumes.data.persistentVolumeClaim.claimName = "data";
containers.kitchenowl = {
image = "tombursch/kitchenowl:v0.5.1";
ports.web.containerPort = 8080;
imagePullPolicy = "Always";
env.JWT_SECRET_KEY.valueFrom.secretKeyRef = {
name = "server";
key = "jwtSecretKey";
};
volumeMounts = [{
name = "data";
mountPath = "/data";
}];
};
# Data volume group-owned by root (gid 0).
securityContext = {
fsGroup = 0;
fsGroupChangePolicy = "OnRootMismatch";
};
};
};
};
services.server.spec = {
selector.app = "kitchenowl";
ports.web = {
port = 80;
targetPort = "web";
};
};
};
lab = {
ingresses.web = {
host = "boodschappen.kun.is";
service = {
name = "server";
portName = "web";
};
};
longhorn.persistentVolumeClaim.data = {
volumeName = "kitchenowl";
storage = "100Mi";
};
};
}

View file

@ -1,27 +0,0 @@
{ myLib, ... }:
let
  # Shared label/selector value and the KMS protocol port (1688).
  appLabel = "kms";
  kmsPort = 1688;
in
{
  # KMS activation-server emulator, exposed on a dedicated LoadBalancer IP.
  kubernetes.resources = {
    deployments.server.spec = {
      selector.matchLabels.app = appLabel;
      template = {
        metadata.labels.app = appLabel;
        spec.containers.kms = {
          # NOTE(review): untagged image; pin a version for reproducibility.
          image = "teddysun/kms";
          ports.kms.containerPort = kmsPort;
        };
      };
    };
    services.server.spec = {
      type = "LoadBalancer";
      loadBalancerIP = myLib.globals.kmsIPv4;
      selector.app = appLabel;
      ports.kms = {
        port = kmsPort;
        targetPort = "kms";
      };
    };
  };
}

View file

@ -1,634 +0,0 @@
{ myLib, ... }: {
kubernetes.resources = {
deployments = {
jellyfin.spec = {
selector.matchLabels = {
app = "media";
component = "jellyfin";
};
strategy = {
type = "RollingUpdate";
rollingUpdate = {
maxSurge = 0;
maxUnavailable = 1;
};
};
template = {
metadata.labels = {
app = "media";
component = "jellyfin";
};
spec = {
containers.jellyfin = {
image = "jellyfin/jellyfin:10.9.7";
ports.web.containerPort = 8096;
imagePullPolicy = "Always";
env.JELLYFIN_PublishedServerUrl.value = "https://media.kun.is";
volumeMounts = [
{
name = "config";
mountPath = "/config";
}
{
name = "media";
mountPath = "/media";
}
# Transcode scratch space kept on its own claim so it does not bloat
# the config volume.
{
name = "cache";
mountPath = "/config/transcodes";
}
];
};
volumes = {
config.persistentVolumeClaim.claimName = "jellyfin";
cache.persistentVolumeClaim.claimName = "jellyfin-cache";
# Media library is a hostPath, hence the node-affinity pin below.
media.hostPath = {
path = "/mnt/longhorn/persistent/media";
type = "Directory";
};
};
securityContext = {
fsGroup = 0;
fsGroupChangePolicy = "OnRootMismatch";
};
# Hard requirement: only schedule on nodes labeled hasMedia=true, i.e.
# nodes that actually have the hostPath media directory.
affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms = [{
matchExpressions = [{
key = "hasMedia";
operator = "In";
values = [ "true" ];
}];
}];
};
};
};
transmission.spec = {
selector.matchLabels = {
app = "media";
component = "transmission";
};
strategy = {
type = "RollingUpdate";
rollingUpdate = {
maxSurge = 0;
maxUnavailable = 1;
};
};
template = {
metadata.labels = {
app = "media";
component = "transmission";
};
spec = {
containers.transmission = {
image = "lscr.io/linuxserver/transmission:4.0.6";
imagePullPolicy = "Always";
ports = {
web.containerPort = 9091;
bittorrent.containerPort = 31780;
};
env = {
PUID.value = "1000";
PGID.value = "1000";
TZ.value = "Europe/Amsterdam";
};
volumeMounts = [
{
name = "config";
mountPath = "/config";
}
{
name = "media";
mountPath = "/media";
}
];
};
volumes = {
config.persistentVolumeClaim.claimName = "transmission";
media.persistentVolumeClaim.claimName = "media";
};
securityContext = {
fsGroup = 1000;
fsGroupChangePolicy = "OnRootMismatch";
};
};
};
};
jellyseerr.spec = {
selector.matchLabels = {
app = "media";
component = "jellyseerr";
};
strategy = {
type = "RollingUpdate";
rollingUpdate = {
maxSurge = 0;
maxUnavailable = 1;
};
};
template = {
metadata.labels = {
app = "media";
component = "jellyseerr";
};
spec = {
volumes.config.persistentVolumeClaim.claimName = "jellyseerr";
containers.jellyseerr = {
image = "fallenbagel/jellyseerr:1.9.2";
ports.web.containerPort = 5055;
imagePullPolicy = "Always";
env = {
LOG_LEVEL.value = "debug";
TZ.value = "Europe/Amsterdam";
};
volumeMounts = [{
name = "config";
mountPath = "/app/config";
}];
};
securityContext = {
fsGroup = 0;
fsGroupChangePolicy = "OnRootMismatch";
};
};
};
};
radarr.spec = {
selector.matchLabels = {
app = "media";
component = "radarr";
};
strategy = {
type = "RollingUpdate";
rollingUpdate = {
maxSurge = 0;
maxUnavailable = 1;
};
};
template = {
metadata.labels = {
app = "media";
component = "radarr";
};
spec = {
containers.radarr = {
image = "lscr.io/linuxserver/radarr:5.7.0";
ports.web.containerPort = 7878;
imagePullPolicy = "Always";
env = {
PUID.value = "1000";
PGID.value = "1000";
TZ.value = "Europe/Amsterdam";
};
volumeMounts = [
{
name = "config";
mountPath = "/config";
}
{
name = "media";
mountPath = "/media";
}
];
};
volumes = {
config.persistentVolumeClaim.claimName = "radarr";
media.persistentVolumeClaim.claimName = "media";
};
securityContext = {
fsGroup = 1000;
fsGroupChangePolicy = "OnRootMismatch";
};
};
};
};
prowlarr.spec = {
selector.matchLabels = {
app = "media";
component = "prowlarr";
};
strategy = {
type = "RollingUpdate";
rollingUpdate = {
maxSurge = 0;
maxUnavailable = 1;
};
};
template = {
metadata.labels = {
app = "media";
component = "prowlarr";
};
spec = {
volumes.config.persistentVolumeClaim.claimName = "prowlarr";
containers.prowlarr = {
image = "lscr.io/linuxserver/prowlarr:1.20.1";
ports.web.containerPort = 9696;
imagePullPolicy = "Always";
env = {
PUID.value = "1000";
PGID.value = "1000";
TZ.value = "Europe/Amsterdam";
};
volumeMounts = [{
name = "config";
mountPath = "/config";
}];
};
securityContext = {
fsGroup = 1000;
fsGroupChangePolicy = "OnRootMismatch";
};
};
};
};
sonarr.spec = {
selector.matchLabels = {
app = "media";
component = "sonarr";
};
strategy = {
type = "RollingUpdate";
rollingUpdate = {
maxSurge = 0;
maxUnavailable = 1;
};
};
template = {
metadata.labels = {
app = "media";
component = "sonarr";
};
spec = {
containers.sonarr = {
image = "lscr.io/linuxserver/sonarr:4.0.6";
ports.web.containerPort = 8989;
imagePullPolicy = "Always";
env = {
PUID.value = "1000";
PGID.value = "1000";
TZ.value = "Europe/Amsterdam";
};
volumeMounts = [
{
name = "config";
mountPath = "/config";
}
{
name = "media";
mountPath = "/media";
}
];
};
volumes = {
config.persistentVolumeClaim.claimName = "sonarr";
media.persistentVolumeClaim.claimName = "media";
};
securityContext = {
fsGroup = 1000;
fsGroupChangePolicy = "OnRootMismatch";
};
};
};
};
bazarr.spec = {
selector.matchLabels = {
app = "media";
component = "bazarr";
};
strategy = {
type = "RollingUpdate";
rollingUpdate = {
maxSurge = 0;
maxUnavailable = 1;
};
};
template = {
metadata.labels = {
app = "media";
component = "bazarr";
};
spec = {
containers.bazarr = {
image = "lscr.io/linuxserver/bazarr:1.4.3";
ports.web.containerPort = 6767;
imagePullPolicy = "Always";
env = {
PUID.value = "1000";
PGID.value = "1000";
TZ.value = "Europe/Amsterdam";
};
volumeMounts = [
{
name = "config";
mountPath = "/config";
}
{
name = "media";
mountPath = "/media";
}
];
};
volumes = {
config.persistentVolumeClaim.claimName = "bazarr";
media.persistentVolumeClaim.claimName = "media";
};
securityContext = {
fsGroup = 1000;
fsGroupChangePolicy = "OnRootMismatch";
};
};
};
};
};
services = {
jellyfin.spec = {
selector = {
app = "media";
component = "jellyfin";
};
ports.web = {
port = 80;
targetPort = "web";
};
};
transmission-web.spec = {
selector = {
app = "media";
component = "transmission";
};
ports.web = {
port = 80;
targetPort = "web";
};
};
transmission-bittorrent.spec = {
type = "LoadBalancer";
loadBalancerIP = myLib.globals.bittorrentIPv4;
selector = {
app = "media";
component = "transmission";
};
ports.bittorrent = {
port = 31780;
targetPort = "bittorrent";
};
};
jellyseerr.spec = {
selector = {
app = "media";
component = "jellyseerr";
};
ports.web = {
port = 80;
targetPort = "web";
};
};
radarr.spec = {
selector = {
app = "media";
component = "radarr";
};
ports.web = {
port = 80;
targetPort = "web";
};
};
prowlarr.spec = {
selector = {
app = "media";
component = "prowlarr";
};
ports.web = {
port = 80;
targetPort = "web";
};
};
sonarr.spec = {
selector = {
app = "media";
component = "sonarr";
};
ports.web = {
port = 80;
targetPort = "web";
};
};
bazarr.spec = {
selector = {
app = "media";
component = "bazarr";
};
ports.web = {
port = 80;
targetPort = "web";
};
};
};
persistentVolumeClaims = {
jellyfin-cache.spec = {
accessModes = [ "ReadWriteOnce" ];
resources.requests.storage = "20Gi";
};
media.spec = {
accessModes = [ "ReadWriteMany" ];
storageClassName = "";
resources.requests.storage = "1Mi";
volumeName = "media-media";
};
};
};
lab = {
ingresses = {
jellyfin = {
host = "media.kun.is";
service = {
name = "jellyfin";
portName = "web";
};
};
transmission = {
host = "transmission.kun.is";
entrypoint = "localsecure";
service = {
name = "transmission-web";
portName = "web";
};
};
jellyseerr = {
host = "jellyseerr.kun.is";
entrypoint = "localsecure";
service = {
name = "jellyseerr";
portName = "web";
};
};
radarr = {
host = "radarr.kun.is";
entrypoint = "localsecure";
service = {
name = "radarr";
portName = "web";
};
};
prowlarr = {
host = "prowlarr.kun.is";
entrypoint = "localsecure";
service = {
name = "prowlarr";
portName = "web";
};
};
sonarr = {
host = "sonarr.kun.is";
entrypoint = "localsecure";
service = {
name = "sonarr";
portName = "web";
};
};
bazarr = {
host = "bazarr.kun.is";
entrypoint = "localsecure";
service = {
name = "bazarr";
portName = "web";
};
};
};
longhorn.persistentVolumeClaim = {
jellyfin = {
volumeName = "jellyfin";
storage = "5Gi";
};
transmission = {
volumeName = "transmission";
storage = "25Mi";
};
jellyseerr = {
volumeName = "jellyseerr";
storage = "75Mi";
};
radarr = {
volumeName = "radarr";
storage = "300Mi";
};
prowlarr = {
volumeName = "prowlarr";
storage = "150Mi";
};
sonarr = {
volumeName = "sonarr";
storage = "150Mi";
};
bazarr = {
volumeName = "bazarr";
storage = "25Mi";
};
};
};
}

View file

@ -1,48 +0,0 @@
{ myLib, ... }: {
# Minecraft server, currently disabled: the deployment and service are
# commented out, but the Longhorn claim at the bottom is kept live so the
# world data on the "minecraft" volume survives until a future re-deploy.
# kubernetes.resources = {
# deployments.minecraft.spec = {
# selector.matchLabels.app = "minecraft";
# template = {
# metadata.labels.app = "minecraft";
# spec = {
# volumes.data.persistentVolumeClaim.claimName = "data";
# containers.minecraft = {
# image = "itzg/minecraft-server";
# ports.minecraft.containerPort = 25565;
# env.EULA.value = "TRUE";
# volumeMounts = [{
# name = "data";
# mountPath = "/data";
# }];
# };
# securityContext = {
# fsGroup = 1000;
# fsGroupChangePolicy = "OnRootMismatch";
# };
# };
# };
# };
# services.minecraft.spec = {
# type = "LoadBalancer";
# loadBalancerIP = myLib.globals.minecraftIPv4;
# selector.app = "minecraft";
# ports.minecraft = {
# port = 25565;
# targetPort = "minecraft";
# };
# };
# };
lab.longhorn.persistentVolumeClaim.data = {
volumeName = "minecraft";
storage = "1Gi";
};
}

View file

@ -1,158 +0,0 @@
{
kubernetes.resources = {
secrets.database.stringData.databasePassword = "ref+sops://secrets/kubernetes.yaml#/nextcloud/databasePassword";
deployments = {
server.spec = {
selector.matchLabels = {
app = "nextcloud";
component = "server";
};
strategy = {
type = "RollingUpdate";
rollingUpdate = {
maxSurge = 0;
maxUnavailable = 1;
};
};
template = {
metadata.labels = {
app = "nextcloud";
component = "server";
};
spec = {
volumes.data.persistentVolumeClaim.claimName = "data";
containers.nextcloud = {
image = "nextcloud:28";
ports.web.containerPort = 80;
env = {
POSTGRES_USER.value = "nextcloud";
POSTGRES_DB.value = "nextcloud";
# NOTE(review): points at an external host, while this module also
# defines an in-cluster database deployment/service -- confirm which
# database is actually in use.
POSTGRES_HOST.value = "lewis.dmz";
POSTGRES_PASSWORD.valueFrom.secretKeyRef = {
name = "database";
key = "databasePassword";
};
};
volumeMounts = [{
name = "data";
mountPath = "/var/www/html";
}];
};
# Web root owned by www-data (gid 33).
securityContext = {
fsGroup = 33;
fsGroupChangePolicy = "OnRootMismatch";
};
# Soft preference for nodes labeled storageType=fast.
affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution = [{
weight = 1;
preference.matchExpressions = [{
key = "storageType";
operator = "In";
values = [ "fast" ];
}];
}];
};
};
};
database.spec = {
selector.matchLabels = {
app = "nextcloud";
component = "database";
};
template = {
metadata.labels = {
app = "nextcloud";
component = "database";
};
spec = {
containers.postgres = {
image = "postgres:15";
imagePullPolicy = "IfNotPresent";
ports.postgres.containerPort = 5432;
env = {
POSTGRES_DB.value = "nextcloud";
POSTGRES_USER.value = "nextcloud";
PGDATA.value = "/pgdata/data";
POSTGRES_PASSWORD.valueFrom.secretKeyRef = {
name = "database";
key = "databasePassword";
};
};
volumeMounts = [{
name = "database";
mountPath = "/pgdata";
}];
};
volumes.database.persistentVolumeClaim.claimName = "database";
};
};
};
};
services = {
server.spec = {
selector = {
app = "nextcloud";
component = "server";
};
ports.web = {
port = 80;
targetPort = "web";
};
};
database.spec = {
selector = {
app = "nextcloud";
component = "database";
};
ports.postgres = {
port = 5432;
targetPort = "postgres";
};
};
};
};
lab = {
ingresses.web = {
host = "cloud.kun.is";
service = {
name = "server";
portName = "web";
};
};
longhorn.persistentVolumeClaim = {
data = {
volumeName = "nextcloud";
storage = "50Gi";
};
database = {
volumeName = "nextcloud-db";
storage = "400Mi";
};
};
};
}

View file

@ -1,237 +0,0 @@
{
kubernetes.resources = {
secrets = {
database.stringData.password = "ref+sops://secrets/kubernetes.yaml#/paperless/databasePassword";
server.stringData.secretKey = "ref+sops://secrets/kubernetes.yaml#/paperless/secretKey";
};
deployments = {
server.spec = {
selector.matchLabels = {
app = "paperless";
component = "web";
};
strategy = {
type = "RollingUpdate";
rollingUpdate = {
maxSurge = 0;
maxUnavailable = 1;
};
};
template = {
metadata.labels = {
app = "paperless";
component = "web";
};
spec = {
volumes.data.persistentVolumeClaim.claimName = "data";
containers.paperless = {
image = "ghcr.io/paperless-ngx/paperless-ngx:2.3";
imagePullPolicy = "Always";
ports.web.containerPort = 8000;
env = {
# Redis and postgres reached via their in-namespace service DNS names.
PAPERLESS_REDIS.value = "redis://redis.paperless.svc.cluster.local:6379";
PAPERLESS_DBENGINE.value = "postgresql";
PAPERLESS_DBHOST.value = "database.paperless.svc.cluster.local";
PAPERLESS_DBNAME.value = "paperless";
PAPERLESS_DBUSER.value = "paperless";
PAPERLESS_DATA_DIR.value = "/data/";
PAPERLESS_MEDIA_ROOT.value = "/data/";
# OCR: Dutch preferred, with English as additional language.
PAPERLESS_OCR_LANGUAGES.value = "nld eng";
PAPERLESS_URL.value = "https://paperless.kun.is";
PAPERLESS_TIME_ZONE.value = "Europe/Amsterdam";
PAPERLESS_OCR_LANGUAGE.value = "nld";
# Run as www-data (33), matching the fsGroup below.
USERMAP_UID.value = "33";
USERMAP_GID.value = "33";
PAPERLESS_DBPASS.valueFrom.secretKeyRef = {
name = "database";
key = "password";
};
PAPERLESS_SECRET_KEY.valueFrom.secretKeyRef = {
name = "server";
key = "secretKey";
};
};
volumeMounts = [{
name = "data";
mountPath = "/data";
}];
};
securityContext = {
fsGroup = 33;
fsGroupChangePolicy = "OnRootMismatch";
};
};
};
};
redis.spec = {
selector.matchLabels = {
app = "paperless";
component = "redis";
};
strategy = {
type = "RollingUpdate";
rollingUpdate = {
maxSurge = 0;
maxUnavailable = 1;
};
};
template = {
metadata.labels = {
app = "paperless";
component = "redis";
};
spec = {
volumes.data.persistentVolumeClaim.claimName = "redisdata";
containers.redis = {
image = "docker.io/library/redis:7";
ports.redis.containerPort = 6379;
imagePullPolicy = "Always";
volumeMounts = [{
name = "data";
mountPath = "/data";
}];
};
securityContext = {
fsGroup = 999;
fsGroupChangePolicy = "OnRootMismatch";
};
};
};
};
database.spec = {
selector.matchLabels = {
app = "paperless";
component = "database";
};
strategy = {
type = "RollingUpdate";
rollingUpdate = {
maxSurge = 0;
maxUnavailable = 1;
};
};
template = {
metadata.labels = {
app = "paperless";
component = "database";
};
spec = {
containers.postgres = {
image = "postgres:15";
ports.postgres.containerPort = 5432;
imagePullPolicy = "Always";
env = {
POSTGRES_DB.value = "paperless";
POSTGRES_USER.value = "paperless";
PGDATA.value = "/pgdata/data";
POSTGRES_PASSWORD.valueFrom.secretKeyRef = {
name = "database";
key = "password";
};
};
volumeMounts = [{
name = "data";
mountPath = "/pgdata";
}];
};
volumes.data.persistentVolumeClaim.claimName = "database";
};
};
};
};
services = {
web.spec = {
selector = {
app = "paperless";
component = "web";
};
ports.web = {
port = 80;
targetPort = "web";
};
};
redis.spec = {
selector = {
app = "paperless";
component = "redis";
};
ports.redis = {
port = 6379;
targetPort = "redis";
};
};
database.spec = {
selector = {
app = "paperless";
component = "database";
};
ports.postgres = {
port = 5432;
targetPort = "postgres";
};
};
};
};
lab = {
ingresses.web = {
host = "paperless.kun.is";
service = {
name = "web";
portName = "web";
};
};
longhorn.persistentVolumeClaim = {
data = {
volumeName = "paperless-data";
storage = "10Gi";
};
redisdata = {
volumeName = "paperless-redisdata";
storage = "20Mi";
};
database = {
volumeName = "paperless-db";
storage = "150Mi";
};
};
};
}

View file

@ -1,99 +0,0 @@
{ myLib, ... }: {
# Pi-hole DNS sinkhole: DNS + admin web UI on a dedicated LoadBalancer IP,
# with the admin UI also reachable over Tailscale.
kubernetes.resources = {
secrets.pihole.stringData.webPassword = "ref+sops://secrets/kubernetes.yaml#/pihole/password";
deployments.pihole.spec = {
selector.matchLabels.app = "pihole";
template = {
metadata.labels.app = "pihole";
spec = {
containers.pihole = {
# NOTE(review): "latest" is a moving tag; pin a version for
# reproducible deploys.
image = "pihole/pihole:latest";
env = {
TZ.value = "Europe/Amsterdam";
# Upstream resolver (presumably the LAN router -- TODO confirm).
PIHOLE_DNS_.value = "192.168.30.1";
WEBPASSWORD.valueFrom.secretKeyRef = {
name = "pihole";
key = "webPassword";
};
};
ports = {
web.containerPort = 80;
dns = {
containerPort = 53;
protocol = "UDP";
};
};
volumeMounts = [
{
name = "data";
mountPath = "/etc/pihole";
}
{
name = "dnsmasq";
mountPath = "/etc/dnsmasq.d";
}
];
};
volumes = {
data.persistentVolumeClaim.claimName = "pihole-data";
dnsmasq.persistentVolumeClaim.claimName = "pihole-dnsmasq";
};
securityContext = {
fsGroup = 1000;
fsGroupChangePolicy = "OnRootMismatch";
};
};
};
};
services = {
# Single service exposing both DNS (UDP 53) and the web UI on one IP.
pihole.spec = {
type = "LoadBalancer";
loadBalancerIP = myLib.globals.piholeIPv4;
selector.app = "pihole";
ports = {
dns = {
protocol = "UDP";
port = 53;
targetPort = "dns";
};
web = {
port = 80;
targetPort = "web";
};
};
};
};
};
lab = {
longhorn.persistentVolumeClaim = {
pihole-data = {
volumeName = "pihole-data";
storage = "750Mi";
};
pihole-dnsmasq = {
volumeName = "pihole-dnsmasq";
storage = "16Mi";
};
};
tailscaleIngresses.tailscale-pihole = {
host = "pihole";
service.name = "pihole";
};
};
}

View file

@ -1,113 +0,0 @@
{ lib, ... }: {
# Radicale CalDAV/CardDAV server: config and htpasswd file from one
# ConfigMap, data on Longhorn, served at dav.kun.is.
kubernetes.resources = {
configMaps.server.data = {
# htpasswd file (single user). NOTE(review): the hash is public in this
# ConfigMap and uses apr1/md5 -- consider a Secret and a stronger
# htpasswd_encryption setting.
users = "pim:$apr1$GUiTihkS$dDCkaUxFx/O86m6NCy/yQ.";
# Radicale's INI config, rendered from Nix.
config = lib.generators.toINI { } {
server = {
hosts = "0.0.0.0:5232, [::]:5232";
ssl = false;
};
encoding = {
request = "utf-8";
stock = "utf-8";
};
auth = {
realm = "Radicale - Password Required";
type = "htpasswd";
htpasswd_filename = "/config/users";
htpasswd_encryption = "md5";
};
rights.type = "owner_only";
storage = {
type = "multifilesystem";
filesystem_folder = "/data";
};
logging = { };
headers = { };
};
};
deployments.server.spec = {
selector.matchLabels.app = "radicale";
strategy = {
type = "RollingUpdate";
rollingUpdate = {
maxSurge = 0;
maxUnavailable = 1;
};
};
template = {
metadata.labels.app = "radicale";
spec = {
containers.radicale = {
image = "tomsquest/docker-radicale:3.2.2.0";
ports.web.containerPort = 5232;
imagePullPolicy = "Always";
volumeMounts = [
{
name = "data";
mountPath = "/data";
}
# Both files come from the same ConfigMap, mounted individually
# via subPath.
{
name = "config";
mountPath = "/config/config";
subPath = "config";
}
{
name = "config";
mountPath = "/config/users";
subPath = "users";
}
];
};
volumes = {
data.persistentVolumeClaim.claimName = "data";
config.configMap.name = "server";
};
securityContext = {
fsGroup = 2999;
fsGroupChangePolicy = "OnRootMismatch";
};
};
};
};
services.server.spec = {
selector.app = "radicale";
ports.web = {
port = 80;
targetPort = "web";
};
};
};
lab = {
ingresses.web = {
host = "dav.kun.is";
service = {
name = "server";
portName = "web";
};
};
longhorn.persistentVolumeClaim.data = {
volumeName = "radicale";
storage = "200Mi";
};
};
}

View file

@ -1,89 +0,0 @@
{ myLib, ... }: {
# Syncthing file synchronisation: config on Longhorn, a shared music volume
# bound to an existing PV, web UI on a LoadBalancer IP and via Tailscale.
kubernetes.resources = {
serviceAccounts.syncthing = { };
deployments.syncthing.spec = {
selector.matchLabels.app = "syncthing";
strategy = {
type = "RollingUpdate";
rollingUpdate = {
maxSurge = 0;
maxUnavailable = 1;
};
};
template = {
metadata.labels.app = "syncthing";
spec = {
serviceAccountName = "syncthing";
containers.syncthing = {
image = "lscr.io/linuxserver/syncthing:1.23.6";
ports.web.containerPort = 8384;
imagePullPolicy = "Always";
env = {
# Run as www-data (33), matching the fsGroup below.
PUID.value = "33";
PGID.value = "33";
TZ.value = "Europe/Amsterdam";
};
volumeMounts = [
{
name = "config";
mountPath = "/config";
}
{
name = "music";
mountPath = "/music";
}
];
};
volumes = {
config.persistentVolumeClaim.claimName = "config";
music.persistentVolumeClaim.claimName = "music";
};
securityContext = {
fsGroup = 33;
fsGroupChangePolicy = "OnRootMismatch";
};
};
};
};
services.syncthing.spec = {
type = "LoadBalancer";
loadBalancerIP = myLib.globals.syncthingWebIPv4;
selector.app = "syncthing";
ports.web = {
port = 80;
targetPort = "web";
};
};
# Bind directly to the pre-existing "music-syncthing" PV (no storage class,
# token 1Mi request -- capacity comes from the PV itself).
persistentVolumeClaims.music.spec = {
accessModes = [ "ReadWriteMany" ];
storageClassName = "";
resources.requests.storage = "1Mi";
volumeName = "music-syncthing";
};
};
lab = {
longhorn.persistentVolumeClaim.config = {
volumeName = "syncthing";
storage = "400Mi";
};
tailscaleIngresses.tailscale = {
host = "sync";
service.name = "syncthing";
};
};
}

View file

@ -1,14 +0,0 @@
{ nixhelm, system, ... }: {
# Tailscale Kubernetes operator, installed from the nixhelm chart set for
# the current system architecture.
kubernetes = {
helm.releases.tailscale = {
chart = nixhelm.chartsDerivations.${system}.tailscale.tailscale-operator;
includeCRDs = true;
namespace = "tailscale";
};
# OAuth client credentials for the operator; the ref+sops:// values are
# presumably resolved from the sops store at deploy time -- TODO confirm.
resources.secrets.operator-oauth.stringData = {
client_id = "ref+sops://secrets/kubernetes.yaml#/tailscale/clientID";
client_secret = "ref+sops://secrets/kubernetes.yaml#/tailscale/clientSecret";
};
};
}

View file

@ -1,75 +0,0 @@
{ lib, myLib, ... }: {
# Traefik ingress-controller customisation plus ExternalName services that
# proxy to standalone servers in the LAN/DMZ.
kubernetes.resources = {
helmChartConfigs = {
traefik = {
# Override Traefik's service with a static load balancer IP.
# Create endpoint for HTTPS on port 444.
# Allow external name services for servers in LAN.
spec.valuesContent = lib.generators.toYAML { } {
providers.kubernetesIngress.allowExternalNameServices = true;
service.loadBalancerIP = myLib.globals.traefikIPv4;
ports = {
# Extra TLS entrypoint "localsecure" on external port 444
# (container port 8444), used by LAN-only ingresses elsewhere in
# this repo.
localsecure = {
port = 8444;
expose = true;
exposedPort = 444;
protocol = "TCP";
tls = {
enabled = true;
options = "";
certResolver = "";
domains = [ ];
};
};
# Redirect plain HTTP to HTTPS.
web.redirectTo = "websecure";
};
};
};
};
# Plain-HTTP backends living outside the cluster, referenced by DNS name.
services = {
ek2024.spec = {
type = "ExternalName";
externalName = "ek2024.dmz";
ports.web = {
port = 80;
targetPort = 80;
};
};
esrom.spec = {
type = "ExternalName";
externalName = "esrom.dmz";
ports.web = {
port = 80;
targetPort = 80;
};
};
};
};
lab.ingresses = {
ek2024 = {
host = "ek2024.kun.is";
service = {
name = "ek2024";
portName = "web";
};
};
esrom = {
host = "esrom.kun.is";
service = {
name = "esrom";
portName = "web";
};
};
};
}

View file

@ -4,6 +4,9 @@
kubernetesNodeLabels.storageType = "slow";
nixosModule.lab = {
storage.profile = "kubernetes";
tailscale.enable = true;
k3s = {
enable = true;
serverAddr = "https://jefke.dmz:6443";

View file

@ -1,40 +0,0 @@
# BIRD routing daemon configuration: a single multihop BGP session
# (presumably over a tunnel to hamnet/AMPRnet, given the 44.137.x.x
# addresses and the "hamgre" name -- TODO confirm).
log syslog all;
debug protocols all;
# Router ID taken from the announced 44.137.17.96/28 range.
router id 44.137.17.110;
protocol bgp hamgre {
local as 4220401706;
neighbor 44.137.61.33 as 4220406100;
source address 44.137.61.34;
# Neighbor is not directly connected, hence multihop.
multihop;
ipv4 {
# Accept everything from the peer; announce nothing over BGP itself
# (the static route below pulls traffic in via the kernel table).
import all;
export none;
};
}
# Required scaffolding: interface discovery.
protocol device {
}
# Pick up addresses configured on loopback as direct routes.
protocol direct {
interface "lo";
ipv4 {
};
}
# Sync learned routes into the kernel routing table.
protocol kernel {
metric 0;
learn;
ipv4 {
import none;
export all;
};
}
# Route the local /28 towards the tunnel peer.
protocol static {
route 44.137.17.96/28 via 44.137.61.33;
ipv4 {
};
}

View file

@ -1,5 +1,8 @@
{ lib, ... }:
{ nixpkgs, flake-utils, ... }: flake-utils.lib.eachDefaultSystem (system:
let
pkgs = nixpkgs.legacyPackages.${system};
lib = pkgs.lib;
machineOpts = { config, ... }: {
options = {
arch = lib.mkOption {
@ -32,18 +35,25 @@ let
};
};
};
in
{
imports = [
./warwick.nix
./atlas.nix
./jefke.nix
./lewis.nix
];
options = {
machines = lib.mkOption {
type = with lib.types; attrsOf (submodule machineOpts);
allOpts = {
options = {
machines = lib.mkOption {
type = with lib.types; attrsOf (submodule machineOpts);
};
};
};
}
in
{
machines = (lib.modules.evalModules {
modules = [
allOpts
./warwick.nix
./atlas.nix
./jefke.nix
./lewis.nix
# ./talos.nix
# ./pikvm.nix
];
}).config.machines;
})

View file

@ -4,6 +4,9 @@
kubernetesNodeLabels.storageType = "fast";
nixosModule.lab = {
storage.profile = "kubernetes";
tailscale.enable = true;
k3s = {
enable = true;
clusterInit = true;

View file

@ -8,8 +8,10 @@
nixosModule = {
lab = {
storage.profile = "kubernetes";
backups.enable = true;
data-sharing.enable = true;
tailscale.enable = true;
k3s = {
enable = true;

23
machines/pikvm.nix Normal file
View file

@ -0,0 +1,23 @@
{
# PiKVM machine definition (Raspberry Pi based KVM-over-IP).
machines.pikvm = {
arch = "aarch64-linux";
isRaspberryPi = true;
nixosModule = { config, inputs, lib, ... }: {
# imports = [ "${inputs.nixpkgs}/nixos/modules/installer/sd-card/sd-image-aarch64.nix" ];
lab = {
# Boot from SD card (the "pi" profile in modules/storage.nix).
storage.profile = "pi";
};
# Video4Linux tooling — presumably for working with the KVM's
# video capture device; confirm against the PiKVM setup.
environment.systemPackages = with inputs.nixpkgs.legacyPackages.aarch64-linux; [
(mplayer.override {
# Build mplayer with Video4Linux capture support.
v4lSupport = true;
})
ffmpeg
v4l-utils
];
# Virtual V4L2 video device kernel module.
boot.extraModulePackages = with config.boot.kernelPackages; [ v4l2loopback ];
};
};
}

11
machines/talos.nix Normal file
View file

@ -0,0 +1,11 @@
{
# Talos machine definition: plain x86_64 box with a single-disk layout.
machines.talos = {
arch = "x86_64-linux";
nixosModule = { lib, ... }: {
# Single SATA disk ("normal" profile in modules/storage.nix).
lab.storage.profile = "normal";
# boot.loader.systemd-boot.enable = lib.mkForce false;
};
};
}

View file

@ -3,54 +3,16 @@
arch = "aarch64-linux";
isRaspberryPi = true;
nixosModule = {
lab.monitoring.server.enable = true;
nixosModule = { lib, ... }: {
lab = {
storage.profile = "pi";
monitoring.server.enable = true;
services.bird2 = {
enable = false;
config = builtins.readFile ./bird.conf;
tailscale = {
advertiseExitNode = true;
enable = true;
};
};
#systemd.network = {
# netdevs = {
# hamgre = {
# netdevConfig = {
# Name = "hamgre";
# Kind = "gre";
# MTUBytes = "1468";
# };
# tunnelConfig = {
# Remote = "145.220.78.4";
# #Local = "192.145.57.90";
# };
# };
# # hambr = {
# # netdevConfig = {
# # Name = "hambr";
# # Kind = "bridge";
# # };
# # };
# };
# networks = {
# "30-main-nic".networkConfig.Tunnel = "hamgre";
# "40-hamgre" = {
# matchConfig.Name = "hamgre";
# networkConfig = {
# Address = "44.137.61.34/30";
# };
# };
# # "40-hambr" = {
# # matchConfig.Name = "hambr";
# # };
# };
#};
};
};
}

View file

@ -1,6 +1,12 @@
{ pkgs, self, config, lib, inputs, machine, ... }: {
{ self, pkgs, config, lib, inputs, machine, ... }: {
imports = [
"${self}/nixos-modules"
./storage.nix
./backups.nix
./networking
./data-sharing.nix
./monitoring
./k3s
./tailscale.nix
machine.nixosModule
inputs.disko.nixosModules.disko
inputs.sops-nix.nixosModules.sops
@ -91,6 +97,7 @@
boot = lib.mkIf (! machine.isRaspberryPi) {
kernelModules = [ "kvm-intel" ];
extraModulePackages = [ ];
kernel.sysctl."fs.inotify.max_user_instances" = 256;
initrd = {
kernelModules = [ ];
@ -107,7 +114,7 @@
};
loader = {
systemd-boot.enable = true;
systemd-boot.enable = lib.mkDefault true;
efi.canTouchEfiVariables = true;
};
};
@ -118,6 +125,13 @@
extraOptions = ''
experimental-features = nix-command flakes
'';
gc = {
automatic = true;
persistent = true;
dates = "weekly";
options = "--delete-older-than 7d";
};
};
system = {
@ -132,7 +146,7 @@
sops = {
age.keyFile = "/root/.config/sops/age/keys.txt";
defaultSopsFile = ./secrets/nixos.yaml;
defaultSopsFile = "${self}/secrets/nixos.yaml";
};
};
}

View file

@ -1,22 +1,6 @@
{ inputs, pkgs, lib, config, ... }:
{ self, inputs, pkgs, lib, config, globals, ... }:
let
cfg = config.lab.k3s;
k3s-cni-plugins = pkgs.buildEnv {
name = "k3s-cni-plugins";
paths = with pkgs; [
cni-plugins
cni-plugin-flannel
];
};
image = pkgs.nix-snapshotter.buildImage {
name = "redis";
resolvedByNix = true;
config = {
entrypoint = [ "${pkgs.redis}/bin/redis-server" ];
};
};
in
{
options.lab.k3s = {
@ -61,6 +45,7 @@ in
nfs-utils # Required for Longhorn
];
# TODO!!!!!
networking = {
nftables.enable = lib.mkForce false;
firewall.enable = lib.mkForce false;
@ -77,30 +62,40 @@ in
address = "/run/nix-snapshotter/nix-snapshotter.sock";
};
plugins = {
"io.containerd.grpc.v1.cri" = {
stream_server_address = "127.0.0.1";
stream_server_port = "10010";
enable_selinux = false;
enable_unprivileged_ports = true;
enable_unprivileged_icmp = true;
disable_apparmor = true;
disable_cgroup = true;
restrict_oom_score_adj = true;
sandbox_image = "rancher/mirrored-pause:3.6";
containerd.snapshotter = "nix";
cni = {
conf_dir = "/var/lib/rancher/k3s/agent/etc/cni/net.d/";
bin_dir = "${k3s-cni-plugins}/bin";
plugins =
let
k3s-cni-plugins = pkgs.buildEnv {
name = "k3s-cni-plugins";
paths = with pkgs; [
cni-plugins
cni-plugin-flannel
];
};
};
in
{
"io.containerd.grpc.v1.cri" = {
stream_server_address = "127.0.0.1";
stream_server_port = "10010";
enable_selinux = false;
enable_unprivileged_ports = true;
enable_unprivileged_icmp = true;
disable_apparmor = true;
disable_cgroup = true;
restrict_oom_score_adj = true;
sandbox_image = "rancher/mirrored-pause:3.6";
containerd.snapshotter = "nix";
"io.containerd.transfer.v1.local".unpack_config = [{
platform = "linux/amd64";
snapshotter = "nix";
}];
};
cni = {
conf_dir = "/var/lib/rancher/k3s/agent/etc/cni/net.d/";
bin_dir = "${k3s-cni-plugins}/bin";
};
};
"io.containerd.transfer.v1.local".unpack_config = [{
platform = "linux/amd64";
snapshotter = "nix";
}];
};
};
};
@ -144,9 +139,9 @@ in
"L+ /usr/local/bin - - - - /run/current-system/sw/bin/"
];
system = lib.mkIf (cfg.role == "server") {
activationScripts = {
k3s-bootstrap.text = (
system.activationScripts = {
k3s-bootstrap = lib.mkIf (cfg.role == "server") {
text = (
let
k3sBootstrapFile = (inputs.kubenix.evalModules.x86_64-linux {
module = import ./bootstrap.nix;
@ -157,8 +152,10 @@ in
ln -sf ${k3sBootstrapFile} /var/lib/rancher/k3s/server/manifests/k3s-bootstrap.json
''
);
};
k3s-certs.text = ''
k3s-certs = lib.mkIf (cfg.role == "server") {
text = ''
mkdir -p /var/lib/rancher/k3s/server/tls/etcd
cp -f ${./k3s-ca/server-ca.crt} /var/lib/rancher/k3s/server/tls/server-ca.crt
cp -f ${./k3s-ca/client-ca.crt} /var/lib/rancher/k3s/server/tls/client-ca.crt
@ -166,11 +163,76 @@ in
cp -f ${./k3s-ca/etcd/peer-ca.crt} /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt
cp -f ${./k3s-ca/etcd/server-ca.crt} /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt
'';
nix-snapshotter-image = ''
ln -sf ${image} /root/image.tar
'';
};
nixng = lib.mkIf (cfg.role == "server")
(
let
dnsmasqStream = (import ./dnsmasq.nix {
inherit (inputs) nixpkgs nixng;
inherit (inputs.nixng) nglib;
inherit (self) globals;
}).config.system.build.ociImage.stream;
dnsmasqImage = pkgs.stdenv.mkDerivation {
name = "dnsmasq.tar";
src = dnsmasqStream;
dontUnpack = true;
buildPhase = ''
$src > $out
'';
};
in
{
text = ''
rm -rf ${self.globals.imageDir}
mkdir -p ${self.globals.imageDir}
ln -sf ${dnsmasqImage} ${self.globals.imageDir}/dnsmasq.tar
'';
}
);
docker-images.text =
let
imageDefs = import "${self}/container-images.nix";
setupCommands = [
"rm -rf ${self.globals.imageDir}"
"mkdir -p ${self.globals.imageDir}"
];
getDockerImageConfig = dockerImage:
let
configJson = pkgs.runCommand "config.json"
{
nativeBuildInputs = [ pkgs.skopeo pkgs.jq ];
}
''
skopeo --tmpdir $TMPDIR --insecure-policy inspect docker-archive:${dockerImage} --config | jq '.config' > $out
'';
in
builtins.fromJSON (builtins.readFile configJson);
imageDefToLinkCommand = name: imageDef:
let
dockerImage = pkgs.dockerTools.pullImage imageDef;
nixSnapshotterImage = pkgs.nix-snapshotter.buildImage {
inherit name;
resolvedByNix = true;
fromImage = dockerImage;
config = getDockerImageConfig dockerImage;
};
imageLinkPath = "${self.globals.imageDir}/${name}.tar";
in
"ln -sf ${nixSnapshotterImage} ${imageLinkPath}";
linkCommandList = lib.attrsets.mapAttrsToList imageDefToLinkCommand imageDefs;
# TODO: Creating Docker images like this seems to *explode* in size.
# Doing this for every image we currently have is infeasible.
# I should investigate why the size increases like that.
commandList = setupCommands; # ++ linkCommandList;
in
builtins.concatStringsSep "\n" commandList;
};
sops.secrets =

41
modules/k3s/dnsmasq.nix Normal file
View file

@ -0,0 +1,41 @@
# Minimal NixNG-based dnsmasq system; its OCI image is built and
# deployed by the k3s module (modules/k3s).
{ globals, nixpkgs, nglib, ... }:
nglib.makeSystem {
inherit nixpkgs;
system = "x86_64-linux";
name = "nixng-dnsmasq";
config = { ... }: {
# Run dumb-init as PID 1 inside the container.
dumb-init = {
enable = true;
type.services = { };
};
init.services.dnsmasq = {
# Shut the whole system down when dnsmasq exits.
shutdownOnExit = true;
};
services.dnsmasq = {
enable = true;
settings = {
# Static address records for services resolved locally.
address = [
"/kms.kun.is/${globals.kmsIPv4}"
"/ssh.git.kun.is/${globals.gitIPv4}"
];
# Rewrite the router's public IPv4 in upstream replies to
# Traefik's internal address — presumably to avoid NAT
# hairpinning from inside the network.
alias = "${globals.routerPublicIPv4},${globals.traefikIPv4}";
expand-hosts = true;
# Treat the "dmz" domain as local-only.
local = "/dmz/";
log-queries = true;
# Ignore the container's /etc/hosts and /etc/resolv.conf.
no-hosts = true;
no-resolv = true;
port = 53;
server = [
# Default upstream resolver.
"192.168.30.1"
# Delegate kun.is lookups to the local bind9 instance.
"/kun.is/${globals.bind9IPv4}"
];
};
};
};
}

View file

@ -23,7 +23,6 @@ in
services.prometheus = {
enable = cfg.server.enable;
webExternalUrl = "/prometheus";
exporters = {
node = {
@ -32,14 +31,34 @@ in
};
scrapeConfigs = lib.mkIf cfg.server.enable (
lib.attrsets.mapAttrsToList
(name: machine: {
job_name = name;
let
generated = lib.attrsets.mapAttrsToList
(name: machine: {
job_name = name;
static_configs = [{
targets = [ "${name}.dmz:${toString config.services.prometheus.exporters.node.port}" ];
}];
})
machines;
pikvm = {
job_name = "pikvm";
metrics_path = "/api/export/prometheus/metrics";
scheme = "https";
tls_config.insecure_skip_verify = true;
# We don't care about security here, it's behind a VPN.
basic_auth = {
username = "admin";
password = "admin";
};
static_configs = [{
targets = [ "${name}.dmz:${toString config.services.prometheus.exporters.node.port}" ];
targets = [ "pikvm.dmz" ];
}];
})
machines
};
in
generated ++ [ pikvm ]
);
};
@ -47,7 +66,7 @@ in
enable = true;
virtualHosts."${config.networking.fqdn}" = {
locations."/prometheus/" = {
locations."/" = {
proxyPass = "http://127.0.0.1:${toString config.services.prometheus.port}";
recommendedProxySettings = true;
};

View file

@ -2,12 +2,10 @@
config = {
networking = {
domain = "dmz";
nftables.enable = true;
nftables.enable = lib.mkDefault true;
useDHCP = false;
firewall = {
enable = true;
};
firewall.enable = lib.mkDefault true;
};
systemd.network = {

167
modules/storage.nix Normal file
View file

@ -0,0 +1,167 @@
# Disk/filesystem layout profiles for lab machines.
# A machine picks exactly one profile via `lab.storage.profile`; each
# entry below activates its layout with `lib.mkIf`.
{ lib, config, ... }:
let
  cfg = config.lab.storage;

  modules = [
    # "pi": Raspberry Pi booting from an SD card; no disko layout.
    {
      config = lib.mkIf (cfg.profile == "pi") {
        fileSystems."/" = {
          device = "/dev/disk/by-label/NIXOS_SD";
          fsType = "ext4";
          options = [ "noatime" ];
        };
      };
    }

    # "kubernetes": NVMe disk carrying boot + OS volume group, with the
    # NVMe remainder and a whole SATA disk pooled into vg_data for a
    # Longhorn XFS volume mounted at /mnt/longhorn.
    {
      config = lib.mkIf (cfg.profile == "kubernetes") {
        disko.devices = {
          disk = {
            nvme = {
              device = "/dev/nvme0n1";
              type = "disk";
              content = {
                type = "gpt";
                partitions = {
                  boot = {
                    type = "EF00";
                    size = "500M";
                    content = {
                      type = "filesystem";
                      format = "vfat";
                      mountpoint = "/boot";
                    };
                  };
                  pv_os = {
                    size = "79G";
                    content = {
                      type = "lvm_pv";
                      vg = "vg_os";
                    };
                  };
                  # Remaining NVMe space goes to the data VG.
                  pv_nvme_extra = {
                    size = "100%";
                    content = {
                      type = "lvm_pv";
                      vg = "vg_data";
                    };
                  };
                };
              };
            };
            sata = {
              device = "/dev/sda";
              type = "disk";
              content = {
                type = "gpt";
                partitions.pv_sata = {
                  size = "100%";
                  content = {
                    type = "lvm_pv";
                    vg = "vg_data";
                  };
                };
              };
            };
          };
          lvm_vg = {
            vg_os = {
              type = "lvm_vg";
              lvs = {
                root = {
                  size = "75G";
                  content = {
                    type = "filesystem";
                    format = "ext4";
                    mountpoint = "/";
                    mountOptions = [ "defaults" ];
                  };
                };
                # Remainder of the OS VG is swap.
                swap = {
                  size = "100%FREE";
                  content.type = "swap";
                };
              };
            };
            vg_data = {
              type = "lvm_vg";
              lvs.longhorn = {
                size = "100%FREE";
                content = {
                  type = "filesystem";
                  format = "xfs";
                  mountpoint = "/mnt/longhorn";
                };
              };
            };
          };
        };
      };
    }

    # "normal": single SATA disk with a boot and a root partition.
    {
      config = lib.mkIf (cfg.profile == "normal") {
        disko.devices = {
          disk.sata = {
            device = "/dev/sda";
            type = "disk";
            content = {
              type = "gpt";
              partitions = {
                boot = {
                  type = "EF00";
                  size = "500M";
                  content = {
                    type = "filesystem";
                    format = "vfat";
                    mountpoint = "/boot";
                  };
                };
                root = {
                  size = "100%";
                  content = {
                    type = "filesystem";
                    format = "ext4";
                    mountpoint = "/";
                    mountOptions = [ "defaults" ];
                  };
                };
              };
            };
          };
        };
      };
    }
  ];
in
{
  imports = modules;

  options.lab.storage = {
    profile = lib.mkOption {
      # Enum instead of a free-form string: with `types.str`, a typo in
      # the profile name matched none of the mkIf branches above and
      # silently produced a machine without any disk configuration.
      type = lib.types.enum [ "pi" "kubernetes" "normal" ];
      description = ''
        Disk/filesystem layout profile for this machine.
      '';
    };
  };
}

35
modules/tailscale.nix Normal file
View file

@ -0,0 +1,35 @@
# Opt-in Tailscale client for lab machines, optionally advertising this
# machine as an exit node for the 192.168.30.0/24 network.
{ lib, config, ... }:
let
  cfg = config.lab.tailscale;
in
{
  options = {
    lab.tailscale = {
      enable = lib.mkEnableOption "tailscale";

      advertiseExitNode = lib.mkOption {
        type = lib.types.bool;
        default = false;
        description = ''
          Whether to advertise this machine as a Tailscale exit node and
          advertise the 192.168.30.0/24 route.
        '';
      };
    };
  };

  config = lib.mkIf cfg.enable {
    services.tailscale = {
      enable = true;
      # Pre-authenticated key provisioned via sops (declared below).
      authKeyFile = config.sops.secrets."tailscale/authKey".path;
      useRoutingFeatures = "server";
      openFirewall = true;
      extraUpFlags = [
        # Keep the system resolver; don't accept Tailscale DNS.
        "--accept-dns=false"
        "--hostname=${config.networking.hostName}"
      ]
      # Both flags hang off the same option, so emit them as one
      # optionals list instead of two separate `optional` calls.
      ++ lib.lists.optionals cfg.advertiseExitNode [
        "--advertise-exit-node"
        "--advertise-routes=192.168.30.0/24"
      ];
    };

    sops.secrets."tailscale/authKey" = { };

    # Don't let systemd-networkd's wait-online target block on the
    # tailscale interface coming up.
    systemd.network.wait-online.ignoredInterfaces = [ "tailscale0" ];
  };
}

View file

@ -1,4 +0,0 @@
# Custom helper library: takes nixpkgs' lib and exposes network helpers
# plus the global constants from ./globals.nix.
lib: {
net = import ./net.nix lib;
globals = import ./globals.nix;
}

View file

@ -1,16 +0,0 @@
# Static IP addresses used across the lab: the router's public
# addresses and the per-service addresses on the 192.168.30.0/24 network.
{
routerPublicIPv4 = "192.145.57.90";
routerPublicIPv6 = "2a0d:6e00:1a77::1";
minecraftIPv4 = "192.168.30.136";
dnsmasqIPv4 = "192.168.30.135";
bind9IPv4 = "192.168.30.134";
bind9Ipv6 = "2a0d:6e00:1a77:30::134";
bittorrentIPv4 = "192.168.30.133";
gitIPv4 = "192.168.30.132";
piholeIPv4 = "192.168.30.131";
inbucketEmailIPv4 = "192.168.30.130";
kmsIPv4 = "192.168.30.129";
traefikIPv4 = "192.168.30.128";
inbucketWebIPv4 = "192.168.30.137";
syncthingWebIPv4 = "192.168.30.138";
}

View file

@ -1,10 +0,0 @@
# Aggregator module: pulls in all lab NixOS modules.
{
imports = [
./storage.nix
./backups.nix
./networking
./data-sharing.nix
./monitoring
./k3s
];
}

View file

@ -1,89 +0,0 @@
# Option declarations for lab-wide networking facts: the router's
# public addresses and per-host addresses on the DMZ network. Only
# declarations — values are set elsewhere.
{ lib, ... }: {
options.lab = {
networking = {
# Publicly reachable addresses of the router.
public = {
ipv4 = {
router = lib.mkOption {
type = lib.types.str;
description = ''
Public IPv4 address of the router.
'';
};
};
ipv6 = {
router = lib.mkOption {
type = lib.types.str;
description = ''
Publicly routable IPv6 address of the router.
'';
};
};
};
# Addresses on the internal DMZ network.
dmz = {
ipv4 = {
prefixLength = lib.mkOption {
type = lib.types.str;
description = ''
IPv4 prefix length of DMZ network.
'';
};
dockerSwarm = lib.mkOption {
type = lib.types.str;
description = ''
IPv4 address of the Docker Swarm in the DMZ.
'';
};
router = lib.mkOption {
type = lib.types.str;
description = ''
The router's IPv4 address on the DMZ network.
'';
};
services = lib.mkOption {
type = lib.types.str;
description = ''
The IPv4 address of the interface serving DHCP and DNS on the DMZ network.
'';
};
};
ipv6 = {
prefixLength = lib.mkOption {
type = lib.types.str;
description = ''
IPv6 prefix length of DMZ network.
'';
};
dockerSwarm = lib.mkOption {
type = lib.types.str;
description = ''
Globally routable IPv6 address of the Docker Swarm.
'';
};
router = lib.mkOption {
type = lib.types.str;
description = ''
The router's IPv6 address on the DMZ network.
'';
};
services = lib.mkOption {
type = lib.types.str;
description = ''
The IPv6 address of the interface serving DHCP and DNS on the DMZ network.
'';
};
};
};
};
};
}

View file

@ -1,122 +0,0 @@
# Storage layout keyed on the machine flag `machine.isRaspberryPi`:
# Pis mount the SD card as root; everything else gets a disko layout
# with an NVMe OS disk and a SATA disk pooled into a Longhorn data VG.
{ lib, config, machine, ... }:
let cfg = config.lab.storage;
in {
options.lab.storage = {
# NOTE(review): declared but not referenced in this file — presumably
# consumed elsewhere; verify before relying on it.
osDisk = lib.mkOption {
type = with lib.types; nullOr str;
description = ''
The disk to be used for the machine's operating system.
'';
};
};
config = {
# Raspberry Pi: root on the SD card.
fileSystems."/" = lib.mkIf machine.isRaspberryPi {
device = "/dev/disk/by-label/NIXOS_SD";
fsType = "ext4";
options = [ "noatime" ];
};
# Non-Pi machines: declarative partitioning via disko.
disko = lib.mkIf (! machine.isRaspberryPi) {
devices = {
disk = {
nvme = {
device = "/dev/nvme0n1";
type = "disk";
content = {
type = "gpt";
partitions = {
boot = {
type = "EF00";
size = "500M";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
};
};
pv_os = {
size = "79G";
content = {
type = "lvm_pv";
vg = "vg_os";
};
};
# Remaining NVMe space joins the data VG.
pv_nvme_extra = {
size = "100%";
content = {
type = "lvm_pv";
vg = "vg_data";
};
};
};
};
};
sata = {
device = "/dev/sda";
type = "disk";
content = {
type = "gpt";
partitions.pv_sata = {
size = "100%";
content = {
type = "lvm_pv";
vg = "vg_data";
};
};
};
};
};
lvm_vg = {
vg_os = {
type = "lvm_vg";
lvs = {
root = {
size = "75G";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
mountOptions = [ "defaults" ];
};
};
# Remainder of the OS VG is swap.
swap = {
size = "100%FREE";
content.type = "swap";
};
};
};
# Data VG: single XFS volume for Longhorn.
vg_data = {
type = "lvm_vg";
lvs.longhorn = {
size = "100%FREE";
content = {
type = "filesystem";
format = "xfs";
mountpoint = "/mnt/longhorn";
};
};
};
};
};
};
}

View file

@ -1,5 +1,7 @@
{ self, myLib, nixpkgs, machines, ... }@inputs:
{ self, nixpkgs, ... }@inputs:
let
deployArch = "x86_64-linux";
machines = self.machines.${deployArch};
mkNixosSystems = systemDef:
builtins.mapAttrs
(name: machine:
@ -11,10 +13,10 @@ in
nixosConfigurations = mkNixosSystems (name: machine: {
system = machine.arch;
specialArgs = { inherit self inputs myLib machine machines; };
specialArgs = { inherit self inputs machine machines; };
modules = [
"${self}/configuration.nix"
"${self}/modules"
{ networking.hostName = name; }
];
});

View file

@ -1,5 +1,6 @@
{ flake-utils, pkgs, ... }: flake-utils.lib.eachDefaultSystem (system:
{ self, nixpkgs, flake-utils, ... }: flake-utils.lib.eachDefaultSystem (system:
let
pkgs = nixpkgs.legacyPackages.${system};
createScript = { name, runtimeInputs, scriptPath, extraWrapperFlags ? "", ... }:
let
script = (pkgs.writeScriptBin name (builtins.readFile scriptPath)).overrideAttrs (old: {
@ -20,25 +21,11 @@ in
scriptPath = ./bootstrap.sh;
};
packages.gen-k3s-cert = createScript {
name = "create-k3s-cert";
runtimeInputs = with pkgs; [ openssl coreutils openssh yq ];
scriptPath = ./gen-k3s-cert.sh;
};
packages.prefetch-container-images =
let
images = {
cyberchef = {
cyberchef = {
image-name = "mpepping/cyberchef";
image-tag = "latest";
};
};
};
imagesJSON = builtins.toFile "images.json" (builtins.toJSON images);
imagesJSON = builtins.toFile "images.json" (builtins.toJSON self.globals.images);
in
pkgs.writers.writePython3Bin "prefetch-container-images"
pkgs.writers.writePython3Bin "prefetch-container-images.py"
{ } ''
import json
import subprocess
@ -55,31 +42,34 @@ in
with open(images_file_name, 'r') as file:
data = json.load(file)
for project_name, images in data.items():
print(f"Prefetching images for project {project_name}", file=sys.stderr)
for image_name, image_ref in data.items():
[name, tag] = image_ref.split(":", maxsplit=1)
print(f"Prefetching image {image_ref}", file=sys.stderr)
for image_name, image in images.items():
name = image["image-name"]
tag = image["image-tag"]
digest = ""
if "@" in tag:
[tag, digest] = tag.split("@", maxsplit=1)
print(f"Prefetching image {name}:{tag}", file=sys.stderr)
prefetch_args = [
prefetch_docker_cmd,
"--os", "linux",
"--arch", "amd64",
"--image-name", name,
"--image-tag", tag,
"--json",
"--quiet"
]
prefetch_args = [
prefetch_docker_cmd,
"--os", "linux",
"--arch", "amd64",
"--image-name", name,
"--image-tag", tag,
"--json",
"--quiet"
]
result = subprocess.run(prefetch_args,
check=True,
capture_output=True,
text=True)
if digest:
prefetch_args.extend(["--image-digest", digest])
prefetch_data = json.loads(result.stdout)
results[project_name][image_name] = prefetch_data
result = subprocess.run(prefetch_args,
check=True,
capture_output=True,
text=True)
prefetch_data = json.loads(result.stdout)
results[image_name] = prefetch_data
with tempfile.NamedTemporaryFile(mode='w+', suffix='.json') as temp_file:
json.dump(results, temp_file, indent=4)

View file

@ -1,60 +0,0 @@
freshrss:
password: ENC[AES256_GCM,data:LDLp7cEToWA7zpd5UK+eBUHDaSEtNpFjI7C0LRE+72n0Vu1saPOdSQ==,iv:OEJDcFZwxGJ9vVD1lH7QY5Ue4Kfmx37v9kSEbI0YvRI=,tag:gIyquRc9t+GOOre8MKWxHQ==,type:str]
pihole:
password: ENC[AES256_GCM,data:yqPpovQKmP7NgUMI3w1p8t7RjbxNsMMHZbsNEaleyLJTqnDzNqONsQ==,iv:i+ys/EZelT4a4Sr0RpDto8udk/9yYC6pzl3FiUZQxrQ=,tag:FlvbMN6fuo+VV50YyuMeGg==,type:str]
hedgedoc:
databaseURL: ENC[AES256_GCM,data:VVz5meJM/SWC9+gWvorSj4ymLRux0vQPbI0kQLFrUGz2bocaRFzDqHAKbF4sd5iSzc6y5LQqwUfOgNoVrKhIROzKxStOmaQAWTLAJvfdReAqQoEaLVuLcZeML9QIhqvdAvPV5kVMznJ1u5YczSA=,iv:wU/GrAYSF2y0JWl0Nz6UuYmII0kCPIZ+UfAGI/1mUsE=,tag:xVOUwd5T6VHZ7vrpj9FMxg==,type:str]
sessionSecret: ENC[AES256_GCM,data:FhYr4rFNHmtk9jUcjM4UthepS/5Z4x7WPAE5lTB94WmHrALbzZl2M3JcmibR6/z1FtAJhCsaPZ7Xeg8nOZtU2g==,iv:7soqcd8A+yNfXEZg0qDjOZgfsUIFHfflxByuf7nZk3Y=,tag:x/rmaXo4nTdA080Zl/0MiQ==,type:str]
databasePassword: ENC[AES256_GCM,data:Fv1qeGvXZ93KvdFCCz9t9Dzhe7wKGOfR0lj64lzRM3s48E5FYdrH0w==,iv:cqhIOUKiSSkBpf95Eza9C9l8PX6YmTBpvBAR4+ibgeA=,tag:r8ZvF6l8oNeOt3d5UCA7Ww==,type:str]
nextcloud:
databasePassword: ENC[AES256_GCM,data:Xz0zUpu/W12Io1LSh5CLvGkq1X6yQErz4kdCdTyNZTw=,iv:OkY1fGzHmmbO9u+e9yNlLjJf8dqQtePTj9ifaDBFJ4g=,tag:S8/z9HJTPCZo43wAB5fWpA==,type:str]
paperless:
databasePassword: ENC[AES256_GCM,data:eF4+lxuTnvm+NYwZiU1VFp8Y2JQ=,iv:c36Rk2pEkiqXkLngpyZNulObxek+evvfeugYiBYJrBo=,tag:T0uArgOkJYCvCgmdJauhIg==,type:str]
secretKey: ENC[AES256_GCM,data:ByJpX/tIyzb4fewUOI9MwFBVHkc=,iv:08GvsSOI1OkckH01nzmsyhGoQYl82vyWIDEjrNUQUgk=,tag:YgVY0C7XmlQYw+Aup5LIPw==,type:str]
kitchenowl:
jwtSecretKey: ENC[AES256_GCM,data:9TyqeYlfhvhVg4WOn++/wrqguTM=,iv:+EgGaZxeI+npq5VAX7MHRDYQm8uRcKa8+u2wkn/dwr4=,tag:ATIuPdZQwuDQ+R8nVWWWIA==,type:str]
forgejo:
lfsJwtSecret: ENC[AES256_GCM,data:VWyUDUKZ6km0YPZLejnISBI3wkmOi26CS55NZm+eWbiymGDN9Z9xUQ4FTA==,iv:gGhNGtEEOJnsmq9GMIAImkVOPWMwYq+kDQeWoHVU860=,tag:63z/7PJKI0ePXbJ94radpw==,type:str]
internalToken: ENC[AES256_GCM,data:nKLE/Ir8Ewm3GuRzUNZZTShnMMx6avxYu40PvMEti14Be0YmQhJ0IZruRdpktyW1Jj4n5ksXhk+qsO/vEIzQaJmPU1RxN6vsGGk6EBIwMP0kuUNmp25lPefafoJvxoQpXdJvkLy8f8MC,iv:dUki8hCTOF1O5fmwDqZAkaE1OCH3IL/SFPBDSJ/GMiU=,tag:HUpkVqJg53H8uEmHFqJ7+w==,type:str]
jwtSecret: ENC[AES256_GCM,data:ZIGOR53XCE1kGPQIpaY6ImbLMISbTpmC8R1oRFbjQGxHDG9dQuBigyjs5w==,iv:14WHd/RwniA7+YFGGrs+oyHx5Cc9G+D/IV9aBqn3KOI=,tag:+3LiFnV3Emx4i4efSRmthw==,type:str]
attic:
jwtToken: ENC[AES256_GCM,data:nAuryLY1xD9ur3qDcsJXPJPLFcPwssPKv+/BoivZ4aO6ec6rmOaYAkSRsBjgANyKhssbn0fhGsdyhMBwdHTXDnnIo67amFdxxSe+jJlGtcBXcekaOfD0Ug==,iv:h+h7CD8oI8u2ItzD/KKM16FKaG2xuVqIKh4r1TGjYtw=,tag:Er141FCK8usfzRRtrawHOw==,type:str]
databaseURL: ENC[AES256_GCM,data:caKIXEAOIqWl1tjZItabbdYjotKjMwrPYJKR8mj/Zs0LkrUhOzOlyybNIhHAR/5rqHZlAhimVnVIxh/95g6AJOCNNukbForHUbj/PxkVUG8E,iv:9uh9FyN7n7M+FMLe5G/Z3NmbCgqc3t2SRocc4xL/Qbc=,tag:4JAb3qJUMIkBrAIAuKhjWQ==,type:str]
databasePassword: ENC[AES256_GCM,data:Zwv5DKkihOUU/yL1tvbZl1+bPtI=,iv:C+6n6RHo1zTUJ/g0DWCWNxtLbusoYmDHMySsea5Jpz0=,tag:+pyw0WqnX5rMQxSl/48L5A==,type:str]
atuin:
databaseURL: ENC[AES256_GCM,data:IBmND/J2Pzz+CDCeNBRtErxSQIi8PeUuLGN4rIXKSLwZ6TGJKcNmbuxQDvWkCnI1crx3oak=,iv:wc3G/00oIuaiGF4mA2vIm35wFGxT0a3Ox3k1C9YBAx4=,tag:MQPcsR+vrD85DttYYi6jUw==,type:str]
databasePassword: ENC[AES256_GCM,data:qfWOmFfBOuguOfb1Z51F527ic3o=,iv:4Yx5rpzZHzRlfvZydcBNFRStEO0P4uIcjDqxgRgQmHE=,tag:pbJXcUdvul7nCrXQ9ylAdQ==,type:str]
immich:
databasePassword: ENC[AES256_GCM,data:fZtGYiHOhYjdzBxaSdnstjlOAJE=,iv:YV+o4upajDHtwWSU6Z9h3Ncl9fXbo65KT6YMqlh2evY=,tag:BWLRc3bdnS9M70jC3SZXlA==,type:str]
tailscale:
clientID: ENC[AES256_GCM,data:p5gGizzxAsRpW/a9GAkFZnc=,iv:DBJbEy2GbYUxvsY1MlvlVsLR+/DH/FYlQJIyxbt457c=,tag:Glm3/25WLO1VwKHWCm8wMw==,type:str]
clientSecret: ENC[AES256_GCM,data:/rB7WGmo0uyszwtMR1yAFzJ8F8USw1P7Cx5rKwOWXxPFTWZ88EaKOnDcDo5mwJVpe4OobGe+L83qwkq4lOPi,iv:QZ0dBSyYaWMD9+c2Mgmel2/3warW1f2fmeijm/HMTOE=,tag:VUxij/19MVpPhQ3SK4vvHw==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age189laethzry4ylnd790dmpuc4xjjuwqxruc76caj3ceqhqug4g9qs0upuvw
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBuYnNhbmtEQlpEYUV6Vklo
S1NKZkJ0ZGhOdHA3Y1lmUUUzTzh2Q1IxSUNnClZLdnJtUGNZTVUxZ0ozd1FDT0tL
VVhhcVJEaThjNWlUMGlxcG5VOVMwYjQKLS0tIGhJdHBVdnpZNzE0QmdRQzViVGpM
UGI4V2U1Ri9md3RHUVpvbFdtQ0NCNDQKl5QEg2FTMz6oTPF5s8pItduVJLPyLben
B/7KYQd6blJfM7mhF6eUQ61AWehvtzUhIPf57ZhFjpKj+Vzho4Bumw==
-----END AGE ENCRYPTED FILE-----
- recipient: age159whjxeyw94xmkkephmtlur8e85xd9d5vnvkwkcayfv7el0neqfq863yga
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA2ckF3dzArMTBrYnNjZmJo
MzV5NDJoNWpEQmo2TXFzUmdQUUlpa1dIblNZCkhGSklTYVdCa1hJOUoyeDUyc29L
Q05DVEY4M2QxOThXNTJjcTBWNkRQVHMKLS0tIHdyVS9zR1VzQzdTUXJFSlFObWpT
aHpYZ2VtdVBVTkxZbGFOYzRpbGltZHMKJs4E+CsthuzQZqA0Yip4G/1XK4SuoiRP
Lo65L33lfNibdSOeIygqnyo6GBwjD52TcNQpvzkVbr3M3hWlJs8wCA==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2024-07-21T17:48:17Z"
mac: ENC[AES256_GCM,data:iseLH9pFaZ65lwf0wT386TN/ysscGBuSrJ+8aYI+3YzxHzPjcySaTcnvZ9gBB5ZF8tD15CkwlegNl7P2atx0jvigpKouE1n6Xvv2gKHjDvtqF/gCpdu1EmQOC/krfhRVJm0CtYTGKM7OHjb+dxuwrfeyiftjkXnNsgnl4sg57jM=,iv:QdrzTIRFZgCPSrIKiczLKgXMvd1QoPztYFowj/5GHtc=,tag:gUgzbV4yrstl+caasAlzAg==,type:str]
pgp: []
unencrypted_suffix: _unencrypted
version: 3.8.1

View file

@ -11,6 +11,8 @@ k3s:
etcd:
peerCAKey: ENC[AES256_GCM,data:hr/Q9UqzA5IKK4o+mxyYQyXjTl1/guRLcjeBBaErxlvtQ0QarNWBMV0SuekCTiv0aGEUiXrY4u/39n6/VdVsxCdCDFDSuEJE5iEklpReKkW0gIvW3wIk98PC8xhNKjwRNnPwgE6TmOi8RSR9jdL9A3VKUXXo4XDkKPWrK6yHOJHKWgGOKX8+TP8HHwGGG6JvcMgOfbLJIvstsB9C17bOHt0KNaPKIpGN3gRkY7rJE/ORIJaOFxQB9WrcmweB2B7K3tlnVyLsY/wZsturZDJtK4CtVPEba7jXlpI4xnr0EANhRxs=,iv:gy8/RAxOxMrzFbPynQw1iDbXYEM4iYXJ+OfvQE9MAfU=,tag:vlnfHLzOm9ztsnaSIbL14w==,type:str]
serverCAKey: ENC[AES256_GCM,data:bn4BLlUSOHBOzjxO7oCmnWY3+yc/+J149QFfHOxrrFFblCkY3MEtXg9ogFsU+CYhZg6HZtOiecbo3V1fTe6dbSdWlUW7mHVoFP75aRuLjeEwX9Crgu/BVce7tcL0nFXvaBfaPngz3irzE2t2Dt+p1rVFWsMa2Ms2Wfzx9ZfVUbD0mOBgKmR+fGCHQBuUk4F9kzXA//J6iuk2VNh0+6YXBfTWCEsBllg8CvLgD9aU3DE7nS/xcbZcbpR3nWp8nQvezA5/cAEVTyuQfUO2u/tnYAoEE7t1Qo4RJrWlY30xTvXdq44=,iv:kXjH9JPjix64b+nWWIF/TBlZH9DsOYGTq5okQB3HKYs=,tag:MYM0xdi8AjaR0I/ZcpELAQ==,type:str]
tailscale:
authKey: ENC[AES256_GCM,data:nOxCntC28235lk47BRpIPuNRwmp87DbEY8c3QHIZLXfLvS+U1neoNNlAZ8ThQd4addLoPrJRH0LgDiWAUQ==,iv:7ymbpb78mdXm1/MaGe/ZrsJv8zYQNGm3//Hud7lCgPY=,tag:Wuwf2EKz2RBsaEbrxyNQ0w==,type:str]
sops:
kms: []
gcp_kms: []
@ -20,59 +22,77 @@ sops:
- recipient: age189laethzry4ylnd790dmpuc4xjjuwqxruc76caj3ceqhqug4g9qs0upuvw
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSByOVluY3hiZXVNNnlINHRG
K2Fwa0VIWDlETmZwUzNFbkNHZSttNHhUbnlVCjVVdWZHVzJCTkQyS3VlSXA0WFhY
TnR0TEZBQWwzNlVVdVl2K1RnUzE0UG8KLS0tIHhoU0xGM0xJR3ZwbHJNaTlPUHBQ
VzJCQjQ0NG5sbWFLK2phM2lEdlpuMG8Kw8ftkoEbYrA++cJSfUZRthK2cU+iIzNy
oYxlHm5va6JVZ/Sg05mxBB8kWX410/yCW9nH6ZkLrJ5YmpugePzr2g==
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBJc244cytDZ29QSG5LMzlU
RXBwZ2FWcGJTMCs2R0o5R2YyRXZOUklIc2hBCjJVS2l0bG1SK3Z4MWpYUWxaWXgv
ZWVYeCt2NFZGME5mYTVycUloZ09wYkUKLS0tIEtPMzRpamc0dkFoZS9JZzNEbzFI
MlBUT1RJanNzcTJkb25rWjZwbW0zeW8KsbrRPWw1qMOBCXZWkgdlVR1+tEqXYix2
sOV5n3DmeljL2NrKX8j4qRTuxpPQKuJ9FU7DAF8HRWRkyXTnGJ79ow==
-----END AGE ENCRYPTED FILE-----
- recipient: age159whjxeyw94xmkkephmtlur8e85xd9d5vnvkwkcayfv7el0neqfq863yga
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA0bXZMZlVRNWIydFdUcE9T
c0FMN3AvWXUyTUQ4U0VJL3IzcVpXTnVGOTBNCk5rWFlWeVA4b0JRZXY3NHhSbEVp
RlA5cGs0SVg1Rk4xZXBVdWtUcHFURjgKLS0tIHlwTWJQR09DZnBUTWY2NWdFZWZN
RkxTQ1p4VG9sZ0UrWW9ZWnZLNjZtQW8Kax+WCtGOaNYdkmV/Ty2pP9JFgRaHe/Xn
C1o5W2hMBSoLcC14mlokdVKp81dPDQuuxLtDcCgCQU7aOzvWO3CqKg==
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBJblNBTTFuNFNVSkhFRkhq
ZmlPU1ptRm9VblB6di9LeStVcDV4VGZ6cEYwCngrVVVqdUxVY0RrWFRCK3FxTWh4
cEhHOGM2Z09CREZSVnFRSnVVQW51M1UKLS0tIEl1K1VoMjhpeUg5UXBsQWNiU0FP
UFE3RDF1bXBZOVVFbVBBWWs5RlZOdDQK6LXDGPl9HBmbYgVlmtjiT2BmQXJ/3K7e
2eFhmEzFzpE8DS0X7pIV6dSYWHku1CslwlsQK60rJr2ipve6u62sdA==
-----END AGE ENCRYPTED FILE-----
- recipient: age1unkshctcpucc298kmw9a0qzvtjzgdnjytrxr5p750dv0z95feymqpn68qf
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBoMkNqQnY2TkZRaUJaTjAz
TUxVSUhyMzRsMm1OYVllM001UmpvL2lNcXhNCkRxQlMxZHBrNlNlNnIrQUY1NHpn
dzNFeGhlbE1wMlBwN3RxWUZyT1kyYUkKLS0tIGhpRGN5WFRCT1I5eGlhdUhWc3FR
WHZKWTlmN2llUndzeEdGV0xDSGZqZ2sKlZ0CGVfCtDdRl2vW7BxVkrBMFOZ5Fdk6
9Z9oqBOde0Mp9FGEwnt+IC79FKIknIyYfMf9tpo9Is85/IvyDHTMwA==
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBJK0Mrc2pnZGZxSW16Tk1j
dE9WaXdUYmtjMVIrdHQ1VkRJSWlhVm5kdzFjCjhkZGNqTEdVblFMdm0yRjZ0TkFz
UWxIL2JEajRXYmhBb1ZFQ3VzQzRsaGsKLS0tIGgvcXVocnlNWGJGaXZ6cXJ3Mmla
U3h5dnlXRnFYQUlYNG5wWTNsSGU1UUkKc5jEmW19ST7/MgR4igBhuB6ic93Qy6GP
jtpUMeH0DDU3Z1/f5400DrHwWgUQRb3Gv8zV1LndzqJMaXL1Afiwdg==
-----END AGE ENCRYPTED FILE-----
- recipient: age1upnqu4rpxppdw9zmqu8x3rnaqq2r6m82y25zvry5cec63vjsd9gqtl9e02
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBIT1VNTTVjcy9rakUwVFBY
UGh6L2l0Q2I1bFlWcG1XYVJiMkhYMnA4YlFzCnRXVmZDWnY4Zi9TK3NCc3huaC9W
dDQ5ek5EY2FQeTVhUWpHVkV3TXhxbncKLS0tIDNKN0hYNjVUdHNaMXYzdUE5Mm85
NSt2OGp4VENRS1pLWHNQVFdhRU9STXMKXfcamWoU/bz39wstSEEuIJZknZpoOPzE
W/kDJ5xytfydUkYqoIiGH7s1JyHyCpqbRplPrjQZCmNDvXtcq3L/uQ==
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBOMXorems4ZVh4T2NQZFFj
a1psYXBFZ3ZXWnYxRnQzOHB5dDlKS2RaKzFNCnpwekhESGMzSzFYVEU3a3V5c2Rq
OGpZa0I5RVVQSzhZZkFlY3FjMXlXV00KLS0tIGRNODFGT2swb0FOZzRDR1FFbzZ5
VzQ3bUkxeTlLZnNCd1lKc2h4enI1SzgK7vhR+pyRiVFgyt75MYt84pqjoUHsPj1k
42d2AKB1ZWiD98/vN8LOAGlIyfRCUJB1j9rw3W/PkFs08qvHRLqy3Q==
-----END AGE ENCRYPTED FILE-----
- recipient: age108fn93z2c55g9dm9cv5v4w47pykf3khz7e3dmnpv5dhchwnaau0qs20stq
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBZS1hHTTJudnUrQzJDYUh6
ZEhjYTFaeXRwQXRrL3g1b05LaXdWMit6M2t3Ck81NVZyTUE0RVo5ZmdRcUZ0ZTBx
MkdUVDRyZ3Bmd21FZkdzckp3eGp1bmMKLS0tIFk5blFPMUlPdXJ2NThYME8reGxv
cXlZMTMvcFhScVBObXZRQXQ4WkI2d1EKFYLSfJlDx2BlBWUebBOy/PV0gu0KyhY8
WSYL992HR043ENrbmkfbpVHaOZi8imyNKa7FWpLaj/Nuwv/Kfvy7uQ==
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBjeG9KTkd6UysxaG8rV0I3
ODFKOWFBaXZpNEdCSXZxeDBFM2xVSjVsdjFVClhxV2lLaCt3cDNMS3ltQUNBTFBX
NWZqRGU3NzdBNzhHcGFDQ2syT0NRZmsKLS0tIG1oZ2dtV2tkbURUTUE2RlJZaXky
VlFCdENqUnFJOVFVMHRXQ05RZUVnUTgKESkjiK2JwEGyXtET794bzGkURLix4kkP
JB57xHBf4B1/UXu+h+jWQAotQSOFFa7IbtDOVejqT8dHqGDs+16HeQ==
-----END AGE ENCRYPTED FILE-----
- recipient: age1th8rdw4fs3vmgy9gzc0k9xy88tddjj4vasepckfx9h4nlzsg3q3q4cjgwu
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBqZU9Wb0JLTG1kOWZ1YjJQ
SUh1NWxqS0ZGa0xEOHFUOWpYR3hTM2dQRWdZCklBb25LajV6RnZhOUVKLzJjY3lz
MTYvNmRPTEgrc0dJK0g5N2RkdEt0RUUKLS0tIHdxcFJCaTg4ZE5TQVVKS3k5K3Bo
Q0VudEFzRUFGWlNJcHc0VzZJUVRwbHMKjTMUFFbHhDeP7QLmR64yqDEh4naazL9f
etbOvYUkgj4IaB9UgDerG4MjyyHiVVY9Md8Jqe3dOQN0rqXRxNOW1g==
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB4SFpkRlNQcjBFUkRjRjhk
TW5kQzhTbE1wSmFTZ3l0cTNPSmdydC9nRTBjCmxneSt3TGhzYzhjb0tEY2xMT2tC
bE4wOUgzMFR1dTVOUDFRUXdWOFZQcFEKLS0tIHR5cHBwQkN1d3ZMYitWOG9JRVJh
ZU5tMTM5L3c4QVN6YjZBZEJkRk5yYWcK7TW19C9wI9FMWIDhn8otcNjLwNh1n5lr
f92zaPrmHWC6JVxeKmm3wB3uvONvW0v82DKZJI/gxl41zJTXsapT+Q==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2024-06-15T19:11:54Z"
mac: ENC[AES256_GCM,data:OR2ibRtOtUwIuQ27c5PHRzdvKoTGMl4Ll7/hmuIB40amBqs54Cku/SEOqw2kHG31ii3cK5XbyaR6tC8Lvu07tn1iutbU8WjN8Ww+txr0FgdbeTYRIWr9aClAKmR3Ek1Ky2NsA2OaTm02Um6W0xX78Ran04Gjuf8vpaXSRYVsPbA=,iv:w9M3O5DHlm7Jq9vjfxaq34petJtgMeEUHZ0fZKycOjs=,tag:ShLvjfZJV3FARa4An+YfQA==,type:str]
- recipient: age1h5q9ul9f8vd7w7s2fvmpytaghgpv97a9r237agwzc52c76xsdegsugml73
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBaWUZMVEsxQVFnYWhiZjFQ
bTQxaC9odHpXS0F5VHBrRVZ0UE1yZnJmTVRBCm9Uc2hTdUNOQU9JYVV6MHNiTmor
SUJrOXhta2lqMERWaVNseEpIaDNubEEKLS0tIFBiUG9CaEF0NzhaRm45MUUyYW1L
TnZGZVZONkFZWDZpNGtSek5Ka25PSUUKEXTRK8MsGnSkT5tPX+nFYN1Mons+nEZu
EFCtGzSuAeZWCW4We+264dDZjwlfdj47oBPCk8iwx9N1yoR1BF4LfQ==
-----END AGE ENCRYPTED FILE-----
- recipient: age1smqas3tre2hptnyn72fdzghqcnej48066l4hp6y98n8lkpm3ds4s8t8s0w
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA5cG9WMzJRbXFXS2JyTzY5
clh5QmZXc0dYcEU5UkRzMkZKczQwelVhYWhNCi9md3hwSnVRMVU5TU5UZjBWcG52
WTZRNVlXOXgrcXdKeTNDTEpYcXdrMkEKLS0tIFIwMW5BV1pSWUF3UW5DaHUvVm1Q
dUZJVWRLeFFnV1ZpVThBZGtxai9oMlkKja55rkW/ZthR2AbscOIgHRfYDUCxIAm0
HKgELNQDz2QXFwS98aHeelLCLufb/hyWBn1y4kx+WWppAtQewByhkA==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2024-07-22T20:27:25Z"
mac: ENC[AES256_GCM,data:zIY2DotoqnJmz/aBRHq+4ZLi/Smi1Bn4phmFsngMY1w0LVauKX95jwKwOhE0PfvIyd8E54N+BoCQ3QmRMv3uvBddScPNSGJgdgDRn8LDWol4/8avDoPFISpNvdS32Ac00UDnMeBEkW4S/oo9CwYHCpEsiwjL6FgjCX/KOK++kzA=,iv:sGCFNJ6gsEOskMlLWUnR9Gnsp8Emc0vdBAl4WN2A1f8=,tag:fHi4CR+exp1roW7UOzhMmQ==,type:str]
pgp: []
unencrypted_suffix: _unencrypted
version: 3.8.1

View file

@ -2,6 +2,8 @@ atlas: ENC[AES256_GCM,data:TgYf6Jck5L2feQyvyUb2FcLm2M3aSwN0W0xdH6qLU3L4q7LSeB0yB
jefke: ENC[AES256_GCM,data:PH+4rNhATssck8cmKZrhw4VoyHtkqKlRt1wH+BlOvxdhw5GNDsiT4DOf0cveJ090XcOpkAxEf2yqnpIiZhallKVMJS3aFxpNpNw=,iv:QJQZo6x4PE3mNIK8KaQ16BlJeZsdorX683lpf2FjAJk=,tag:rljZMJ/xv7kbkPKP/pqZ9A==,type:str]
lewis: ENC[AES256_GCM,data:rdm5YMnWkg2MpY2ZGYi11HHGJzY/ssKA5DCv/wbcf8qIXRhRt5heA1un1zCJdYBKlxsVGOuQEtHMKuA/vLYqNnIXxr5NxDxhgIo=,iv:y+fyLns2B/JDuumHIuk4p9PybXf8isd7Ve+1gcX0mp8=,tag:VoAORxiU+6WbhAgkm9lAgQ==,type:str]
warwick: ENC[AES256_GCM,data:8ABH+BMdKjLaVG1FkLWksJRtIO8Vu/j1USLGaAAFi6KA/o/S2X936doUl3/D6MKz71i8FwEH410K4JcGJXVboY45Dfp2g1/6bog=,iv:pvXBQcWs/dFSEVe807bpQQKI9n0A/IUxSG0Z1Sl00/Y=,tag:l/sTOe6sNJ34Z2UmmBBBNw==,type:str]
talos: ENC[AES256_GCM,data:DD70h1qX06cuQ+2S6EIxdBWqkECZFO3UmusKvLKXoocuJfA7CU4sM03GJxnlff26mv53LyMUtZsPWgWWQNrwrICXmhg/I4CDAuA=,iv:zoWlL1SjyxXjemnkbQBtgutfXL41/eqpLk6l/fXntmQ=,tag:v64nkexcG9Y2gCqAE8kcwA==,type:str]
pikvm: ENC[AES256_GCM,data:CrOdqkb+MJK7t6+3mkm+MdUqwBRtYY1jMsQvtBOoZYF3h1vPidJRAHZvX5n5aBsbf5DFCcWXRs5v7I18ANyA1TbkZVKLS0PsrQU=,iv:LMo9zpoRF3EEtQ7GtmIVRNsyVFga7Vcvpv7DxHQRhjo=,tag:Ca3KqKmZJzyEcs6SAiuJZg==,type:str]
sops:
kms: []
gcp_kms: []
@ -26,8 +28,8 @@ sops:
eDdFZERVZUJ2QmYvTUlGMlFFNTlna00KLil0QQySKHDAdFxIZAlWvkCRT2v8RNL7
CWIs/HhjmGk0BEoXIVlmbnAVNATABCCWnUTHFKvvW/8KIDhwgu72Eg==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2024-06-15T19:19:59Z"
mac: ENC[AES256_GCM,data:Y+aBXyowjQTXgteYLU2j1I5cv9UFU/ylrVy9QQub3NLzBbpW4pb+oI2wVcZI0K40jwSX7xOEjgGOtjdLRGTG8/xHm/yf+R0Wgs7fyIxOzcZv8XBadR6f2jUnAPA74ZDQ9ngwh1xyJteQPLwr+XPuGNlylYn/mj/EcwFs1SCok5A=,iv:/7XR2P/nfEicarsCALXhKIbvzsqUYhg9SgT2Z7P3W20=,tag:+uHRHU+WVfWefjHcH/C4fA==,type:str]
lastmodified: "2024-08-20T20:59:29Z"
mac: ENC[AES256_GCM,data:KFBbDkz2ZhG+j/yGVK6spADmNM0t73C0QyD7/KoV/gLCD4jwWRxfAxCAUNlBeHIFrZDfyW2KR04oPA2LBDqASnQcITgRYhbNj51wFjiU6kCT0LK9uIx+hNo3RuAtw21/2qsg9Xf0PvAC33yB+iaNrDDBtiWyg2Aq+q0wdMzXRfU=,iv:MIF4iqOCSaoLyFuyZ32rCN6qCGtlWoNtkt7mXE/njVQ=,tag:rCsyRPNSAam65zarTKLnHg==,type:str]
pgp: []
unencrypted_suffix: _unencrypted
version: 3.8.1

9
shell.nix Normal file
View file

@ -0,0 +1,9 @@
# Development shell for this repository, instantiated for every default
# system via flake-utils. Provides Ansible (used by the PiKVM playbook).
{ flake-utils, nixpkgs, ... }:
flake-utils.lib.eachDefaultSystem (system:
  let
    # Package set for the current system.
    packages = nixpkgs.legacyPackages.${system};
  in
  {
    devShells.default = packages.mkShell {
      buildInputs = [ packages.ansible ];
    };
  })

20
utils/default.nix Normal file
View file

@ -0,0 +1,20 @@
# Shared utilities, evaluated once and merged into the flake's outputs.
# Returns system-dependent helpers (per-system package set) merged with
# system-independent constants.
{ nixpkgs, flake-utils, ... }:
let
  # Attributes that need a concrete package set, built for every default system.
  systemAttrs = flake-utils.lib.eachDefaultSystem (system:
    let
      # FIX: the nixpkgs flake output is `legacyPackages` (capital P);
      # `legacypackages` is not an attribute and made this evaluation fail.
      pkgs = nixpkgs.legacyPackages.${system};
      lib = pkgs.lib;
    in
    {
      net = import ./net.nix lib;
    });
  # Attributes independent of the target system.
  nonSystemAttrs = rec {
    globals = import ./globals.nix;
    # nix-snapshotter image reference: literal "nix:0" prefix followed by the
    # absolute path of the image tarball — NOTE(review): format assumed from
    # nix-snapshotter conventions; confirm against its documentation.
    imagePath = name: "nix:0${globals.imageDir}/${name}.tar";
  };
  allAttrs = systemAttrs // nonSystemAttrs;
in
allAttrs

65
utils/globals.nix Normal file
View file

@ -0,0 +1,65 @@
# Global constants shared across the cluster configuration:
# public/router addresses, per-service load-balancer IPv4s, and the pinned
# container image references resolved by the deployments.
{
  # Publicly routable addresses of the router.
  routerPublicIPv4 = "192.145.57.90";
  routerPublicIPv6 = "2a0d:6e00:1a77::1";
  bind9Ipv6 = "2a0d:6e00:1a77:30::134";
  # Load balancer IPv4
  traefikIPv4 = "192.168.30.128";
  kmsIPv4 = "192.168.30.129";
  inbucketIPv4 = "192.168.30.130";
  piholeIPv4 = "192.168.30.131";
  gitIPv4 = "192.168.30.132";
  transmissionIPv4 = "192.168.30.133";
  bind9IPv4 = "192.168.30.134";
  dnsmasqIPv4 = "192.168.30.135";
  minecraftIPv4 = "192.168.30.136";
  jellyseerrIPv4 = "192.168.30.137";
  syncthingIPv4 = "192.168.30.138";
  longhornIPv4 = "192.168.30.139";
  radarrIPv4 = "192.168.30.140";
  prowlarrIPv4 = "192.168.30.141";
  sonarrIPv4 = "192.168.30.142";
  bazarrIPv4 = "192.168.30.143";
  paperlessIPv4 = "192.168.30.144";
  radicaleIPv4 = "192.168.30.145";
  freshrssIPv4 = "192.168.30.146";
  immichIPv4 = "192.168.30.147";
  nextcloudIPv4 = "192.168.30.148";
  # Host directory holding pre-built container image tarballs
  # (referenced by utils/default.nix when building image paths).
  imageDir = "/var/container_images";
  # Pinned container image references, one attribute per service.
  images = {
    jellyfin = "jellyfin/jellyfin:10.9.9";
    deluge = "linuxserver/deluge:2.1.1";
    jellyseerr = "fallenbagel/jellyseerr:1.9.2";
    radarr = "lscr.io/linuxserver/radarr:5.9.1";
    prowlarr = "lscr.io/linuxserver/prowlarr:1.21.2";
    sonarr = "lscr.io/linuxserver/sonarr:4.0.8";
    bazarr = "lscr.io/linuxserver/bazarr:1.4.3";
    atuin = "ghcr.io/atuinsh/atuin:18.3.0";
    postgres14 = "postgres:14";
    kms = "teddysun/kms:latest";
    paperless = "ghcr.io/paperless-ngx/paperless-ngx:2.11.6";
    redis7 = "docker.io/library/redis:7";
    nextcloud = "nextcloud:29.0.5";
    postgres15 = "postgres:15";
    inbucket = "inbucket/inbucket:edge";
    syncthing = "lscr.io/linuxserver/syncthing:1.27.10";
    radicale = "tomsquest/docker-radicale:3.2.3.0";
    ntfy = "binwiederhier/ntfy:v2.11.0";
    forgejo = "codeberg.org/forgejo/forgejo:8.0.1";
    pihole = "pihole/pihole:2024.07.0";
    immich = "ghcr.io/immich-app/immich-server:v1.114.0";
    immich-machine-learning = "ghcr.io/immich-app/immich-machine-learning:v1.114.0";
    # Redis/postgres images pinned by digest, matching Immich's release notes.
    immich-redis = "docker.io/redis:6.2-alpine@sha256:e3b17ba9479deec4b7d1eeec1548a253acc5374d68d3b27937fcfe4df8d18c7e";
    immich-postgres = "docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0";
    kitchenowl = "tombursch/kitchenowl:v0.5.2";
    cyberchef = "mpepping/cyberchef:latest";
    freshrss = "freshrss/freshrss:1.24.3";
    bind9 = "ubuntu/bind9:9.18-22.04_beta";
    dnsmasq = "dockurr/dnsmasq:2.90";
    # Image built and hosted by this project's own Forgejo instance.
    attic = "git.kun.is/home/atticd:fd910d91c2143295e959d2c903e9ea25cf94ba27";
    hedgedoc = "quay.io/hedgedoc/hedgedoc:1.9.9";
    minecraft = "itzg/minecraft-server:latest";
  };
}