refactor(kubenix): DRY deployment creation

feat(kubenix): Create applysets for remaining resources
This commit is contained in:
Pim Kunis 2024-07-17 16:25:41 +02:00
parent 5a4376f699
commit 7a915f0928
10 changed files with 324 additions and 301 deletions

View file

@ -1,17 +1,11 @@
{ self, pkgs, machines, dns, myLib, flake-utils, kubenix, nixhelm, blog-pim, ... }: flake-utils.lib.eachDefaultSystem { self, pkgs, machines, dns, myLib, flake-utils, kubenix, nixhelm, blog-pim, ... }: flake-utils.lib.eachDefaultSystem
(system: (system:
let let
mkKubenixPackage = module: kubenix.packages.${system}.default.override
{
specialArgs = { inherit myLib kubenix nixhelm system dns blog-pim machines; };
module = { imports = [ module ]; };
};
deployScript = (pkgs.writeScriptBin "kubenix" (builtins.readFile ./kubenix-deploy.sh)).overrideAttrs (old: { deployScript = (pkgs.writeScriptBin "kubenix" (builtins.readFile ./kubenix-deploy.sh)).overrideAttrs (old: {
buildCommand = "${old.buildCommand}\npatchShebangs $out"; buildCommand = "${old.buildCommand}\npatchShebangs $out";
}); });
mkDeployScript = kubernetes: applyset: namespace: mkDeployScript = kubernetes: applysetName: namespace:
let let
kubeconfig = kubernetes.kubeconfig or ""; kubeconfig = kubernetes.kubeconfig or "";
result = kubernetes.result or ""; result = kubernetes.result or "";
@ -28,17 +22,17 @@
--suffix PATH : "$out/bin" \ --suffix PATH : "$out/bin" \
--run 'export KUBECONFIG=''${KUBECONFIG:-${toString kubeconfig}}' \ --run 'export KUBECONFIG=''${KUBECONFIG:-${toString kubeconfig}}' \
--set MANIFEST '${result}' \ --set MANIFEST '${result}' \
--set APPLYSET 'applyset-${applyset}' \ --set APPLYSET 'applyset-${applysetName}' \
--set NAMESPACE '${namespace}' --set NAMESPACE '${namespace}'
''; '';
}; };
in in
wrappedDeployScript; wrappedDeployScript;
mkDeployScriptAndManifest = module: applyset: namespace: mkDeployScriptAndManifest = name: { module, namespace }:
let let
kubernetes = (kubenix.evalModules.${system} { kubernetes = (kubenix.evalModules.${system} {
specialArgs = { inherit namespace myLib blog-pim dns; }; specialArgs = { inherit namespace myLib blog-pim dns nixhelm system machines; };
module = { kubenix, ... }: module = { kubenix, ... }:
{ {
@ -51,7 +45,7 @@
]; ];
config = { config = {
kubenix.project = applyset; kubenix.project = name;
kubernetes.namespace = namespace; kubernetes.namespace = namespace;
}; };
}; };
@ -59,53 +53,126 @@
in in
{ {
manifest = kubernetes.result; manifest = kubernetes.result;
deploy = mkDeployScript kubernetes applyset namespace; deploy = mkDeployScript kubernetes name namespace;
};
deployers = {
bootstrap-default = {
module = "${self}/kubenix-modules/bootstrapDefault.nix";
namespace = "default";
};
bootstrap-kube-system = {
module = "${self}/kubenix-modules/bootstrapKubeSystem.nix";
namespace = "kube-system";
};
cyberchef = {
module = "${self}/kubenix-modules/cyberchef.nix";
namespace = "static-websites";
};
freshrss = {
module = "${self}/kubenix-modules/freshrss.nix";
namespace = "freshrss";
};
radicale = {
module = "${self}/kubenix-modules/radicale.nix";
namespace = "radicale";
};
kms = {
module = "${self}/kubenix-modules/kms.nix";
namespace = "kms";
};
atuin = {
module = "${self}/kubenix-modules/atuin.nix";
namespace = "atuin";
};
blog = {
module = "${self}/kubenix-modules/blog.nix";
namespace = "static-websites";
};
nextcloud = {
module = "${self}/kubenix-modules/nextcloud.nix";
namespace = "nextcloud";
};
hedgedoc = {
module = "${self}/kubenix-modules/hedgedoc.nix";
namespace = "hedgedoc";
};
kitchenowl = {
module = "${self}/kubenix-modules/kitchenowl.nix";
namespace = "kitchenowl";
};
forgejo = {
module = "${self}/kubenix-modules/forgejo";
namespace = "forgejo";
};
paperless = {
module = "${self}/kubenix-modules/paperless.nix";
namespace = "paperless";
};
syncthing = {
module = "${self}/kubenix-modules/syncthing.nix";
namespace = "syncthing";
};
pihole = {
module = "${self}/kubenix-modules/pihole.nix";
namespace = "dns";
};
immich = {
module = "${self}/kubenix-modules/immich.nix";
namespace = "immich";
};
attic = {
module = "${self}/kubenix-modules/attic.nix";
namespace = "attic";
};
inbucket = {
module = "${self}/kubenix-modules/inbucket.nix";
namespace = "inbucket";
};
dnsmasq = {
module = "${self}/kubenix-modules/dnsmasq.nix";
namespace = "dns";
};
bind9 = {
module = "${self}/kubenix-modules/bind9";
namespace = "dns";
};
media = {
module = "${self}/kubenix-modules/media.nix";
namespace = "media";
};
traefik = {
module = "${self}/kubenix-modules/traefik.nix";
namespace = "kube-system";
};
minecraft = {
module = "${self}/kubenix-modules/minecraft.nix";
namespace = "minecraft";
};
}; };
in in
{ {
kubenix.all.deploy = mkKubenixPackage "${self}/kubenix-modules/all.nix"; kubenix = builtins.mapAttrs mkDeployScriptAndManifest deployers;
kubenix.bootstrap.deploy = mkKubenixPackage "${self}/kubenix-modules/base.nix";
kubenix.cyberchef = mkDeployScriptAndManifest
"${self}/kubenix-modules/cyberchef.nix" "cyberchef" "static-websites";
kubenix.freshrss = mkDeployScriptAndManifest
"${self}/kubenix-modules/freshrss.nix" "freshrss" "freshrss";
kubenix.radicale = mkDeployScriptAndManifest
"${self}/kubenix-modules/radicale.nix" "radicale" "radicale";
kubenix.kms = mkDeployScriptAndManifest
"${self}/kubenix-modules/kms.nix" "kms" "kms";
kubenix.atuin = mkDeployScriptAndManifest
"${self}/kubenix-modules/atuin.nix" "atuin" "atuin";
kubenix.blog = mkDeployScriptAndManifest
"${self}/kubenix-modules/blog.nix" "blog" "static-websites";
kubenix.nextcloud = mkDeployScriptAndManifest
"${self}/kubenix-modules/nextcloud.nix" "nextcloud" "nextcloud";
kubenix.hedgedoc = mkDeployScriptAndManifest
"${self}/kubenix-modules/hedgedoc.nix" "hedgedoc" "hedgedoc";
kubenix.kitchenowl = mkDeployScriptAndManifest
"${self}/kubenix-modules/kitchenowl.nix" "kitchenowl" "kitchenowl";
kubenix.forgejo = mkDeployScriptAndManifest
"${self}/kubenix-modules/forgejo" "forgejo" "forgejo";
kubenix.paperless = mkDeployScriptAndManifest
"${self}/kubenix-modules/paperless.nix" "paperless" "paperless";
kubenix.syncthing = mkDeployScriptAndManifest
"${self}/kubenix-modules/syncthing.nix" "syncthing" "syncthing";
kubenix.pihole = mkDeployScriptAndManifest
"${self}/kubenix-modules/pihole.nix" "pihole" "dns";
kubenix.immich = mkDeployScriptAndManifest
"${self}/kubenix-modules/immich.nix" "immich" "immich";
kubenix.attic = mkDeployScriptAndManifest
"${self}/kubenix-modules/attic.nix" "attic" "attic";
kubenix.inbucket = mkDeployScriptAndManifest
"${self}/kubenix-modules/inbucket.nix" "inbucket" "inbucket";
kubenix.dnsmasq = mkDeployScriptAndManifest
"${self}/kubenix-modules/dnsmasq.nix" "dnsmasq" "dns";
kubenix.bind9 = mkDeployScriptAndManifest
"${self}/kubenix-modules/bind9" "bind9" "dns";
kubenix.media = mkDeployScriptAndManifest
"${self}/kubenix-modules/media.nix" "media" "media";
kubenix.traefik = mkDeployScriptAndManifest
"${self}/kubenix-modules/traefik.nix" "traefik" "kube-system";
kubenix.minecraft = mkDeployScriptAndManifest
"${self}/kubenix-modules/minecraft.nix" "minecraft" "minecraft";
}) })

View file

@ -1,12 +0,0 @@
# Aggregator module: pulls in every kubenix submodule that together make up
# the full cluster configuration. Imported as a single deployable unit.
{
imports = [
# ./argo.nix
./base.nix
./longhorn.nix
./metallb.nix
./cert-manager.nix
./custom
./volumes.nix
./custom-types.nix
];
}

View file

@ -1,94 +0,0 @@
# We deploy several resources that rely on "custom resource definitions".
# We must first import these resources definitions, before deploying resources that depend on them.
# This module installs the cluster's foundational pieces: cert-manager CRDs,
# the metallb / cert-manager / longhorn Helm releases, all application
# namespaces, and per-node Kubernetes labels taken from the machine registry.
{ lib, kubenix, nixhelm, system, machines, ... }: {
imports = [
kubenix.modules.k8s
kubenix.modules.helm
];
config = {
# Project name; used by kubenix to label/track the generated manifests.
kubenix.project = "home";
kubernetes = {
kubeconfig = "~/.kube/config";
# TODO: These were copied from https://github.com/cert-manager/cert-manager/releases/download/v1.14.4/cert-manager.crds.yaml
# See https://cert-manager.io/docs/installation/helm/
# Seems kubenix cannot import a list of resources, but only individual resources.
# Might be good to create a PR for this.
imports = [
./cert-manager-manifests/certificaterequest.yaml
./cert-manager-manifests/certificate.yaml
./cert-manager-manifests/challenge.yaml
./cert-manager-manifests/clusterissuer.yaml
./cert-manager-manifests/issuer.yaml
./cert-manager-manifests/order.yaml
];
# Helm charts rendered via nixhelm's pre-fetched chart derivations.
helm.releases = {
metallb = {
chart = nixhelm.chartsDerivations.${system}.metallb.metallb;
includeCRDs = true;
};
cert-manager = {
chart = nixhelm.chartsDerivations.${system}.jetstack.cert-manager;
# CRDs are imported explicitly above, so the chart must not install them.
includeCRDs = false;
};
# argo-workflows = {
# chart = nixhelm.chartsDerivations.${system}.argoproj.argo-workflows;
# includeCRDs = true;
# };
longhorn = {
chart = nixhelm.chartsDerivations.${system}.longhorn.longhorn;
includeCRDs = true;
values = {
persistence.defaultClassReplicaCount = 2;
defaultSettings = {
defaultDataPath = "/mnt/longhorn";
storageMinimalAvailablePercentage = 0;
allowRecurringJobWhileVolumeDetached = true;
backupTarget = "nfs://lewis.dmz:/mnt/longhorn/persistent/longhorn-backup";
};
};
};
};
};
resources = {
# One namespace per application; empty attrsets create bare namespaces.
namespaces = {
static-websites = { };
freshrss = { };
radicale = { };
kms = { };
atuin = { };
nextcloud = { };
hedgedoc = { };
kitchenowl = { };
forgejo = { };
paperless = { };
syncthing = { };
immich = { };
attic = { };
inbucket = { };
dns = { };
media = { };
minecraft = { };
};
# Apply node labels declared in the machine registry; machines without
# a kubernetesNodeLabels attribute are skipped.
nodes =
let
machinesWithKubernetesLabels = lib.filterAttrs (name: machine: machine.kubernetesNodeLabels != null) machines;
in
builtins.mapAttrs
(name: machine: {
metadata.labels = machine.kubernetesNodeLabels;
})
machinesWithKubernetesLabels;
};
};
};
}

View file

@ -0,0 +1,164 @@
# Bootstrap module for the "default" namespace applyset.
# Installs the metallb and longhorn Helm releases, creates all application
# namespaces, labels nodes from the machine registry, and declares
# longhorn/metallb resources plus NFS-backed persistent volumes.
{ lib, nixhelm, system, machines, ... }: {
kubernetes = {
helm.releases = {
metallb = {
chart = nixhelm.chartsDerivations.${system}.metallb.metallb;
includeCRDs = true;
};
# argo-workflows = {
# chart = nixhelm.chartsDerivations.${system}.argoproj.argo-workflows;
# includeCRDs = true;
# };
longhorn = {
chart = nixhelm.chartsDerivations.${system}.longhorn.longhorn;
includeCRDs = true;
values = {
persistence.defaultClassReplicaCount = 2;
defaultSettings = {
defaultDataPath = "/mnt/longhorn";
storageMinimalAvailablePercentage = 0;
allowRecurringJobWhileVolumeDetached = true;
backupTarget = "nfs://lewis.dmz:/mnt/longhorn/persistent/longhorn-backup";
};
};
};
};
resources = {
# One namespace per application; empty attrsets create bare namespaces.
namespaces = {
static-websites = { };
freshrss = { };
radicale = { };
kms = { };
atuin = { };
nextcloud = { };
hedgedoc = { };
kitchenowl = { };
forgejo = { };
paperless = { };
syncthing = { };
immich = { };
attic = { };
inbucket = { };
dns = { };
media = { };
minecraft = { };
};
# Apply node labels declared in the machine registry; machines without
# a kubernetesNodeLabels attribute are skipped.
nodes =
let
machinesWithKubernetesLabels = lib.filterAttrs (name: machine: machine.kubernetesNodeLabels != null) machines;
in
builtins.mapAttrs
(name: machine: {
metadata.labels = machine.kubernetesNodeLabels;
})
machinesWithKubernetesLabels;
# Expose the Longhorn web UI via Traefik with a Let's Encrypt certificate.
ingresses.longhorn = {
metadata.annotations = {
"cert-manager.io/cluster-issuer" = "letsencrypt";
"traefik.ingress.kubernetes.io/router.entrypoints" = "localsecure";
};
spec = {
ingressClassName = "traefik";
rules = [{
host = "longhorn.kun.is";
http.paths = [{
path = "/";
pathType = "Prefix";
backend.service = {
name = "longhorn-frontend";
port.number = 80;
};
}];
}];
tls = [{
secretName = "longhorn-tls";
hosts = [ "longhorn.kun.is" ];
}];
};
};
# Nightly Longhorn backup job targeting the NFS backup target above.
recurringJobs.backup-nfs.spec = {
cron = "0 1 * * *"; # One o'clock at night
task = "backup";
retain = 2; # We don't need many, as we also make Borg backups.
concurrency = 1;
};
# MetalLB address pool (IPv4 + IPv6) advertised via L2.
ipAddressPools.main.spec.addresses = [ "192.168.30.128-192.168.30.200" "2a0d:6e00:1a77:30::2-2a0d:6e00:1a77:30:ffff:ffff:ffff:fffe" ];
l2Advertisements.main.metadata = { };
# Static NFS-backed persistent volumes shared between workloads.
persistentVolumes = {
music-syncthing.spec = {
capacity.storage = "1Gi";
accessModes = [ "ReadWriteMany" ];
nfs = {
server = "lewis.dmz";
path = "/mnt/longhorn/persistent/media/music";
};
};
media-media.spec = {
capacity.storage = "1Gi";
accessModes = [ "ReadWriteMany" ];
nfs = {
server = "lewis.dmz";
path = "/mnt/longhorn/persistent/media";
};
};
};
};
};
# Custom "lab" options: declare Longhorn persistent volumes (name -> size)
# that a project-local module expands into PV/PVC resources.
lab = {
longhorn.persistentVolume = {
freshrss.storage = "1Gi";
radicale.storage = "200Mi";
atuin.storage = "300Mi";
atuin-db.storage = "300Mi";
nextcloud.storage = "50Gi";
nextcloud-db.storage = "400Mi";
hedgedoc-uploads.storage = "50Mi";
hedgedoc-db.storage = "100Mi";
kitchenowl.storage = "100Mi";
forgejo.storage = "20Gi";
paperless-data.storage = "10Gi";
paperless-redisdata.storage = "20Mi";
paperless-db.storage = "150Mi";
syncthing.storage = "400Mi";
pihole-data.storage = "750Mi";
pihole-dnsmasq.storage = "16Mi";
immich.storage = "50Gi";
immich-db.storage = "5Gi";
attic.storage = "15Gi";
attic-db.storage = "150Mi";
jellyfin.storage = "5Gi";
transmission.storage = "25Mi";
jellyseerr.storage = "75Mi";
radarr.storage = "300Mi";
prowlarr.storage = "150Mi";
sonarr.storage = "150Mi";
bazarr.storage = "25Mi";
minecraft.storage = "1Gi";
};
};
}

View file

@ -0,0 +1,36 @@
# Bootstrap module for the kube-system namespace applyset.
# Imports the cert-manager CRDs, installs the cert-manager Helm release,
# and creates the Let's Encrypt ClusterIssuer used by ingresses.
{ nixhelm, system, ... }: {
kubernetes = {
# TODO: These were copied from https://github.com/cert-manager/cert-manager/releases/download/v1.14.4/cert-manager.crds.yaml
# See https://cert-manager.io/docs/installation/helm/
# Seems kubenix cannot import a list of resources, only individual resources.
# Might be good to create a PR for this.
imports = [
./cert-manager-manifests/certificaterequest.yaml
./cert-manager-manifests/certificate.yaml
./cert-manager-manifests/challenge.yaml
./cert-manager-manifests/clusterissuer.yaml
./cert-manager-manifests/issuer.yaml
./cert-manager-manifests/order.yaml
];
helm.releases = {
cert-manager = {
chart = nixhelm.chartsDerivations.${system}.jetstack.cert-manager;
# CRDs are imported explicitly above, so the chart must not install them.
includeCRDs = false;
namespace = "kube-system";
};
};
# Cluster-wide ACME issuer solving HTTP-01 challenges through Traefik.
resources.clusterIssuers.letsencrypt = {
spec.acme = {
server = "https://acme-v02.api.letsencrypt.org/directory";
email = "pim@kunis.nl";
privateKeySecretRef.name = "letsencrypt-private-key";
solvers = [{
selector = { };
http01.ingress.class = "traefik";
}];
};
};
};
}

View file

@ -1,15 +0,0 @@
# Let's Encrypt ClusterIssuer: cluster-wide ACME issuer solving HTTP-01
# challenges through the Traefik ingress class.
{
kubernetes.resources.clusterIssuers.letsencrypt = {
metadata.namespace = "kube-system";
spec.acme = {
server = "https://acme-v02.api.letsencrypt.org/directory";
email = "pim@kunis.nl";
privateKeySecretRef.name = "letsencrypt-private-key";
solvers = [{
selector = { };
http01.ingress.class = "traefik";
}];
};
};
}

View file

@ -1,44 +0,0 @@
# Longhorn extras: expose the web UI via Traefik with a Let's Encrypt
# certificate, and schedule a nightly backup job.
{ lib, nixhelm, system, ... }: {
config = {
kubernetes = {
resources = {
ingresses.longhorn = {
metadata.annotations = {
"cert-manager.io/cluster-issuer" = "letsencrypt";
"traefik.ingress.kubernetes.io/router.entrypoints" = "localsecure";
};
spec = {
ingressClassName = "traefik";
rules = [{
host = "longhorn.kun.is";
http.paths = [{
path = "/";
pathType = "Prefix";
backend.service = {
name = "longhorn-frontend";
port.number = 80;
};
}];
}];
tls = [{
secretName = "longhorn-tls";
hosts = [ "longhorn.kun.is" ];
}];
};
};
# Nightly Longhorn backup of all volumes tagged for the "backup" task.
recurringJobs.backup-nfs.spec = {
cron = "0 1 * * *"; # One o'clock at night
task = "backup";
retain = 2; # We don't need many, as we also make Borg backups.
concurrency = 1;
};
};
};
};
}

View file

@ -1,7 +0,0 @@
# MetalLB address pool (IPv4 + IPv6) advertised on the local network via L2.
# TODO: These resources should probably exist within the kube-system namespace.
{
kubernetes.resources = {
ipAddressPools.main.spec.addresses = [ "192.168.30.128-192.168.30.200" "2a0d:6e00:1a77:30::2-2a0d:6e00:1a77:30:ffff:ffff:ffff:fffe" ];
l2Advertisements.main.metadata = { };
};
}

View file

@ -2,13 +2,10 @@
kubernetes.resources = { kubernetes.resources = {
helmChartConfigs = { helmChartConfigs = {
traefik = { traefik = {
metadata.namespace = lib.mkForce "kube-system";
# Override Traefik's service with a static load balancer IP. # Override Traefik's service with a static load balancer IP.
# Create endpoint for HTTPS on port 444. # Create endpoint for HTTPS on port 444.
# Allow external name services for servers in LAN. # Allow external name services for servers in LAN.
spec.valuesContent = lib.generators.toYAML { } { spec.valuesContent = lib.generators.toYAML { } {
# service.annotations."metallb.universe.tf/loadBalancerIPs" = myLib.globals.traefikIPv4;
providers.kubernetesIngress.allowExternalNameServices = true; providers.kubernetesIngress.allowExternalNameServices = true;
service.loadBalancerIP = myLib.globals.traefikIPv4; service.loadBalancerIP = myLib.globals.traefikIPv4;

View file

@ -1,69 +0,0 @@
# Persistent storage declarations: static NFS-backed volumes shared between
# workloads, plus custom "lab" options declaring Longhorn volumes (name -> size)
# that a project-local module expands into PV/PVC resources.
{
# Debug pod for inspecting a volume's contents; kept for reference.
# kubernetes.resources.pods.testje.spec = {
# containers.testje = {
# image = "nginx";
# volumeMounts = [
# { name = "freshrss"; mountPath = "/freshrss"; }
# ];
# };
# volumes.freshrss.persistentVolumeClaim.claimName = "freshrss";
# };
kubernetes.resources.persistentVolumes = {
music-syncthing.spec = {
capacity.storage = "1Gi";
accessModes = [ "ReadWriteMany" ];
nfs = {
server = "lewis.dmz";
path = "/mnt/longhorn/persistent/media/music";
};
};
media-media.spec = {
capacity.storage = "1Gi";
accessModes = [ "ReadWriteMany" ];
nfs = {
server = "lewis.dmz";
path = "/mnt/longhorn/persistent/media";
};
};
};
lab = {
longhorn.persistentVolume = {
freshrss.storage = "1Gi";
radicale.storage = "200Mi";
atuin.storage = "300Mi";
atuin-db.storage = "300Mi";
nextcloud.storage = "50Gi";
nextcloud-db.storage = "400Mi";
hedgedoc-uploads.storage = "50Mi";
hedgedoc-db.storage = "100Mi";
kitchenowl.storage = "100Mi";
forgejo.storage = "20Gi";
paperless-data.storage = "10Gi";
paperless-redisdata.storage = "20Mi";
paperless-db.storage = "150Mi";
syncthing.storage = "400Mi";
pihole-data.storage = "750Mi";
pihole-dnsmasq.storage = "16Mi";
immich.storage = "50Gi";
immich-db.storage = "5Gi";
attic.storage = "15Gi";
attic-db.storage = "150Mi";
jellyfin.storage = "5Gi";
transmission.storage = "25Mi";
jellyseerr.storage = "75Mi";
radarr.storage = "300Mi";
prowlarr.storage = "150Mi";
sonarr.storage = "150Mi";
bazarr.storage = "25Mi";
minecraft.storage = "1Gi";
};
};
}