refactor(kubenix): DRY deployment creation
feat(kubenix): Create applysets for remaining resources
This commit is contained in:
parent 5a4376f699
commit 7a915f0928
10 changed files with 324 additions and 301 deletions
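The diff below only shows the per-applyset modules themselves; the flake wiring that evaluates them is in files not displayed here. As a rough, hedged idea of what the "DRY deployment creation" could look like, here is a minimal sketch in Nix. The helper name (mkDeployment), the specialArgs set, and the use of config.kubernetes.result are assumptions for illustration, not taken from this commit:

# Hypothetical sketch only: one way the flake could DRY the per-applyset
# kubenix evaluations after this refactor. The helper name (mkDeployment),
# the specialArgs set and the use of config.kubernetes.result are
# assumptions; the real wiring lives in files not shown in this diff.
{ kubenix, nixhelm, system, machines, ... }:
let
  mkDeployment = applyset: module:
    (kubenix.evalModules.${system} {
      specialArgs = { inherit nixhelm system machines; };
      module = { kubenix, ... }: {
        imports = [ kubenix.modules.k8s kubenix.modules.helm module ];
        kubenix.project = applyset; # one kubenix project per applyset
        kubernetes.kubeconfig = "~/.kube/config";
      };
    }).config.kubernetes.result;
in
{
  # Usage (hypothetical): one evaluation per applyset.
  default = mkDeployment "default" ./kubenix-modules/bootstrapDefault.nix;
  kube-system = mkDeployment "kube-system" ./kubenix-modules/bootstrapKubeSystem.nix;
}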
@@ -1,12 +0,0 @@
{
  imports = [
    # ./argo.nix
    ./base.nix
    ./longhorn.nix
    ./metallb.nix
    ./cert-manager.nix
    ./custom
    ./volumes.nix
    ./custom-types.nix
  ];
}

@@ -1,94 +0,0 @@
# We deploy several resources that rely on "custom resource definitions".
# We must first import these resource definitions, before deploying resources that depend on them.
{ lib, kubenix, nixhelm, system, machines, ... }: {
  imports = [
    kubenix.modules.k8s
    kubenix.modules.helm
  ];

  config = {
    kubenix.project = "home";

    kubernetes = {
      kubeconfig = "~/.kube/config";

      # TODO: These were copied from https://github.com/cert-manager/cert-manager/releases/download/v1.14.4/cert-manager.crds.yaml
      # See https://cert-manager.io/docs/installation/helm/
      # Seems kubenix cannot import a list of resources, but only individual resources.
      # Might be good to create a PR for this.
      imports = [
        ./cert-manager-manifests/certificaterequest.yaml
        ./cert-manager-manifests/certificate.yaml
        ./cert-manager-manifests/challenge.yaml
        ./cert-manager-manifests/clusterissuer.yaml
        ./cert-manager-manifests/issuer.yaml
        ./cert-manager-manifests/order.yaml
      ];

      helm.releases = {
        metallb = {
          chart = nixhelm.chartsDerivations.${system}.metallb.metallb;
          includeCRDs = true;
        };

        cert-manager = {
          chart = nixhelm.chartsDerivations.${system}.jetstack.cert-manager;
          includeCRDs = false;
        };

        # argo-workflows = {
        #   chart = nixhelm.chartsDerivations.${system}.argoproj.argo-workflows;
        #   includeCRDs = true;
        # };

        longhorn = {
          chart = nixhelm.chartsDerivations.${system}.longhorn.longhorn;
          includeCRDs = true;

          values = {
            persistence.defaultClassReplicaCount = 2;

            defaultSettings = {
              defaultDataPath = "/mnt/longhorn";
              storageMinimalAvailablePercentage = 0;
              allowRecurringJobWhileVolumeDetached = true;
              backupTarget = "nfs://lewis.dmz:/mnt/longhorn/persistent/longhorn-backup";
            };
          };
        };
      };

      resources = {
        namespaces = {
          static-websites = { };
          freshrss = { };
          radicale = { };
          kms = { };
          atuin = { };
          nextcloud = { };
          hedgedoc = { };
          kitchenowl = { };
          forgejo = { };
          paperless = { };
          syncthing = { };
          immich = { };
          attic = { };
          inbucket = { };
          dns = { };
          media = { };
          minecraft = { };
        };

        nodes =
          let
            machinesWithKubernetesLabels = lib.filterAttrs (name: machine: machine.kubernetesNodeLabels != null) machines;
          in
          builtins.mapAttrs
            (name: machine: {
              metadata.labels = machine.kubernetesNodeLabels;
            })
            machinesWithKubernetesLabels;
      };
    };
  };
}

164 kubenix-modules/bootstrapDefault.nix (Normal file)
@@ -0,0 +1,164 @@
{ lib, nixhelm, system, machines, ... }: {
  kubernetes = {

    helm.releases = {
      metallb = {
        chart = nixhelm.chartsDerivations.${system}.metallb.metallb;
        includeCRDs = true;
      };

      # argo-workflows = {
      #   chart = nixhelm.chartsDerivations.${system}.argoproj.argo-workflows;
      #   includeCRDs = true;
      # };

      longhorn = {
        chart = nixhelm.chartsDerivations.${system}.longhorn.longhorn;
        includeCRDs = true;

        values = {
          persistence.defaultClassReplicaCount = 2;

          defaultSettings = {
            defaultDataPath = "/mnt/longhorn";
            storageMinimalAvailablePercentage = 0;
            allowRecurringJobWhileVolumeDetached = true;
            backupTarget = "nfs://lewis.dmz:/mnt/longhorn/persistent/longhorn-backup";
          };
        };
      };

      # argo-workflows = {
      #   chart = nixhelm.chartsDerivations.${system}.argoproj.argo-workflows;
      #   includeCRDs = true;
      # };
    };

    resources = {
      namespaces = {
        static-websites = { };
        freshrss = { };
        radicale = { };
        kms = { };
        atuin = { };
        nextcloud = { };
        hedgedoc = { };
        kitchenowl = { };
        forgejo = { };
        paperless = { };
        syncthing = { };
        immich = { };
        attic = { };
        inbucket = { };
        dns = { };
        media = { };
        minecraft = { };
      };

      nodes =
        let
          machinesWithKubernetesLabels = lib.filterAttrs (name: machine: machine.kubernetesNodeLabels != null) machines;
        in
        builtins.mapAttrs
          (name: machine: {
            metadata.labels = machine.kubernetesNodeLabels;
          })
          machinesWithKubernetesLabels;

      ingresses.longhorn = {
        metadata.annotations = {
          "cert-manager.io/cluster-issuer" = "letsencrypt";
          "traefik.ingress.kubernetes.io/router.entrypoints" = "localsecure";
        };

        spec = {
          ingressClassName = "traefik";

          rules = [{
            host = "longhorn.kun.is";

            http.paths = [{
              path = "/";
              pathType = "Prefix";

              backend.service = {
                name = "longhorn-frontend";
                port.number = 80;
              };
            }];
          }];

          tls = [{
            secretName = "longhorn-tls";
            hosts = [ "longhorn.kun.is" ];
          }];
        };
      };

      recurringJobs.backup-nfs.spec = {
        cron = "0 1 * * *"; # One o'clock at night
        task = "backup";
        retain = 2; # We don't need many, as we also make Borg backups.
        concurrency = 1;
      };

      ipAddressPools.main.spec.addresses = [ "192.168.30.128-192.168.30.200" "2a0d:6e00:1a77:30::2-2a0d:6e00:1a77:30:ffff:ffff:ffff:fffe" ];
      l2Advertisements.main.metadata = { };

      persistentVolumes = {
        music-syncthing.spec = {
          capacity.storage = "1Gi";
          accessModes = [ "ReadWriteMany" ];

          nfs = {
            server = "lewis.dmz";
            path = "/mnt/longhorn/persistent/media/music";
          };
        };

        media-media.spec = {
          capacity.storage = "1Gi";
          accessModes = [ "ReadWriteMany" ];

          nfs = {
            server = "lewis.dmz";
            path = "/mnt/longhorn/persistent/media";
          };
        };
      };
    };
  };

  lab = {
    longhorn.persistentVolume = {
      freshrss.storage = "1Gi";
      radicale.storage = "200Mi";
      atuin.storage = "300Mi";
      atuin-db.storage = "300Mi";
      nextcloud.storage = "50Gi";
      nextcloud-db.storage = "400Mi";
      hedgedoc-uploads.storage = "50Mi";
      hedgedoc-db.storage = "100Mi";
      kitchenowl.storage = "100Mi";
      forgejo.storage = "20Gi";
      paperless-data.storage = "10Gi";
      paperless-redisdata.storage = "20Mi";
      paperless-db.storage = "150Mi";
      syncthing.storage = "400Mi";
      pihole-data.storage = "750Mi";
      pihole-dnsmasq.storage = "16Mi";
      immich.storage = "50Gi";
      immich-db.storage = "5Gi";
      attic.storage = "15Gi";
      attic-db.storage = "150Mi";
      jellyfin.storage = "5Gi";
      transmission.storage = "25Mi";
      jellyseerr.storage = "75Mi";
      radarr.storage = "300Mi";
      prowlarr.storage = "150Mi";
      sonarr.storage = "150Mi";
      bazarr.storage = "25Mi";
      minecraft.storage = "1Gi";
    };
  };
}

36 kubenix-modules/bootstrapKubeSystem.nix (Normal file)
@@ -0,0 +1,36 @@
{ nixhelm, system, ... }: {
  kubernetes = {
    # TODO: These were copied from https://github.com/cert-manager/cert-manager/releases/download/v1.14.4/cert-manager.crds.yaml
    # See https://cert-manager.io/docs/installation/helm/
    # Seems kubenix cannot import a list of resources, only individual resources.
    # Might be good to create a PR for this.
    imports = [
      ./cert-manager-manifests/certificaterequest.yaml
      ./cert-manager-manifests/certificate.yaml
      ./cert-manager-manifests/challenge.yaml
      ./cert-manager-manifests/clusterissuer.yaml
      ./cert-manager-manifests/issuer.yaml
      ./cert-manager-manifests/order.yaml
    ];

    helm.releases = {
      cert-manager = {
        chart = nixhelm.chartsDerivations.${system}.jetstack.cert-manager;
        includeCRDs = false;
        namespace = "kube-system";
      };
    };

    resources.clusterIssuers.letsencrypt = {
      spec.acme = {
        server = "https://acme-v02.api.letsencrypt.org/directory";
        email = "pim@kunis.nl";
        privateKeySecretRef.name = "letsencrypt-private-key";
        solvers = [{
          selector = { };
          http01.ingress.class = "traefik";
        }];
      };
    };
  };
}

@@ -1,15 +0,0 @@
{
  kubernetes.resources.clusterIssuers.letsencrypt = {
    metadata.namespace = "kube-system";

    spec.acme = {
      server = "https://acme-v02.api.letsencrypt.org/directory";
      email = "pim@kunis.nl";
      privateKeySecretRef.name = "letsencrypt-private-key";
      solvers = [{
        selector = { };
        http01.ingress.class = "traefik";
      }];
    };
  };
}

@@ -1,44 +0,0 @@
{ lib, nixhelm, system, ... }: {
  config = {
    kubernetes = {
      resources = {
        ingresses.longhorn = {
          metadata.annotations = {
            "cert-manager.io/cluster-issuer" = "letsencrypt";
            "traefik.ingress.kubernetes.io/router.entrypoints" = "localsecure";
          };

          spec = {
            ingressClassName = "traefik";

            rules = [{
              host = "longhorn.kun.is";

              http.paths = [{
                path = "/";
                pathType = "Prefix";

                backend.service = {
                  name = "longhorn-frontend";
                  port.number = 80;
                };
              }];
            }];

            tls = [{
              secretName = "longhorn-tls";
              hosts = [ "longhorn.kun.is" ];
            }];
          };
        };

        recurringJobs.backup-nfs.spec = {
          cron = "0 1 * * *"; # One o'clock at night
          task = "backup";
          retain = 2; # We don't need many, as we also make Borg backups.
          concurrency = 1;
        };
      };
    };
  };
}

@@ -1,7 +0,0 @@
# TODO: These resources should probably exist within the kube-system namespace.
{
  kubernetes.resources = {
    ipAddressPools.main.spec.addresses = [ "192.168.30.128-192.168.30.200" "2a0d:6e00:1a77:30::2-2a0d:6e00:1a77:30:ffff:ffff:ffff:fffe" ];
    l2Advertisements.main.metadata = { };
  };
}

@@ -2,13 +2,10 @@
  kubernetes.resources = {
    helmChartConfigs = {
      traefik = {
        metadata.namespace = lib.mkForce "kube-system";

        # Override Traefik's service with a static load balancer IP.
        # Create endpoint for HTTPS on port 444.
        # Allow external name services for servers in LAN.
        spec.valuesContent = lib.generators.toYAML { } {
          # service.annotations."metallb.universe.tf/loadBalancerIPs" = myLib.globals.traefikIPv4;
          providers.kubernetesIngress.allowExternalNameServices = true;
          service.loadBalancerIP = myLib.globals.traefikIPv4;

@@ -1,69 +0,0 @@
{
  # kubernetes.resources.pods.testje.spec = {
  #   containers.testje = {
  #     image = "nginx";

  #     volumeMounts = [

  #       { name = "freshrss"; mountPath = "/freshrss"; }
  #     ];
  #   };

  #   volumes.freshrss.persistentVolumeClaim.claimName = "freshrss";
  # };

  kubernetes.resources.persistentVolumes = {
    music-syncthing.spec = {
      capacity.storage = "1Gi";
      accessModes = [ "ReadWriteMany" ];

      nfs = {
        server = "lewis.dmz";
        path = "/mnt/longhorn/persistent/media/music";
      };
    };

    media-media.spec = {
      capacity.storage = "1Gi";
      accessModes = [ "ReadWriteMany" ];

      nfs = {
        server = "lewis.dmz";
        path = "/mnt/longhorn/persistent/media";
      };
    };
  };

  lab = {
    longhorn.persistentVolume = {
      freshrss.storage = "1Gi";
      radicale.storage = "200Mi";
      atuin.storage = "300Mi";
      atuin-db.storage = "300Mi";
      nextcloud.storage = "50Gi";
      nextcloud-db.storage = "400Mi";
      hedgedoc-uploads.storage = "50Mi";
      hedgedoc-db.storage = "100Mi";
      kitchenowl.storage = "100Mi";
      forgejo.storage = "20Gi";
      paperless-data.storage = "10Gi";
      paperless-redisdata.storage = "20Mi";
      paperless-db.storage = "150Mi";
      syncthing.storage = "400Mi";
      pihole-data.storage = "750Mi";
      pihole-dnsmasq.storage = "16Mi";
      immich.storage = "50Gi";
      immich-db.storage = "5Gi";
      attic.storage = "15Gi";
      attic-db.storage = "150Mi";
      jellyfin.storage = "5Gi";
      transmission.storage = "25Mi";
      jellyseerr.storage = "75Mi";
      radarr.storage = "300Mi";
      prowlarr.storage = "150Mi";
      sonarr.storage = "150Mi";
      bazarr.storage = "25Mi";
      minecraft.storage = "1Gi";
    };
  };
}