From 07bd2e1e0179e2e22ffec7f6869a85369cf4e5b3 Mon Sep 17 00:00:00 2001
From: Pim Kunis
Date: Sun, 14 Jul 2024 11:47:46 +0200
Subject: [PATCH] feat(freshrss): Move to its own namespace

feat(longhorn): Add ability to specify PVC namespace
refactor(freshrss): Simplify env variable declaration
---
 flake-parts/kubenix.nix                    | 11 ++++-
 kubenix-modules/all.nix                    |  2 +-
 kubenix-modules/base.nix                   |  5 ++
 kubenix-modules/custom/longhorn-volume.nix |  7 ++-
 kubenix-modules/cyberchef.nix              | 56 +++++++++-------------
 kubenix-modules/freshrss.nix               | 23 +++++----
 kubenix-modules/volumes.nix                |  8 ++--
 7 files changed, 62 insertions(+), 50 deletions(-)

diff --git a/flake-parts/kubenix.nix b/flake-parts/kubenix.nix
index 8cbc485..bbd2ace 100644
--- a/flake-parts/kubenix.nix
+++ b/flake-parts/kubenix.nix
@@ -28,7 +28,7 @@
           --suffix PATH : "$out/bin" \
           --run 'export KUBECONFIG=''${KUBECONFIG:-${toString kubeconfig}}' \
           --set MANIFEST '${result}' \
-          --set APPLYSET '${applyset}' \
+          --set APPLYSET 'applyset-${applyset}' \
           --set NAMESPACE '${namespace}'
       '';
     };
@@ -38,6 +38,8 @@
   mkDeployScriptAndManifest = module: applyset: namespace:
     let
       kubernetes = (kubenix.evalModules.${system} {
+        specialArgs = { inherit namespace; };
+
         module = { kubenix, ... }: {
           imports = [
@@ -45,6 +47,11 @@
             "${self}/kubenix-modules/custom"
             module
           ];
+
+          config = {
+            kubenix.project = applyset;
+            kubernetes.namespace = namespace;
+          };
         };
       }).config.kubernetes;
     in
@@ -59,4 +66,6 @@
 
   kubenix.cyberchef = mkDeployScriptAndManifest
     "${self}/kubenix-modules/cyberchef.nix" "cyberchef" "cyberchef";
+  kubenix.freshrss = mkDeployScriptAndManifest
+    "${self}/kubenix-modules/freshrss.nix" "freshrss" "freshrss";
 })
diff --git a/kubenix-modules/all.nix b/kubenix-modules/all.nix
index 98a05a4..ddf15ed 100644
--- a/kubenix-modules/all.nix
+++ b/kubenix-modules/all.nix
@@ -1,6 +1,6 @@
 let
   applications = [
-    ./freshrss.nix
+    # ./freshrss.nix
     # ./cyberchef.nix
     ./kms.nix
     ./inbucket.nix
diff --git a/kubenix-modules/base.nix b/kubenix-modules/base.nix
index 7f422f4..4ba88ef 100644
--- a/kubenix-modules/base.nix
+++ b/kubenix-modules/base.nix
@@ -60,6 +60,11 @@
     };
 
     resources = {
+      namespaces = {
+        cyberchef = { };
+        freshrss = { };
+      };
+
       nodes =
         let
           machinesWithKubernetesLabels = lib.filterAttrs (name: machine: machine.kubernetesNodeLabels != null) machines;
diff --git a/kubenix-modules/custom/longhorn-volume.nix b/kubenix-modules/custom/longhorn-volume.nix
index 8f3cb76..e261cab 100644
--- a/kubenix-modules/custom/longhorn-volume.nix
+++ b/kubenix-modules/custom/longhorn-volume.nix
@@ -5,6 +5,11 @@
       storage = lib.mkOption {
         type = lib.types.str;
       };
+
+      namespace = lib.mkOption {
+        type = lib.types.str;
+        default = "default";
+      };
     };
   };
 in
@@ -28,7 +33,7 @@
         claimRef = {
           inherit name;
-          namespace = "default";
+          namespace = longhornVolume.namespace;
         };
 
         csi = {
diff --git a/kubenix-modules/cyberchef.nix b/kubenix-modules/cyberchef.nix
index 2e25d32..19c2578 100644
--- a/kubenix-modules/cyberchef.nix
+++ b/kubenix-modules/cyberchef.nix
@@ -1,45 +1,35 @@
 {
-  config = {
-    kubenix.project = "cyberchef";
+  kubernetes.resources = {
+    deployments.cyberchef.spec = {
+      replicas = 3;
+      selector.matchLabels.app = "cyberchef";
 
-    kubernetes = {
-      namespace = "cyberchef";
+      template = {
+        metadata.labels.app = "cyberchef";
 
-      resources = {
-        namespaces.cyberchef = { };
-
-        deployments.cyberchef.spec = {
-          replicas = 3;
-          selector.matchLabels.app = "cyberchef";
-
-          template = {
-            metadata.labels.app = "cyberchef";
-
-            spec.containers.cyberchef = {
-              image = "mpepping/cyberchef";
-              ports.web.containerPort = 8000;
-            };
-          };
-        };
-
-        services.cyberchef.spec = {
-          selector.app = "cyberchef";
-
-          ports.web = {
-            port = 80;
-            targetPort = "web";
-          };
+        spec.containers.cyberchef = {
+          image = "mpepping/cyberchef";
+          ports.web.containerPort = 8000;
         };
       };
     };
 
-    lab.ingresses.cyberchef = {
-      host = "cyberchef.kun.is";
+  services.cyberchef.spec = {
+    selector.app = "cyberchef";
 
-      service = {
-        name = "cyberchef";
-        portName = "web";
+    ports.web = {
+      port = 80;
+      targetPort = "web";
       };
     };
   };
+
+  lab.ingresses.cyberchef = {
+    host = "cyberchef.kun.is";
+
+    service = {
+      name = "cyberchef";
+      portName = "web";
+    };
+  };
 }
diff --git a/kubenix-modules/freshrss.nix b/kubenix-modules/freshrss.nix
index 6567b36..0f62a40 100644
--- a/kubenix-modules/freshrss.nix
+++ b/kubenix-modules/freshrss.nix
@@ -1,12 +1,5 @@
-{
+{ namespace, ... }: {
   kubernetes.resources = {
-    configMaps.freshrss.data = {
-      TZ = "Europe/Amsterdam";
-      CRON_MIN = "2,32";
-      ADMIN_EMAIL = "pim@kunis.nl";
-      PUBLISHED_PORT = "443";
-    };
-
     secrets.freshrss.stringData.adminPassword = "ref+sops://secrets/kubernetes.yaml#/freshrss/password";
 
     deployments.freshrss = {
@@ -31,10 +24,14 @@
             containers.freshrss = {
               image = "freshrss/freshrss:1.24.1";
               imagePullPolicy = "Always";
-              envFrom = [{ configMapRef.name = "freshrss"; }];
               ports.web.containerPort = 80;
 
               env = {
+                TZ.value = "Europe/Amsterdam";
+                CRON_MIN.value = "2,32";
+                ADMIN_EMAIL.value = "pim@kunis.nl";
+                PUBLISHED_PORT.value = "443";
+
                 ADMIN_PASSWORD.valueFrom.secretKeyRef = {
                   name = "freshrss";
                   key = "adminPassword";
@@ -82,5 +79,13 @@
         portName = "web";
       };
     };
+
+    # TODO: Maybe we should revisit this architecture?
+    # The PVs are cluster-wide and should probably be defined elsewhere.
+    # Then the PVC should reference the PV probably.
+    longhornVolumes.freshrss = {
+      storage = "1Gi";
+      inherit namespace;
+    };
   };
 }
diff --git a/kubenix-modules/volumes.nix b/kubenix-modules/volumes.nix
index f4592e7..ecd3d09 100644
--- a/kubenix-modules/volumes.nix
+++ b/kubenix-modules/volumes.nix
@@ -5,20 +5,18 @@
   #           volumeMounts = [
-  #             { name = "atuin"; mountPath = "/atuin"; }
-  #             { name = "atuin-db"; mountPath = "/atuin-db"; }
+  #             { name = "freshrss"; mountPath = "/freshrss"; }
   #           ];
   #         };
 
-  #       volumes.atuin.persistentVolumeClaim.claimName = "atuin";
-  #       volumes.atuin-db.persistentVolumeClaim.claimName = "atuin-db";
+  #       volumes.freshrss.persistentVolumeClaim.claimName = "freshrss";
   #     };
 
   lab = {
     longhornVolumes = {
       hedgedoc-uploads.storage = "50Mi";
       hedgedoc-db.storage = "100Mi";
-      freshrss.storage = "1Gi";
+      # freshrss.storage = "1Gi";
       radicale.storage = "200Mi";
       minecraft.storage = "1Gi";
       nextcloud.storage = "50Gi";
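
Usage sketch: after this patch, every application module deployed through
mkDeployScriptAndManifest receives its namespace as a module argument via
specialArgs and can forward it to its Longhorn PVC. A minimal sketch of a new
application following the freshrss.nix pattern above; the name "myapp" and its
container image are hypothetical, the option paths mirror the files changed in
this patch:

  # kubenix-modules/myapp.nix
  { namespace, ... }: {  # namespace is injected via specialArgs
    kubernetes.resources.deployments.myapp.spec = {
      replicas = 1;
      selector.matchLabels.app = "myapp";

      template = {
        metadata.labels.app = "myapp";
        spec.containers.myapp.image = "nginx";
      };
    };

    # Creates the PV with a claimRef in the app's own namespace; without
    # `inherit namespace;` the longhorn-volume module falls back to its
    # default of "default".
    lab.longhornVolumes.myapp = {
      storage = "1Gi";
      inherit namespace;
    };
  }

Registered in flake-parts/kubenix.nix with a matching applyset name and
namespace, as done for cyberchef and freshrss above:

  kubenix.myapp = mkDeployScriptAndManifest
    "${self}/kubenix-modules/myapp.nix" "myapp" "myapp";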