feat(freshrss): Move to its own namespace

feat(longhorn): Add ability to specify PVC namespace
refactor(freshrss): Simplify env variable declaration
Pim Kunis 2024-07-14 11:47:46 +02:00
parent ada288674a
commit 07bd2e1e01
7 changed files with 62 additions and 50 deletions
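
Taken together: kubenix modules now receive their target namespace as a module argument (wired through specialArgs), FreshRSS declares its container environment inline instead of through a ConfigMap, and a Longhorn volume can be placed in a namespace other than default. A minimal sketch of a module written against the new interface (myapp and its image are placeholders; registration through mkDeployScriptAndManifest is assumed, as in the flake diff below):

{ namespace, ... }: {
  kubernetes.resources.deployments.myapp.spec.template.spec.containers.myapp = {
    image = "example/myapp";            # hypothetical image
    env.TZ.value = "Europe/Amsterdam";  # env vars declared inline, no ConfigMap indirection
  };

  lab = {
    # The PVC is created in the module's own namespace instead of "default".
    longhornVolumes.myapp = {
      storage = "1Gi";
      inherit namespace;
    };
  };
}

The namespace value is the third argument of mkDeployScriptAndManifest, so the PV's claimRef ends up pointing at the correct namespace.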

View file

@@ -28,7 +28,7 @@
         --suffix PATH : "$out/bin" \
         --run 'export KUBECONFIG=''${KUBECONFIG:-${toString kubeconfig}}' \
         --set MANIFEST '${result}' \
-        --set APPLYSET '${applyset}' \
+        --set APPLYSET 'applyset-${applyset}' \
         --set NAMESPACE '${namespace}'
     '';
   };
@@ -38,6 +38,8 @@
       mkDeployScriptAndManifest = module: applyset: namespace:
         let
           kubernetes = (kubenix.evalModules.${system} {
+            specialArgs = { inherit namespace; };
+
             module = { kubenix, ... }:
               {
                 imports = [
@@ -45,6 +47,11 @@
                   "${self}/kubenix-modules/custom"
                   module
                 ];
+
+                config = {
+                  kubenix.project = applyset;
+                  kubernetes.namespace = namespace;
+                };
               };
           }).config.kubernetes;
         in
@@ -59,4 +66,6 @@
       kubenix.cyberchef = mkDeployScriptAndManifest
         "${self}/kubenix-modules/cyberchef.nix" "cyberchef" "cyberchef";
 
+      kubenix.freshrss = mkDeployScriptAndManifest
+        "${self}/kubenix-modules/freshrss.nix" "freshrss" "freshrss";
     })

View file

@@ -1,6 +1,6 @@
 let
   applications = [
-    ./freshrss.nix
+    # ./freshrss.nix
     # ./cyberchef.nix
     ./kms.nix
     ./inbucket.nix

View file

@@ -60,6 +60,11 @@
       };
 
       resources = {
+        namespaces = {
+          cyberchef = { };
+          freshrss = { };
+        };
+
         nodes =
           let
             machinesWithKubernetesLabels = lib.filterAttrs (name: machine: machine.kubernetesNodeLabels != null) machines;

View file

@@ -5,6 +5,11 @@ let
     storage = lib.mkOption {
       type = lib.types.str;
     };
+
+    namespace = lib.mkOption {
+      type = lib.types.str;
+      default = "default";
+    };
   };
 };
 in
@@ -28,7 +33,7 @@ in
 
       claimRef = {
         inherit name;
-        namespace = "default";
+        namespace = longhornVolume.namespace;
       };
 
       csi = {

View file

@@ -1,45 +1,35 @@
 {
-  config = {
-    kubenix.project = "cyberchef";
-
-    kubernetes = {
-      namespace = "cyberchef";
-
-      resources = {
-        namespaces.cyberchef = { };
-
-        deployments.cyberchef.spec = {
-          replicas = 3;
-          selector.matchLabels.app = "cyberchef";
-
-          template = {
-            metadata.labels.app = "cyberchef";
-
-            spec.containers.cyberchef = {
-              image = "mpepping/cyberchef";
-              ports.web.containerPort = 8000;
-            };
-          };
-        };
-
-        services.cyberchef.spec = {
-          selector.app = "cyberchef";
-
-          ports.web = {
-            port = 80;
-            targetPort = "web";
-          };
-        };
-      };
-    };
-
-    lab.ingresses.cyberchef = {
-      host = "cyberchef.kun.is";
-
-      service = {
-        name = "cyberchef";
-        portName = "web";
-      };
-    };
-  };
+  kubernetes.resources = {
+    deployments.cyberchef.spec = {
+      replicas = 3;
+      selector.matchLabels.app = "cyberchef";
+
+      template = {
+        metadata.labels.app = "cyberchef";
+
+        spec.containers.cyberchef = {
+          image = "mpepping/cyberchef";
+          ports.web.containerPort = 8000;
+        };
+      };
+    };
+
+    services.cyberchef.spec = {
+      selector.app = "cyberchef";
+
+      ports.web = {
+        port = 80;
+        targetPort = "web";
+      };
+    };
+  };
+
+  lab.ingresses.cyberchef = {
+    host = "cyberchef.kun.is";
+
+    service = {
+      name = "cyberchef";
+      portName = "web";
+    };
+  };
 }

View file

@@ -1,12 +1,5 @@
-{
+{ namespace, ... }: {
   kubernetes.resources = {
-    configMaps.freshrss.data = {
-      TZ = "Europe/Amsterdam";
-      CRON_MIN = "2,32";
-      ADMIN_EMAIL = "pim@kunis.nl";
-      PUBLISHED_PORT = "443";
-    };
-
     secrets.freshrss.stringData.adminPassword = "ref+sops://secrets/kubernetes.yaml#/freshrss/password";
 
     deployments.freshrss = {
@@ -31,10 +24,14 @@
           containers.freshrss = {
             image = "freshrss/freshrss:1.24.1";
             imagePullPolicy = "Always";
-            envFrom = [{ configMapRef.name = "freshrss"; }];
             ports.web.containerPort = 80;
 
             env = {
+              TZ.value = "Europe/Amsterdam";
+              CRON_MIN.value = "2,32";
+              ADMIN_EMAIL.value = "pim@kunis.nl";
+              PUBLISHED_PORT.value = "443";
+
               ADMIN_PASSWORD.valueFrom.secretKeyRef = {
                 name = "freshrss";
                 key = "adminPassword";
@@ -82,5 +79,13 @@
         portName = "web";
       };
     };
+
+    # TODO: Maybe we should revisit this architecture?
+    # The PVs are cluster-wide and should probably be defined elsewhere.
+    # Then the PVC should reference the PV probably.
+    longhornVolumes.freshrss = {
+      storage = "1Gi";
+      inherit namespace;
+    };
   };
 }

View file

@@ -5,20 +5,18 @@
 #        volumeMounts = [
-#          { name = "atuin"; mountPath = "/atuin"; }
-#          { name = "atuin-db"; mountPath = "/atuin-db"; }
+#          { name = "freshrss"; mountPath = "/freshrss"; }
 #        ];
 #      };
 
-#      volumes.atuin.persistentVolumeClaim.claimName = "atuin";
-#      volumes.atuin-db.persistentVolumeClaim.claimName = "atuin-db";
+#      volumes.freshrss.persistentVolumeClaim.claimName = "freshrss";
 #    };
 
   lab = {
     longhornVolumes = {
       hedgedoc-uploads.storage = "50Mi";
       hedgedoc-db.storage = "100Mi";
-      freshrss.storage = "1Gi";
+      # freshrss.storage = "1Gi";
       radicale.storage = "200Mi";
       minecraft.storage = "1Gi";
       nextcloud.storage = "50Gi";