From bef9d8c474be383bd970932e31fc133bf412114e Mon Sep 17 00:00:00 2001
From: Pim Kunis <pim@kunis.nl>
Date: Tue, 26 Mar 2024 20:26:02 +0100
Subject: [PATCH] wip k3s cluster

---
 docker_swarm/roles/traefik/services.yml  |  4 +-
 flake.nix                                |  2 +-
 .../{kubenix.nix => kubenix/default.nix} | 48 ++++-----
 nix/flake/kubenix/freshrss.nix           | 97 +++++++++++++++++++
 nix/modules/k3s/default.nix              |  4 +-
 5 files changed, 129 insertions(+), 26 deletions(-)
 rename nix/flake/{kubenix.nix => kubenix/default.nix} (67%)
 create mode 100644 nix/flake/kubenix/freshrss.nix

diff --git a/docker_swarm/roles/traefik/services.yml b/docker_swarm/roles/traefik/services.yml
index 8f75e2a..5a99eed 100644
--- a/docker_swarm/roles/traefik/services.yml
+++ b/docker_swarm/roles/traefik/services.yml
@@ -3,7 +3,9 @@ http:
     k3s:
       loadBalancer:
         servers:
-          - url: http://jefke.dmz
+          # TODO: This WILL break when the cluster is reprovisioned and another IP address is chosen.
+          # The load balancer service for Traefik is automatically provisioned by k3s; unsure how to statically assign the IP address.
+          - url: http://192.168.40.101
     esrom:
       loadBalancer:
         servers:
diff --git a/flake.nix b/flake.nix
index 0a3f9e0..d3137c5 100644
--- a/flake.nix
+++ b/flake.nix
@@ -52,7 +52,7 @@
         ./nix/flake/checks.nix
         ./nix/flake/deploy.nix
         ./nix/flake/nixos.nix
-        ./nix/flake/kubenix.nix
+        ./nix/flake/kubenix
       ] // (flake-utils.lib.eachDefaultSystem (system: {
         formatter = nixpkgs.legacyPackages.${system}.nixfmt;
       }));
diff --git a/nix/flake/kubenix.nix b/nix/flake/kubenix/default.nix
similarity index 67%
rename from nix/flake/kubenix.nix
rename to nix/flake/kubenix/default.nix
index 31c2742..312fd52 100644
--- a/nix/flake/kubenix.nix
+++ b/nix/flake/kubenix/default.nix
@@ -4,27 +4,31 @@
       specialArgs.flake = self;
 
       module = { kubenix, ... }: {
-        imports = [ kubenix.modules.k8s kubenix.modules.helm ];
+        imports = [
+          kubenix.modules.k8s
+          kubenix.modules.helm
+          # ./freshrss.nix
+        ];
 
         kubernetes.kubeconfig = "~/.kube/config";
         kubenix.project = "home";
 
         kubernetes = {
-          namespace = "kubenix";
+          # namespace = "kubenix";
 
           customTypes = {
             # HACK: These are dummy custom types.
-            # This is needed, because the CRDs imported as a chart are not available as Nix modules
-            # There is no validation whatsoever on resources defined using these types!
+            # This is needed because the CRDs imported as a chart are not available as Nix modules.
+            # There is no Nix-based validation on resources defined using these types!
             # See: https://github.com/hall/kubenix/issues/34
-            ipaddresspool = {
-              attrName = "ipaddresspools";
+            ipAddressPool = {
+              attrName = "ipAddressPools";
               group = "metallb.io";
               version = "v1beta1";
               kind = "IPAddressPool";
             };
-            l2advertisement = {
-              attrName = "l2advertisements";
+            l2Advertisement = {
+              attrName = "l2Advertisements";
               group = "metallb.io";
               version = "v1beta1";
               kind = "L2Advertisement";
@@ -32,16 +36,15 @@
           };
 
           resources = {
-            namespaces = {
-              kubenix = { };
-
-              metallb-system.metadata.labels = {
-                "pod-security.kubernetes.io/enforce" = "privileged";
-                "pod-security.kubernetes.io/audit" = "privileged";
-                "pod-security.kubernetes.io/warn" = "privileged";
-              };
-            };
+            # namespaces = {
+            #   kubenix = { };
+            #   metallb-system.metadata.labels = {
+            #     "pod-security.kubernetes.io/enforce" = "privileged";
+            #     "pod-security.kubernetes.io/audit" = "privileged";
+            #     "pod-security.kubernetes.io/warn" = "privileged";
+            #   };
+            # };
 
             deployments.cyberchef.spec = {
               replicas = 3;
 
@@ -91,17 +94,18 @@
               }];
             };
 
-            ipaddresspools.main = {
-              metadata.namespace = "metallb-system";
-              spec.addresses = [ "192.168.40.100-192.168.40.253" ];
+            ipAddressPools.main = {
+              # metadata.namespace = "metallb-system";
+              spec.addresses = [ "192.168.40.100-192.168.40.254" ];
             };
 
-            l2advertisements.main.metadata.namespace = "metallb-system";
+            # l2Advertisements.main.metadata.namespace = "metallb-system";
+            l2Advertisements.main.metadata = { };
           };
 
           helm.releases.metallb = {
             chart = nixhelm.chartsDerivations.${system}.metallb.metallb;
-            namespace = "metallb-system";
+            # namespace = "metallb-system";
             includeCRDs = true;
           };
         };
diff --git a/nix/flake/kubenix/freshrss.nix b/nix/flake/kubenix/freshrss.nix
new file mode 100644
index 0000000..62d8b08
--- /dev/null
+++ b/nix/flake/kubenix/freshrss.nix
@@ -0,0 +1,97 @@
+{
+  kubernetes.resources = {
+    configMaps.freshrss.data = {
+      TZ = "Europe/Amsterdam";
+      CRON_MIN = "2,32";
+      ADMIN_EMAIL = "pim@kunis.nl";
+      PUBLISHED_PORT = "443";
+    };
+
+    secrets.freshrss.stringData.adminPassword = "ref+file:///home/pim/.config/home/vals.yaml";
+
+    persistentVolumeClaims.freshrss.spec = {
+      accessModes = [ "ReadWriteOnce" ];
+      storageClassName = "local-path";
+      resources.requests.storage = "1Mi";
+    };
+
+    deployments.freshrss = {
+      metadata.labels.app = "freshrss";
+
+      spec = {
+        selector.matchLabels.app = "freshrss";
+
+        template = {
+          metadata.labels.app = "freshrss";
+
+          spec = {
+            containers.freshrss = {
+              image = "freshrss/freshrss:edge";
+
+              ports = [{
+                containerPort = 80;
+                protocol = "TCP";
+              }];
+
+              envFrom = [{ configMapRef.name = "freshrss"; }];
+              env = [
+                {
+                  name = "ADMIN_PASSWORD";
+                  valueFrom.secretKeyRef = {
+                    name = "freshrss";
+                    key = "adminPassword";
+                  };
+                }
+                {
+                  name = "ADMIN_API_PASSWORD";
+                  valueFrom.secretKeyRef = {
+                    name = "freshrss";
+                    key = "adminPassword";
+                  };
+                }
+              ];
+
+              volumeMounts = [{
+                name = "data";
+                mountPath = "/var/www/FreshRSS/data";
+              }];
+            };
+
+            volumes = [{
+              name = "data";
+              persistentVolumeClaim.claimName = "freshrss";
+            }];
+          };
+        };
+      };
+    };
+
+    services.freshrss.spec = {
+      selector.app = "freshrss";
+
+      ports = [{
+        protocol = "TCP";
+        port = 80;
+        targetPort = 80;
+      }];
+    };
+
+    ingresses.freshrss.spec = {
+      ingressClassName = "traefik";
+
+      rules = [{
+        host = "freshrss.k3s.kun.is";
+
+        http.paths = [{
+          path = "/";
+          pathType = "Prefix";
+
+          backend.service = {
+            name = "freshrss";
+            port.number = 80;
+          };
+        }];
+      }];
+    };
+  };
+}
diff --git a/nix/modules/k3s/default.nix b/nix/modules/k3s/default.nix
index 51f121d..a8730c3 100644
--- a/nix/modules/k3s/default.nix
+++ b/nix/modules/k3s/default.nix
@@ -20,7 +20,7 @@ in {
   services.k3s = {
     enable = true;
     role = "server";
-    extraFlags = "--tls-san ${config.networking.fqdn} --data-dir ${config.lab.storage.dataMountPoint}/k3s";
+    extraFlags = "--tls-san ${config.networking.fqdn}";
   };
 
   system.activationScripts.k3s-bootstrap.text =
@@ -30,7 +30,7 @@
       }).config.kubernetes.result;
     in
     ''
-      ln -sf ${k3sBootstrapFile} ${config.lab.storage.dataMountPoint}/k3s/server/manifests/k3s-bootstrap.json
+      ln -sf ${k3sBootstrapFile} /var/lib/rancher/k3s/server/manifests/k3s-bootstrap.json
     '';
   };
 }
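
A note on the TODO in services.yml: k3s deploys its bundled Traefik through a HelmChart object in the kube-system namespace, and chart values can be overridden by dropping a HelmChartConfig into the server's manifests directory (the same /var/lib/rancher/k3s/server/manifests directory the k3s-bootstrap.json symlink above is created in). Assuming MetalLB v0.13+ and that the bundled Traefik chart exposes service.annotations, the metallb.universe.tf/loadBalancerIPs annotation should pin the address. A sketch, not part of the patch and untested:

apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
  name: traefik
  namespace: kube-system
spec:
  valuesContent: |-
    service:
      annotations:
        # Request a fixed address from the MetalLB pool so that
        # http://192.168.40.101 keeps working after reprovisioning;
        # 192.168.40.101 lies inside the .100-.254 pool defined above.
        metallb.universe.tf/loadBalancerIPs: 192.168.40.101

With the address pinned this way, the static url in services.yml would stay valid across cluster rebuilds instead of depending on whichever IP MetalLB happens to allocate first.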
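
Since the ipAddressPool and l2Advertisement custom types are dummies, nothing on the Nix side validates these resources, so the generated manifests are worth checking by eye against the MetalLB documentation. Assuming kubenix renders them in its usual way, ipAddressPools.main and l2Advertisements.main should come out roughly as:

apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: main
spec:
  addresses:
    - 192.168.40.100-192.168.40.254
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: main

With the namespace lines commented out, both objects land in whatever namespace the manifest is applied to, while MetalLB normally only watches the namespace it runs in (metallb-system in the code being removed); worth verifying once the cluster is up.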
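
One more observation on freshrss.nix: the adminPassword value is a vals reference, which suggests the rendered manifests are passed through vals before being applied. A ref+file URI without a fragment resolves to the whole file, so as written the entire contents of vals.yaml would become the admin password. If a single value inside that file is intended, vals expects a key-path fragment, e.g. ref+file:///home/pim/.config/home/vals.yaml#/freshrss/adminPassword (the freshrss/adminPassword key path is hypothetical; the actual layout of vals.yaml is not part of this patch).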