wip k3s cluster

Pim Kunis 2024-03-26 20:26:02 +01:00
parent 3124436b46
commit bef9d8c474
5 changed files with 129 additions and 26 deletions

View file

@@ -3,7 +3,9 @@ http:
     k3s:
       loadBalancer:
         servers:
-          - url: http://jefke.dmz
+          # TODO: This WILL break when the cluster is reprovisioned and another IP address is chosen.
+          # The load balancer service for Traefik is automatically provisioned by k3s; unsure how to statically assign the IP address (one possible approach is sketched after this diff).
+          - url: http://192.168.40.101
     esrom:
       loadBalancer:
         servers:
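
On the TODO above: one possible way to pin the address (a sketch only, not part of this commit) is a HelmChartConfig manifest, k3s's mechanism for overriding the values of its bundled Traefik chart, combined with MetalLB's loadBalancerIPs annotation (assumed to require MetalLB v0.13 or later). It can be delivered the same way the k3s-bootstrap manifest is symlinked in the last file of this commit; the script name traefik-lb-ip is hypothetical:

# Sketch: request a fixed LoadBalancer IP for the bundled Traefik service,
# so the proxy upstream above survives cluster reprovisioning.
{ pkgs, ... }: {
  system.activationScripts.traefik-lb-ip.text =
    let
      # HelmChartConfig is picked up by k3s's helm controller and merged
      # into the values of the Traefik chart it ships.
      manifest = pkgs.writeText "traefik-lb-ip.yaml" ''
        apiVersion: helm.cattle.io/v1
        kind: HelmChartConfig
        metadata:
          name: traefik
          namespace: kube-system
        spec:
          valuesContent: |-
            service:
              annotations:
                metallb.universe.tf/loadBalancerIPs: 192.168.40.101
      '';
    in ''
      # k3s applies any manifest found in its server/manifests directory.
      mkdir -p /var/lib/rancher/k3s/server/manifests
      ln -sf ${manifest} /var/lib/rancher/k3s/server/manifests/traefik-lb-ip.yaml
    '';
}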

View file

@@ -52,7 +52,7 @@
       ./nix/flake/checks.nix
       ./nix/flake/deploy.nix
       ./nix/flake/nixos.nix
-      ./nix/flake/kubenix.nix
+      ./nix/flake/kubenix
     ] // (flake-utils.lib.eachDefaultSystem (system: {
       formatter = nixpkgs.legacyPackages.${system}.nixfmt;
     }));

View file

@@ -4,27 +4,31 @@
   specialArgs.flake = self;

   module = { kubenix, ... }: {
-    imports = [ kubenix.modules.k8s kubenix.modules.helm ];
+    imports = [
+      kubenix.modules.k8s
+      kubenix.modules.helm
+      # ./freshrss.nix
+    ];

     kubernetes.kubeconfig = "~/.kube/config";
     kubenix.project = "home";

     kubernetes = {
-      namespace = "kubenix";
+      # namespace = "kubenix";

       customTypes = {
         # HACK: These are dummy custom types.
-        # This is needed, because the CRDs imported as a chart are not available as Nix modules
-        # There is no validation whatsoever on resources defined using these types!
+        # This is needed, because the CRDs imported as a chart are not available as Nix modules.
+        # There is no nix-based validation on resources defined using these types!
         # See: https://github.com/hall/kubenix/issues/34
-        ipaddresspool = {
-          attrName = "ipaddresspools";
+        ipAddressPool = {
+          attrName = "ipAddressPools";
           group = "metallb.io";
           version = "v1beta1";
           kind = "IPAddressPool";
         };
-        l2advertisement = {
-          attrName = "l2advertisements";
+        l2Advertisement = {
+          attrName = "l2Advertisements";
           group = "metallb.io";
           version = "v1beta1";
           kind = "L2Advertisement";
@@ -32,16 +36,15 @@
       };

       resources = {
-        namespaces = {
-          kubenix = { };
-          metallb-system.metadata.labels = {
-            "pod-security.kubernetes.io/enforce" = "privileged";
-            "pod-security.kubernetes.io/audit" = "privileged";
-            "pod-security.kubernetes.io/warn" = "privileged";
-          };
-        };
+        # namespaces = {
+        #   kubenix = { };
+        #   metallb-system.metadata.labels = {
+        #     "pod-security.kubernetes.io/enforce" = "privileged";
+        #     "pod-security.kubernetes.io/audit" = "privileged";
+        #     "pod-security.kubernetes.io/warn" = "privileged";
+        #   };
+        # };

         deployments.cyberchef.spec = {
           replicas = 3;
@@ -91,17 +94,18 @@
           }];
         };

-        ipaddresspools.main = {
-          metadata.namespace = "metallb-system";
-          spec.addresses = [ "192.168.40.100-192.168.40.253" ];
+        ipAddressPools.main = {
+          # metadata.namespace = "metallb-system";
+          spec.addresses = [ "192.168.40.100-192.168.40.254" ];
         };

-        l2advertisements.main.metadata.namespace = "metallb-system";
+        # l2Advertisements.main.metadata.namespace = "metallb-system";
+        l2Advertisements.main.metadata = { };
       };

       helm.releases.metallb = {
         chart = nixhelm.chartsDerivations.${system}.metallb.metallb;
-        namespace = "metallb-system";
+        # namespace = "metallb-system";
         includeCRDs = true;
       };
     };
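
A note on the l2Advertisements change above: since the dummy custom types skip validation, nothing checks the empty metadata; MetalLB interprets an L2Advertisement without an ipAddressPools selector as advertising every pool, which happens to be what this single-pool setup needs. An explicit binding (a sketch using the same dummy types) would look like:

# Sketch: advertise only the "main" pool instead of relying on the
# advertise-everything default of a selector-less L2Advertisement.
l2Advertisements.main.spec.ipAddressPools = [ "main" ];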

View file

@@ -0,0 +1,97 @@
{
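  # FreshRSS: environment via ConfigMap, admin password from a vals reference,
  # persistent data on a local-path PVC, served at freshrss.k3s.kun.is through Traefik.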
  kubernetes.resources = {
    configMaps.freshrss.data = {
      TZ = "Europe/Amsterdam";
      CRON_MIN = "2,32";
      ADMIN_EMAIL = "pim@kunis.nl";
      PUBLISHED_PORT = "443";
    };
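    # Not a literal password: a ref+file:// reference, presumably resolved by
    # vals against a local secrets file when the manifests are rendered.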
    secrets.freshrss.stringData.adminPassword = "ref+file:///home/pim/.config/home/vals.yaml";

    persistentVolumeClaims.freshrss.spec = {
      accessModes = [ "ReadWriteOnce" ];
      storageClassName = "local-path";
      resources.requests.storage = "1Mi";
    };

    deployments.freshrss = {
      metadata.labels.app = "freshrss";

      spec = {
        selector.matchLabels.app = "freshrss";

        template = {
          metadata.labels.app = "freshrss";

          spec = {
            containers.freshrss = {
              image = "freshrss/freshrss:edge";

              ports = [{
                containerPort = 80;
                protocol = "TCP";
              }];

              envFrom = [{ configMapRef.name = "freshrss"; }];

              env = [
                {
                  name = "ADMIN_PASSWORD";
                  valueFrom.secretKeyRef = {
                    name = "freshrss";
                    key = "adminPassword";
                  };
                }
                {
                  name = "ADMIN_API_PASSWORD";
                  valueFrom.secretKeyRef = {
                    name = "freshrss";
                    key = "adminPassword";
                  };
                }
              ];

              volumeMounts = [{
                name = "data";
                mountPath = "/var/www/FreshRSS/data";
              }];
            };

            volumes = [{
              name = "data";
              persistentVolumeClaim.claimName = "freshrss";
            }];
          };
        };
      };
    };

    services.freshrss.spec = {
      selector.app = "freshrss";

      ports = [{
        protocol = "TCP";
        port = 80;
        targetPort = 80;
      }];
    };

    ingresses.freshrss.spec = {
      ingressClassName = "traefik";

      rules = [{
        host = "freshrss.k3s.kun.is";

        http.paths = [{
          path = "/";
          pathType = "Prefix";

          backend.service = {
            name = "freshrss";
            port.number = 80;
          };
        }];
      }];
    };
  };
}

View file

@@ -20,7 +20,7 @@ in {
   services.k3s = {
     enable = true;
     role = "server";
-    extraFlags = "--tls-san ${config.networking.fqdn} --data-dir ${config.lab.storage.dataMountPoint}/k3s";
+    extraFlags = "--tls-san ${config.networking.fqdn}";
   };

   system.activationScripts.k3s-bootstrap.text =
@@ -30,7 +30,7 @@
       }).config.kubernetes.result;
     in
     ''
-      ln -sf ${k3sBootstrapFile} ${config.lab.storage.dataMountPoint}/k3s/server/manifests/k3s-bootstrap.json
+      ln -sf ${k3sBootstrapFile} /var/lib/rancher/k3s/server/manifests/k3s-bootstrap.json
     '';
 };
}
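
One caveat with the move to the default data dir (a hypothetical hardening, not part of this commit): /var/lib/rancher/k3s is created by k3s itself, and activation scripts can run before the service has ever started on a freshly provisioned host, so a defensive mkdir keeps the symlink from failing on first boot:

# Sketch of the same activation script with a first-boot guard.
system.activationScripts.k3s-bootstrap.text = ''
  mkdir -p /var/lib/rancher/k3s/server/manifests
  ln -sf ${k3sBootstrapFile} /var/lib/rancher/k3s/server/manifests/k3s-bootstrap.json
'';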