Install Longhorn on k3s

Introduce a new storage standard with LVM
This commit is contained in:
Pim Kunis 2024-05-18 23:32:58 +02:00
parent 7e14a2cc13
commit a21a09ad6b
9 changed files with 257 additions and 69 deletions

View file

@ -15,7 +15,7 @@ let
./media.nix
./bind9
./dnsmasq.nix
./minecraft.nix
# ./minecraft.nix
./blog.nix
./atticd.nix
./argo.nix
@ -25,6 +25,7 @@ in
imports = [
./base.nix
./custom-types.nix
./longhorn.nix
./esrom.nix
./metallb.nix
./cert-manager.nix

View file

@ -93,6 +93,7 @@
lab = {
ingresses.atticd = {
host = "attic.kun.is";
entrypoint = "localsecure";
service = {
name = "atticd";

View file

@ -17,7 +17,14 @@
web.containerPort = 9000;
smtp.containerPort = 2500;
};
volumeMounts = [{
name = "storage";
mountPath = "/storage";
}];
};
volumes.storage.persistentVolumeClaim.claimName = "inbucket";
};
};
};
@ -44,6 +51,12 @@
}];
};
};
persistentVolumeClaims.inbucket.spec = {
accessModes = [ "ReadWriteOnce" ];
storageClassName = "longhorn";
resources.requests.storage = "30Mi";
};
};
lab.ingresses.inbucket = {

View file

@ -0,0 +1,52 @@
# Deploys Longhorn (distributed block storage for Kubernetes) via its Helm
# chart and exposes the Longhorn web UI through a Traefik ingress.
{ nixhelm, system, ... }: {
config = {
kubernetes = {
helm.releases.longhorn = {
# Chart derivation is taken from the nixhelm flake input for this system.
chart = nixhelm.chartsDerivations.${system}.longhorn.longhorn;
# Longhorn ships its own CRDs; install them together with the release.
includeCRDs = true;
values = {
defaultSettings = {
# Node-local path where Longhorn keeps replica data.
# NOTE(review): presumably this matches a dedicated data mount on
# each node — confirm against the nodes' filesystem layout.
defaultDataPath = "/mnt/longhorn";
# 0 disables the reserve threshold, letting Longhorn schedule
# replicas until the disk is full — TODO confirm this is intended.
storageMinimalAvailablePercentage = 0;
};
persistence = {
# Default StorageClass keeps 2 replicas per volume.
defaultClassReplicaCount = 2;
};
};
};
resources = {
# Ingress for the Longhorn frontend UI.
ingresses.longhorn = {
metadata.annotations = {
# cert-manager issues the TLS certificate via this ClusterIssuer.
"cert-manager.io/cluster-issuer" = "letsencrypt";
# Restrict the route to Traefik's "localsecure" entrypoint.
"traefik.ingress.kubernetes.io/router.entrypoints" = "localsecure";
};
spec = {
ingressClassName = "traefik";
rules = [{
host = "longhorn.kun.is";
http.paths = [{
path = "/";
pathType = "Prefix";
# Route everything to the stock Longhorn UI service on port 80.
backend.service = {
name = "longhorn-frontend";
port.number = 80;
};
}];
}];
# cert-manager stores the issued certificate in this secret.
tls = [{
secretName = "longhorn-tls";
hosts = [ "longhorn.kun.is" ];
}];
};
};
};
};
};
}

View file

@ -4,13 +4,12 @@
nixosModule.lab = {
storage = {
osDisk = "/dev/sda";
dataPartition = "/dev/nvme0n1p1";
kubernetesNode = true;
};
k3s = {
enable = true;
role = "agent";
# role = "server";
serverAddr = "https://jefke.dmz:6443";
};
};

View file

@ -4,8 +4,7 @@
nixosModule.lab = {
storage = {
osDisk = "/dev/sda";
dataPartition = "/dev/nvme0n1p1";
kubernetesNode = true;
};
k3s = {

View file

@ -11,46 +11,46 @@
config = builtins.readFile ./bird.conf;
};
systemd.network = {
netdevs = {
hamgre = {
netdevConfig = {
Name = "hamgre";
Kind = "gre";
MTUBytes = "1468";
};
#systemd.network = {
# netdevs = {
# hamgre = {
# netdevConfig = {
# Name = "hamgre";
# Kind = "gre";
# MTUBytes = "1468";
# };
tunnelConfig = {
Remote = "145.220.78.4";
#Local = "192.145.57.90";
};
};
# tunnelConfig = {
# Remote = "145.220.78.4";
# #Local = "192.145.57.90";
# };
# };
# hambr = {
# netdevConfig = {
# Name = "hambr";
# Kind = "bridge";
# };
# };
};
# # hambr = {
# # netdevConfig = {
# # Name = "hambr";
# # Kind = "bridge";
# # };
# # };
# };
networks = {
"30-main-nic".networkConfig.Tunnel = "hamgre";
# networks = {
# "30-main-nic".networkConfig.Tunnel = "hamgre";
"40-hamgre" = {
matchConfig.Name = "hamgre";
# "40-hamgre" = {
# matchConfig.Name = "hamgre";
networkConfig = {
Address = "44.137.61.34/30";
};
};
# networkConfig = {
# Address = "44.137.61.34/30";
# };
# };
# "40-hambr" = {
# matchConfig.Name = "hambr";
# # "40-hambr" = {
# # matchConfig.Name = "hambr";
# };
};
};
# # };
# };
#};
};
};
}

View file

@ -37,25 +37,45 @@ in {
};
config = lib.mkIf cfg.enable {
environment.systemPackages = with pkgs; [ k3s ];
environment.systemPackages = with pkgs; [
k3s
openiscsi # Required for Longhorn
nfs-utils # Required for Longhorn
jq # Required for Longhorn
];
networking = {
nftables.enable = lib.mkForce false;
firewall.enable = lib.mkForce false;
};
services.k3s =
let
serverFlags = "--tls-san ${config.networking.fqdn} --disable servicelb --cluster-cidr=10.42.0.0/16,2001:cafe:42::/56 --service-cidr=10.43.0.0/16,2001:cafe:43::/112";
in
{
services = {
k3s =
let
serverFlags = "--tls-san ${config.networking.fqdn} --disable servicelb --cluster-cidr=10.42.0.0/16,2001:cafe:42::/56 --service-cidr=10.43.0.0/16,2001:cafe:43::/112";
in
{
enable = true;
role = cfg.role;
tokenFile = config.age.secrets.k3s-server-token.path;
extraFlags = lib.mkIf (cfg.role == "server") serverFlags;
clusterInit = cfg.clusterInit;
serverAddr = lib.mkIf (! (cfg.serverAddr == null)) cfg.serverAddr;
};
# Required for Longhorn
openiscsi = {
enable = true;
role = cfg.role;
tokenFile = config.age.secrets.k3s-server-token.path;
extraFlags = lib.mkIf (cfg.role == "server") serverFlags;
clusterInit = cfg.clusterInit;
serverAddr = lib.mkIf (! (cfg.serverAddr == null)) cfg.serverAddr;
name = "iqn.2016-04.com.open-iscsi:${config.networking.fqdn}";
};
};
# HACK: Symlink binaries to /usr/local/bin such that Longhorn can find them
# when they use nsenter.
# https://github.com/longhorn/longhorn/issues/2166#issuecomment-1740179416
systemd.tmpfiles.rules = [
"L+ /usr/local/bin - - - - /run/current-system/sw/bin/"
];
system = lib.mkIf (cfg.role == "server") {
activationScripts = {
@ -73,11 +93,11 @@ in {
k3s-certs.text = ''
mkdir -p /var/lib/rancher/k3s/server/tls/etcd
ln -sf ${./k3s-ca/server-ca.crt} /var/lib/rancher/k3s/server/tls/server-ca.crt
ln -sf ${./k3s-ca/client-ca.crt} /var/lib/rancher/k3s/server/tls/client-ca.crt
ln -sf ${./k3s-ca/request-header-ca.crt} /var/lib/rancher/k3s/server/tls/request-header-ca.crt
ln -sf ${./k3s-ca/etcd/peer-ca.crt} /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt
ln -sf ${./k3s-ca/etcd/server-ca.crt} /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt
cp -f ${./k3s-ca/server-ca.crt} /var/lib/rancher/k3s/server/tls/server-ca.crt
cp -f ${./k3s-ca/client-ca.crt} /var/lib/rancher/k3s/server/tls/client-ca.crt
cp -f ${./k3s-ca/request-header-ca.crt} /var/lib/rancher/k3s/server/tls/request-header-ca.crt
cp -f ${./k3s-ca/etcd/peer-ca.crt} /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt
cp -f ${./k3s-ca/etcd/server-ca.crt} /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt
'';
};
};

View file

@ -24,23 +24,126 @@ in {
Mount point of the machine's data partition.
'';
};
kubernetesNode = lib.mkOption {
default = false;
type = lib.types.bool;
description = ''
Whether to apply the Kubernetes disk setup.
'';
};
};
config = {
fileSystems = lib.attrsets.mergeAttrsList [
(lib.optionalAttrs (! machine.isRaspberryPi) {
"${cfg.dataMountPoint}".device = cfg.dataPartition;
})
(lib.optionalAttrs machine.isRaspberryPi {
"/" = {
device = "/dev/disk/by-label/NIXOS_SD";
fsType = "ext4";
options = [ "noatime" ];
};
})
];
fileSystems = {
"/" = lib.mkIf machine.isRaspberryPi {
device = "/dev/disk/by-label/NIXOS_SD";
fsType = "ext4";
options = [ "noatime" ];
};
};
disko = lib.mkIf (! machine.isRaspberryPi) {
disko = lib.mkIf (! machine.isRaspberryPi) (if cfg.kubernetesNode then {
devices = {
disk = {
nvme = {
device = "/dev/nvme0n1";
type = "disk";
content = {
type = "gpt";
partitions = {
boot = {
type = "EF00";
size = "500M";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
};
};
pv_os = {
size = "79G";
content = {
type = "lvm_pv";
vg = "vg_os";
};
};
pv_nvme_extra = {
size = "100%";
content = {
type = "lvm_pv";
vg = "vg_data";
};
};
};
};
};
sata = {
device = "/dev/sda";
type = "disk";
content = {
type = "gpt";
partitions.pv_sata = {
size = "100%";
content = {
type = "lvm_pv";
vg = "vg_data";
};
};
};
};
};
lvm_vg = {
vg_os = {
type = "lvm_vg";
lvs = {
root = {
size = "75G";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
mountOptions = [ "defaults" ];
};
};
swap = {
size = "100%FREE";
content.type = "swap";
};
};
};
vg_data = {
type = "lvm_vg";
lvs.longhorn = {
size = "100%FREE";
content = {
type = "filesystem";
format = "xfs";
mountpoint = "/mnt/longhorn";
};
};
};
};
};
} else {
# TODO: Rename this to 'osDisk'. Unfortunately, we would need to run nixos-anywhere again then.
devices.disk.vdb = {
device = cfg.osDisk;
@ -75,6 +178,6 @@ in {
};
};
};
};
});
};
}