Install longhorn on k3s

Introduce new storage standard with LVM
This commit is contained in:
Pim Kunis 2024-05-18 23:32:58 +02:00
parent 7e14a2cc13
commit a21a09ad6b
9 changed files with 257 additions and 69 deletions

View file

@ -15,7 +15,7 @@ let
./media.nix ./media.nix
./bind9 ./bind9
./dnsmasq.nix ./dnsmasq.nix
./minecraft.nix # ./minecraft.nix
./blog.nix ./blog.nix
./atticd.nix ./atticd.nix
./argo.nix ./argo.nix
@ -25,6 +25,7 @@ in
imports = [ imports = [
./base.nix ./base.nix
./custom-types.nix ./custom-types.nix
./longhorn.nix
./esrom.nix ./esrom.nix
./metallb.nix ./metallb.nix
./cert-manager.nix ./cert-manager.nix

View file

@ -93,6 +93,7 @@
lab = { lab = {
ingresses.atticd = { ingresses.atticd = {
host = "attic.kun.is"; host = "attic.kun.is";
entrypoint = "localsecure";
service = { service = {
name = "atticd"; name = "atticd";

View file

@ -17,7 +17,14 @@
web.containerPort = 9000; web.containerPort = 9000;
smtp.containerPort = 2500; smtp.containerPort = 2500;
}; };
volumeMounts = [{
name = "storage";
mountPath = "/storage";
}];
}; };
volumes.storage.persistentVolumeClaim.claimName = "inbucket";
}; };
}; };
}; };
@ -44,6 +51,12 @@
}]; }];
}; };
}; };
persistentVolumeClaims.inbucket.spec = {
accessModes = [ "ReadWriteOnce" ];
storageClassName = "longhorn";
resources.requests.storage = "30Mi";
};
}; };
lab.ingresses.inbucket = { lab.ingresses.inbucket = {

View file

@ -0,0 +1,52 @@
# Kubernetes module deploying Longhorn distributed block storage via its Helm
# chart, plus an Ingress exposing the Longhorn frontend UI at longhorn.kun.is.
{ nixhelm, system, ... }: {
  config = {
    kubernetes = {
      helm.releases.longhorn = {
        # Chart derivation comes from nixhelm, keyed by the current system.
        chart = nixhelm.chartsDerivations.${system}.longhorn.longhorn;
        includeCRDs = true;
        values = {
          defaultSettings = {
            # Host path where Longhorn stores replica data on each node.
            defaultDataPath = "/mnt/longhorn";
            # 0 disables the reserved-space threshold, allowing scheduling on
            # nearly full disks — NOTE(review): confirm this is intentional.
            storageMinimalAvailablePercentage = 0;
          };
          persistence = {
            # The default Longhorn StorageClass keeps 2 replicas per volume.
            defaultClassReplicaCount = 2;
          };
        };
      };
      resources = {
        # Ingress for the Longhorn web UI, terminated by Traefik with a
        # cert-manager (Let's Encrypt) TLS certificate.
        ingresses.longhorn = {
          metadata.annotations = {
            "cert-manager.io/cluster-issuer" = "letsencrypt";
            "traefik.ingress.kubernetes.io/router.entrypoints" = "localsecure";
          };
          spec = {
            ingressClassName = "traefik";
            rules = [{
              host = "longhorn.kun.is";
              http.paths = [{
                path = "/";
                pathType = "Prefix";
                # Routes to the longhorn-frontend Service installed by the chart.
                backend.service = {
                  name = "longhorn-frontend";
                  port.number = 80;
                };
              }];
            }];
            tls = [{
              # cert-manager stores the issued certificate in this Secret.
              secretName = "longhorn-tls";
              hosts = [ "longhorn.kun.is" ];
            }];
          };
        };
      };
    };
  };
}

View file

@ -4,13 +4,12 @@
nixosModule.lab = { nixosModule.lab = {
storage = { storage = {
osDisk = "/dev/sda"; kubernetesNode = true;
dataPartition = "/dev/nvme0n1p1";
}; };
k3s = { k3s = {
enable = true; enable = true;
role = "agent"; # role = "server";
serverAddr = "https://jefke.dmz:6443"; serverAddr = "https://jefke.dmz:6443";
}; };
}; };

View file

@ -4,8 +4,7 @@
nixosModule.lab = { nixosModule.lab = {
storage = { storage = {
osDisk = "/dev/sda"; kubernetesNode = true;
dataPartition = "/dev/nvme0n1p1";
}; };
k3s = { k3s = {

View file

@ -11,46 +11,46 @@
config = builtins.readFile ./bird.conf; config = builtins.readFile ./bird.conf;
}; };
systemd.network = { #systemd.network = {
netdevs = { # netdevs = {
hamgre = { # hamgre = {
netdevConfig = { # netdevConfig = {
Name = "hamgre"; # Name = "hamgre";
Kind = "gre"; # Kind = "gre";
MTUBytes = "1468"; # MTUBytes = "1468";
}; # };
tunnelConfig = { # tunnelConfig = {
Remote = "145.220.78.4"; # Remote = "145.220.78.4";
#Local = "192.145.57.90"; # #Local = "192.145.57.90";
}; # };
}; # };
# hambr = { # # hambr = {
# netdevConfig = { # # netdevConfig = {
# Name = "hambr"; # # Name = "hambr";
# Kind = "bridge"; # # Kind = "bridge";
# }; # # };
# }; # # };
}; # };
networks = { # networks = {
"30-main-nic".networkConfig.Tunnel = "hamgre"; # "30-main-nic".networkConfig.Tunnel = "hamgre";
"40-hamgre" = { # "40-hamgre" = {
matchConfig.Name = "hamgre"; # matchConfig.Name = "hamgre";
networkConfig = { # networkConfig = {
Address = "44.137.61.34/30"; # Address = "44.137.61.34/30";
}; # };
}; # };
# "40-hambr" = { # # "40-hambr" = {
# matchConfig.Name = "hambr"; # # matchConfig.Name = "hambr";
# }; # # };
}; # };
}; #};
}; };
}; };
} }

View file

@ -37,25 +37,45 @@ in {
}; };
config = lib.mkIf cfg.enable { config = lib.mkIf cfg.enable {
environment.systemPackages = with pkgs; [ k3s ]; environment.systemPackages = with pkgs; [
k3s
openiscsi # Required for Longhorn
nfs-utils # Required for Longhorn
jq # Required for Longhorn
];
networking = { networking = {
nftables.enable = lib.mkForce false; nftables.enable = lib.mkForce false;
firewall.enable = lib.mkForce false; firewall.enable = lib.mkForce false;
}; };
services.k3s = services = {
let k3s =
serverFlags = "--tls-san ${config.networking.fqdn} --disable servicelb --cluster-cidr=10.42.0.0/16,2001:cafe:42::/56 --service-cidr=10.43.0.0/16,2001:cafe:43::/112"; let
in serverFlags = "--tls-san ${config.networking.fqdn} --disable servicelb --cluster-cidr=10.42.0.0/16,2001:cafe:42::/56 --service-cidr=10.43.0.0/16,2001:cafe:43::/112";
{ in
{
enable = true;
role = cfg.role;
tokenFile = config.age.secrets.k3s-server-token.path;
extraFlags = lib.mkIf (cfg.role == "server") serverFlags;
clusterInit = cfg.clusterInit;
serverAddr = lib.mkIf (! (cfg.serverAddr == null)) cfg.serverAddr;
};
# Required for Longhorn
openiscsi = {
enable = true; enable = true;
role = cfg.role; name = "iqn.2016-04.com.open-iscsi:${config.networking.fqdn}";
tokenFile = config.age.secrets.k3s-server-token.path;
extraFlags = lib.mkIf (cfg.role == "server") serverFlags;
clusterInit = cfg.clusterInit;
serverAddr = lib.mkIf (! (cfg.serverAddr == null)) cfg.serverAddr;
}; };
};
# HACK: Symlink binaries to /usr/local/bin such that Longhorn can find them
# when they use nsenter.
# https://github.com/longhorn/longhorn/issues/2166#issuecomment-1740179416
systemd.tmpfiles.rules = [
"L+ /usr/local/bin - - - - /run/current-system/sw/bin/"
];
system = lib.mkIf (cfg.role == "server") { system = lib.mkIf (cfg.role == "server") {
activationScripts = { activationScripts = {
@ -73,11 +93,11 @@ in {
k3s-certs.text = '' k3s-certs.text = ''
mkdir -p /var/lib/rancher/k3s/server/tls/etcd mkdir -p /var/lib/rancher/k3s/server/tls/etcd
ln -sf ${./k3s-ca/server-ca.crt} /var/lib/rancher/k3s/server/tls/server-ca.crt cp -f ${./k3s-ca/server-ca.crt} /var/lib/rancher/k3s/server/tls/server-ca.crt
ln -sf ${./k3s-ca/client-ca.crt} /var/lib/rancher/k3s/server/tls/client-ca.crt cp -f ${./k3s-ca/client-ca.crt} /var/lib/rancher/k3s/server/tls/client-ca.crt
ln -sf ${./k3s-ca/request-header-ca.crt} /var/lib/rancher/k3s/server/tls/request-header-ca.crt cp -f ${./k3s-ca/request-header-ca.crt} /var/lib/rancher/k3s/server/tls/request-header-ca.crt
ln -sf ${./k3s-ca/etcd/peer-ca.crt} /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt cp -f ${./k3s-ca/etcd/peer-ca.crt} /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt
ln -sf ${./k3s-ca/etcd/server-ca.crt} /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt cp -f ${./k3s-ca/etcd/server-ca.crt} /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt
''; '';
}; };
}; };

View file

@ -24,23 +24,126 @@ in {
Mount point of the machine's data partition. Mount point of the machine's data partition.
''; '';
}; };
kubernetesNode = lib.mkOption {
default = false;
type = lib.types.bool;
description = ''
Whether to apply the Kubernetes disk setup.
'';
};
}; };
config = { config = {
fileSystems = lib.attrsets.mergeAttrsList [ fileSystems = {
(lib.optionalAttrs (! machine.isRaspberryPi) { "/" = lib.mkIf machine.isRaspberryPi {
"${cfg.dataMountPoint}".device = cfg.dataPartition; device = "/dev/disk/by-label/NIXOS_SD";
}) fsType = "ext4";
(lib.optionalAttrs machine.isRaspberryPi { options = [ "noatime" ];
"/" = { };
device = "/dev/disk/by-label/NIXOS_SD"; };
fsType = "ext4";
options = [ "noatime" ];
};
})
];
disko = lib.mkIf (! machine.isRaspberryPi) { disko = lib.mkIf (! machine.isRaspberryPi) (if cfg.kubernetesNode then {
devices = {
disk = {
nvme = {
device = "/dev/nvme0n1";
type = "disk";
content = {
type = "gpt";
partitions = {
boot = {
type = "EF00";
size = "500M";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
};
};
pv_os = {
size = "79G";
content = {
type = "lvm_pv";
vg = "vg_os";
};
};
pv_nvme_extra = {
size = "100%";
content = {
type = "lvm_pv";
vg = "vg_data";
};
};
};
};
};
sata = {
device = "/dev/sda";
type = "disk";
content = {
type = "gpt";
partitions.pv_sata = {
size = "100%";
content = {
type = "lvm_pv";
vg = "vg_data";
};
};
};
};
};
lvm_vg = {
vg_os = {
type = "lvm_vg";
lvs = {
root = {
size = "75G";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
mountOptions = [ "defaults" ];
};
};
swap = {
size = "100%FREE";
content.type = "swap";
};
};
};
vg_data = {
type = "lvm_vg";
lvs.longhorn = {
size = "100%FREE";
content = {
type = "filesystem";
format = "xfs";
mountpoint = "/mnt/longhorn";
};
};
};
};
};
} else {
# TODO: Rename this to 'osDisk'. Unfortunately, we would need to run nixos-anywhere again then. # TODO: Rename this to 'osDisk'. Unfortunately, we would need to run nixos-anywhere again then.
devices.disk.vdb = { devices.disk.vdb = {
device = cfg.osDisk; device = cfg.osDisk;
@ -75,6 +178,6 @@ in {
}; };
}; };
}; };
}; });
}; };
} }