nixos-servers/nix/flake/kubenix/default.nix

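# Renders the cluster's Kubernetes manifests with kubenix, once per default system.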
{ self, flake-utils, kubenix, nixhelm, ... }: flake-utils.lib.eachDefaultSystem
  (system: {
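    # Full cluster configuration: the shared base manifests plus every service module.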
    kubenix = kubenix.packages.${system}.default.override
      {
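        # Extra arguments made available to every imported module.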
        specialArgs = {
          flake = self;
          inherit nixhelm system;
        };

        module = { kubenix, ... }: {
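          # One Nix module per self-hosted application, on top of the shared base manifests.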
          imports = [
            kubenix.modules.k8s
            kubenix.modules.helm
            ./base.nix
            ./freshrss.nix
            ./cyberchef.nix
            ./kms.nix
            ./inbucket.nix
            ./radicale.nix
            ./syncthing.nix
            ./nextcloud.nix
            ./pihole.nix
            ./hedgedoc.nix
            ./paperless-ngx.nix
            ./kitchenowl.nix
            ./forgejo.nix
            ./media.nix
            ./bind9.nix
            ./dnsmasq.nix
          ];

          kubernetes = {
            customTypes = {
              # HACK: These are dummy custom types.
              # They are needed because the CRDs, which are imported via a Helm chart, are not available as Nix modules.
              # There is no Nix-based validation on resources defined using these types!
              # See: https://github.com/hall/kubenix/issues/34
              ipAddressPool = {
                attrName = "ipAddressPools";
                group = "metallb.io";
                version = "v1beta1";
                kind = "IPAddressPool";
              };

              l2Advertisement = {
                attrName = "l2Advertisements";
                group = "metallb.io";
                version = "v1beta1";
                kind = "L2Advertisement";
              };
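              # HelmChartConfig is the CRD of k3s' built-in Helm controller; it is used below to override the bundled Traefik chart.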
              helmChartConfig = {
                attrName = "helmChartConfigs";
                group = "helm.cattle.io";
                version = "v1";
                kind = "HelmChartConfig";
              };

              clusterIssuer = {
                attrName = "clusterIssuers";
                group = "cert-manager.io";
                version = "v1";
                kind = "ClusterIssuer";
              };
            };

            # TODO: These resources should probably exist within the kube-system namespace.
            resources = {
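              # MetalLB assigns load balancer IPs from this pool and announces them on the local network over layer 2 (ARP).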
              ipAddressPools.main.spec.addresses = [ "192.168.30.128-192.168.30.200" ];
              l2Advertisements.main.metadata = { };

              # NOTE: The name of each helmChartConfig must match the relevant chart name!
              # Override Traefik's service with a static load balancer IP.
              helmChartConfigs = {
                traefik = {
                  metadata.namespace = "kube-system";
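                  # Besides pinning the IP, this adds an extra TLS entrypoint ("localsecure", exposed on port 444)
                  # and lets the ingress provider route to ExternalName services such as esrom below.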
                  spec.valuesContent = ''
                    service:
                      spec:
                        loadBalancerIP: "192.168.30.128"
                    ports:
                      localsecure:
                        port: 8444
                        expose: true
                        exposedPort: 444
                        protocol: TCP
                        tls:
                          enabled: true
                          options: ""
                          certResolver: ""
                          domains: []
                    providers:
                      kubernetesIngress:
                        allowExternalNameServices: true
                  '';
                };
              };
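              # Issue certificates from Let's Encrypt production, solving HTTP-01 challenges through the Traefik ingress.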
              clusterIssuers.letsencrypt = {
                metadata.namespace = "kube-system";

                spec.acme = {
                  server = "https://acme-v02.api.letsencrypt.org/directory";
                  email = "pim@kunis.nl";
                  privateKeySecretRef.name = "letsencrypt-private-key";

                  solvers = [{
                    selector = { };
                    http01.ingress.class = "traefik";
                  }];
                };
              };
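              # esrom lives outside the cluster; this ExternalName service points at esrom.dmz so the ingress below can proxy to it.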
              services.esrom.spec = {
                type = "ExternalName";
                externalName = "esrom.dmz";

                ports = [{
                  port = 80;
                  targetPort = 80;
                }];
              };
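              # Expose esrom at https://esrom.kun.is, with a certificate from the letsencrypt issuer above.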
              ingresses.esrom = {
                metadata.annotations."cert-manager.io/cluster-issuer" = "letsencrypt";

                spec = {
                  ingressClassName = "traefik";

                  rules = [{
                    host = "esrom.kun.is";

                    http.paths = [{
                      path = "/";
                      pathType = "Prefix";

                      backend.service = {
                        name = "esrom";
                        port.number = 80;
                      };
                    }];
                  }];

                  tls = [{
                    secretName = "esrom-tls";
                    hosts = [ "esrom.kun.is" ];
                  }];
                };
              };
            };
          };
        };
      };
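    # Minimal variant for bootstrapping a fresh cluster: only the shared base manifests, without any of the service modules.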
    kubenix-bootstrap = kubenix.packages.${system}.default.override
      {
        specialArgs = {
          flake = self;
          inherit nixhelm system;
        };

        module = { kubenix, ... }: {
          imports = [
            kubenix.modules.k8s
            kubenix.modules.helm
            ./base.nix
          ];
        };
      };
  })