diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg
new file mode 100644
index 0000000..53d5dad
--- /dev/null
+++ b/ansible/ansible.cfg
@@ -0,0 +1,4 @@
+[defaults]
+inventory = inventory
+remote_tmp = /tmp/ansible
+ansible_python_interpreter = /usr/bin/python3.12
diff --git a/ansible/inventory/pikvm.yml b/ansible/inventory/pikvm.yml
new file mode 100644
index 0000000..7b3205d
--- /dev/null
+++ b/ansible/inventory/pikvm.yml
@@ -0,0 +1,5 @@
+all:
+  hosts:
+    pikvm:
+      ansible_host: pikvm.dmz
+      ansible_user: root
diff --git a/ansible/main.yml b/ansible/main.yml
new file mode 100644
index 0000000..6b557e0
--- /dev/null
+++ b/ansible/main.yml
@@ -0,0 +1,6 @@
+---
+- name: Configure PiKVM server
+  hosts: all
+
+  roles:
+    - pikvm
diff --git a/ansible/roles/pikvm/tasks/main.yml b/ansible/roles/pikvm/tasks/main.yml
new file mode 100644
index 0000000..4a93953
--- /dev/null
+++ b/ansible/roles/pikvm/tasks/main.yml
@@ -0,0 +1,20 @@
+---
+- name: Mount filesystem as read-write
+  ansible.builtin.command: rw
+  vars:
+    root_mount: "{{ ansible_mounts | selectattr('mount', 'equalto', '/') | first }}"
+  when: "'ro' in root_mount.options.split(',')"
+
+- name: Install Tailscale
+  community.general.pacman:
+    name: tailscale-pikvm
+    state: latest
+
+- name: Enable Tailscale
+  ansible.builtin.systemd_service:
+    name: tailscaled
+    state: started
+    enabled: true
+
+- name: Mount filesystem as read-only
+  ansible.builtin.command: ro
diff --git a/flake-parts/shell.nix b/flake-parts/shell.nix
new file mode 100644
index 0000000..67e7b03
--- /dev/null
+++ b/flake-parts/shell.nix
@@ -0,0 +1,9 @@
+{ flake-utils, nixpkgs, ... }: flake-utils.lib.eachDefaultSystem (system:
+let
+  pkgs = nixpkgs.legacyPackages.${system};
+in
+{
+  devShells.default = pkgs.mkShell {
+    buildInputs = with pkgs; [ ansible ];
+  };
+})
diff --git a/flake.nix b/flake.nix
index 198e0d5..25e1310 100644
--- a/flake.nix
+++ b/flake.nix
@@ -69,6 +69,7 @@
         ./flake-parts/deploy.nix
         ./flake-parts/nixos.nix
         ./flake-parts/kubenix.nix
+        ./flake-parts/shell.nix
       ] // (flake-utils.lib.eachDefaultSystem (system: {
         formatter = nixpkgs.legacyPackages.${system}.nixfmt;
       }));
diff --git a/kubenix-modules/attic.nix b/kubenix-modules/attic.nix
index 7d59c8b..7e0e79b 100644
--- a/kubenix-modules/attic.nix
+++ b/kubenix-modules/attic.nix
@@ -122,7 +122,7 @@
 
           spec = {
             containers.postgres = {
-              image = myLib.globals.images.atticPostgres;
+              image = myLib.globals.images.postgres15;
               imagePullPolicy = "IfNotPresent";
               ports.postgres.containerPort = 5432;
 
diff --git a/kubenix-modules/atuin.nix b/kubenix-modules/atuin.nix
index 96a1002..673ccc2 100644
--- a/kubenix-modules/atuin.nix
+++ b/kubenix-modules/atuin.nix
@@ -51,7 +51,7 @@
         };
 
         database = {
-          image = myLib.globals.images.atuinPostgres;
+          image = myLib.globals.images.postgres14;
          ports.web.containerPort = 5432;
 
          env = {
diff --git a/kubenix-modules/hedgedoc.nix b/kubenix-modules/hedgedoc.nix
index 7cb68cc..d9ceeb0 100644
--- a/kubenix-modules/hedgedoc.nix
+++ b/kubenix-modules/hedgedoc.nix
@@ -88,7 +88,7 @@
 
           spec = {
             containers.postgres = {
-              image = myLib.globals.images.hedgedocPostgres;
+              image = myLib.globals.images.postgres15;
               imagePullPolicy = "IfNotPresent";
               ports.postgres.containerPort = 5432;
 
diff --git a/kubenix-modules/immich.nix b/kubenix-modules/immich.nix
index e51297f..1bca830 100644
--- a/kubenix-modules/immich.nix
+++ b/kubenix-modules/immich.nix
@@ -82,7 +82,7 @@
             volumes.cache.persistentVolumeClaim.claimName = "cache";
 
             containers.machine-learning = {
-              image = myLib.globals.images.immichML;
+              image = myLib.globals.images.immich-machine-learning;
               imagePullPolicy = "IfNotPresent";
               ports.ml.containerPort = 3003;
               env.MACHINE_LEARNING_WORKER_TIMEOUT.value = "600";
@@ -119,7 +119,7 @@
 
           spec = {
             containers.redis = {
-              image = myLib.globals.images.immichRedis;
+              image = myLib.globals.images.immich-redis;
               ports.redis.containerPort = 6379;
               imagePullPolicy = "IfNotPresent";
             };
@@ -152,7 +152,7 @@
             volumes.data.persistentVolumeClaim.claimName = "database";
 
             containers.postgres = {
-              image = myLib.globals.images.immichPostgres;
+              image = myLib.globals.images.immich-postgres;
               imagePullPolicy = "IfNotPresent";
               command = [ "postgres" ];
               args = [ "-c" "shared_preload_libraries=vectors.so" "-c" "search_path=\"$$user\", public, vectors" "-c" "logging_collector=on" "-c" "max_wal_size=2GB" "-c" "shared_buffers=512MB" "-c" "wal_compression=on" ];
diff --git a/kubenix-modules/nextcloud.nix b/kubenix-modules/nextcloud.nix
index 11f5aef..e5a7056 100644
--- a/kubenix-modules/nextcloud.nix
+++ b/kubenix-modules/nextcloud.nix
@@ -79,7 +79,7 @@
 
           spec = {
             containers.postgres = {
-              image = myLib.globals.images.nextcloudPostgres;
+              image = myLib.globals.images.postgres15;
               imagePullPolicy = "IfNotPresent";
               ports.postgres.containerPort = 5432;
 
diff --git a/kubenix-modules/traefik.nix b/kubenix-modules/traefik.nix
index d9018db..e2d476b 100644
--- a/kubenix-modules/traefik.nix
+++ b/kubenix-modules/traefik.nix
@@ -65,7 +65,7 @@
       };
     };
 
-    tailscaleIngresses.tailscale = {
+    tailscaleIngresses.traefik-dashboard = {
      host = "traefik";
      service.name = "traefik-dashboard";
    };
diff --git a/machines/default.nix b/machines/default.nix
index 4adab0f..f815d11 100644
--- a/machines/default.nix
+++ b/machines/default.nix
@@ -39,8 +39,8 @@ in
     ./atlas.nix
     ./jefke.nix
     ./lewis.nix
-    ./talos.nix
-    ./pikvm.nix
+    # ./talos.nix
+    # ./pikvm.nix
   ];
 
   options = {
diff --git a/nixos-modules/monitoring/default.nix b/nixos-modules/monitoring/default.nix
index 574af79..2d6560d 100644
--- a/nixos-modules/monitoring/default.nix
+++ b/nixos-modules/monitoring/default.nix
@@ -23,7 +23,6 @@ in
 
   services.prometheus = {
     enable = cfg.server.enable;
-    webExternalUrl = "/prometheus";
 
     exporters = {
       node = {
@@ -32,14 +31,34 @@ in
     };
 
     scrapeConfigs = lib.mkIf cfg.server.enable (
-      lib.attrsets.mapAttrsToList
-        (name: machine: {
-          job_name = name;
+      let
+        generated = lib.attrsets.mapAttrsToList
+          (name: machine: {
+            job_name = name;
+            static_configs = [{
+              targets = [ "${name}.dmz:${toString config.services.prometheus.exporters.node.port}" ];
+            }];
+          })
+          machines;
+
+        pikvm = {
+          job_name = "pikvm";
+          metrics_path = "/api/export/prometheus/metrics";
+          scheme = "https";
+          tls_config.insecure_skip_verify = true;
+
+          # We don't care about security here, it's behind a VPN.
+          basic_auth = {
+            username = "admin";
+            password = "admin";
+          };
+
          static_configs = [{
-            targets = [ "${name}.dmz:${toString config.services.prometheus.exporters.node.port}" ];
+            targets = [ "pikvm.dmz" ];
          }];
-        })
-        machines
+        };
+      in
+      generated ++ [ pikvm ]
     );
   };
 
@@ -47,7 +66,7 @@ in
     enable = true;
 
    virtualHosts."${config.networking.fqdn}" = {
-      locations."/prometheus/" = {
+      locations."/" = {
        proxyPass = "http://127.0.0.1:${toString config.services.prometheus.port}";
        recommendedProxySettings = true;
      };
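
Usage sketch, not part of the diff above: assuming the dev shell from flake-parts/shell.nix is exposed as the flake's default devShell, the new playbook would be run roughly like this. The host pikvm.dmz and the root login come from the inventory file added above; the exact commands are an assumption, not something the diff itself documents.

$ nix develop                # dev shell provides ansible (flake-parts/shell.nix)
$ cd ansible                 # ansible.cfg here points inventory at inventory/
$ ansible-playbook main.yml  # applies the pikvm role to pikvm.dmz as root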