Add Ansible playbook to configure PiKVM

Add Nix shell to flake
Monitor PiKVM with Prometheus
Serve Prometheus on /
parent 04439a9ee5
commit b139f3d469

14 changed files with 82 additions and 18 deletions
ansible/ansible.cfg (new file)
@@ -0,0 +1,4 @@
+[defaults]
+inventory = inventory
+remote_tmp = /tmp/ansible
+ansible_python_interpreter = /usr/bin/python3.12
ansible/inventory/pikvm.yml (new file)
@@ -0,0 +1,5 @@
+all:
+  hosts:
+    pikvm:
+      ansible_host: pikvm.dmz
+      ansible_user: root
ansible/main.yml (new file)
@@ -0,0 +1,6 @@
+---
+- name: Configure PiKVM server
+  hosts: all
+
+  roles:
+    - pikvm
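Because the play runs against hosts: all, any machine added to the inventory picks up the same pikvm role. As a rough sketch (the second host name and address are hypothetical, not part of this commit), a second PiKVM would only need an extra inventory entry:

    all:
      hosts:
        pikvm:
          ansible_host: pikvm.dmz
          ansible_user: root
        # hypothetical second host, shown only to illustrate the inventory layout
        pikvm2:
          ansible_host: pikvm2.dmz
          ansible_user: root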
ansible/roles/pikvm/tasks/main.yml (new file)
@@ -0,0 +1,20 @@
+---
+- name: Mount filesystem as read-write
+  ansible.builtin.command: rw
+  vars:
+    root_mount: "{{ ansible_mounts | selectattr('mount', 'equalto', '/') | first }}"
+  when: "'ro' in root_mount.options.split(',')"
+
+- name: Install Tailscale
+  community.general.pacman:
+    name: tailscale-pikvm
+    state: latest
+
+- name: Enable Tailscale
+  ansible.builtin.systemd_service:
+    name: tailscaled
+    state: started
+    enabled: true
+
+- name: Mount filesystem as read-only
+  ansible.builtin.command: ro
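The rw and ro commands are PiKVM's stock helpers for remounting the root filesystem read-write and read-only; since they run through ansible.builtin.command, both tasks report "changed" on every run. An optional variant (not part of this commit) that keeps repeated runs quiet could add changed_when, a standard Ansible keyword:

    - name: Mount filesystem as read-write
      ansible.builtin.command: rw
      changed_when: false  # remount always converges, so don't report a change

    - name: Mount filesystem as read-only
      ansible.builtin.command: ro
      changed_when: false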
flake-parts/shell.nix (new file)
@@ -0,0 +1,9 @@
+{ flake-utils, nixpkgs, ... }: flake-utils.lib.eachDefaultSystem (system:
+  let
+    pkgs = nixpkgs.legacyPackages.${system};
+  in
+  {
+    devShells.default = pkgs.mkShell {
+      buildInputs = with pkgs; [ ansible ];
+    };
+  })
@@ -69,6 +69,7 @@
     ./flake-parts/deploy.nix
     ./flake-parts/nixos.nix
     ./flake-parts/kubenix.nix
+    ./flake-parts/shell.nix
   ] // (flake-utils.lib.eachDefaultSystem (system: {
     formatter = nixpkgs.legacyPackages.${system}.nixfmt;
   }));
@@ -122,7 +122,7 @@
 
       spec = {
        containers.postgres = {
-          image = myLib.globals.images.atticPostgres;
+          image = myLib.globals.images.postgres15;
          imagePullPolicy = "IfNotPresent";
          ports.postgres.containerPort = 5432;
 
@@ -51,7 +51,7 @@
      };
 
      database = {
-        image = myLib.globals.images.atuinPostgres;
+        image = myLib.globals.images.postgres14;
        ports.web.containerPort = 5432;
 
        env = {
@@ -88,7 +88,7 @@
 
      spec = {
        containers.postgres = {
-          image = myLib.globals.images.hedgedocPostgres;
+          image = myLib.globals.images.postgres15;
          imagePullPolicy = "IfNotPresent";
          ports.postgres.containerPort = 5432;
 
|
@ -82,7 +82,7 @@
|
|||
volumes.cache.persistentVolumeClaim.claimName = "cache";
|
||||
|
||||
containers.machine-learning = {
|
||||
image = myLib.globals.images.immichML;
|
||||
image = myLib.globals.images.immich-machine-learning;
|
||||
imagePullPolicy = "IfNotPresent";
|
||||
ports.ml.containerPort = 3003;
|
||||
env.MACHINE_LEARNING_WORKER_TIMEOUT.value = "600";
|
||||
|
@@ -119,7 +119,7 @@
 
      spec = {
        containers.redis = {
-          image = myLib.globals.images.immichRedis;
+          image = myLib.globals.images.immich-redis;
          ports.redis.containerPort = 6379;
          imagePullPolicy = "IfNotPresent";
        };
@@ -152,7 +152,7 @@
      volumes.data.persistentVolumeClaim.claimName = "database";
 
      containers.postgres = {
-        image = myLib.globals.images.immichPostgres;
+        image = myLib.globals.images.immich-postgres;
        imagePullPolicy = "IfNotPresent";
        command = [ "postgres" ];
        args = [ "-c" "shared_preload_libraries=vectors.so" "-c" "search_path=\"$$user\", public, vectors" "-c" "logging_collector=on" "-c" "max_wal_size=2GB" "-c" "shared_buffers=512MB" "-c" "wal_compression=on" ];
@@ -79,7 +79,7 @@
 
      spec = {
        containers.postgres = {
-          image = myLib.globals.images.nextcloudPostgres;
+          image = myLib.globals.images.postgres15;
          imagePullPolicy = "IfNotPresent";
          ports.postgres.containerPort = 5432;
 
@@ -65,7 +65,7 @@
      };
    };
 
-    tailscaleIngresses.tailscale = {
+    tailscaleIngresses.traefik-dashboard = {
      host = "traefik";
      service.name = "traefik-dashboard";
    };
@@ -39,8 +39,8 @@ in
    ./atlas.nix
    ./jefke.nix
    ./lewis.nix
-    ./talos.nix
-    ./pikvm.nix
+    # ./talos.nix
+    # ./pikvm.nix
  ];
 
  options = {
@@ -23,7 +23,6 @@ in
 
  services.prometheus = {
    enable = cfg.server.enable;
-    webExternalUrl = "/prometheus";
 
    exporters = {
      node = {
@@ -32,14 +31,34 @@ in
    };
 
    scrapeConfigs = lib.mkIf cfg.server.enable (
-      lib.attrsets.mapAttrsToList
-        (name: machine: {
-          job_name = name;
-          static_configs = [{
-            targets = [ "${name}.dmz:${toString config.services.prometheus.exporters.node.port}" ];
-          }];
-        })
-        machines
+      let
+        generated = lib.attrsets.mapAttrsToList
+          (name: machine: {
+            job_name = name;
+            static_configs = [{
+              targets = [ "${name}.dmz:${toString config.services.prometheus.exporters.node.port}" ];
+            }];
+          })
+          machines;
+
+        pikvm = {
+          job_name = "pikvm";
+          metrics_path = "/api/export/prometheus/metrics";
+          scheme = "https";
+          tls_config.insecure_skip_verify = true;
+
+          # We don't care about security here, it's behind a VPN.
+          basic_auth = {
+            username = "admin";
+            password = "admin";
+          };
+
+          static_configs = [{
+            targets = [ "pikvm.dmz" ];
+          }];
+        };
+      in
+      generated ++ [ pikvm ]
    );
  };
 
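For reference, the generated Nix list renders into roughly the following Prometheus scrape_configs; this is a sketch that assumes the node exporter's default port 9100 and uses jefke (one of the hosts imported in the machines module) as an example, with one such node job emitted per machine:

    scrape_configs:
      - job_name: jefke
        static_configs:
          - targets: ["jefke.dmz:9100"]

      - job_name: pikvm
        metrics_path: /api/export/prometheus/metrics
        scheme: https
        tls_config:
          insecure_skip_verify: true
        # We don't care about security here, it's behind a VPN.
        basic_auth:
          username: admin
          password: admin
        static_configs:
          - targets: ["pikvm.dmz"]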
@@ -47,7 +66,7 @@ in
    enable = true;
 
    virtualHosts."${config.networking.fqdn}" = {
-      locations."/prometheus/" = {
+      locations."/" = {
        proxyPass = "http://127.0.0.1:${toString config.services.prometheus.port}";
        recommendedProxySettings = true;
      };