This commit is contained in:
Pim Kunis 2024-04-26 10:59:32 +02:00
commit f212aae139
157 changed files with 6428 additions and 0 deletions

3
.envrc Normal file
View file

@ -0,0 +1,3 @@
# Fetch devenv's direnv integration, pinned to an exact upstream commit and
# verified against a sha256 hash so a compromised or moved file cannot be
# silently substituted (supply-chain safety).
source_url "https://raw.githubusercontent.com/cachix/devenv/95f329d49a8a5289d31e0982652f7058a189bfca/direnvrc" "sha256-d+8cBpDfDBj41inrADaJt+bDWhOktwslgoP5YiGJ1v0="
# Activate the development shell declared in devenv.nix on directory entry.
use devenv

20
.gitignore vendored Normal file
View file

@ -0,0 +1,20 @@
# Devenv
.devenv*
devenv.local.nix
# direnv
.direnv
# pre-commit
.pre-commit-config.yaml
# Jekyll
.jekyll-cache/

156
devenv.lock Normal file
View file

@ -0,0 +1,156 @@
{
"nodes": {
"devenv": {
"locked": {
"dir": "src/modules",
"lastModified": 1713968789,
"owner": "cachix",
"repo": "devenv",
"rev": "b26b52a4dac68bdc305f6b9df948c97f49b2c3ee",
"treeHash": "4a034bbd3511c196f4075a1eb0da1b422d1011db",
"type": "github"
},
"original": {
"dir": "src/modules",
"owner": "cachix",
"repo": "devenv",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"treeHash": "2addb7b71a20a25ea74feeaf5c2f6a6b30898ecb",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1710146030,
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"treeHash": "bd263f021e345cb4a39d80c126ab650bebc3c10c",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"pre-commit-hooks",
"nixpkgs"
]
},
"locked": {
"lastModified": 1709087332,
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
"treeHash": "ca14199cabdfe1a06a7b1654c76ed49100a689f9",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1713361204,
"owner": "cachix",
"repo": "devenv-nixpkgs",
"rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6",
"treeHash": "50354b35a3e0277d4a83a0a88fa0b0866b5f392f",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "rolling",
"repo": "devenv-nixpkgs",
"type": "github"
}
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1713995372,
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "dd37924974b9202f8226ed5d74a252a9785aedf8",
"treeHash": "8114bf8e19ad8c67c0e2639b83c606c58c7bccec",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-23.11",
"repo": "nixpkgs",
"type": "github"
}
},
"pre-commit-hooks": {
"inputs": {
"flake-compat": "flake-compat",
"flake-utils": "flake-utils",
"gitignore": "gitignore",
"nixpkgs": [
"nixpkgs"
],
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1713954846,
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"rev": "6fb82e44254d6a0ece014ec423cb62d92435336f",
"treeHash": "a456512c8da29752b79131f1e5b45053e2394078",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"type": "github"
}
},
"root": {
"inputs": {
"devenv": "devenv",
"nixpkgs": "nixpkgs",
"pre-commit-hooks": "pre-commit-hooks"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"treeHash": "cce81f2a0f0743b2eb61bc2eb6c7adbe2f2c6beb",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

11
devenv.nix Normal file
View file

@ -0,0 +1,11 @@
{ pkgs, lib, config, inputs, ... }:
{
  # Expose the bundled Ruby gem environment (built from ./src's Gemfile)
  # together with its wrapped Ruby interpreter inside the devenv shell.
  packages =
    let
      rubyEnv = pkgs.bundlerEnv {
        name = "blog-pim";
        gemdir = ./src;
      };
    in
    [ rubyEnv rubyEnv.wrappedRuby ];
}

14
devenv.yaml Normal file
View file

@ -0,0 +1,14 @@
inputs:
nixpkgs:
url: github:cachix/devenv-nixpkgs/rolling
# If you're using non-OSS software, you can set allowUnfree to true.
# allowUnfree: true
# If you're willing to use packages with known security vulnerabilities,
# list them explicitly:
# permittedInsecurePackages:
#  - "openssl-1.1.1w"
# If you have more than one devenv you can merge them
#imports:
# - ./backend

15
flake.nix Normal file
View file

@ -0,0 +1,15 @@
{
  description = "A very basic flake";

  inputs = {
    # Track the rolling nixos-unstable channel of nixpkgs.
    nixpkgs.url = "github:nixos/nixpkgs?ref=nixos-unstable";
  };

  outputs = { self, nixpkgs }:
    let
      pkgs = nixpkgs.legacyPackages.x86_64-linux;
    in
    {
      # GNU Hello, exposed both under its own name and as the default package.
      packages.x86_64-linux.hello = pkgs.hello;
      packages.x86_64-linux.default = self.packages.x86_64-linux.hello;
    };
}

View file

@ -0,0 +1 @@
I"× {"source"=>"/home/pim/git/blog-pim/src", "destination"=>"/home/pim/git/blog-pim/src/_site", "collections_dir"=>"", "cache_dir"=>".jekyll-cache", "plugins_dir"=>"_plugins", "layouts_dir"=>"_layouts", "data_dir"=>"_data", "includes_dir"=>"_includes", "collections"=>{"posts"=>{"output"=>true, "permalink"=>"/:title/"}}, "safe"=>false, "include"=>["_redirects", ".htaccess"], "exclude"=>["CNAME", "Gemfile", "Gemfile.lock", "LICENSE", "CHANGELOG.md", "README.md", "node_modules", "CODE_OF_CONDUCT.md", "CONTRIBUTING.md", "lighthouse.png", "klise-*.gem", "klise.gemspec", "gemset.nix", ".sass-cache", ".jekyll-cache", "gemfiles", "vendor/bundle/", "vendor/cache/", "vendor/gems/", "vendor/ruby/"], "keep_files"=>[".git", ".svn"], "encoding"=>"utf-8", "markdown_ext"=>"markdown,mkdown,mkdn,mkd,md", "strict_front_matter"=>false, "show_drafts"=>nil, "limit_posts"=>0, "future"=>false, "unpublished"=>false, "whitelist"=>[], "plugins"=>["jekyll-feed", "jekyll-sitemap", "jekyll-postfiles", "jekyll-commonmark-ghpages"], "markdown"=>"CommonMarkGhPages", "highlighter"=>"rouge", "lsi"=>false, "excerpt_separator"=>"\n\n", "incremental"=>false, "detach"=>false, "port"=>"4000", "host"=>"127.0.0.1", "baseurl"=>"/", "show_dir_listing"=>false, "permalink"=>"/:title/", "paginate_path"=>"/page:num", "timezone"=>"Europe/Amsterdam", "quiet"=>false, "verbose"=>false, "defaults"=>[{"scope"=>{"path"=>""}, "values"=>{"layout"=>"post", "comments"=>false}}], "liquid"=>{"error_mode"=>"warn", "strict_filters"=>false, "strict_variables"=>false}, "kramdown"=>{"auto_ids"=>true, "toc_levels"=>[1, 2, 3, 4, 5, 6], "entity_output"=>"as_char", "smart_quotes"=>"lsquo,rsquo,ldquo,rdquo", "input"=>"GFM", "hard_wrap"=>false, "guess_lang"=>true, "footnote_nr"=>1, "show_warnings"=>false, "syntax_highlighter"=>"rouge"}, "title"=>"Pim Kunis", "description"=>"A pig's gotta fly", "lang"=>"en-US", "image"=>"assets/img/avatar.jpg", "repo"=>"https://git.kun.is/pim/static", "mode"=>"light", "author"=>{"name"=>"Pim Kunis", 
"bio"=>"A pig's gotta fly", "username"=>"pim", "avatar"=>"/assets/img/avatar.jpg"}, "url"=>"http://localhost:4000", "jekyll_compose"=>{"post_default_front_matter"=>{"modified"=>nil, "tags"=>[], "description"=>nil}, "draft_default_front_matter"=>{"modified"=>nil, "tags"=>[], "description"=>nil}}, "number_of_posts"=>5, "sass"=>{"style"=>"compressed"}, "commonmark"=>{"options"=>["SMART", "FOOTNOTES"], "extensions"=>["strikethrough", "autolink", "table", "tagfilter"]}, "livereload_port"=>35729, "serving"=>true, "watch"=>true}:ET

View file

@ -0,0 +1,6 @@
I"¥<p>Ever SSH’ed into a freshly installed server and gotten the following annoying message?</p>
<div class="language-text highlighter-rouge"><div class="highlight"><pre class="highlight"><code>The authenticity of host 'host.tld (1.2.3.4)' can't be established.
ED25519 key fingerprint is SHA256:eUXGdm1YdsMAS7vkdx6dOJdOGHdem5gQp4tadCfdLB8.
Are you sure you want to continue connecting (yes/no)?
</code></pre></div></div>
:ET

View file

@ -0,0 +1,149 @@
I"GL<p>Recently, I deployed <a href="https://concourse-ci.org/">Concourse CI</a> because I wanted to get my feet wet with a CI/CD pipeline.
However, I had a practical use case lying around for a long time: automatically compiling my static website and deploying it to my docker Swarm.
This took some time getting right, but the result works like a charm (<a href="https://git.kun.is/pim/static">source code</a>).</p>
<p>Its comforting to know I dont have move a finger and my website is automatically deployed.
However, I would still like to receive some indication of whats happening.
And whats a better way to do that, than using my <a href="https://github.com/caronc/apprise">Apprise</a> service to keep me up to date.
Theres a little snag though: I could not find any Concourse resource that does this.
Thats when I decided to just create it myself.</p>
<h1 id="the-plagiarism-hunt">The Plagiarism Hunt</h1>
<p>As any good computer person, I am lazy.
Id rather just copy someones work, so thats what I did.
I found <a href="https://github.com/mockersf/concourse-slack-notifier">this</a> GitHub repository that does the same thing but for Slack notifications.
For some reason its archived, but it seemed like it should work.
I actually noticed lots of repositories for Concourse resource types are archived, so not sure whats going on there.</p>
<h1 id="getting-to-know-concourse">Getting to know Concourse</h1>
<p>Lets first understand what we need to do reach our end goal of sending Apprise notifications from Concourse.</p>
<p>A Concourse pipeline takes some inputs, performs some operations on them which result in some outputs.
These inputs and outputs are called <em>resources</em> in Concourse.
For example, a Git repository could be a resource.
Each resource is an instance of a <em>resource type</em>.
A resource type therefore is simply a blueprint that can create multiple resources.
To continue the example, a resource type could be “Git repository”.</p>
<p>We therefore need to create our own resource type that can send Apprise notifications.
A resource type is simply a container that includes three scripts:</p>
<ul>
<li><code>check</code>: check for a new version of a resource</li>
<li><code>in</code>: retrieve a version of the resource</li>
<li><code>out</code>: create a version of the resource</li>
</ul>
<p>As Apprise notifications are basically fire-and-forget, we will only implement the <code>out</code> script.</p>
<h1 id="writing-the-codeoutcode-script">Writing the <code>out</code> script</h1>
<p>The whole script can be found <a href="https://git.kun.is/pim/concourse-apprise-notifier/src/branch/master/out">here</a>, but I will explain the most important bits of it.
Note that I only use Apprises persistent storage solution, and not its stateless solution.</p>
<p>Concourse provides us with the working directory, which we <code>cd</code> to:</p>
<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nb">cd</span> <span class="s2">"</span><span class="k">${</span><span class="nv">1</span><span class="k">}</span><span class="s2">"</span>
</code></pre></div></div>
<p>We create a timestamp, formatted in JSON, which we will use for the resources new version later.
Concourse requires us to set a version for the resource, but since Apprise notifications dont have that, we use the timestamp:</p>
<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">timestamp</span><span class="o">=</span><span class="s2">"</span><span class="si">$(</span>jq <span class="nt">-n</span> <span class="s2">"{version:{timestamp:</span><span class="se">\"</span><span class="si">$(</span><span class="nb">date</span> +%s<span class="si">)</span><span class="se">\"</span><span class="s2">}}"</span><span class="si">)</span><span class="s2">"</span>
</code></pre></div></div>
<p>First some black magic Bash to redirect file descriptors.
Not sure why this is needed, but I copied it anyways.
After that, we create a temporary file holding resources parameters.</p>
<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nb">exec </span>3&gt;&amp;1
<span class="nb">exec </span>1&gt;&amp;2
<span class="nv">payload</span><span class="o">=</span><span class="si">$(</span><span class="nb">mktemp</span> /tmp/resource-in.XXXXXX<span class="si">)</span>
<span class="nb">cat</span> <span class="o">&gt;</span> <span class="s2">"</span><span class="k">${</span><span class="nv">payload</span><span class="k">}</span><span class="s2">"</span> &lt;&amp;0
</code></pre></div></div>
<p>We then extract the individual parameters.
The <code>source</code> key contains values how the resource type was specified, while the <code>params</code> key specifies parameters for this specific resource.</p>
<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">apprise_host</span><span class="o">=</span><span class="s2">"</span><span class="si">$(</span>jq <span class="nt">-r</span> <span class="s1">'.source.host'</span> &lt; <span class="s2">"</span><span class="k">${</span><span class="nv">payload</span><span class="k">}</span><span class="s2">"</span><span class="si">)</span><span class="s2">"</span>
<span class="nv">apprise_key</span><span class="o">=</span><span class="s2">"</span><span class="si">$(</span>jq <span class="nt">-r</span> <span class="s1">'.source.key'</span> &lt; <span class="s2">"</span><span class="k">${</span><span class="nv">payload</span><span class="k">}</span><span class="s2">"</span><span class="si">)</span><span class="s2">"</span>
<span class="nv">alert_body</span><span class="o">=</span><span class="s2">"</span><span class="si">$(</span>jq <span class="nt">-r</span> <span class="s1">'.params.body'</span> &lt; <span class="s2">"</span><span class="k">${</span><span class="nv">payload</span><span class="k">}</span><span class="s2">"</span><span class="si">)</span><span class="s2">"</span>
<span class="nv">alert_title</span><span class="o">=</span><span class="s2">"</span><span class="si">$(</span>jq <span class="nt">-r</span> <span class="s1">'.params.title // null'</span> &lt; <span class="s2">"</span><span class="k">${</span><span class="nv">payload</span><span class="k">}</span><span class="s2">"</span><span class="si">)</span><span class="s2">"</span>
<span class="nv">alert_type</span><span class="o">=</span><span class="s2">"</span><span class="si">$(</span>jq <span class="nt">-r</span> <span class="s1">'.params.type // null'</span> &lt; <span class="s2">"</span><span class="k">${</span><span class="nv">payload</span><span class="k">}</span><span class="s2">"</span><span class="si">)</span><span class="s2">"</span>
<span class="nv">alert_tag</span><span class="o">=</span><span class="s2">"</span><span class="si">$(</span>jq <span class="nt">-r</span> <span class="s1">'.params.tag // null'</span> &lt; <span class="s2">"</span><span class="k">${</span><span class="nv">payload</span><span class="k">}</span><span class="s2">"</span><span class="si">)</span><span class="s2">"</span>
<span class="nv">alert_format</span><span class="o">=</span><span class="s2">"</span><span class="si">$(</span>jq <span class="nt">-r</span> <span class="s1">'.params.format // null'</span> &lt; <span class="s2">"</span><span class="k">${</span><span class="nv">payload</span><span class="k">}</span><span class="s2">"</span><span class="si">)</span><span class="s2">"</span>
</code></pre></div></div>
<p>We then format the different parameters using JSON:</p>
<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">alert_body</span><span class="o">=</span><span class="s2">"</span><span class="si">$(</span><span class="nb">eval</span> <span class="s2">"printf </span><span class="se">\"</span><span class="k">${</span><span class="nv">alert_body</span><span class="k">}</span><span class="se">\"</span><span class="s2">"</span> | jq <span class="nt">-R</span> <span class="nt">-s</span> .<span class="si">)</span><span class="s2">"</span>
<span class="o">[</span> <span class="s2">"</span><span class="k">${</span><span class="nv">alert_title</span><span class="k">}</span><span class="s2">"</span> <span class="o">!=</span> <span class="s2">"null"</span> <span class="o">]</span> <span class="o">&amp;&amp;</span> <span class="nv">alert_title</span><span class="o">=</span><span class="s2">"</span><span class="si">$(</span><span class="nb">eval</span> <span class="s2">"printf </span><span class="se">\"</span><span class="k">${</span><span class="nv">alert_title</span><span class="k">}</span><span class="se">\"</span><span class="s2">"</span> | jq <span class="nt">-R</span> <span class="nt">-s</span> .<span class="si">)</span><span class="s2">"</span>
<span class="o">[</span> <span class="s2">"</span><span class="k">${</span><span class="nv">alert_type</span><span class="k">}</span><span class="s2">"</span> <span class="o">!=</span> <span class="s2">"null"</span> <span class="o">]</span> <span class="o">&amp;&amp;</span> <span class="nv">alert_type</span><span class="o">=</span><span class="s2">"</span><span class="si">$(</span><span class="nb">eval</span> <span class="s2">"printf </span><span class="se">\"</span><span class="k">${</span><span class="nv">alert_type</span><span class="k">}</span><span class="se">\"</span><span class="s2">"</span> | jq <span class="nt">-R</span> <span class="nt">-s</span> .<span class="si">)</span><span class="s2">"</span>
<span class="o">[</span> <span class="s2">"</span><span class="k">${</span><span class="nv">alert_tag</span><span class="k">}</span><span class="s2">"</span> <span class="o">!=</span> <span class="s2">"null"</span> <span class="o">]</span> <span class="o">&amp;&amp;</span> <span class="nv">alert_tag</span><span class="o">=</span><span class="s2">"</span><span class="si">$(</span><span class="nb">eval</span> <span class="s2">"printf </span><span class="se">\"</span><span class="k">${</span><span class="nv">alert_tag</span><span class="k">}</span><span class="se">\"</span><span class="s2">"</span> | jq <span class="nt">-R</span> <span class="nt">-s</span> .<span class="si">)</span><span class="s2">"</span>
<span class="o">[</span> <span class="s2">"</span><span class="k">${</span><span class="nv">alert_format</span><span class="k">}</span><span class="s2">"</span> <span class="o">!=</span> <span class="s2">"null"</span> <span class="o">]</span> <span class="o">&amp;&amp;</span> <span class="nv">alert_format</span><span class="o">=</span><span class="s2">"</span><span class="si">$(</span><span class="nb">eval</span> <span class="s2">"printf </span><span class="se">\"</span><span class="k">${</span><span class="nv">alert_format</span><span class="k">}</span><span class="se">\"</span><span class="s2">"</span> | jq <span class="nt">-R</span> <span class="nt">-s</span> .<span class="si">)</span><span class="s2">"</span>
</code></pre></div></div>
<p>Next, from the individual parameters we construct the final JSON message body we send to the Apprise endpoint.</p>
<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">body</span><span class="o">=</span><span class="s2">"</span><span class="si">$(</span><span class="nb">cat</span> <span class="o">&lt;&lt;</span><span class="no">EOF</span><span class="sh">
{
"body": </span><span class="k">${</span><span class="nv">alert_body</span><span class="k">}</span><span class="sh">,
"title": </span><span class="k">${</span><span class="nv">alert_title</span><span class="k">}</span><span class="sh">,
"type": </span><span class="k">${</span><span class="nv">alert_type</span><span class="k">}</span><span class="sh">,
"tag": </span><span class="k">${</span><span class="nv">alert_tag</span><span class="k">}</span><span class="sh">,
"format": </span><span class="k">${</span><span class="nv">alert_format</span><span class="k">}</span><span class="sh">
}
</span><span class="no">EOF
</span><span class="si">)</span><span class="s2">"</span>
</code></pre></div></div>
<p>Before sending it just yet, we compact the JSON and remove any values that are <code>null</code>:</p>
<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">compact_body</span><span class="o">=</span><span class="s2">"</span><span class="si">$(</span><span class="nb">echo</span> <span class="s2">"</span><span class="k">${</span><span class="nv">body</span><span class="k">}</span><span class="s2">"</span> | jq <span class="nt">-c</span> <span class="s1">'.'</span><span class="si">)</span><span class="s2">"</span>
<span class="nb">echo</span> <span class="s2">"</span><span class="nv">$compact_body</span><span class="s2">"</span> | jq <span class="s1">'del(..|nulls)'</span> <span class="o">&gt;</span> /tmp/compact_body.json
</code></pre></div></div>
<p>Here is the most important line, where we send the payload to the Apprise endpoint.
Its quite straight-forward.</p>
<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code>curl <span class="nt">-v</span> <span class="nt">-X</span> POST <span class="nt">-T</span> /tmp/compact_body.json <span class="nt">-H</span> <span class="s2">"Content-Type: application/json"</span> <span class="s2">"</span><span class="k">${</span><span class="nv">apprise_host</span><span class="k">}</span><span class="s2">/notify/</span><span class="k">${</span><span class="nv">apprise_key</span><span class="k">}</span><span class="s2">"</span>
</code></pre></div></div>
<p>Finally, we print the timestamp (fake version) in order to appease the Concourse gods.</p>
<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nb">echo</span> <span class="s2">"</span><span class="k">${</span><span class="nv">timestamp</span><span class="k">}</span><span class="s2">"</span> <span class="o">&gt;</span>&amp;3
</code></pre></div></div>
<h1 id="building-the-container">Building the Container</h1>
<p>As said earlier, to actually use this script, we need to add it to a image.
I wont be explaining this whole process, but the source can be found <a href="https://git.kun.is/pim/concourse-apprise-notifier/src/branch/master/pipeline.yml">here</a>.
The most important take-aways are these:</p>
<ul>
<li>Use <code>concourse/oci-build-task</code> to build a image from a Dockerfile.</li>
<li>Use <code>registry-image</code> to push the image to an image registry.</li>
</ul>
<h1 id="using-the-resource-type">Using the Resource Type</h1>
<p>Using our newly created resource type is surprisingly simple.
I use it for the blog you are reading right now and the pipeline definition can be found <a href="https://git.kun.is/pim/static/src/branch/main/pipeline.yml">here</a>.
Here we specify the resource type in a Concourse pipeline:</p>
<div class="language-yaml highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="na">resource_types</span><span class="pi">:</span>
<span class="pi">-</span> <span class="na">name</span><span class="pi">:</span> <span class="s">apprise</span>
<span class="na">type</span><span class="pi">:</span> <span class="s">registry-image</span>
<span class="na">source</span><span class="pi">:</span>
<span class="na">repository</span><span class="pi">:</span> <span class="s">git.kun.is/pim/concourse-apprise-notifier</span>
<span class="na">tag</span><span class="pi">:</span> <span class="s2">"</span><span class="s">1.1.1"</span>
</code></pre></div></div>
<p>We simply have to tell Concourse where to find the image, and which tag we want.
Next, we instantiate the resource type to create a resource:</p>
<div class="language-yaml highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="na">resources</span><span class="pi">:</span>
<span class="pi">-</span> <span class="na">name</span><span class="pi">:</span> <span class="s">apprise-notification</span>
<span class="na">type</span><span class="pi">:</span> <span class="s">apprise</span>
<span class="na">source</span><span class="pi">:</span>
<span class="na">host</span><span class="pi">:</span> <span class="s">https://apprise.kun.is:444</span>
<span class="na">key</span><span class="pi">:</span> <span class="s">concourse</span>
<span class="na">icon</span><span class="pi">:</span> <span class="s">bell</span>
</code></pre></div></div>
<p>We simply specify the host to send Apprise notifications to.
Yeah, I even gave it a little bell because its cute.</p>
<p>All thats left to do, is actually send the notification.
Lets see how that is done:</p>
<div class="language-yaml highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="pi">-</span> <span class="na">name</span><span class="pi">:</span> <span class="s">deploy-static-website</span>
<span class="na">plan</span><span class="pi">:</span>
<span class="pi">-</span> <span class="na">task</span><span class="pi">:</span> <span class="s">deploy-site</span>
<span class="na">config</span><span class="pi">:</span> <span class="s">...</span>
<span class="na">on_success</span><span class="pi">:</span>
<span class="err"> </span><span class="na">put</span><span class="pi">:</span> <span class="s">apprise-notification</span>
<span class="na"> params</span><span class="pi">:</span>
<span class="err"> </span> <span class="na">title</span><span class="pi">:</span> <span class="s2">"</span><span class="s">Static</span><span class="nv"> </span><span class="s">website</span><span class="nv"> </span><span class="s">deployed!"</span>
<span class="err"> </span> <span class="na">body</span><span class="pi">:</span> <span class="s2">"</span><span class="s">New</span><span class="nv"> </span><span class="s">version:</span><span class="nv"> </span><span class="s">$(cat</span><span class="nv"> </span><span class="s">version/version)"</span>
<span class="err"> </span><span class="na">no_get</span><span class="pi">:</span> <span class="no">true</span>
</code></pre></div></div>
<p>As can be seen, the Apprise notification can be triggered when a task is executed successfully.
We do this using the <code>put</code> command, which execute the <code>out</code> script underwater.
We set the notifications title and body, and send it!
The result is seen below in my Ntfy app, which Apprise forwards the message to:
<img src="ntfy.png" alt="picture showing my Ntfy app with the Apprise notification" /></p>
<p>And to finish this off, here is what it looks like in the Concourse web UI:
<img src="pipeline.png" alt="the concourse web gui showing the pipeline of my static website including the the apprise notification resources" /></p>
<h1 id="conclusion">Conclusion</h1>
<p>Concourses way of representing everything as an image/container is really interesting in my opinion.
A resource type is quite easily implemented as well, although Bash might not be the optimal way to do this.
Ive seen some people implement it in Rust, which might be a good excuse to finally learn that language :)</p>
<p>Apart from Apprise notifications, Im planning on creating a resource type to deploy to a Docker swarm eventually.
This seems like a lot harder than simply sending notifications though.</p>
:ET

View file

@ -0,0 +1,146 @@
I"¸8<p>Ever SSH’ed into a freshly installed server and gotten the following annoying message?</p>
<div class="language-text highlighter-rouge"><div class="highlight"><pre class="highlight"><code>The authenticity of host 'host.tld (1.2.3.4)' can't be established.
ED25519 key fingerprint is SHA256:eUXGdm1YdsMAS7vkdx6dOJdOGHdem5gQp4tadCfdLB8.
Are you sure you want to continue connecting (yes/no)?
</code></pre></div></div>
<p>Or even more annoying:</p>
<div class="language-text highlighter-rouge"><div class="highlight"><pre class="highlight"><code>@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED! @
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
IT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY!
Someone could be eavesdropping on you right now (man-in-the-middle attack)!
It is also possible that a host key has just been changed.
The fingerprint for the ED25519 key sent by the remote host is
SHA256:eUXGdm1YdsMAS7vkdx6dOJdOGHdem5gQp4tadCfdLB8.
Please contact your system administrator.
Add correct host key in /home/user/.ssh/known_hosts to get rid of this message.
Offending ED25519 key in /home/user/.ssh/known_hosts:3
remove with:
ssh-keygen -f "/etc/ssh/ssh_known_hosts" -R "1.2.3.4"
ED25519 host key for 1.2.3.4 has changed and you have requested strict checking.
Host key verification failed.
</code></pre></div></div>
<p>Could it be that the programmers at OpenSSH simply like to annoy us with these confusing messages?
Maybe, but these warnings also serve as a way to notify users of a potential Man-in-the-Middle (MITM) attack.
I won’t go into the details of this problem, but I refer you to <a href="https://blog.g3rt.nl/ssh-host-key-validation-strict-yet-user-friendly.html">this excellent blog post</a>.
Instead, I would like to talk about ways to solve these annoying warnings.</p>
<p>One obvious solution is simply to add each host to your <code>known_hosts</code> file.
This works okay when managing a handful of servers, but becomes unbearable when managing many servers.
In my case, I wanted to quickly spin up virtual machines using Duncan Mac-Vicar’s <a href="https://registry.terraform.io/providers/dmacvicar/libvirt/latest/docs">Terraform Libvirt provider</a>, without having to accept their host key before connecting.
The solution? Issuing SSH host certificates using an SSH certificate authority.</p>
<h2 id="ssh-certificate-authorities-vs-the-web">SSH Certificate Authorities vs. the Web</h2>
<p>The idea of an SSH certificate authority (CA) is quite easy to grasp, if you understand the web’s Public Key Infrastructure (PKI).
Just like with the web, a trusted party can issue certificates that are offered when establishing a connection.
The idea is, just by trusting the trusted party, you trust every certificate they issue.
In the case of the web’s PKI, this trusted party is bundled and trusted by <a href="https://wiki.mozilla.org/CA">your browser</a> or operating system.
However, in the case of SSH, the trusted party is you! (Okay you can also trust your own web certificate authority)
With this great power, comes great responsibility which we will abuse heavily in this article.</p>
<h2 id="ssh-certificate-authority-for-terraform">SSH Certificate Authority for Terraform</h2>
<p>So, let’s start with a plan.
I want to spawn virtual machines with Terraform which are automatically provisioned with an SSH host certificate issued by my CA.
This CA will be another host on my private network, issuing certificates over SSH.</p>
<h3 id="fetching-the-ssh-host-certificate">Fetching the SSH Host Certificate</h3>
<p>First we generate an SSH key pair in Terraform.
Below is the code for that:</p>
<div class="language-terraform highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="k">resource</span> <span class="s2">"tls_private_key"</span> <span class="s2">"debian"</span> <span class="p">{</span>
<span class="nx">algorithm</span> <span class="p">=</span> <span class="s2">"ED25519"</span>
<span class="p">}</span>
<span class="k">data</span> <span class="s2">"tls_public_key"</span> <span class="s2">"debian"</span> <span class="p">{</span>
<span class="nx">private_key_pem</span> <span class="p">=</span> <span class="nx">tls_private_key</span><span class="p">.</span><span class="nx">debian</span><span class="p">.</span><span class="nx">private_key_pem</span>
<span class="p">}</span>
</code></pre></div></div>
<p>Now that we have an SSH key pair, we need to somehow make Terraform communicate this with the CA.
Lucky for us, there is a way for Terraform to execute an arbitrary command with the <code>external</code> data feature.
We call this script below:</p>
<div class="language-terraform highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="k">data</span> <span class="s2">"external"</span> <span class="s2">"cert"</span> <span class="p">{</span>
<span class="nx">program</span> <span class="p">=</span> <span class="p">[</span><span class="s2">"bash"</span><span class="p">,</span> <span class="s2">"</span><span class="k">${</span><span class="nx">path</span><span class="p">.</span><span class="k">module}</span><span class="s2">/get_cert.sh"</span><span class="p">]</span>
<span class="nx">query</span> <span class="p">=</span> <span class="p">{</span>
<span class="nx">pubkey</span> <span class="p">=</span> <span class="nx">trimspace</span><span class="p">(</span><span class="k">data</span><span class="p">.</span><span class="nx">tls_public_key</span><span class="p">.</span><span class="nx">debian</span><span class="p">.</span><span class="nx">public_key_openssh</span><span class="p">)</span>
<span class="nx">host</span> <span class="p">=</span> <span class="kd">var</span><span class="p">.</span><span class="nx">name</span>
<span class="nx">cahost</span> <span class="p">=</span> <span class="kd">var</span><span class="p">.</span><span class="nx">ca_host</span>
<span class="nx">cascript</span> <span class="p">=</span> <span class="kd">var</span><span class="p">.</span><span class="nx">ca_script</span>
<span class="nx">cakey</span> <span class="p">=</span> <span class="kd">var</span><span class="p">.</span><span class="nx">ca_key</span>
<span class="p">}</span>
<span class="p">}</span>
</code></pre></div></div>
<p>These query parameters will end up in the script’s stdin in JSON format.
We can then read these parameters, and send them to the CA over SSH.
The result must as well be in JSON format.</p>
<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="c">#!/bin/bash</span>
<span class="nb">set</span> <span class="nt">-euo</span> pipefail
<span class="nv">IFS</span><span class="o">=</span><span class="s1">$'</span><span class="se">\n\t</span><span class="s1">'</span>
<span class="c"># Read the query parameters</span>
<span class="nb">eval</span> <span class="s2">"</span><span class="si">$(</span>jq <span class="nt">-r</span> <span class="s1">'@sh "PUBKEY=\(.pubkey) HOST=\(.host) CAHOST=\(.cahost) CASCRIPT=\(.cascript) CAKEY=\(.cakey)"'</span><span class="si">)</span><span class="s2">"</span>
<span class="c"># Fetch certificate from the CA</span>
<span class="c"># Warning: extremely ugly code that I am too lazy to fix</span>
<span class="nv">CERT</span><span class="o">=</span><span class="si">$(</span>ssh <span class="nt">-o</span> <span class="nv">ConnectTimeout</span><span class="o">=</span>3 <span class="nt">-o</span> <span class="nv">ConnectionAttempts</span><span class="o">=</span>1 root@<span class="nv">$CAHOST</span> <span class="s1">'"'</span><span class="s2">"</span><span class="nv">$CASCRIPT</span><span class="s2">"</span><span class="s1">'" host "'</span><span class="s2">"</span><span class="nv">$CAKEY</span><span class="s2">"</span><span class="s1">'" "'</span><span class="s2">"</span><span class="nv">$PUBKEY</span><span class="s2">"</span><span class="s1">'" "'</span><span class="s2">"</span><span class="nv">$HOST</span><span class="s2">"</span><span class="s1">'".dmz'</span><span class="si">)</span>
jq <span class="nt">-n</span> <span class="nt">--arg</span> cert <span class="s2">"</span><span class="nv">$CERT</span><span class="s2">"</span> <span class="s1">'{"cert":$cert}'</span>
</code></pre></div></div>
<p>We see that a script is called on the remote host that issues the certificate.
This is just a simple wrapper around <code>ssh-keygen</code>, which you can see below.</p>
<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="c">#!/bin/bash</span>
<span class="nb">set</span> <span class="nt">-euo</span> pipefail
<span class="nv">IFS</span><span class="o">=</span><span class="s1">$'</span><span class="se">\n\t</span><span class="s1">'</span>
host<span class="o">()</span> <span class="o">{</span>
<span class="nv">CAKEY</span><span class="o">=</span><span class="s2">"</span><span class="nv">$2</span><span class="s2">"</span>
<span class="nv">PUBKEY</span><span class="o">=</span><span class="s2">"</span><span class="nv">$3</span><span class="s2">"</span>
<span class="nv">HOST</span><span class="o">=</span><span class="s2">"</span><span class="nv">$4</span><span class="s2">"</span>
<span class="nb">echo</span> <span class="s2">"</span><span class="nv">$PUBKEY</span><span class="s2">"</span> <span class="o">&gt;</span> /root/ca/<span class="s2">"</span><span class="nv">$HOST</span><span class="s2">"</span>.pub
ssh-keygen <span class="nt">-h</span> <span class="nt">-s</span> /root/ca/keys/<span class="s2">"</span><span class="nv">$CAKEY</span><span class="s2">"</span> <span class="nt">-I</span> <span class="s2">"</span><span class="nv">$HOST</span><span class="s2">"</span> <span class="nt">-n</span> <span class="s2">"</span><span class="nv">$HOST</span><span class="s2">"</span> /root/ca/<span class="s2">"</span><span class="nv">$HOST</span><span class="s2">"</span>.pub
<span class="nb">cat</span> /root/ca/<span class="s2">"</span><span class="nv">$HOST</span><span class="s2">"</span><span class="nt">-cert</span>.pub
<span class="nb">rm</span> /root/ca/<span class="s2">"</span><span class="nv">$HOST</span><span class="s2">"</span><span class="k">*</span>.pub
<span class="o">}</span>
<span class="s2">"</span><span class="nv">$1</span><span class="s2">"</span> <span class="s2">"</span><span class="nv">$@</span><span class="s2">"</span>
</code></pre></div></div>
<h3 id="appeasing-the-terraform-gods">Appeasing the Terraform Gods</h3>
<p>So nice, we can fetch the SSH host certificate from the CA.
We should just be able to use it right?
We can, but it brings a big annoyance with it: Terraform will fetch a new certificate every time it is run.
This is because the <code>external</code> feature of Terraform is a data source.
If we were to use this data source for a Terraform resource, it would need to be updated every time we run Terraform.
I have not been able to find a way to avoid fetching the certificate every time, except for writing my own resource provider which I’d rather not.
I have, however, found a way to hack around the issue.</p>
<p>The idea is as follows: we can use Terraform’s <code>ignore_changes</code> to, well, ignore any changes of a resource.
Unfortunately, we cannot use this for a <code>data</code> source, so we must create a glue <code>null_resource</code> that supports <code>ignore_changes</code>.
This is shown in the code snippet below.
We use the <code>triggers</code> property simply to copy the certificate in; we don’t use it for its original purpose.</p>
<div class="language-terraform highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="k">resource</span> <span class="s2">"null_resource"</span> <span class="s2">"cert"</span> <span class="p">{</span>
<span class="nx">triggers</span> <span class="p">=</span> <span class="p">{</span>
<span class="nx">cert</span> <span class="p">=</span> <span class="k">data</span><span class="p">.</span><span class="nx">external</span><span class="p">.</span><span class="nx">cert</span><span class="p">.</span><span class="nx">result</span><span class="p">[</span><span class="s2">"cert"</span><span class="p">]</span>
<span class="p">}</span>
<span class="nx">lifecycle</span> <span class="p">{</span>
<span class="nx">ignore_changes</span> <span class="p">=</span> <span class="p">[</span>
<span class="nx">triggers</span>
<span class="p">]</span>
<span class="p">}</span>
<span class="p">}</span>
</code></pre></div></div>
<p>And voilà, we can now use <code>null_resource.cert.triggers[&quot;cert&quot;]</code> as our certificate, that won’t trigger replacements in Terraform.</p>
<h3 id="setting-the-host-certificate-with-cloud-init">Setting the Host Certificate with Cloud-Init</h3>
<p>Terraform’s Libvirt provider has native support for Cloud-Init, which is very handy.
We can give the host certificate directly to Cloud-Init and place it on the virtual machine.
Inside the Cloud-Init configuration, we can set the <code>ssh_keys</code> property to do this:</p>
<div class="language-yml highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="na">ssh_keys</span><span class="pi">:</span>
<span class="na">ed25519_private</span><span class="pi">:</span> <span class="pi">|</span>
<span class="s">${indent(4, private_key)}</span>
<span class="na">ed25519_certificate</span><span class="pi">:</span> <span class="s2">"</span><span class="s">${host_cert}"</span>
</code></pre></div></div>
<p>I hardcoded this to ED25519 keys, because this is all I use.</p>
<p>This works perfectly, and I never have to accept host certificates from virtual machines again.</p>
<h3 id="caveats">Caveats</h3>
<p>A sharp eye might have noticed the lifecycle of these host certificates is severely lacking.
Namely, the deployed host certificates have no expiration date, nor is there a revocation function.
There are ways to implement these, but for my home lab I did not deem this necessary at this point.
In a more professional environment, I would suggest using <a href="https://www.vaultproject.io/">Hashicorp’s Vault</a>.</p>
<p>This project did teach me about the limits and flexibility of Terraform, so all in all a success!
All code can be found on the git repository <a href="https://git.kun.is/home/tf-modules/src/branch/master/debian">here</a>.</p>
:ET

View file

@ -0,0 +1,62 @@
I"<p>For months, I’ve had a peculiar problem with my laptop: once in a while, seemingly without reason, my laptop screen would freeze.
This only happened on my laptop screen, and not on an external monitor.
I had kind of learned to live with it as I couldn’t find a solution online.
The only remedy I had was reloading my window manager, which would often unfreeze the screen.</p>
<p>Yesterday I tried Googling once more and I actually found <a href="https://bbs.archlinux.org/viewtopic.php?id=246841">a thread</a> about it on the Arch Linux forums!
They talk about the same laptop model, the Lenovo ThinkPad x260, having the problem.
Fortunately, they also propose <a href="https://bbs.archlinux.org/viewtopic.php?pid=1888932#p1888932">a temporary fix</a>.</p>
<h1 id="trying-the-fix">Trying the Fix</h1>
<p>Apparently, a problem with the Panel Self Refresh (PSR) feature of Intel iGPUs is the culprit.
According to the <a href="https://github.com/torvalds/linux/blob/45a3e24f65e90a047bef86f927ebdc4c710edaa1/drivers/gpu/drm/i915/display/intel_psr.c#L42">Linux source code</a>, PSR enables the display to go into a lower standby mode when the sytem is idle but the screen is in use.
These lower standby modes can reduce power usage of your device when idling.</p>
<p>This all seems useful, except when it makes your screen freeze!
The proposed fix disables the PSR feature entirely.
To do this, we need to change a parameter to the Intel Graphics Linux Kernel Module (LKM).
The LKM for Intel Graphics is called <code>i915</code>.
There are <a href="https://wiki.archlinux.org/title/Kernel_parameters">multiple ways</a> to change kernel parameters, but I chose to edit my Grub configuration.</p>
<p>First, I wanted to test whether it actually works.
When booting into my Linux partition via Grub, you can press <code>e</code> to edit the Grub definition.
Somewhere there, you can find the <code>linux</code> command which specifies to boot Linux and how to do that.
I simply appended the option <code>i915.enable_psr=0</code> to this line.
After rebooting, I noticed my screen no longer freezes!
Success!</p>
<h1 id="persisting-the-fix">Persisting the Fix</h1>
<p>To make the change permanent, we need to permanently change Grub’s configuration.
One way to do this, is by changing Grub’s defaults in <code>/etc/default/grub</code>.
Namely, the <code>GRUB_CMDLINE_LINUX_DEFAULT</code> option specifies what options Grub should pass to the Linux kernel by default.
For me, this is a nice solution as the problem exists for both Linux OSes I have installed.
I changed this option to:</p>
<div class="language-ini highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="py">GRUB_CMDLINE_LINUX_DEFAULT</span><span class="p">=</span><span class="s">"quiet splash i915.enable_psr=0"</span>
</code></pre></div></div>
<p>Next, I wanted to automate this solution using Ansible.
This turned out to be quite easy, as the Grub configuration looks a bit like an ini file (maybe it is?):</p>
<div class="language-yaml highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="pi">-</span> <span class="na">name</span><span class="pi">:</span> <span class="s">Edit grub to disable Panel Self Refresh</span>
<span class="na">become</span><span class="pi">:</span> <span class="no">true</span>
<span class="na">ini_file</span><span class="pi">:</span>
<span class="na">path</span><span class="pi">:</span> <span class="s">/etc/default/grub</span>
<span class="na">section</span><span class="pi">:</span> <span class="no">null</span>
<span class="na">option</span><span class="pi">:</span> <span class="s2">"</span><span class="s">GRUB_CMDLINE_LINUX_DEFAULT"</span>
<span class="na">value</span><span class="pi">:</span> <span class="s1">'</span><span class="s">"quiet</span><span class="nv"> </span><span class="s">splash</span><span class="nv"> </span><span class="s">i915.enable_psr=0"'</span>
<span class="na">no_extra_spaces</span><span class="pi">:</span> <span class="no">true</span>
<span class="na">notify</span><span class="pi">:</span> <span class="s">update grub</span>
</code></pre></div></div>
<p>Lastly, I created the <code>notify</code> hook to update the Grub configuration:</p>
<div class="language-yaml highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="pi">-</span> <span class="na">name</span><span class="pi">:</span> <span class="s">update grub</span>
<span class="na">become</span><span class="pi">:</span> <span class="no">true</span>
<span class="na">command</span><span class="pi">:</span>
<span class="na">cmd</span><span class="pi">:</span> <span class="s">update-grub</span>
</code></pre></div></div>
<h1 id="update-just-use-nix">Update: Just use Nix</h1>
<p>Lately, I have been learning a bit of NixOS with the intention of replacing my current setup.
Compared to Ansible, applying this fix is a breeze on NixOS:</p>
<div class="language-nix highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="p">{</span>
<span class="nv">boot</span><span class="o">.</span><span class="nv">kernelParams</span> <span class="o">=</span> <span class="p">[</span> <span class="s2">"i915.enable_psr=0"</span> <span class="p">];</span>
<span class="p">}</span>
</code></pre></div></div>
<p>That’s it, yep.</p>
<h1 id="conclusion">Conclusion</h1>
<p>It turned out to be quite easy to change Linux kernel parameters using Ansible.
Maybe some kernel gurus have better ways to change parameters, but this works for me for now.</p>
<p>As a sidenote, I started reading a bit more about NixOS and realised that it can solve issues like these much more nicely than Ansible does.
I might replace my OS with NixOS some day, if I manage to rewrite my Ansible for it.</p>
:ET

View file

@ -0,0 +1,214 @@
I"GG<p>I have been meaning to write about the current state of my home lab infrastructure for a while now.
Now that the most important parts are quite stable, I think the opportunity is ripe.
I expect this post to get quite long, so I might have to leave out some details along the way.</p>
<p>This post will be a starting point for future infrastructure snapshots which I can hopefully put out periodically.
That is, if there is enough worth talking about.</p>
<p>Keep an eye out for the <i class="fa-solid fa-code-branch"></i> icon, which links to the source code and configuration of anything mentioned.
Oh yeah, did I mention everything I do is open source?</p>
<h1 id="networking-and-infrastructure-overview">Networking and Infrastructure Overview</h1>
<h2 id="hardware-and-operating-systems">Hardware and Operating Systems</h2>
<p>Let’s start with the basics: what kind of hardware do I use for my home lab?
The most important servers are my three <a href="https://www.gigabyte.com/Mini-PcBarebone/GB-BLCE-4105-rev-10">Gigabyte Brix GB-BLCE-4105</a>.
Two of them have 16 GB of memory, and one 8 GB.
I named these servers as follows:</p>
<ul>
<li><strong>Atlas</strong>: because this server was going to “lift” a lot of virtual machines.</li>
<li><strong>Lewis</strong>: we started out with a “Max” server named after the Formula 1 driver Max Verstappen, but it kind of became an unmanagable behemoth without infrastructure-as-code. Our second server we subsequently named Lewis after his colleague Lewis Hamilton. Note: people around me vetoed these names and I am no F1 fan!</li>
<li><strong>Jefke</strong>: it’s a funny Belgian name. That’s all.</li>
</ul>
<p>Here is a picture of them sitting in their cosy closet:</p>
<p><img src="servers.jpeg" alt="A picture of my servers." /></p>
<p>If you look to the left, you will also see a Raspberry Pi 4B.
I use this Pi to do some rudimentary monitoring whether servers and services are running.
More on this in the relevant section below.
The Pi is called <strong>Iris</strong> because its a messenger for the other servers.</p>
<p>I used to run Ubuntu on these systems, but I have since migrated away to Debian.
The main reasons were Canonical <a href="https://askubuntu.com/questions/1434512/how-to-get-rid-of-ubuntu-pro-advertisement-when-updating-apt">putting advertisements in my terminal</a> and pushing Snap which has a <a href="https://hackaday.com/2020/06/24/whats-the-deal-with-snap-packages/">proprietary backend</a>.
Two of my servers run the newly released Debian Bookworm, while one still runs Debian Bullseye.</p>
<h2 id="networking">Networking</h2>
<p>For networking, I wanted hypervisors and virtual machines separated by VLANs for security reasons.
The following picture shows a simplified view of the VLANs present in my home lab:</p>
<p><img src="vlans.png" alt="Picture showing the VLANS in my home lab." /></p>
<p>All virtual machines are connected to a virtual bridge which tags network traffic with the DMZ VLAN.
The hypervisors VLAN is used for traffic to and from the hypervisors.
Devices from the hypervisors VLAN are allowed to connect to devices in the DMZ, but not vice versa.
The hypervisors are connected to a switch using a trunk link, which allows both DMZ and hypervisors traffic.</p>
<p>I realised the above design using ifupdown.
Below is the configuration for each hypervisor, which creates a new <code>enp3s0.30</code> interface with all DMZ traffic from the <code>enp3s0</code> interface <a href="https://git.kun.is/home/hypervisors/src/commit/71b96d462116e4160b6467533fc476f3deb9c306/ansible/dmz.conf.j2"><i class="fa-solid fa-code-branch"></i></a>.</p>
<div class="language-text highlighter-rouge"><div class="highlight"><pre class="highlight"><code>auto enp3s0.30
iface enp3s0.30 inet manual
iface enp3s0.30 inet6 auto
accept_ra 0
dhcp 0
request_prefix 0
privext 0
pre-up sysctl -w net/ipv6/conf/enp3s0.30/disable_ipv6=1
</code></pre></div></div>
<p>This configuration seems more complex than it actually is.
Most of it is to make sure the interface is not assigned an IPv4/6 address on the hypervisor host.
The magic <code>.30</code> at the end of the interface name makes this interface tagged with VLAN ID 30 (DMZ for me).</p>
<p>Now that we have an interface tagged for the DMZ VLAN, we can create a bridge where future virtual machines can connect to:</p>
<div class="language-text highlighter-rouge"><div class="highlight"><pre class="highlight"><code>auto dmzbr
iface dmzbr inet manual
bridge_ports enp3s0.30
bridge_stp off
iface dmzbr inet6 auto
accept_ra 0
dhcp 0
request_prefix 0
privext 0
pre-up sysctl -w net/ipv6/conf/dmzbr/disable_ipv6=1
</code></pre></div></div>
<p>Just like the previous config, this is quite bloated because I don’t want the interface to be assigned an IP address on the host.
Most importantly, the <code>bridge_ports enp3s0.30</code> line here makes this interface a virtual bridge for the <code>enp3s0.30</code> interface.</p>
<p>And voilà, we now have a virtual bridge on each machine, where only DMZ traffic will flow.
Here I verify whether this configuration works:</p>
<details>
<summary>Show</summary>
<p>We can see that the two virtual interfaces are created, and are only assigned a MAC address and not a IP address:</p>
<div class="language-text highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@atlas:~# ip a show enp3s0.30
4: enp3s0.30@enp3s0: &lt;BROADCAST,MULTICAST,UP,LOWER_UP&gt; mtu 1500 qdisc noqueue master dmzbr state UP group default qlen 1000
link/ether d8:5e:d3:4c:70:38 brd ff:ff:ff:ff:ff:ff
5: dmzbr: &lt;BROADCAST,MULTICAST,UP,LOWER_UP&gt; mtu 1500 qdisc noqueue state UP group default qlen 1000
link/ether 4e:f7:1f:0f:ad:17 brd ff:ff:ff:ff:ff:ff
</code></pre></div></div>
<p>Pinging a VM from a hypervisor works:</p>
<div class="language-text highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@atlas:~# ping -c1 maestro.dmz
PING maestro.dmz (192.168.30.8) 56(84) bytes of data.
64 bytes from 192.168.30.8 (192.168.30.8): icmp_seq=1 ttl=63 time=0.457 ms
</code></pre></div></div>
<p>Pinging a hypervisor from a VM does not work:</p>
<div class="language-text highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@maestro:~# ping -c1 atlas.hyp
PING atlas.hyp (192.168.40.2) 56(84) bytes of data.
--- atlas.hyp ping statistics ---
1 packets transmitted, 0 received, 100% packet loss, time 0ms
</code></pre></div></div>
</details>
<h2 id="dns-and-dhcp">DNS and DHCP</h2>
<p>Now that we have a working DMZ network, let’s build on it to get DNS and DHCP working.
This will enable new virtual machines to obtain a static or dynamic IP address and register their host in DNS.
This has actually been incredibly annoying due to our friend <a href="https://en.wikipedia.org/wiki/Network_address_translation?useskin=vector">Network address translation (NAT)</a>.</p>
<details>
<summary>NAT recap</summary>
<p>Network address translation (NAT) is a function of a router which allows multiple hosts to share a single IP address.
This is needed for IPv4, because IPv4 addresses are scarce and usually one household is only assigned a single IPv4 address.
This is one of the problems IPv6 attempts to solve (mainly by having so many IP addresses that they should never run out).
To solve the problem for IPv4, each host in a network is assigned a private IPv4 address, which can be reused for every network.</p>
<p>Then, the router must perform address translation.
It does this by keeping track of ports opened by hosts in its private network.
If a packet from the internet arrives at the router for such a port, it forwards this packet to the correct host.</p>
</details>
<p>I would like to host my own DNS on a virtual machine (called <strong>hermes</strong>, more on VMs later) in the DMZ network.
This basically gives two problems:</p>
<ol>
<li>The upstream DNS server will refer to the public internet-accessible IP address of our DNS server.
This IP-address has no meaning inside the private network due to NAT and the router will reject the packet.</li>
<li>Our DNS resolves hosts to their public internet-accessible IP address.
This is similar to the previous problem as the public IP address has no meaning.</li>
</ol>
<p>The first problem can be remediated by overriding the location of the DNS server for hosts inside the DMZ network.
This can be achieved on my router, which uses Unbound as its recursive DNS server:</p>
<p><img src="unbound_overrides.png" alt="Unbound overides for kun.is and dmz domains." /></p>
<p>Any DNS requests to Unbound to domains in either <code>dmz</code> or <code>kun.is</code> will now be forwarded <code>192.168.30.7</code> (port 5353).
This is the virtual machine hosting my DNS.</p>
<p>The second problem can be solved at the DNS server.
We need to do some magic overriding, which <a href="https://dnsmasq.org/docs/dnsmasq-man.html">dnsmasq</a> is perfect for <a href="https://git.kun.is/home/hermes/src/commit/488024a7725f2325b8992e7a386b4630023f1b52/ansible/roles/dnsmasq/files/dnsmasq.conf"><i class="fa-solid fa-code-branch"></i></a>:</p>
<div class="language-conf highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="n">alias</span>=<span class="m">84</span>.<span class="m">245</span>.<span class="m">14</span>.<span class="m">149</span>,<span class="m">192</span>.<span class="m">168</span>.<span class="m">30</span>.<span class="m">8</span>
<span class="n">server</span>=/<span class="n">kun</span>.<span class="n">is</span>/<span class="m">192</span>.<span class="m">168</span>.<span class="m">30</span>.<span class="m">7</span>
</code></pre></div></div>
<p>This always overrides the public IPv4 address to the private one.
It also overrides the DNS server for <code>kun.is</code> to <code>192.168.30.7</code>.</p>
<p>Finally, behind the dnsmasq server, I run <a href="https://www.powerdns.com/">Powerdns</a> as authoritative DNS server <a href="https://git.kun.is/home/hermes/src/branch/master/ansible/roles/powerdns"><i class="fa-solid fa-code-branch"></i></a>.
I like this DNS server because I can manage it with Terraform <a href="https://git.kun.is/home/hermes/src/commit/488024a7725f2325b8992e7a386b4630023f1b52/terraform/dns/kun_is.tf"><i class="fa-solid fa-code-branch"></i></a>.</p>
<p>Here is a small diagram showing my setup (my networking teacher would probably kill me for this):
<img src="nat.png" alt="Shitty diagram showing my DNS setup." /></p>
<h1 id="virtualization">Virtualization</h1>
<p><a href="https://github.com/containrrr/shepherd">https://github.com/containrrr/shepherd</a>
Now that we have laid out the basic networking, let’s talk virtualization.
Each of my servers is configured to run KVM virtual machines, orchestrated using Libvirt.
Configuration of the physical hypervisor servers, including KVM/Libvirt is done using Ansible.
The VMs are spun up using Terraform and the <a href="https://registry.terraform.io/providers/dmacvicar/libvirt/latest/docs">dmacvicar/libvirt</a> Terraform provider.</p>
<p>This all isnt too exciting, except that I created a Terraform module that abstracts the Terraform Libvirt provider for my specific scenario <a href="https://git.kun.is/home/tf-modules/src/commit/e77d62f4a2a0c3847ffef4434c50a0f40f1fa794/debian/main.tf"><i class="fa-solid fa-code-branch"></i></a>:</p>
<div class="language-terraform highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="k">module</span> <span class="s2">"maestro"</span> <span class="p">{</span>
<span class="nx">source</span> <span class="p">=</span> <span class="s2">"git::https://git.kun.is/home/tf-modules.git//debian"</span>
<span class="nx">name</span> <span class="p">=</span> <span class="s2">"maestro"</span>
<span class="nx">domain_name</span> <span class="p">=</span> <span class="s2">"tf-maestro"</span>
<span class="nx">memory</span> <span class="p">=</span> <span class="mi">10240</span>
<span class="nx">mac</span> <span class="p">=</span> <span class="s2">"CA:FE:C0:FF:EE:08"</span>
<span class="p">}</span>
</code></pre></div></div>
<p>This automatically creates a Debian virtual machines with the properties specified.
It also sets up certificate-based SSH authentication which I talked about <a href="/homebrew-ssh-ca/">before</a>.</p>
<h1 id="clustering">Clustering</h1>
<p>With virtualization explained, let’s move up one level further.
Each of my three physical servers hosts a virtual machine running Docker, which together form a Docker Swarm.
I use Traefik as a reverse proxy which routes requests to the correct container.</p>
<p>All data is hosted on a single machine and made available to containers using NFS.
While this might not be very secure (as NFS is neither encrypted nor properly authenticated), it is quite fast.</p>
<p>As of today, I host the following services on my Docker Swarm <a href="https://git.kun.is/home/shoarma"><i class="fa-solid fa-code-branch"></i></a>:</p>
<ul>
<li><a href="https://forgejo.org/">Forgejo</a> as Git server</li>
<li><a href="https://www.freshrss.org/">FreshRSS</a> as RSS aggregator</li>
<li><a href="https://hedgedoc.org/">Hedgedoc</a> as Markdown note-taking</li>
<li><a href="https://inbucket.org/">Inbucket</a> for disposable email</li>
<li><a href="https://cyberchef.org/">Cyberchef</a> for the lulz</li>
<li><a href="https://kitchenowl.org/">Kitchenowl</a> for grocery lists</li>
<li><a href="https://joinmastodon.org/">Mastodon</a> for microblogging</li>
<li>A monitoring stack (read more below)</li>
<li><a href="https://nextcloud.com/">Nextcloud</a> for cloud storage</li>
<li><a href="https://pi-hole.net/">Pihole</a> to block advertisements</li>
<li><a href="https://radicale.org/v3.html">Radicale</a> for calendar and contacts sync</li>
<li><a href="https://www.seafile.com/en/home/">Seafile</a> for cloud storage and sync</li>
<li><a href="https://github.com/containrrr/shepherd">Shepherd</a> for automatic container updates</li>
<li><a href="https://nginx.org/en/">Nginx</a> hosting static content (like this page!)</li>
<li><a href="https://hub.docker.com/r/charypar/swarm-dashboard/#!">Docker Swarm dashboard</a></li>
<li><a href="https://syncthing.net/">Syncthing</a> for file sync</li>
</ul>
<h1 id="ci--cd">CI / CD</h1>
<p>For CI / CD, I run <a href="https://concourse-ci.org/">Concourse CI</a> in a separate VM.
This is needed, because Concourse heavily uses containers to create reproducible builds.</p>
<p>Although I should probably use it for more, I currently use my Concourse for three pipelines:</p>
<ul>
<li>A pipeline to build this static website and create a container image of it.
The image is then uploaded to the image registry of my Forgejo instance.
I love it when I can use stuff I previously built :)
The pipeline finally deploys this new image to the Docker Swarm <a href="https://git.kun.is/pim/static/src/commit/eee4f0c70af6f2a49fabb730df761baa6475db22/pipeline.yml"><i class="fa-solid fa-code-branch"></i></a>.</li>
<li>A pipeline to create a Concourse resource that sends Apprise alerts (Concourse-ception?) <a href="https://git.kun.is/pim/concourse-apprise-notifier/src/commit/b5d4413c1cd432bc856c45ec497a358aca1b8b21/pipeline.yml"><i class="fa-solid fa-code-branch"></i></a></li>
<li>A pipeline to build a custom Fluentd image with plugins installed <a href="https://git.kun.is/pim/fluentd"><i class="fa-solid fa-code-branch"></i></a></li>
</ul>
<h1 id="backups">Backups</h1>
<p>To create backups, I use <a href="https://www.borgbackup.org/">Borg</a>.
As I keep all data on one machine, this backup process is quite simple.
In fact, all this data is stored in a single Libvirt volume.
To configure Borg with a simple declarative script, I use <a href="https://torsion.org/borgmatic/">Borgmatic</a>.</p>
<p>In order to back up the data inside the Libvirt volume, I create a snapshot to a file.
Then I can mount this snapshot in my file system.
The files can then be backed up while the system is still running.
It is also possible to simply back up the Libvirt image, but this takes more time and storage <a href="https://git.kun.is/home/hypervisors/src/commit/71b96d462116e4160b6467533fc476f3deb9c306/ansible/roles/borg/backup.yml.j2"><i class="fa-solid fa-code-branch"></i></a>.</p>
<h1 id="monitoring-and-alerting">Monitoring and Alerting</h1>
<p>The last topic I would like to talk about is monitoring and alerting.
This is something I’m still actively improving and only just set up properly.</p>
<h2 id="alerting">Alerting</h2>
<p>For alerting, I wanted something that runs entirely on my own infrastructure.
I settled for Apprise + Ntfy.</p>
<p><a href="https://github.com/caronc/apprise">Apprise</a> is a server that is able to send notifications to dozens of services.
For application developers, it is thus only necessary to implement the Apprise API to gain access to all these services.
The Apprise API itself is also very simple.
By using Apprise, I can also easily switch to another notification service later.
<a href="https://ntfy.sh/">Ntfy</a> is free software made for mobile push notifications.</p>
<p>I use this alerting system in quite a lot of places in my infrastructure, for example when creating backups.</p>
<h2 id="uptime-monitoring">Uptime Monitoring</h2>
<p>The first monitoring setup I created, was using <a href="https://github.com/louislam/uptime-kuma">Uptime Kuma</a>.
Uptime Kuma periodically pings a service to see whether it is still running.
You can do a literal ping, test HTTP response codes, check database connectivity and much more.
I use it to check whether my services and VMs are online.
And the best part is, Uptime Kuma supports Apprise so I get push notifications on my phone whenever something goes down!</p>
<h2 id="metrics-and-log-monitoring">Metrics and Log Monitoring</h2>
<p>A new monitoring system I am still in the process of deploying is focused on metrics and logs.
I plan on creating a separate blog post about this, so keep an eye out for that (for example using RSS :)).
Safe to say, it is no basic ELK stack!</p>
<h1 id="conclusion">Conclusion</h1>
<p>That’s it for now!
Hopefully I inspired someone to build something… or how not to :)</p>
:ET

View file

@ -0,0 +1,2 @@
I"U<p><em>See the <a href="#update">Update</a> at the end of the article.</em></p>
:ET

View file

@ -0,0 +1,6 @@
I"f<p><a href="https://borgbackup.readthedocs.io/en/stable/">BorgBackup</a> and <a href="https://torsion.org/borgmatic/">Borgmatic</a> have been my go-to tools to create backups for my home lab since I started creating backups.
Using <a href="https://wiki.archlinux.org/title/systemd/Timers">Systemd Timers</a>, I regularly create a backup every night.
I also monitor successful execution of the backup process, in case some error occurs.
However, the way I set this up resulted in not receiving notifications.
Even though it boils down to RTFM, I’d like to explain my error and how to handle errors correctly.</p>
:ET

View file

@ -0,0 +1,7 @@
I"1<p>When I was scaling up my home lab, I started thinking more about data management.
I hadn’t (and still haven’t) set up any form of network storage.
I have, however, set up a backup mechanism using <a href="https://borgbackup.readthedocs.io/en/stable/">Borg</a>.
Still, I want to operate lots of virtual machines, and backing up each one of them separately seemed excessive.
So I started thinking, what if I just let the host machines back up the data?
After all, the amount of physical hosts I have in my home lab is unlikely to increase drastically.</p>
:ET

View file

@ -0,0 +1,4 @@
I"<p>I have been meaning to write about the current state of my home lab infrastructure for a while now.
Now that the most important parts are quite stable, I think the opportunity is ripe.
I expect this post to get quite long, so I might have to leave out some details along the way.</p>
:ET

View file

@ -0,0 +1,54 @@
I"Û<p>Previously, I have used <a href="https://github.com/prometheus/node_exporter">Prometheus’ node_exporter</a> to monitor the memory usage of my servers.
However, I am currently in the process of moving away from Prometheus to a new monitoring stack.
While I understand the advantages, I felt like Prometheus’ pull architecture does not scale nicely.
Every time I spin up a new machine, I would have to centrally change Prometheus’ configuration in order for it to query the new server.</p>
<p>In order to collect metrics from my servers, I am now using <a href="https://fluentbit.io/">Fluent Bit</a>.
I love Fluent Bit’s way of configuration which I can easily express as code and automate, its focus on efficiency and being vendor agnostic.
However, I have stumbled upon one, in my opinion, big issue with Fluent Bit: its <code>mem</code> plugin to monitor memory usage is <em>completely</em> useless.
In this post I will go over the problem and my temporary solution.</p>
<h1 id="the-problem-with-fluent-bits-codememcode-plugin">The Problem with Fluent Bit’s <code>mem</code> Plugin</h1>
<p>As can be seen in <a href="https://docs.fluentbit.io/manual/pipeline/inputs/memory-metrics">the documentation</a>, Fluent Bit’s <code>mem</code> input plugin exposes a few metrics regarding memory usage which should be self-explaining: <code>Mem.total</code>, <code>Mem.used</code>, <code>Mem.free</code>, <code>Swap.total</code>, <code>Swap.used</code> and <code>Swap.free</code>.
The problem is that <code>Mem.used</code> and <code>Mem.free</code> do not accurately reflect the machine’s actual memory usage.
This is because these metrics include caches and buffers, which can be reclaimed by other processes if needed.
Most tools reporting memory usage therefore include an additional metric that specifies the memory <em>available</em> on the system.
For example, the command <code>free -m</code> reports the following data on my laptop:</p>
<div class="language-text highlighter-rouge"><div class="highlight"><pre class="highlight"><code> total used free shared buff/cache available
Mem: 15864 3728 7334 518 5647 12136
Swap: 2383 663 1720
</code></pre></div></div>
<p>Notice that the <code>available</code> memory is more than <code>free</code> memory.</p>
<p>While the issue is known (see <a href="https://github.com/fluent/fluent-bit/pull/3092">this</a> and <a href="https://github.com/fluent/fluent-bit/pull/5237">this</a> link), it is unfortunately not yet fixed.</p>
<h1 id="a-temporary-solution">A Temporary Solution</h1>
<p>The issues I linked previously provide stand-alone plugins that fix the problem, which will hopefully be merged in the official project at some point.
However, I didn’t want to install another plugin so I used Fluent Bit’s <code>exec</code> input plugin and the <code>free</code> Linux command to query memory usage like so:</p>
<div class="language-conf highlighter-rouge"><div class="highlight"><pre class="highlight"><code>[<span class="n">INPUT</span>]
<span class="n">Name</span> <span class="n">exec</span>
<span class="n">Tag</span> <span class="n">memory</span>
<span class="n">Command</span> <span class="n">free</span> -<span class="n">m</span> | <span class="n">tail</span> -<span class="m">2</span> | <span class="n">tr</span> <span class="s1">'\n'</span> <span class="s1">' '</span>
<span class="n">Interval_Sec</span> <span class="m">1</span>
</code></pre></div></div>
<p>To interpret the command’s output, I created the following filter:</p>
<div class="language-conf highlighter-rouge"><div class="highlight"><pre class="highlight"><code>[<span class="n">FILTER</span>]
<span class="n">Name</span> <span class="n">parser</span>
<span class="n">Match</span> <span class="n">memory</span>
<span class="n">Key_Name</span> <span class="n">exec</span>
<span class="n">Parser</span> <span class="n">free</span>
</code></pre></div></div>
<p>Lastly, I created the following parser (warning: regex shitcode incoming):</p>
<div class="language-conf highlighter-rouge"><div class="highlight"><pre class="highlight"><code>[<span class="n">PARSER</span>]
<span class="n">Name</span> <span class="n">free</span>
<span class="n">Format</span> <span class="n">regex</span>
<span class="n">Regex</span> ^<span class="n">Mem</span>:\<span class="n">s</span>+(?&lt;<span class="n">mem_total</span>&gt;\<span class="n">d</span>+)\<span class="n">s</span>+(?&lt;<span class="n">mem_used</span>&gt;\<span class="n">d</span>+)\<span class="n">s</span>+(?&lt;<span class="n">mem_free</span>&gt;\<span class="n">d</span>+)\<span class="n">s</span>+(?&lt;<span class="n">mem_shared</span>&gt;\<span class="n">d</span>+)\<span class="n">s</span>+(?&lt;<span class="n">mem_buff_cache</span>&gt;\<span class="n">d</span>+)\<span class="n">s</span>+(?&lt;<span class="n">mem_available</span>&gt;\<span class="n">d</span>+) <span class="n">Swap</span>:\<span class="n">s</span>+(?&lt;<span class="n">swap_total</span>&gt;\<span class="n">d</span>+)\<span class="n">s</span>+(?&lt;<span class="n">swap_used</span>&gt;\<span class="n">d</span>+)\<span class="n">s</span>+(?&lt;<span class="n">swap_free</span>&gt;\<span class="n">d</span>+)
<span class="n">Types</span> <span class="n">mem_total</span>:<span class="n">integer</span> <span class="n">mem_used</span>:<span class="n">integer</span> <span class="n">mem_free</span>:<span class="n">integer</span> <span class="n">mem_shared</span>:<span class="n">integer</span> <span class="n">mem_buff_cache</span>:<span class="n">integer</span> <span class="n">mem_available</span>:<span class="n">integer</span> <span class="n">swap_total</span>:<span class="n">integer</span> <span class="n">swap_used</span>:<span class="n">integer</span>
</code></pre></div></div>
<p>With this configuration, you can use the <code>mem_available</code> metric to get accurate memory usage in Fluent Bit.</p>
<h1 id="conclusion">Conclusion</h1>
<p>Let’s hope Fluent Bit’s <code>mem</code> input plugin is improved upon soon so this hacky solution is not needed.
I also intend to document my new monitoring pipeline, which at the moment consists of:</p>
<ul>
<li>Fluent Bit</li>
<li>Fluentd</li>
<li>Elasticsearch</li>
<li>Grafana</li>
</ul>
:ET

View file

@ -0,0 +1,4 @@
I"<02><p>Recently, I deployed <a href="https://concourse-ci.org/">Concourse CI</a> because I wanted to get my feet wet with a CI/CD pipeline.
However, I had a practical use case lying around for a long time: automatically compiling my static website and deploying it to my docker Swarm.
This took some time getting right, but the result works like a charm (<a href="https://git.kun.is/pim/static">source code</a>).</p>
:ET

View file

@ -0,0 +1,5 @@
I"<p>For months, I’ve had a peculiar problem with my laptop: once in a while, seemingly without reason, my laptop screen would freeze.
This only happened on my laptop screen, and not on an external monitor.
I had kind of learned to live with it as I couldn’t find a solution online.
The only remedy I had was reloading my window manager, which would often unfreeze the screen.</p>
:ET

View file

@ -0,0 +1,9 @@
I"³<p>Finally, after several months this website is up and running again!</p>
<p>My homelab has completely changed, but the reason why it initially went offline is because of my failing CI installation.
I was using <a href="https://concourse-ci.org/">Concourse CI</a> which I was initially interested in due to the reproducible nature of its builds using containers.
However, for some reason pipelines were sporadically getting stuck when I reboot the virtual machine it was running on.
The fix was very annoying: I had to re-create the pipelines manually (which feels very backwards for a CI/CD system!).
Additionally, my virtual machine setup back then was also quite fragile and I decided to get rid of that as well.</p>
<p>I have learned that having an escape hatch to deploy something is probably a good idea 😅
Expect a new overview of my homelab soon, in the same vein as <a href="/infrastructure-snapshot/">this post from last year</a>!</p>
:ET

View file

@ -0,0 +1,38 @@
I": <p><em>See the <a href="#update">Update</a> at the end of the article.</em></p>
<p>Already a week ago, Hashicorp <a href="https://www.hashicorp.com/blog/hashicorp-adopts-business-source-license">announced</a> it would change the license on almost all its projects.
Unlike <a href="https://github.com/hashicorp/terraform/commit/ab411a1952f5b28e6c4bd73071194761da36a83f">their previous license</a>, which was the Mozilla Public License 2.0, their new license is no longer truly open source.
It is called the Business Source License™ and restricts use of their software for competitors.
In their own words:</p>
<blockquote>
<p>Vendors who provide competitive services built on our community products will no longer be able to incorporate future releases, bug fixes, or security patches contributed to our products.</p>
</blockquote>
<p>I found <a href="https://meshedinsights.com/2021/02/02/rights-ratchet/">a great article</a> by MeshedInsights that names this behaviour the “rights ratchet model”.
They define a script start-ups use to garner the interest of open source enthusiasts but eventually turn their back on them for profit.
The reason why Hashicorp can do this, is because contributors signed a copyright license agreement (CLA).
This agreement transfers the copyright of contributors’ code to Hashicorp, allowing them to change the license if they want to.</p>
<p>I find this action really regrettable because I like their products.
This sort of action was also why I wanted to avoid using an Elastic stack, which also had their <a href="https://www.elastic.co/pricing/faq/licensing">license changed</a>.<sup class="footnote-ref"><a href="#fn1" id="fnref1">1</a></sup>
These companies do not respect their contributors and the software stack beneath they built their product on, which is actually open source (Golang, Linux, etc.).</p>
<h1 id="impact-on-my-home-lab">Impact on my Home Lab</h1>
<p>I am using Terraform in my home lab to manage several important things:</p>
<ul>
<li>Libvirt virtual machines</li>
<li>PowerDNS records</li>
<li>Elasticsearch configuration</li>
</ul>
<p>With Hashicorp’s anti-open-source move, I intend to move away from Terraform in the future.
While I will not use Hashicorp’s products for new personal projects, I will leave my current setup as-is for some time because there is no real need to quickly migrate.</p>
<p>I might also investigate some of Terraform’s competitors, like Pulumi.
Hopefully there is a project that respects open source which I can use in the future.</p>
<h1 id="update">Update</h1>
<p>A promising fork of Terraform has been announced called <a href="https://opentf.org/announcement">OpenTF</a>.
They intend to take part of the Cloud Native Computing Foundation, which I think is a good effort because Terraform is so important for modern cloud infrastructures.</p>
<h1 id="footnotes">Footnotes</h1>
<section class="footnotes">
<ol>
<li id="fn1">
<p>While I am still using Elasticsearch, I don’t use the rest of the Elastic stack in order to prevent a vendor lock-in. <a href="#fnref1" class="footnote-backref">↩</a></p>
</li>
</ol>
</section>
:ET

View file

@ -0,0 +1,2 @@
I"H<p>Here I might post some personally identifiable information.</p>
:ET

View file

@ -0,0 +1,41 @@
I"q<p>When I was scaling up my home lab, I started thinking more about data management.
I hadn’t (and still haven’t) set up any form of network storage.
I have, however, set up a backup mechanism using <a href="https://borgbackup.readthedocs.io/en/stable/">Borg</a>.
Still, I want to operate lots of virtual machines, and backing up each one of them separately seemed excessive.
So I started thinking, what if I just let the host machines back up the data?
After all, the amount of physical hosts I have in my home lab is unlikely to increase drastically.</p>
<h1 id="the-use-case-for-sharing-directories">The Use Case for Sharing Directories</h1>
<p>I started working out this idea further.
Without network storage, I needed a way for guest VMs to access the hosts disks.
Here there are two possibilities, either expose some block device or a file system.
Creating a whole virtual disk for just the data of some VMs seemed wasteful, and from my experiences also increases backup times dramatically.
I therefore searched for a way to mount a directory from the host OS on the guest VM.
This is when I stumbled upon <a href="https://rabexc.org/posts/p9-setup-in-libvirt">this blog</a> post talking about sharing directories with virtual machines.</p>
<h1 id="sharing-directories-with-virtio-9p">Sharing Directories with virtio-9p</h1>
<p>virtio-9p is a way to map a directory on the host OS to a special device on the virtual machine.
In <code>virt-manager</code>, it looks like the following:
<img src="virt-manager.png" alt="picture showing virt-manager configuration to map a directory to a VM" />
Under the hood, virtio-9p uses the 9pnet protocol.
Originally developed at Bell Labs, support for this is available in all modern Linux kernels.
If you share a directory with a VM, you can then mount it.
Below is an extract of my <code>/etc/fstab</code> to automatically mount the directory:</p>
<div class="language-text highlighter-rouge"><div class="highlight"><pre class="highlight"><code>data /mnt/data 9p trans=virtio,rw 0 0
</code></pre></div></div>
<p>The first argument (<code>data</code>) refers to the name you gave this share from the host.
With the <code>trans</code> option we specify that this is a virtio share.</p>
<h1 id="problems-with-virtio-9p">Problems with virtio-9p</h1>
<p>At first I had no problems with my setup, but I am now contemplating just moving to a network storage based setup because of two problems.</p>
<p>The first problem is that some files have suddenly changed ownership from <code>libvirt-qemu</code> to <code>root</code>.
If the file is owned by <code>root</code>, the guest OS can still see it, but cannot access it.
I am not entirely sure the problem lies with virtio, but I suspect it is.
For anyone experiencing this problem, I wrote a small shell script to revert ownership to the <code>libvirt-qemu</code> user:</p>
<div class="language-shell highlighter-rouge"><div class="highlight"><pre class="highlight"><code>find <span class="nt">-printf</span> <span class="s2">"%h/%f %u</span><span class="se">\n</span><span class="s2">"</span> | <span class="nb">grep </span>root | <span class="nb">cut</span> <span class="nt">-d</span> <span class="s1">' '</span> <span class="nt">-f1</span> | xargs <span class="nb">chown </span>libvirt-qemu:libvirt-qemu
</code></pre></div></div>
<p>Another problem that I have experienced, is guests being unable to mount the directory at all.
I have only experienced this problem once, but it was highly annoying.
To fix it, I had to reboot the whole physical machine.</p>
<h1 id="alternatives">Alternatives</h1>
<p>virtio-9p seemed like a good idea, but as discussed, I had some problems with it.
It seems <a href="https://virtio-fs.gitlab.io/">virtioFS</a> might be an interesting alternative as it is designed specifically for sharing directories with VMs.</p>
<p>As for me, I will probably finally look into deploying network storage either with NFS or SSHFS.</p>
:ET

View file

@ -0,0 +1,42 @@
I"<p><a href="https://borgbackup.readthedocs.io/en/stable/">BorgBackup</a> and <a href="https://torsion.org/borgmatic/">Borgmatic</a> have been my go-to tools to create backups for my home lab since I started creating backups.
Using <a href="https://wiki.archlinux.org/title/systemd/Timers">Systemd Timers</a>, I regularly create a backup every night.
I also monitor successful execution of the backup process, in case some error occurs.
However, the way I set this up resulted in not receiving notifications.
Even though it boils down to RTFM, I’d like to explain my error and how to handle errors correctly.</p>
<p>I was using the <code>on_error</code> option to handle errors, like so:</p>
<div class="language-yaml highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="na">on_error</span><span class="pi">:</span>
<span class="pi">-</span> <span class="s1">'</span><span class="s">apprise</span><span class="nv"> </span><span class="s">--body="Error</span><span class="nv"> </span><span class="s">while</span><span class="nv"> </span><span class="s">performing</span><span class="nv"> </span><span class="s">backup"</span><span class="nv"> </span><span class="s">&lt;URL&gt;</span><span class="nv"> </span><span class="s">||</span><span class="nv"> </span><span class="s">true'</span>
</code></pre></div></div>
<p>However, <code>on_error</code> does not handle errors from the execution of <code>before_everything</code> and <code>after_everything</code> hooks.
My solution to this was moving the error handling up to the Systemd service that calls Borgmatic.
This results in the following Systemd service:</p>
<div class="language-systemd highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="k">[Unit]</span>
<span class="nt">Description</span><span class="p">=</span>Backup data using Borgmatic
<span class="c"># Added</span>
<span class="nt">OnFailure</span><span class="p">=</span>backup-failure.service
<span class="k">[Service]</span>
<span class="nt">ExecStart</span><span class="p">=</span>/usr/bin/borgmatic --config /root/backup.yml
<span class="nt">Type</span><span class="p">=</span>oneshot
</code></pre></div></div>
<p>This handles any error, be it from Borgmatic’s hooks or itself.
The <code>backup-failure</code> service is very simple, and just calls Apprise to send a notification:</p>
<div class="language-systemd highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="k">[Unit]</span>
<span class="nt">Description</span><span class="p">=</span>Send backup failure notification
<span class="k">[Service]</span>
<span class="nt">Type</span><span class="p">=</span>oneshot
<span class="nt">ExecStart</span><span class="p">=</span>apprise --body="Failed to create backup!" &lt;URL&gt;
<span class="k">[Install]</span>
<span class="nt">WantedBy</span><span class="p">=</span>multi-user.target
</code></pre></div></div>
<h1 id="the-aftermath-or-what-i-learned">The Aftermath (or what I learned)</h1>
<p>Because the error handling and alerting weren’t working properly, my backups didn’t succeed for two weeks straight.
And, of course, you only notice your backups aren’t working when you actually need them.
This is exactly what happened: my disk was full and a MariaDB database crashed as a result of that.
Actually, the whole database seemed to be corrupt, and I find it worrying that MariaDB does not seem to be very resilient to failures (in comparison, a PostgreSQL database was able to recover automatically).
I then tried to recover the data using last night’s backup, only to find out there was no such backup.</p>
Fortunately, I had other means to recover the data so I incurred no data loss.</p>
<p>I already knew it is important to test backups, but I learned it is also important to test failures during backups!</p>
:ET

View file

@ -0,0 +1,2 @@
I"P<p>Finally, after several months this website is up and running again!</p>
:ET

View file

@ -0,0 +1,5 @@
I"ï<p>Previously, I have used <a href="https://github.com/prometheus/node_exporter">Prometheus’ node_exporter</a> to monitor the memory usage of my servers.
However, I am currently in the process of moving away from Prometheus to a new monitoring stack.
While I understand the advantages, I felt like Prometheus’ pull architecture does not scale nicely.
Every time I spin up a new machine, I would have to centrally change Prometheus’ configuration in order for it to query the new server.</p>
:ET

5
src/404.md Normal file
View file

@ -0,0 +1,5 @@
---
title: "404"
layout: 404
permalink: "/404.html"
---

34
src/Gemfile Normal file
View file

@ -0,0 +1,34 @@
source "https://rubygems.org"
# Hello! This is where you manage which Jekyll version is used to run.
# When you want to use a different version, change it below, save the
# file and run `bundle install`. Run Jekyll with `bundle exec`, like so:
#
# bundle exec jekyll serve
#
# This will help ensure the proper Jekyll version is running.
# Happy Jekylling!
gem "jekyll", "~> 4.1.0"
# This is the default theme for new Jekyll sites. You may change this to anything you like.
# If you want to use GitHub Pages, remove the "gem "jekyll"" above and
# uncomment the line below. To upgrade, run `bundle update github-pages`.
# gem "github-pages", group: :jekyll_plugins
# If you have any plugins, put them here!
group :jekyll_plugins do
gem 'jekyll-feed', '~> 0.13'
gem 'jekyll-sitemap', '~> 1.4'
gem 'jekyll-compose', '~> 0.12.0'
gem 'jekyll-postfiles', '~> 3.1'
gem 'jekyll-commonmark-ghpages'
end
# Windows does not include zoneinfo files, so bundle the tzinfo-data gem
# gem "tzinfo-data", platforms: [:mingw, :mswin, :x64_mingw, :jruby]
# Performance-booster for watching directories on Windows
# gem "wdm", "~> 0.1.0" if Gem.win_platform?
gem "webrick", "~> 1.7"

93
src/Gemfile.lock Normal file
View file

@ -0,0 +1,93 @@
GEM
remote: https://rubygems.org/
specs:
addressable (2.8.1)
public_suffix (>= 2.0.2, < 6.0)
colorator (1.1.0)
commonmarker (0.17.13)
ruby-enum (~> 0.5)
concurrent-ruby (1.2.0)
em-websocket (0.5.3)
eventmachine (>= 0.12.9)
http_parser.rb (~> 0)
eventmachine (1.2.7)
ffi (1.15.5)
forwardable-extended (2.6.0)
http_parser.rb (0.8.0)
i18n (1.12.0)
concurrent-ruby (~> 1.0)
jekyll (4.1.1)
addressable (~> 2.4)
colorator (~> 1.0)
em-websocket (~> 0.5)
i18n (~> 1.0)
jekyll-sass-converter (~> 2.0)
jekyll-watch (~> 2.0)
kramdown (~> 2.1)
kramdown-parser-gfm (~> 1.0)
liquid (~> 4.0)
mercenary (~> 0.4.0)
pathutil (~> 0.9)
rouge (~> 3.0)
safe_yaml (~> 1.0)
terminal-table (~> 1.8)
jekyll-commonmark (1.3.1)
commonmarker (~> 0.14)
jekyll (>= 3.7, < 5.0)
jekyll-commonmark-ghpages (0.1.6)
commonmarker (~> 0.17.6)
jekyll-commonmark (~> 1.2)
rouge (>= 2.0, < 4.0)
jekyll-compose (0.12.0)
jekyll (>= 3.7, < 5.0)
jekyll-feed (0.17.0)
jekyll (>= 3.7, < 5.0)
jekyll-postfiles (3.1.0)
jekyll (>= 3.8.6, < 5)
jekyll-sass-converter (2.2.0)
sassc (> 2.0.1, < 3.0)
jekyll-sitemap (1.4.0)
jekyll (>= 3.7, < 5.0)
jekyll-watch (2.2.1)
listen (~> 3.0)
kramdown (2.4.0)
rexml
kramdown-parser-gfm (1.1.0)
kramdown (~> 2.0)
liquid (4.0.4)
listen (3.8.0)
rb-fsevent (~> 0.10, >= 0.10.3)
rb-inotify (~> 0.9, >= 0.9.10)
mercenary (0.4.0)
pathutil (0.16.2)
forwardable-extended (~> 2.6)
public_suffix (5.0.1)
rb-fsevent (0.11.2)
rb-inotify (0.10.1)
ffi (~> 1.0)
rexml (3.2.5)
rouge (3.30.0)
ruby-enum (0.9.0)
i18n
safe_yaml (1.0.5)
sassc (2.4.0)
ffi (~> 1.9)
terminal-table (1.8.0)
unicode-display_width (~> 1.1, >= 1.1.1)
unicode-display_width (1.8.0)
webrick (1.7.0)
PLATFORMS
x86_64-linux
DEPENDENCIES
jekyll (~> 4.1.0)
jekyll-commonmark-ghpages
jekyll-compose (~> 0.12.0)
jekyll-feed (~> 0.13)
jekyll-postfiles (~> 3.1)
jekyll-sitemap (~> 1.4)
webrick (~> 1.7)
BUNDLED WITH
2.4.5

77
src/_config.yml Normal file
View file

@ -0,0 +1,77 @@
title: Pim Kunis
description: A pig's gotta fly
lang: en-US
timezone: Europe/Amsterdam
image: assets/img/avatar.jpg
repo: https://git.kun.is/pim/static
mode: light
author:
name: Pim Kunis
bio: A pig's gotta fly
username: pim
avatar: /assets/img/avatar.jpg
url: "https://pim.kun.is"
baseurl: "/"
permalink: /:title/
collections:
posts:
output: true
markdown: CommonMarkGhPages
highlighter: rouge
kramdown:
syntax_highlighter: rouge
defaults:
- scope:
path: ""
values:
layout: post
comments: false
jekyll_compose:
post_default_front_matter:
modified:
tags: []
description:
draft_default_front_matter:
modified:
tags: []
description:
number_of_posts: 5
sass:
style: compressed
include:
- _redirects
- .htaccess
exclude:
- CNAME
- Gemfile
- Gemfile.lock
- LICENSE
- CHANGELOG.md
- README.md
- node_modules
- CODE_OF_CONDUCT.md
- CONTRIBUTING.md
- lighthouse.png
- klise-*.gem
- klise.gemspec
- gemset.nix
plugins:
- jekyll-feed
- jekyll-sitemap
- jekyll-postfiles
- jekyll-commonmark-ghpages
commonmark:
options: ["SMART", "FOOTNOTES"]
extensions: ["strikethrough", "autolink", "table", "tagfilter"]

16
src/_data/menus.yml Normal file
View file

@ -0,0 +1,16 @@
- title: home
url: /
external: false
- title: archive
url: /archive/
external: false
- title: about
url: /about/
external: false # set true if you using external link, see below
# Example:
# - title: github
# url: https://github.com/piharpi/jekyll-klise
# external: true

View file

@ -0,0 +1,105 @@
{% capture headingsWorkspace %}
{% comment %}
Version 1.0.4
https://github.com/allejo/jekyll-anchor-headings
"Be the pull request you wish to see in the world." ~Ben Balter
Usage:
{% include anchor_headings.html html=content %}
Parameters:
* html (string) - the HTML of compiled markdown generated by kramdown in Jekyll
Optional Parameters:
* beforeHeading (bool) : false - Set to true if the anchor should be placed _before_ the heading's content
* anchorAttrs (string) : '' - Any custom HTML attributes that will be added to the `<a>` tag; you may NOT use `href`, `class` or `title`
* anchorBody (string) : '' - The content that will be placed inside the anchor; the `%heading%` placeholder is available
* anchorClass (string) : '' - The class(es) that will be used for each anchor. Separate multiple classes with a space
* anchorTitle (string) : '' - The `title` attribute that will be used for anchors
* h_min (int) : 1 - The minimum header level to build an anchor for; any header lower than this value will be ignored
* h_max (int) : 6 - The maximum header level to build an anchor for; any header greater than this value will be ignored
* bodyPrefix (string) : '' - Anything that should be inserted inside of the heading tag _before_ its anchor and content
* bodySuffix (string) : '' - Anything that should be inserted inside of the heading tag _after_ its anchor and content
Output:
The original HTML with the addition of anchors inside of all of the h1-h6 headings.
{% endcomment %}
{% assign minHeader = include.h_min | default: 1 %}
{% assign maxHeader = include.h_max | default: 6 %}
{% assign beforeHeading = include.beforeHeading %}
{% assign nodes = include.html | split: '<h' %}
{% capture edited_headings %}{% endcapture %}
{% for _node in nodes %}
{% capture node %}{{ _node | strip }}{% endcapture %}
{% if node == "" %}
{% continue %}
{% endif %}
{% assign nextChar = node | replace: '"', '' | strip | slice: 0, 1 %}
{% assign headerLevel = nextChar | times: 1 %}
<!-- If the level is cast to 0, it means it's not a h1-h6 tag, so let's try to fix it -->
{% if headerLevel == 0 %}
{% if nextChar != '<' and nextChar != '' %}
{% capture node %}<h{{ node }}{% endcapture %}
{% endif %}
{% capture edited_headings %}{{ edited_headings }}{{ node }}{% endcapture %}
{% continue %}
{% endif %}
{% assign _workspace = node | split: '</h' %}
{% assign _idWorkspace = _workspace[0] | split: 'id="' %}
{% assign _idWorkspace = _idWorkspace[1] | split: '"' %}
{% assign html_id = _idWorkspace[0] %}
{% capture _hAttrToStrip %}{{ _workspace[0] | split: '>' | first }}>{% endcapture %}
{% assign header = _workspace[0] | replace: _hAttrToStrip, '' %}
<!-- Build the anchor to inject for our heading -->
{% capture anchor %}{% endcapture %}
{% if html_id and headerLevel >= minHeader and headerLevel <= maxHeader %}
{% capture anchor %}href="#{{ html_id }}"{% endcapture %}
{% if include.anchorClass %}
{% capture anchor %}{{ anchor }} class="{{ include.anchorClass }}"{% endcapture %}
{% endif %}
{% if include.anchorTitle %}
{% capture anchor %}{{ anchor }} title="{{ include.anchorTitle | replace: '%heading%', header }}"{% endcapture %}
{% endif %}
{% if include.anchorAttrs %}
{% capture anchor %}{{ anchor }} {{ include.anchorAttrs }}{% endcapture %}
{% endif %}
{% capture anchor %}<a {{ anchor }}>{{ include.anchorBody | replace: '%heading%', header | default: '' }}</a>{% endcapture %}
<!-- In order to prevent adding extra space after a heading, we'll let the 'anchor' value contain it -->
{% if beforeHeading %}
{% capture anchor %}{{ anchor }} {% endcapture %}
{% else %}
{% capture anchor %} {{ anchor }}{% endcapture %}
{% endif %}
{% endif %}
{% capture new_heading %}
<h{{ _hAttrToStrip }}
{{ include.bodyPrefix }}
{% if beforeHeading %}
{{ anchor }}{{ header }}
{% else %}
{{ header }}{{ anchor }}
{% endif %}
{{ include.bodySuffix }}
</h{{ _workspace | last }}
{% endcapture %}
{% capture edited_headings %}{{ edited_headings }}{{ new_heading }}{% endcapture %}
{% endfor %}
{% endcapture %}{% assign headingsWorkspace = '' %}{{ edited_headings | strip }}

View file

@ -0,0 +1,9 @@
<div class="author">
<img
class="author-avatar"
src="{{ site.author.avatar }}"
alt="{{ site.author.username }}"
/>
<h2 class="author-name">{{ site.author.name }}</h2>
<p class="author-bio">{{ site.author.bio }}</p>
</div>

View file

@ -0,0 +1,10 @@
<!-- Unnecessary file; however, you can still use it for a comment section, e.g. Disqus -->
<script
src="https://utteranc.es/client.js"
repo="username/reponame"
issue-term="pathname"
label="✨ comment ✨"
theme="github-light"
crossorigin="anonymous"
async
></script>

28
src/_includes/footer.html Normal file
View file

@ -0,0 +1,28 @@
<footer class="footer">
<a class="footer_item" href="https://git.kun.is/pim"><i class="fa-solid fa-code-branch"></i> Git</a>
<span class="footer_item">&copy; {{ site.time | date: "%Y" }}</span>
<small class="footer_copyright">
<!-- Klisé Theme: https://github.com/piharpi/jekyll-klise -->
<a
href="https://github.com/piharpi/jekyll-klise"
target="_blank"
rel="noreferrer noopener"
>klisé</a
>
theme on
<a href="https://jekyllrb.com" target="_blank" rel="noreferrer noopener"
>jekyll</a
>
</small>
</footer>
<script src="/assets/js/main.js" defer="defer"></script>
{%- if page.url == '/archive/' -%}
<script src="/assets/js/search.min.js"></script>
<script>
var sjs = SimpleJekyllSearch({
searchInput: document.getElementById('search-input'),
resultsContainer: document.getElementById('search-results'),
json: '/assets/search.json',
});
</script>
{%- endif -%}

165
src/_includes/header.html Normal file
View file

@ -0,0 +1,165 @@
<head prefix="og: http://ogp.me/ns#">
<meta charset="UTF-8" />
<meta http-equiv="X-UA-Compatible" content="ie=edge" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta name="mobile-web-app-capable" content="yes" />
<meta name="apple-mobile-web-app-capable" content="yes" />
<meta name="application-name" content="{{ site.title }}" />
<meta name="apple-mobile-web-app-status-bar-style" content="#fff" />
<meta name="apple-mobile-web-app-title" content="{{ site.title }}" />
<title>
{% if page.title %}{{ page.title | escape }} - {{ site.title }}{% else %}{{
site.title | escape }}{% endif %}
</title>
<link
rel="alternate"
href="{{
page.url | remove: 'index.html' | remove: '.html' | absolute_url
}}"
hreflang="{{ site.lang }}"
/>
<link
rel="canonical"
href="{{
page.url | remove: 'index.html' | remove: '.html' | absolute_url
}}"
/>
{% if paginator.previous_page %}
<link
rel="prev"
href="{{
paginator.previous_page_path
| remove: 'index.html'
| remove: '.html'
}}"
/>
{% endif %} {% if paginator.next_page %}
<link
rel="next"
href="{{
paginator.next_page_path
| remove: 'index.html'
| remove: '.html'
}}"
/>
{% endif %}
<meta
name="description"
content="{{
page.description
| default: site.description
| strip_html
| normalize_whitespace
| truncate: 200
| escape
}}"
/>
<meta name="referrer" content="no-referrer-when-downgrade" />
<meta property="fb:app_id" content="{{ site.fb_appid }}" />
<meta
property="og:site_name"
content="{% if page.title %}{{ page.title | escape }} | {{
site.author.name
}}{% else %}{{ site.title | escape }}{% endif %}"
/>
<meta
property="og:title"
content="{% if page.title %}{{ page.title | escape }} | {{
site.author.name
}}{% else %}{{ site.title | escape }}{% endif %}"
/>
{% if page.location %}
<meta property="og:type" content="article" />
<meta
property="article:publisher"
content="https://web.facebook.com/{{ site.author.facebook }}"
/>
{% else %}
<meta property="og:type" content="website" />
{% endif %}
<meta
property="og:url"
content="{{
page.url | remove: 'index.html' | remove: '.html' | absolute_url
}}"
/>
<meta
property="og:description"
content="{{
page.description
| default: site.description
| strip_html
| normalize_whitespace
| truncate: 200
| escape
}}"
/>
{% if page.image %}
<meta property="og:image" content="{{ page.image | absolute_url }}" />
{% else %}
<meta property="og:image" content="{{ site.image | absolute_url }}" />
{% endif %}
<meta property="og:image:width" content="640" />
<meta property="og:image:height" content="640" />
<meta name="twitter:card" content="summary" />
<meta
name="twitter:title"
content="{% if page.title %}{{ page.title | escape }} | {{
site.author.twitter
}}{% else %}{{ site.title | escape }}{% endif %}"
/>
<meta
name="twitter:url"
content="{{
page.url | remove: 'index.html' | remove: '.html' | absolute_url
}}"
/>
<meta name="twitter:site" content="@{{ site.author.twitter }}" />
<meta name="twitter:creator" content="@{{ site.author.twitter }}" />
<meta
name="twitter:description"
content="{{
page.description
| default: site.description
| strip_html
| normalize_whitespace
| truncate: 200
| escape
}}"
/>
{% if page.image %}
<meta name="twitter:image" content="{{ page.image | absolute_url }}" />
{% else %}
<meta name="twitter:image" content="{{ site.image | absolute_url }}" />
{% endif %} {% feed_meta %}
<link
rel="apple-touch-icon"
sizes="180x180"
href="/assets/favicons/apple-touch-icon.png"
/>
<link
rel="icon"
type="image/png"
sizes="32x32"
href="/assets/favicons/favicon-32x32.png"
/>
<link
rel="icon"
type="image/png"
sizes="16x16"
href="/assets/favicons/favicon-16x16.png"
/>
<link rel="manifest" href="/assets/favicons/site.webmanifest" />
<link
rel="mask-icon"
href="/assets/favicons/safari-pinned-tab.svg"
color="#5bbad5"
/>
<meta name="apple-mobile-web-app-title" content="Jekyll Klise" />
<meta name="application-name" content="Jekyll Klise" />
<meta name="msapplication-TileColor" content="#da532c" />
<meta name="theme-color" content="#2c2c2c" />
<link rel="stylesheet" href="/assets/css/style.css" />
<link href="/assets/css/fontawesome.all.min.css" rel="stylesheet" />
</head>

211
src/_includes/navbar.html Normal file
View file

@ -0,0 +1,211 @@
<div class="navbar" role="navigation">
<nav class="menu">
<input type="checkbox" id="menu-trigger" class="menu-trigger" />
<label for="menu-trigger">
<span class="menu-icon">
<svg
xmlns="http://www.w3.org/2000/svg"
width="24"
height="24"
viewBox="0 0 512 512"
>
<path
d="M64,384H448V341.33H64Zm0-106.67H448V234.67H64ZM64,128v42.67H448V128Z"
/>
</svg>
</span>
</label>
<a id="mode">
<svg
class="mode-sunny"
xmlns="http://www.w3.org/2000/svg"
width="24"
height="24"
viewBox="0 0 512 512"
>
<title>LIGHT</title>
<line
x1="256"
y1="48"
x2="256"
y2="96"
style="stroke-linecap:round;stroke-miterlimit:10;stroke-width:32px"
/>
<line
x1="256"
y1="416"
x2="256"
y2="464"
style="stroke-linecap:round;stroke-miterlimit:10;stroke-width:32px"
/>
<line
x1="403.08"
y1="108.92"
x2="369.14"
y2="142.86"
style="stroke-linecap:round;stroke-miterlimit:10;stroke-width:32px"
/>
<line
x1="142.86"
y1="369.14"
x2="108.92"
y2="403.08"
style="stroke-linecap:round;stroke-miterlimit:10;stroke-width:32px"
/>
<line
x1="464"
y1="256"
x2="416"
y2="256"
style="stroke-linecap:round;stroke-miterlimit:10;stroke-width:32px"
/>
<line
x1="96"
y1="256"
x2="48"
y2="256"
style="stroke-linecap:round;stroke-miterlimit:10;stroke-width:32px"
/>
<line
x1="403.08"
y1="403.08"
x2="369.14"
y2="369.14"
style="stroke-linecap:round;stroke-miterlimit:10;stroke-width:32px"
/>
<line
x1="142.86"
y1="142.86"
x2="108.92"
y2="108.92"
style="stroke-linecap:round;stroke-miterlimit:10;stroke-width:32px"
/>
<circle
cx="256"
cy="256"
r="80"
style="stroke-linecap:round;stroke-miterlimit:10;stroke-width:32px"
/>
</svg>
<svg
class="mode-moon"
xmlns="http://www.w3.org/2000/svg"
width="24"
height="24"
viewBox="0 0 512 512"
>
<title>DARK</title>
<line
x1="256"
y1="48"
x2="256"
y2="96"
style="stroke-linecap:round;stroke-miterlimit:10;stroke-width:32px"
/>
<line
x1="256"
y1="416"
x2="256"
y2="464"
style="stroke-linecap:round;stroke-miterlimit:10;stroke-width:32px"
/>
<line
x1="403.08"
y1="108.92"
x2="369.14"
y2="142.86"
style="stroke-linecap:round;stroke-miterlimit:10;stroke-width:32px"
/>
<line
x1="142.86"
y1="369.14"
x2="108.92"
y2="403.08"
style="stroke-linecap:round;stroke-miterlimit:10;stroke-width:32px"
/>
<line
x1="464"
y1="256"
x2="416"
y2="256"
style="stroke-linecap:round;stroke-miterlimit:10;stroke-width:32px"
/>
<line
x1="96"
y1="256"
x2="48"
y2="256"
style="stroke-linecap:round;stroke-miterlimit:10;stroke-width:32px"
/>
<line
x1="403.08"
y1="403.08"
x2="369.14"
y2="369.14"
style="stroke-linecap:round;stroke-miterlimit:10;stroke-width:32px"
/>
<line
x1="142.86"
y1="142.86"
x2="108.92"
y2="108.92"
style="stroke-linecap:round;stroke-miterlimit:10;stroke-width:32px"
/>
<circle
cx="256"
cy="256"
r="80"
style="stroke-linecap:round;stroke-miterlimit:10;stroke-width:32px"
/>
</svg>
</a>
<div class="trigger">
<div class="trigger-container">
{%- assign url = page.url -%}
{%- assign menus = site.data.menus -%}
{%- if menus %}
{%- for menu in menus -%}
{%- if url == menu.url -%}
<a class="menu-link active" href="{{ menu.url }}">{{ menu.title }}</a>
{%- else -%}
{%- if menu.external -%}
<a
class="menu-link"
href="{{ menu.url }}"
target="_blank"
rel="noopener"
>{{ menu.title }}</a
>
{%- else -%}
<a class="menu-link" href="{{ menu.url }}">{{ menu.title }}</a>
{%- endif -%}
{%- endif -%}
{%- endfor -%}
{%- else -%}
<a class="menu-link {% if url == '/' %}active{% endif %}" href="/">home</a>
<a class="menu-link {% if url == '/about/' %}active{% endif %}" href="/about">about</a>
{%- endif -%}
<a class="menu-link rss" href="/feed.xml">
<svg
xmlns="http://www.w3.org/2000/svg"
width="17"
height="17"
viewBox="0 0 512 512"
fill="#ED812E"
>
<title>RSS</title>
<path
d="M108.56,342.78a60.34,60.34,0,1,0,60.56,60.44A60.63,60.63,0,0,0,108.56,342.78Z"
/>
<path
d="M48,186.67v86.55c52,0,101.94,15.39,138.67,52.11s52,86.56,52,138.67h86.66C325.33,312.44,199.67,186.67,48,186.67Z"
/>
<path
d="M48,48v86.56c185.25,0,329.22,144.08,329.22,329.44H464C464,234.66,277.67,48,48,48Z"
/>
</svg>
</a>
</div>
</div>
</nav>
</div>

View file

@ -0,0 +1,16 @@
<nav class="post-nav">
{% if page.previous %}
<a
class="post-nav-item post-nav-prev"
href="{{ page.previous | relative_url }}"
>
<div class="nav-arrow">Previous</div>
<span class="post-title">{{ page.previous.title }}</span>
</a>
{% endif %} {% if page.next %}
<a class="post-nav-item post-nav-next" href="{{ page.next | relative_url }}">
<div class="nav-arrow">Next</div>
<span class="post-title">{{ page.next.title }}</span>
</a>
{% endif %}
</nav>

View file

@ -0,0 +1,21 @@
<!-- NOTE: unused file, but you can use it if necessary -->
<!-- <div class="pagination">
{% if paginator.previous_page %}
<a
class="page-previous"
href="{{ paginator.previous_page_path }}"
class="previous"
>
<span aria-hidden="true"></span> NEWER POSTS
</a>
{% endif %}
<span class="page_number"
>PAGE {{ paginator.page }} OF {{ paginator.total_pages }}</span
>
{% if paginator.next_page %}
<a class="page-next" href="{{ paginator.next_page_path }}" class="next"
>OLDER POSTS
<span aria-hidden="true"></span>
</a>
{% endif %}
</div> -->

45
src/_layouts/404.html Normal file
View file

@ -0,0 +1,45 @@
---
layout: compress
---
<!DOCTYPE html>
<html lang="{{ page.lang | default: site.lang | default: " en " }}">
{% include header.html %}
<body data-theme="{{ site.mode }}" class="notransition">
<script>
const body = document.body;
const data = body.getAttribute("data-theme");
const initTheme = (state) => {
if (state === "dark") {
body.setAttribute("data-theme", "dark");
} else if (state === "light") {
body.removeAttribute("data-theme");
} else {
localStorage.setItem("theme", data);
}
};
initTheme(localStorage.getItem("theme"));
setTimeout(() => body.classList.remove("notransition"), 75);
</script>
{% include navbar.html %}
<div class="wrapper">
<main aria-label="Content">
<div class="not-found">
<div class="container">
<div class="title">404</div>
<a class="solution" href="{{ site.url }}">get me the fuck out of here</a>
</div>
</div>
</main>
{% include footer.html %}
</div>
</body>
</html>

View file

@ -0,0 +1,4 @@
---
---
{% if site.compress_html.ignore.envs contains jekyll.environment %}{{ content }}{% else %}{% capture _content %}{{ content }}{% endcapture %}{% if site.compress_html.endings == "all" %}{% assign _endings = "html head body li dt dd p rt rp optgroup option colgroup caption thead tbody tfoot tr td th" | split: " " %}{% else %}{% assign _endings = site.compress_html.endings %}{% endif %}{% for _element in _endings %}{% capture _end %}</{{ _element }}>{% endcapture %}{% assign _content = _content | remove: _end %}{% endfor %}{% if site.compress_html.comments.size == 2 %}{% assign _comment_befores = _content | split: site.compress_html.comments.first %}{% for _comment_before in _comment_befores %}{% assign _comment_content = _comment_before | split: site.compress_html.comments.last | first %}{% if _comment_content %}{% capture _comment %}{{ site.compress_html.comments.first }}{{ _comment_content }}{{ site.compress_html.comments.last }}{% endcapture %}{% assign _content = _content | remove: _comment %}{% endif %}{% endfor %}{% endif %}{% assign _pre_befores = _content | split: "<pre" %}{% assign _content = "" %}{% for _pre_before in _pre_befores %}{% assign _pres = _pre_before | split: "</pre>" %}{% case _pres.size %}{% when 2 %}{% capture _content %}{{ _content }}<pre{{ _pres.first }}</pre>{{ _pres.last | split: " " | join: " " }}{% endcapture %}{% when 1 %}{% capture _content %}{{ _content }}{{ _pres.last | split: " " | join: " " }}{% endcapture %}{% endcase %}{% endfor %}{% if site.compress_html.clippings == "all" %}{% assign _clippings = "html head title base link meta style body article section nav aside h1 h2 h3 h4 h5 h6 hgroup header footer address p hr blockquote ol ul li dl dt dd figure figcaption main div table caption colgroup col tbody thead tfoot tr td th" | split: " " %}{% else %}{% assign _clippings = site.compress_html.clippings %}{% endif %}{% for _element in _clippings %}{% assign _edges = " <e;<e; </e>;</e>;</e> ;</e>" | replace: "e", _element | split: 
";" %}{% assign _content = _content | replace: _edges[0], _edges[1] | replace: _edges[2], _edges[3] | replace: _edges[4], _edges[5] %}{% endfor %}{{ _content }}{% endif %}

38
src/_layouts/default.html Normal file
View file

@ -0,0 +1,38 @@
---
layout: compress
---
<!DOCTYPE html>
<html lang="{{ page.lang | default: site.lang | default: " en " }}">
{% include header.html %}
<body data-theme="{{ site.mode }}" class="notransition">
<script>
const body = document.body;
const data = body.getAttribute("data-theme");
const initTheme = (state) => {
if (state === "dark") {
body.setAttribute("data-theme", "dark");
} else if (state === "light") {
body.removeAttribute("data-theme");
} else {
localStorage.setItem("theme", data);
}
};
initTheme(localStorage.getItem("theme"));
setTimeout(() => body.classList.remove("notransition"), 75);
</script>
{% include navbar.html %}
<div class="wrapper">
{% include author.html %}
<main aria-label="Content">
{{ content }}
</main>
{% include footer.html %}
</div>
</body>
</html>

14
src/_layouts/home.html Normal file
View file

@ -0,0 +1,14 @@
---
layout: default
home: true
---
<h3 class="posts-item-note" aria-label="Recent Posts">Recent Posts</h3>
{%- for post in site.posts limit: site.number_of_posts -%}
<article class="post-item">
<span class="post-item-date">{{ post.date | date: "%b %d, %Y" }}</span>
<h4 class="post-item-title">
<a href="{{ post.url }}">{{ post.title | escape }}</a>
</h4>
</article>
{%- endfor -%}

43
src/_layouts/page.html Normal file
View file

@ -0,0 +1,43 @@
---
layout: compress
---
<!DOCTYPE html>
<html lang="{{ page.lang | default: site.lang | default: " en " }}">
{% include header.html %}
<body data-theme="{{ site.mode }}" class="notransition">
<script>
const body = document.body;
const data = body.getAttribute("data-theme");
const initTheme = (state) => {
if (state === "dark") {
body.setAttribute("data-theme", "dark");
} else if (state === "light") {
body.removeAttribute("data-theme");
} else {
localStorage.setItem("theme", data);
}
};
initTheme(localStorage.getItem("theme"));
setTimeout(() => body.classList.remove("notransition"), 75);
</script>
{% include navbar.html %}
<div class="wrapper">
<header class="header">
<h1 class="header-title center" itemprop="headline">{{ page.title | escape }}.</h1>
</header>
<main class="page-content" aria-label="Content">
{% include anchor_headings.html html=content anchorClass="anchor-head" beforeHeading=true h_min=4 h_max=4 %}
</main>
{% include footer.html %}
</div>
</body>
</html>

94
src/_layouts/post.html Normal file
View file

@ -0,0 +1,94 @@
---
layout: compress
---
<!DOCTYPE html>
<html lang="{{ page.lang | default: site.lang | default: "en" }}">
{% include header.html %}
<body data-theme="{{ site.mode }}" class="notransition">
<script>
const body = document.body;
const data = body.getAttribute("data-theme");
const initTheme = (state) => {
if (state === "dark") {
body.setAttribute("data-theme", "dark");
} else if (state === "light") {
body.removeAttribute("data-theme");
} else {
localStorage.setItem("theme", data);
}
};
initTheme(localStorage.getItem("theme"));
setTimeout(() => body.classList.remove("notransition"), 75);
</script>
{% include navbar.html %}
<div class="wrapper post">
<main class="page-content" aria-label="Content">
<article itemscope itemtype="https://schema.org/BlogPosting">
<header class="header">
{% if page.tags and page.tags != empty %}
<div class="tags">
{% assign tags = page.tags %}
<span itemprop="keywords">
{% for tag in tags %}
<a class="tag"
href="/tags/#{{tag | downcase | slugify}}">{{tag | upcase }}</a>{% unless forloop.last %},{% endunless %}
{% endfor %}
</span>
</div>
{% endif %}
<h1 class="header-title" itemprop="headline">{{ page.title | escape }}</h1>
{% if page.date %}
<div class="post-meta">
<time datetime="{{ page.date | date_to_xmlschema }}" itemprop="datePublished">
{{ page.date | date: "%b %d, %Y" }}
</time>
<span itemprop="author" itemscope itemtype="https://schema.org/Person">
<span itemprop="name">{{ site.author.name }}</span>
</span>
<time hidden datetime="{{ page.modified | date_to_xmlschema }}" itemprop="dateModified">
{{ page.date | date: "%b %d, %Y" }}
</time>
<span hidden itemprop="publisher" itemtype="Person">{{ site.author.name }}</span>
<span hidden itemprop="image">{{ page.image }}</span>
<span hidden itemprop="mainEntityOfPage">{{ page.excerpt }}</span>
</div>
{% endif %}
</header>
<div class="page-content" itemprop="articleBody">
{% include anchor_headings.html html=content anchorClass="anchor-head" beforeHeading=true h_min=1 h_max=4 %}
{% if page.tweet %}
<p>Comment on this article on
<a href="https://twitter.com/{{site.twitter}}/status/{{page.tweet}}">Twitter</a>.
</p>
{% endif %}
</div>
</article>
{% if page.comments %}
{% include comments.html %}
{% endif %}
</main>
{% if page.modified %}
<small class="post-updated-at">updated_at {{page.modified | date: "%d-%m-%Y"}}</small>
{% endif %}
{% if page.next or page.previous %}
{% include navigation.html %}
{% endif %}
{% include footer.html %}
</div>
</body>
</html>

View file

@ -0,0 +1,87 @@
---
layout: post
title: Using Ansible to alter Kernel Parameters
date: 2023-06-19 09:31:00 Europe/Amsterdam
categories: ansible grub linux
---
For months, I've had a peculiar problem with my laptop: once in a while, seemingly without reason, my laptop screen would freeze.
This only happened on my laptop screen, and not on an external monitor.
I had kind of learned to live with it as I couldn't find a solution online.
The only remedy I had was reloading my window manager, which would often unfreeze the screen.
Yesterday I tried Googling once more and I actually found [a thread](https://bbs.archlinux.org/viewtopic.php?id=246841) about it on the Arch Linux forums!
They talk about the same laptop model, the Lenovo ThinkPad x260, having the problem.
Fortunately, they also propose [a temporary fix](https://bbs.archlinux.org/viewtopic.php?pid=1888932#p1888932).
# Trying the Fix
Apparently, a problem with the Panel Self Refresh (PSR) feature of Intel iGPUs is the culprit.
According to the [Linux source code](https://github.com/torvalds/linux/blob/45a3e24f65e90a047bef86f927ebdc4c710edaa1/drivers/gpu/drm/i915/display/intel_psr.c#L42), PSR enables the display to go into a lower standby mode when the system is idle but the screen is in use.
These lower standby modes can reduce power usage of your device when idling.
This all seems useful, except when it makes your screen freeze!
The proposed fix disables the PSR feature entirely.
To do this, we need to change a parameter to the Intel Graphics Linux Kernel Module (LKM).
The LKM for Intel Graphics is called `i915`.
There are [multiple ways](https://wiki.archlinux.org/title/Kernel_parameters) to change kernel parameters, but I chose to edit my Grub configuration.
First, I wanted to test whether it actually works.
When booting into my Linux partition via Grub, you can press `e` to edit the Grub definition.
Somewhere there, you can find the `linux` command which specifies to boot Linux and how to do that.
I simply appended the option `i915.enable_psr=0` to this line.
After rebooting, I noticed my screen no longer freezes!
Success!
# Persisting the Fix
To make the change permanent, we need to permanently change Grub's configuration.
One way to do this, is by changing Grub's defaults in `/etc/default/grub`.
Namely, the `GRUB_CMDLINE_LINUX_DEFAULT` option specifies what options Grub should pass to the Linux kernel by default.
For me, this is a nice solution as the problem exists for both Linux OSes I have installed.
I changed this option to:
```ini
GRUB_CMDLINE_LINUX_DEFAULT="quiet splash i915.enable_psr=0"
```
Next, I wanted to automate this solution using Ansible.
This turned out to be quite easy, as the Grub configuration looks a bit like an ini file (maybe it is?):
```yaml
- name: Edit grub to disable Panel Self Refresh
become: true
ini_file:
path: /etc/default/grub
section: null
option: "GRUB_CMDLINE_LINUX_DEFAULT"
value: '"quiet splash i915.enable_psr=0"'
no_extra_spaces: true
notify: update grub
```
Lastly, I created the `notify` hook to update the Grub configuration:
```yaml
- name: update grub
become: true
command:
cmd: update-grub
```
# Update: Just use Nix
Lately, I have been learning a bit of NixOS with the intention of replacing my current setup.
Compared to Ansible, applying this fix is a breeze on NixOS:
```nix
{
boot.kernelParams = [ "i915.enable_psr=0" ];
}
```
That's it, yep.
# Conclusion
It turned out to be quite easy to change Linux kernel parameters using Ansible.
Maybe some kernel gurus have better ways to change parameters, but this works for me for now.
As a sidenote, I started reading a bit more about NixOS and realised that it can solve issues like these much more nicely than Ansible does.
I might replace my OS with NixOS some day, if I manage to rewrite my Ansible for it.

View file

@ -0,0 +1,60 @@
---
layout: post
title: Error Handling in Borgmatic
date: 2023-08-08 11:51:00 Europe/Amsterdam
categories: backup borg borgmatic
---
[BorgBackup](https://borgbackup.readthedocs.io/en/stable/) and [Borgmatic](https://torsion.org/borgmatic/) have been my go-to tools to create backups for my home lab since I started creating backups.
Using [Systemd Timers](https://wiki.archlinux.org/title/systemd/Timers), I regularly create a backup every night.
I also monitor successful execution of the backup process, in case some error occurs.
However, the way I set this up resulted in not receiving notifications.
Even though it boils down to RTFM, I'd like to explain my error and how to handle errors correctly.
I was using the `on_error` option to handle errors, like so:
```yaml
on_error:
- 'apprise --body="Error while performing backup" <URL> || true'
```
However, `on_error` does not handle errors from the execution of `before_everything` and `after_everything` hooks.
My solution to this was moving the error handling up to the Systemd service that calls Borgmatic.
This results in the following Systemd service:
```systemd
[Unit]
Description=Backup data using Borgmatic
# Added
OnFailure=backup-failure.service
[Service]
ExecStart=/usr/bin/borgmatic --config /root/backup.yml
Type=oneshot
```
This handles any error, be it from Borgmatic's hooks or itself.
The `backup-failure` service is very simple, and just calls Apprise to send a notification:
```systemd
[Unit]
Description=Send backup failure notification
[Service]
Type=oneshot
ExecStart=apprise --body="Failed to create backup!" <URL>
[Install]
WantedBy=multi-user.target
```
# The Aftermath (or what I learned)
Because the error handling and alerting weren't working properly, my backups didn't succeed for two weeks straight.
And, of course, you only notice your backups aren't working when you actually need them.
This is exactly what happened: my disk was full and a MariaDB database crashed as a result of that.
Actually, the whole database seemed to be corrupt and I find it worrying MariaDB does not seem to be very resilient to failures (in comparison a PostgreSQL database was able to recover automatically).
I then tried to recover the data using last night's backup, only to find out there was no such backup.
Fortunately, I had other means to recover the data so I incurred no data loss.
I already knew it is important to test backups, but I learned it is also important to test failures during backups!

View file

@ -0,0 +1,194 @@
---
layout: post
title: Sending Apprise Notifications from Concourse CI
date: 2023-06-14 23:39:00 Europe/Amsterdam
categories: concourse apprise
---
Recently, I deployed [Concourse CI](https://concourse-ci.org/) because I wanted to get my feet wet with a CI/CD pipeline.
However, I had a practical use case lying around for a long time: automatically compiling my static website and deploying it to my docker Swarm.
This took some time getting right, but the result works like a charm ([source code](https://git.kun.is/pim/static)).
It's comforting to know I don't have to lift a finger and my website is automatically deployed.
However, I would still like to receive some indication of what's happening.
And what's a better way to do that, than using my [Apprise](https://github.com/caronc/apprise) service to keep me up to date.
There's a little snag though: I could not find any Concourse resource that does this.
That's when I decided to just create it myself.
# The Plagiarism Hunt
As any good computer person, I am lazy.
I'd rather just copy someone's work, so that's what I did.
I found [this](https://github.com/mockersf/concourse-slack-notifier) GitHub repository that does the same thing but for Slack notifications.
For some reason it's archived, but it seemed like it should work.
I actually noticed lots of repositories for Concourse resource types are archived, so not sure what's going on there.
# Getting to know Concourse
Let's first understand what we need to do to reach our end goal of sending Apprise notifications from Concourse.
A Concourse pipeline takes some inputs, performs some operations on them which result in some outputs.
These inputs and outputs are called _resources_ in Concourse.
For example, a Git repository could be a resource.
Each resource is an instance of a _resource type_.
A resource type therefore is simply a blueprint that can create multiple resources.
To continue the example, a resource type could be "Git repository".
We therefore need to create our own resource type that can send Apprise notifications.
A resource type is simply a container that includes three scripts:
- `check`: check for a new version of a resource
- `in`: retrieve a version of the resource
- `out`: create a version of the resource
As Apprise notifications are basically fire-and-forget, we will only implement the `out` script.
# Writing the `out` script
The whole script can be found [here](https://git.kun.is/pim/concourse-apprise-notifier/src/branch/master/out), but I will explain the most important bits of it.
Note that I only use Apprise's persistent storage solution, and not its stateless solution.
Concourse provides us with the working directory, which we `cd` to:
```bash
cd "${1}"
```
We create a timestamp, formatted in JSON, which we will use for the resource's new version later.
Concourse requires us to set a version for the resource, but since Apprise notifications don't have that, we use the timestamp:
```bash
timestamp="$(jq -n "{version:{timestamp:\"$(date +%s)\"}}")"
```
First some black magic Bash to redirect file descriptors.
Not sure why this is needed, but I copied it anyways.
After that, we create a temporary file holding resource's parameters.
```bash
exec 3>&1
exec 1>&2
payload=$(mktemp /tmp/resource-in.XXXXXX)
cat > "${payload}" <&0
```
We then extract the individual parameters.
The `source` key contains values how the resource type was specified, while the `params` key specifies parameters for this specific resource.
```bash
apprise_host="$(jq -r '.source.host' < "${payload}")"
apprise_key="$(jq -r '.source.key' < "${payload}")"
alert_body="$(jq -r '.params.body' < "${payload}")"
alert_title="$(jq -r '.params.title // null' < "${payload}")"
alert_type="$(jq -r '.params.type // null' < "${payload}")"
alert_tag="$(jq -r '.params.tag // null' < "${payload}")"
alert_format="$(jq -r '.params.format // null' < "${payload}")"
```
We then format the different parameters using JSON:
```bash
alert_body="$(eval "printf \"${alert_body}\"" | jq -R -s .)"
[ "${alert_title}" != "null" ] && alert_title="$(eval "printf \"${alert_title}\"" | jq -R -s .)"
[ "${alert_type}" != "null" ] && alert_type="$(eval "printf \"${alert_type}\"" | jq -R -s .)"
[ "${alert_tag}" != "null" ] && alert_tag="$(eval "printf \"${alert_tag}\"" | jq -R -s .)"
[ "${alert_format}" != "null" ] && alert_format="$(eval "printf \"${alert_format}\"" | jq -R -s .)"
```
Next, from the individual parameters we construct the final JSON message body we send to the Apprise endpoint.
```bash
body="$(cat <<EOF
{
"body": ${alert_body},
"title": ${alert_title},
"type": ${alert_type},
"tag": ${alert_tag},
"format": ${alert_format}
}
EOF
)"
```
Before sending it just yet, we compact the JSON and remove any values that are `null`:
```bash
compact_body="$(echo "${body}" | jq -c '.')"
echo "$compact_body" | jq 'del(..|nulls)' > /tmp/compact_body.json
```
Here is the most important line, where we send the payload to the Apprise endpoint.
It's quite straight-forward.
```bash
curl -v -X POST -T /tmp/compact_body.json -H "Content-Type: application/json" "${apprise_host}/notify/${apprise_key}"
```
Finally, we print the timestamp (fake version) in order to appease the Concourse gods.
```bash
echo "${timestamp}" >&3
```
# Building the Container
As said earlier, to actually use this script, we need to add it to an image.
I won't be explaining this whole process, but the source can be found [here](https://git.kun.is/pim/concourse-apprise-notifier/src/branch/master/pipeline.yml).
The most important take-aways are these:
- Use `concourse/oci-build-task` to build an image from a Dockerfile.
- Use `registry-image` to push the image to an image registry.
# Using the Resource Type
Using our newly created resource type is surprisingly simple.
I use it for the blog you are reading right now and the pipeline definition can be found [here](https://git.kun.is/pim/static/src/branch/main/pipeline.yml).
Here we specify the resource type in a Concourse pipeline:
```yaml
resource_types:
- name: apprise
type: registry-image
source:
repository: git.kun.is/pim/concourse-apprise-notifier
tag: "1.1.1"
```
We simply have to tell Concourse where to find the image, and which tag we want.
Next, we instantiate the resource type to create a resource:
```yaml
resources:
- name: apprise-notification
type: apprise
source:
host: https://apprise.kun.is:444
key: concourse
icon: bell
```
We simply specify the host to send Apprise notifications to.
Yeah, I even gave it a little bell because it's cute.
All that's left to do, is actually send the notification.
Let's see how that is done:
```yaml
- name: deploy-static-website
plan:
- task: deploy-site
config: ...
on_success:
put: apprise-notification
params:
title: "Static website deployed!"
body: "New version: $(cat version/version)"
no_get: true
```
As can be seen, the Apprise notification can be triggered when a task is executed successfully.
We do this using the `put` command, which executes the `out` script under the hood.
We set the notification's title and body, and send it!
The result is seen below in my Ntfy app, which Apprise forwards the message to:
![picture showing my Ntfy app with the Apprise notification](ntfy.png)
And to finish this off, here is what it looks like in the Concourse web UI:
![the concourse web gui showing the pipeline of my static website including the the apprise notification resources](pipeline.png)
# Conclusion
Concourse's way of representing everything as an image/container is really interesting in my opinion.
A resource type is quite easily implemented as well, although Bash might not be the optimal way to do this.
I've seen some people implement it in Rust, which might be a good excuse to finally learn that language :)
Apart from Apprise notifications, I'm planning on creating a resource type to deploy to a Docker swarm eventually.
This seems like a lot harder than simply sending notifications though.

Binary file not shown.

After

Width:  |  Height:  |  Size: 202 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 36 KiB

View file

@ -0,0 +1,74 @@
---
layout: post
title: Monitoring Correct Memory Usage in Fluent Bit
date: 2023-08-09 16:19:00 Europe/Amsterdam
categories: fluentd fluentbit memory
---
Previously, I have used [Prometheus' node_exporter](https://github.com/prometheus/node_exporter) to monitor the memory usage of my servers.
However, I am currently in the process of moving away from Prometheus to a new monitoring stack.
While I understand the advantages, I felt like Prometheus' pull architecture does not scale nicely.
Every time I spin up a new machine, I would have to centrally change Prometheus' configuration in order for it to query the new server.
In order to collect metrics from my servers, I am now using [Fluent Bit](https://fluentbit.io/).
I love Fluent Bit's way of configuration which I can easily express as code and automate, its focus on efficiency and being vendor agnostic.
However, I have stumbled upon one, in my opinion, big issue with Fluent Bit: its `mem` plugin to monitor memory usage is _completely_ useless.
In this post I will go over the problem and my temporary solution.
# The Problem with Fluent Bit's `mem` Plugin
As can be seen in [the documentation](https://docs.fluentbit.io/manual/pipeline/inputs/memory-metrics), Fluent Bit's `mem` input plugin exposes a few metrics regarding memory usage which should be self-explaining: `Mem.total`, `Mem.used`, `Mem.free`, `Swap.total`, `Swap.used` and `Swap.free`.
The problem is that `Mem.used` and `Mem.free` do not accurately reflect the machine's actual memory usage.
This is because these metrics include caches and buffers, which can be reclaimed by other processes if needed.
Most tools reporting memory usage therefore include an additional metric that specifies the memory _available_ on the system.
For example, the command `free -m` reports the following data on my laptop:
```text
total used free shared buff/cache available
Mem: 15864 3728 7334 518 5647 12136
Swap: 2383 663 1720
```
Notice that the `available` memory is more than `free` memory.
While the issue is known (see [this](https://github.com/fluent/fluent-bit/pull/3092) and [this](https://github.com/fluent/fluent-bit/pull/5237) link), it is unfortunately not yet fixed.
# A Temporary Solution
The issues I linked previously provide stand-alone plugins that fix the problem, which will hopefully be merged in the official project at some point.
However, I didn't want to install another plugin so I used Fluent Bit's `exec` input plugin and the `free` Linux command to query memory usage like so:
```conf
[INPUT]
Name exec
Tag memory
Command free -m | tail -2 | tr '\n' ' '
Interval_Sec 1
```
To interpret the command's output, I created the following filter:
```conf
[FILTER]
Name parser
Match memory
Key_Name exec
Parser free
```
Lastly, I created the following parser (warning: regex shitcode incoming):
```conf
[PARSER]
Name free
Format regex
Regex ^Mem:\s+(?<mem_total>\d+)\s+(?<mem_used>\d+)\s+(?<mem_free>\d+)\s+(?<mem_shared>\d+)\s+(?<mem_buff_cache>\d+)\s+(?<mem_available>\d+) Swap:\s+(?<swap_total>\d+)\s+(?<swap_used>\d+)\s+(?<swap_free>\d+)
Types mem_total:integer mem_used:integer mem_free:integer mem_shared:integer mem_buff_cache:integer mem_available:integer swap_total:integer swap_used:integer swap_free:integer
```
With this configuration, you can use the `mem_available` metric to get accurate memory usage in Fluent Bit.
# Conclusion
Let's hope Fluent Bit's `mem` input plugin is improved upon soon so this hacky solution is not needed.
I also intend to document my new monitoring pipeline, which at the moment consists of:
- Fluent Bit
- Fluentd
- Elasticsearch
- Grafana

View file

@ -0,0 +1,45 @@
---
layout: post
title: Hashicorp's License Change and my Home Lab - Update
date: 2023-08-17 18:15:00 Europe/Amsterdam
categories: hashicorp terraform vault nomad
---
_See the [Update](#update) at the end of the article._
Already a week ago, Hashicorp [announced](https://www.hashicorp.com/blog/hashicorp-adopts-business-source-license) it would change the license on almost all its projects.
Unlike [their previous license](https://github.com/hashicorp/terraform/commit/ab411a1952f5b28e6c4bd73071194761da36a83f), which was the Mozilla Public License 2.0, their new license is no longer truly open source.
It is called the Business Source License™ and restricts use of their software for competitors.
In their own words:
> Vendors who provide competitive services built on our community products will no longer be able to incorporate future releases, bug fixes, or security patches contributed to our products.
I found [a great article](https://meshedinsights.com/2021/02/02/rights-ratchet/) by MeshedInsights that names this behaviour the "rights ratchet model".
They define a script start-ups use to garner the interest of open source enthusiasts but eventually turn their back on them for profit.
The reason why Hashicorp can do this, is because contributors signed a copyright license agreement (CLA).
This agreement transfers the copyright of contributors' code to Hashicorp, allowing them to change the license if they want to.
I find this action really regrettable because I like their products.
This sort of action was also why I wanted to avoid using an Elastic stack, which also had their [license changed](https://www.elastic.co/pricing/faq/licensing).[^elastic]
These companies do not respect their contributors and the software stack beneath their product, which is actually open source (Golang, Linux, etc.).
# Impact on my Home Lab
I am using Terraform in my home lab to manage several important things:
- Libvirt virtual machines
- PowerDNS records
- Elasticsearch configuration
With Hashicorp's anti open source move, I intend to move away from Terraform in the future.
While I will not use Hashicorp's products for new personal projects, I will leave my current setup as-is for some time because there is no real need to quickly migrate.
I might also investigate some of Terraform's competitors, like Pulumi.
Hopefully there is a project that respects open source which I can use in the future.
# Update
A promising fork of Terraform has been announced called [OpenTF](https://opentf.org/announcement).
They intend to take part of the Cloud Native Computing Foundation, which I think is a good effort because Terraform is so important for modern cloud infrastructures.
# Footnotes
[^elastic]: While I am still using Elasticsearch, I don't use the rest of the Elastic stack in order to prevent a vendor lock-in.

View file

@ -0,0 +1,184 @@
---
layout: post
title: Homebrew SSH Certificate Authority for the Terraform Libvirt Provider
date: 2023-05-23 11:14:00 Europe/Amsterdam
categories: ssh terraform ansible
---
Ever SSH'ed into a freshly installed server and gotten the following annoying message?
```text
The authenticity of host 'host.tld (1.2.3.4)' can't be established.
ED25519 key fingerprint is SHA256:eUXGdm1YdsMAS7vkdx6dOJdOGHdem5gQp4tadCfdLB8.
Are you sure you want to continue connecting (yes/no)?
```
Or even more annoying:
```text
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED! @
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
IT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY!
Someone could be eavesdropping on you right now (man-in-the-middle attack)!
It is also possible that a host key has just been changed.
The fingerprint for the ED25519 key sent by the remote host is
SHA256:eUXGdm1YdsMAS7vkdx6dOJdOGHdem5gQp4tadCfdLB8.
Please contact your system administrator.
Add correct host key in /home/user/.ssh/known_hosts to get rid of this message.
Offending ED25519 key in /home/user/.ssh/known_hosts:3
remove with:
ssh-keygen -f "/etc/ssh/ssh_known_hosts" -R "1.2.3.4"
ED25519 host key for 1.2.3.4 has changed and you have requested strict checking.
Host key verification failed.
```
Could it be that the programmers at OpenSSH simply like to annoy us with these confusing messages?
Maybe, but these warnings also serve as a way to notify users of a potential Man-in-the-Middle (MITM) attack.
I won't go into the details of this problem, but I refer you to [this excellent blog post](https://blog.g3rt.nl/ssh-host-key-validation-strict-yet-user-friendly.html).
Instead, I would like to talk about ways to solve these annoying warnings.
One obvious solution is simply to add each host to your `known_hosts` file.
This works okay when managing a handful of servers, but becomes unbearable when managing many servers.
In my case, I wanted to quickly spin up virtual machines using Duncan Mac-Vicar's [Terraform Libvirt provider](https://registry.terraform.io/providers/dmacvicar/libvirt/latest/docs), without having to accept their host key before connecting.
The solution? Issuing SSH host certificates using an SSH certificate authority.
## SSH Certificate Authorities vs. the Web
The idea of an SSH certificate authority (CA) is quite easy to grasp, if you understand the web's Public Key Infrastructure (PKI).
Just like with the web, a trusted party can issue certificates that are offered when establishing a connection.
The idea is, just by trusting the trusted party, you trust every certificate they issue.
In the case of the web's PKI, this trusted party is bundled and trusted by [your browser](https://wiki.mozilla.org/CA) or operating system.
However, in the case of SSH, the trusted party is you! (Okay you can also trust your own web certificate authority)
With this great power, comes great responsibility which we will abuse heavily in this article.
## SSH Certificate Authority for Terraform
So, let's start with a plan.
I want to spawn virtual machines with Terraform which are automatically provisioned with an SSH host certificate issued by my CA.
This CA will be another host on my private network, issuing certificates over SSH.
### Fetching the SSH Host Certificate
First we generate an SSH key pair in Terraform.
Below is the code for that:
```terraform
resource "tls_private_key" "debian" {
algorithm = "ED25519"
}
data "tls_public_key" "debian" {
private_key_pem = tls_private_key.debian.private_key_pem
}
```
Now that we have an SSH key pair, we need to somehow make Terraform communicate this with the CA.
Lucky for us, there is a way for Terraform to execute an arbitrary command with the `external` data feature.
We call this script below:
```terraform
data "external" "cert" {
program = ["bash", "${path.module}/get_cert.sh"]
query = {
pubkey = trimspace(data.tls_public_key.debian.public_key_openssh)
host = var.name
cahost = var.ca_host
cascript = var.ca_script
cakey = var.ca_key
}
}
```
These query parameters will end up in the script's stdin in JSON format.
We can then read these parameters, and send them to the CA over SSH.
The result must as well be in JSON format.
```bash
#!/bin/bash
set -euo pipefail
IFS=$'\n\t'
# Read the query parameters
eval "$(jq -r '@sh "PUBKEY=\(.pubkey) HOST=\(.host) CAHOST=\(.cahost) CASCRIPT=\(.cascript) CAKEY=\(.cakey)"')"
# Fetch certificate from the CA
# Warning: extremely ugly code that I am too lazy to fix
CERT=$(ssh -o ConnectTimeout=3 -o ConnectionAttempts=1 root@$CAHOST '"'"$CASCRIPT"'" host "'"$CAKEY"'" "'"$PUBKEY"'" "'"$HOST"'".dmz')
jq -n --arg cert "$CERT" '{"cert":$cert}'
```
We see that a script is called on the remote host that issues the certificate.
This is just a simple wrapper around `ssh-keygen`, which you can see below.
```bash
#!/bin/bash
set -euo pipefail
IFS=$'\n\t'
host() {
CAKEY="$2"
PUBKEY="$3"
HOST="$4"
echo "$PUBKEY" > /root/ca/"$HOST".pub
ssh-keygen -h -s /root/ca/keys/"$CAKEY" -I "$HOST" -n "$HOST" /root/ca/"$HOST".pub
cat /root/ca/"$HOST"-cert.pub
rm /root/ca/"$HOST"*.pub
}
"$1" "$@"
```
### Appeasing the Terraform Gods
So nice, we can fetch the SSH host certificate from the CA.
We should just be able to use it right?
We can, but it brings a big annoyance with it: Terraform will fetch a new certificate every time it is run.
This is because the `external` feature of Terraform is a data source.
If we were to use this data source for a Terraform resource, it would need to be updated every time we run Terraform.
I have not been able to find a way to avoid fetching the certificate every time, except for writing my own resource provider which I'd rather not.
I have, however, found a way to hack around the issue.
The idea is as follows: we can use Terraform's `ignore_changes` to, well, ignore any changes of a resource.
Unfortunately, we cannot use this for a `data` source, so we must create a glue `null_resource` that supports `ignore_changes`.
This is shown in the code snippet below.
We use the `triggers` property simply to copy the certificate in; we don't use it for its original purpose.
```terraform
resource "null_resource" "cert" {
triggers = {
cert = data.external.cert.result["cert"]
}
lifecycle {
ignore_changes = [
triggers
]
}
}
```
And voilà, we can now use `null_resource.cert.triggers["cert"]` as our certificate, that won't trigger replacements in Terraform.
### Setting the Host Certificate with Cloud-Init
Terraform's Libvirt provider has native support for Cloud-Init, which is very handy.
We can give the host certificate directly to Cloud-Init and place it on the virtual machine.
Inside the Cloud-Init configuration, we can set the `ssh_keys` property to do this:
```yml
ssh_keys:
ed25519_private: |
${indent(4, private_key)}
ed25519_certificate: "${host_cert}"
```
I hardcoded this to ED25519 keys, because this is all I use.
This works perfectly, and I never have to accept host certificates from virtual machines again.
### Caveats
A sharp eye might have noticed the lifecycle of these host certificates is severely lacking.
Namely, the deployed host certificates have no expiration date, nor is there a revocation function.
There are ways to implement these, but for my home lab I did not deem this necessary at this point.
In a more professional environment, I would suggest using [Hashicorp's Vault](https://www.vaultproject.io/).
This project did teach me about the limits and flexibility of Terraform, so all in all a success!
All code can be found on the git repository [here](https://git.kun.is/home/tf-modules/src/branch/master/debian).

View file

@ -0,0 +1,283 @@
---
layout: post
title: Home Lab Infrastructure Snapshot August 2023
date: 2023-08-27 22:23:00 Europe/Amsterdam
categories: infrastructure homelab
---
I have been meaning to write about the current state of my home lab infrastructure for a while now.
Now that the most important parts are quite stable, I think the opportunity is ripe.
I expect this post to get quite long, so I might have to leave out some details along the way.
This post will be a starting point for future infrastructure snapshots which I can hopefully put out periodically.
That is, if there is enough worth talking about.
Keep an eye out for the <i class="fa-solid fa-code-branch"></i> icon, which links to the source code and configuration of anything mentioned.
Oh yeah, did I mention everything I do is open source?
# Networking and Infrastructure Overview
## Hardware and Operating Systems
Let's start with the basics: what kind of hardware do I use for my home lab?
The most important servers are my three [Gigabyte Brix GB-BLCE-4105](https://www.gigabyte.com/Mini-PcBarebone/GB-BLCE-4105-rev-10).
Two of them have 16 GB of memory, and one 8 GB.
I named these servers as follows:
- **Atlas**: because this server was going to "lift" a lot of virtual machines.
- **Lewis**: we started out with a "Max" server named after the Formula 1 driver Max Verstappen, but it kind of became an unmanageable behemoth without infrastructure-as-code. Our second server we subsequently named Lewis after his colleague Lewis Hamilton. Note: people around me vetoed these names and I am no F1 fan!
- **Jefke**: it's a funny Belgian name. That's all.
Here is a picture of them sitting in their cosy closet:
![A picture of my servers.](servers.jpeg)
If you look to the left, you will also see a Raspberry Pi 4B.
I use this Pi to do some rudimentary monitoring whether servers and services are running.
More on this in the relevant section below.
The Pi is called **Iris** because it's a messenger for the other servers.
I used to run Ubuntu on these systems, but I have since migrated away to Debian.
The main reasons were Canonical [putting advertisements in my terminal](https://askubuntu.com/questions/1434512/how-to-get-rid-of-ubuntu-pro-advertisement-when-updating-apt) and pushing Snap which has a [proprietary backend](https://hackaday.com/2020/06/24/whats-the-deal-with-snap-packages/).
Two of my servers run the newly released Debian Bookworm, while one still runs Debian Bullseye.
## Networking
For networking, I wanted hypervisors and virtual machines separated by VLANs for security reasons.
The following picture shows a simplified view of the VLANs present in my home lab:
![Picture showing the VLANS in my home lab.](vlans.png)
All virtual machines are connected to a virtual bridge which tags network traffic with the DMZ VLAN.
The hypervisors VLAN is used for traffic to and from the hypervisors.
Devices from the hypervisors VLAN are allowed to connect to devices in the DMZ, but not vice versa.
The hypervisors are connected to a switch using a trunk link, which allows both DMZ and hypervisors traffic.
I realised the above design using ifupdown.
Below is the configuration for each hypervisor, which creates a new `enp3s0.30` interface with all DMZ traffic from the `enp3s0` interface [<i class="fa-solid fa-code-branch"></i>](https://git.kun.is/home/hypervisors/src/commit/71b96d462116e4160b6467533fc476f3deb9c306/ansible/dmz.conf.j2).
```text
auto enp3s0.30
iface enp3s0.30 inet manual
iface enp3s0.30 inet6 auto
accept_ra 0
dhcp 0
request_prefix 0
privext 0
pre-up sysctl -w net/ipv6/conf/enp3s0.30/disable_ipv6=1
```
This configuration seems more complex than it actually is.
Most of it is to make sure the interface is not assigned an IPv4/6 address on the hypervisor host.
The magic `.30` at the end of the interface name makes this interface tagged with VLAN ID 30 (DMZ for me).
Now that we have an interface tagged for the DMZ VLAN, we can create a bridge where future virtual machines can connect to:
```text
auto dmzbr
iface dmzbr inet manual
bridge_ports enp3s0.30
bridge_stp off
iface dmzbr inet6 auto
accept_ra 0
dhcp 0
request_prefix 0
privext 0
pre-up sysctl -w net/ipv6/conf/dmzbr/disable_ipv6=1
```
Just like the previous config, this is quite bloated because I don't want the interface to be assigned an IP address on the host.
Most importantly, the `bridge_ports enp3s0.30` line here makes this interface a virtual bridge for the `enp3s0.30` interface.
And voilà, we now have a virtual bridge on each machine, where only DMZ traffic will flow.
Here I verify whether this configuration works:
<details>
<summary>Show</summary>
We can see that the two virtual interfaces are created, and are only assigned a MAC address and not a IP address:
```text
root@atlas:~# ip a show enp3s0.30
4: enp3s0.30@enp3s0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master dmzbr state UP group default qlen 1000
link/ether d8:5e:d3:4c:70:38 brd ff:ff:ff:ff:ff:ff
5: dmzbr: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
link/ether 4e:f7:1f:0f:ad:17 brd ff:ff:ff:ff:ff:ff
```
Pinging a VM from a hypervisor works:
```text
root@atlas:~# ping -c1 maestro.dmz
PING maestro.dmz (192.168.30.8) 56(84) bytes of data.
64 bytes from 192.168.30.8 (192.168.30.8): icmp_seq=1 ttl=63 time=0.457 ms
```
Pinging a hypervisor from a VM does not work:
```text
root@maestro:~# ping -c1 atlas.hyp
PING atlas.hyp (192.168.40.2) 56(84) bytes of data.
--- atlas.hyp ping statistics ---
1 packets transmitted, 0 received, 100% packet loss, time 0ms
```
</details>
## DNS and DHCP
Now that we have a working DMZ network, let's build on it to get DNS and DHCP working.
This will enable new virtual machines to obtain a static or dynamic IP address and register their host in DNS.
This has actually been incredibly annoying due to our friend [Network address translation (NAT)](https://en.wikipedia.org/wiki/Network_address_translation?useskin=vector).
<details>
<summary>NAT recap</summary>
Network address translation (NAT) is a function of a router which allows multiple hosts to share a single IP address.
This is needed for IPv4, because IPv4 addresses are scarce and usually one household is only assigned a single IPv4 address.
This is one of the problems IPv6 attempts to solve (mainly by having so many IP addresses that they should never run out).
To solve the problem for IPv4, each host in a network is assigned a private IPv4 address, which can be reused for every network.
Then, the router must perform address translation.
It does this by keeping track of ports opened by hosts in its private network.
If a packet from the internet arrives at the router for such a port, it forwards this packet to the correct host.
</details>
I would like to host my own DNS on a virtual machine (called **hermes**, more on VMs later) in the DMZ network.
This basically gives two problems:
1. The upstream DNS server will refer to the public internet-accessible IP address of our DNS server.
This IP-address has no meaning inside the private network due to NAT and the router will reject the packet.
2. Our DNS resolves hosts to their public internet-accessible IP address.
This is similar to the previous problem as the public IP address has no meaning.
The first problem can be remediated by overriding the location of the DNS server for hosts inside the DMZ network.
This can be achieved on my router, which uses Unbound as its recursive DNS server:
![Unbound overides for kun.is and dmz domains.](unbound_overrides.png)
Any DNS requests to Unbound to domains in either `dmz` or `kun.is` will now be forwarded to `192.168.30.7` (port 5353).
This is the virtual machine hosting my DNS.
The second problem can be solved at the DNS server.
We need to do some magic overriding, which [dnsmasq](https://dnsmasq.org/docs/dnsmasq-man.html) is perfect for [<i class="fa-solid fa-code-branch"></i>](https://git.kun.is/home/hermes/src/commit/488024a7725f2325b8992e7a386b4630023f1b52/ansible/roles/dnsmasq/files/dnsmasq.conf):
```conf
alias=84.245.14.149,192.168.30.8
server=/kun.is/192.168.30.7
```
This always overrides the public IPv4 address to the private one.
It also overrides the DNS server for `kun.is` to `192.168.30.7`.
Finally, behind the dnsmasq server, I run [Powerdns](https://www.powerdns.com/) as authoritative DNS server [<i class="fa-solid fa-code-branch"></i>](https://git.kun.is/home/hermes/src/branch/master/ansible/roles/powerdns).
I like this DNS server because I can manage it with Terraform [<i class="fa-solid fa-code-branch"></i>](https://git.kun.is/home/hermes/src/commit/488024a7725f2325b8992e7a386b4630023f1b52/terraform/dns/kun_is.tf).
Here is a small diagram showing my setup (my networking teacher would probably kill me for this):
![Shitty diagram showing my DNS setup.](nat.png)
# Virtualization
https://github.com/containrrr/shepherd
Now that we have laid out the basic networking, let's talk virtualization.
Each of my servers are configured to run KVM virtual machines, orchestrated using Libvirt.
Configuration of the physical hypervisor servers, including KVM/Libvirt is done using Ansible.
The VMs are spun up using Terraform and the [dmacvicar/libvirt](https://registry.terraform.io/providers/dmacvicar/libvirt/latest/docs) Terraform provider.
This all isn't too exciting, except that I created a Terraform module that abstracts the Terraform Libvirt provider for my specific scenario [<i class="fa-solid fa-code-branch"></i>](https://git.kun.is/home/tf-modules/src/commit/e77d62f4a2a0c3847ffef4434c50a0f40f1fa794/debian/main.tf):
```terraform
module "maestro" {
source = "git::https://git.kun.is/home/tf-modules.git//debian"
name = "maestro"
domain_name = "tf-maestro"
memory = 10240
mac = "CA:FE:C0:FF:EE:08"
}
```
This automatically creates a Debian virtual machine with the properties specified.
It also sets up certificate-based SSH authentication which I talked about [before]({% post_url homebrew-ssh-ca/2023-05-23-homebrew-ssh-ca %}).
# Clustering
With virtualization explained, let's move up one level further.
Each of my three physical servers hosts a virtual machine running Docker, which together form a Docker Swarm.
I use Traefik as a reverse proxy which routes requests to the correct container.
All data is hosted on a single machine and made available to containers using NFS.
While this might not be very secure (NFS traffic is unencrypted and lacks proper authentication), it is quite fast.
As of today, I host the following services on my Docker Swarm [<i class="fa-solid fa-code-branch"></i>](https://git.kun.is/home/shoarma):
- [Forgejo](https://forgejo.org/) as Git server
- [FreshRSS](https://www.freshrss.org/) as RSS aggregator
- [Hedgedoc](https://hedgedoc.org/) as Markdown note-taking
- [Inbucket](https://inbucket.org/) for disposable email
- [Cyberchef](https://cyberchef.org/) for the lulz
- [Kitchenowl](https://kitchenowl.org/) for grocery lists
- [Mastodon](https://joinmastodon.org/) for microblogging
- A monitoring stack (read more below)
- [Nextcloud](https://nextcloud.com/) for cloud storage
- [Pihole](https://pi-hole.net/) to block advertisements
- [Radicale](https://radicale.org/v3.html) for calendar and contacts sync
- [Seafile](https://www.seafile.com/en/home/) for cloud storage and sync
- [Shepherd](https://github.com/containrrr/shepherd) for automatic container updates
- [Nginx](https://nginx.org/en/) hosting static content (like this page!)
- [Docker Swarm dashboard](https://hub.docker.com/r/charypar/swarm-dashboard/#!)
- [Syncthing](https://syncthing.net/) for file sync
# CI / CD
For CI / CD, I run [Concourse CI](https://concourse-ci.org/) in a separate VM.
This is needed, because Concourse heavily uses containers to create reproducible builds.
Although I should probably use it for more, I currently use my Concourse for three pipelines:
- A pipeline to build this static website and create a container image of it.
The image is then uploaded to the image registry of my Forgejo instance.
I love it when I can use stuff I previously built :)
The pipeline finally deploys this new image to the Docker Swarm [<i class="fa-solid fa-code-branch"></i>](https://git.kun.is/pim/static/src/commit/eee4f0c70af6f2a49fabb730df761baa6475db22/pipeline.yml).
- A pipeline to create a Concourse resource that sends Apprise alerts (Concourse-ception?) [<i class="fa-solid fa-code-branch"></i>](https://git.kun.is/pim/concourse-apprise-notifier/src/commit/b5d4413c1cd432bc856c45ec497a358aca1b8b21/pipeline.yml)
- A pipeline to build a custom Fluentd image with plugins installed [<i class="fa-solid fa-code-branch"></i>](https://git.kun.is/pim/fluentd)
# Backups
To create backups, I use [Borg](https://www.borgbackup.org/).
As I keep all data on one machine, this backup process is quite simple.
In fact, all this data is stored in a single Libvirt volume.
To configure Borg with a simple declarative script, I use [Borgmatic](https://torsion.org/borgmatic/).
In order to back up the data inside the Libvirt volume, I create a snapshot to a file.
Then I can mount this snapshot in my file system.
The files can then be backed up while the system is still running.
It is also possible to simply back up the Libvirt image, but this takes more time and storage [<i class="fa-solid fa-code-branch"></i>](https://git.kun.is/home/hypervisors/src/commit/71b96d462116e4160b6467533fc476f3deb9c306/ansible/roles/borg/backup.yml.j2).
# Monitoring and Alerting
The last topic I would like to talk about is monitoring and alerting.
This is something I'm still actively improving and only just set up properly.
## Alerting
For alerting, I wanted something that runs entirely on my own infrastructure.
I settled for Apprise + Ntfy.
[Apprise](https://github.com/caronc/apprise) is a server that is able to send notifications to dozens of services.
For application developers, it is thus only necessary to implement the Apprise API to gain access to all these services.
The Apprise API itself is also very simple.
By using Apprise, I can also easily switch to another notification service later.
[Ntfy](https://ntfy.sh/) is free software made for mobile push notifications.
I use this alerting system in quite a lot of places in my infrastructure, for example when creating backups.
## Uptime Monitoring
The first monitoring setup I created was using [Uptime Kuma](https://github.com/louislam/uptime-kuma).
Uptime Kuma periodically pings a service to see whether it is still running.
You can do a literal ping, test HTTP response codes, check database connectivity and much more.
I use it to check whether my services and VMs are online.
And the best part is, Uptime Kuma supports Apprise so I get push notifications on my phone whenever something goes down!
## Metrics and Log Monitoring
A new monitoring system I am still in the process of deploying is focused on metrics and logs.
I plan on creating a separate blog post about this, so keep an eye out on that (for example using RSS :)).
Safe to say, it is no basic ELK stack!
# Conclusion
That's it for now!
Hopefully I inspired someone to build something... or how not to :)

Binary file not shown.

After

Width:  |  Height:  |  Size: 56 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 490 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 31 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 42 KiB

View file

@ -0,0 +1,17 @@
---
layout: post
title: "It's alive!"
date: 2024-04-21 10:02:00 Europe/Amsterdam
categories: jekyll blog
---
Finally, after several months this website is up and running again!
My homelab has completely changed, but the reason it initially went offline is my failing CI installation.
I was using [Concourse CI](https://concourse-ci.org/) which I was initially interested in due to the reproducible nature of its builds using containers.
However, for some reason pipelines were sporadically getting stuck when I rebooted the virtual machine they were running on.
The fix was very annoying: I had to re-create the pipelines manually (which feels very backwards for a CI/CD system!).
Additionally, my virtual machine setup back then was also quite fragile and I decided to get rid of that as well.
I have learned that having an escape hatch to deploy something is probably a good idea 😅
Expect a new overview of my homelab soon, in the same vein as [this post from last year]({% post_url infrastructure-snapshot/2023-08-13-infrastructure-snapshot %})!

View file

@ -0,0 +1,61 @@
---
layout: post
title: My Experiences with virtio-9p
date: 2023-05-31 14:18:00 Europe/Amsterdam
categories: libvirt virtio 9p
---
When I was scaling up my home lab, I started thinking more about data management.
I hadn't (and still haven't) set up any form of network storage.
I have, however, set up a backup mechanism using [Borg](https://borgbackup.readthedocs.io/en/stable/).
Still, I want to operate lots of virtual machines, and backing up each one of them separately seemed excessive.
So I started thinking, what if I just let the host machines back up the data?
After all, the amount of physical hosts I have in my home lab is unlikely to increase drastically.
# The Use Case for Sharing Directories
I started working out this idea further.
Without network storage, I needed a way for guest VMs to access the host's disks.
Here there are two possibilities, either expose some block device or a file system.
Creating a whole virtual disk for just the data of some VMs seemed wasteful, and from my experiences also increases backup times dramatically.
I therefore searched for a way to mount a directory from the host OS on the guest VM.
This is when I stumbled upon [this blog](https://rabexc.org/posts/p9-setup-in-libvirt) post talking about sharing directories with virtual machines.
# Sharing Directories with virtio-9p
virtio-9p is a way to map a directory on the host OS to a special device on the virtual machine.
In `virt-manager`, it looks like the following:
![picture showing virt-manager configuration to map a directory to a VM](virt-manager.png)
Under the hood, virtio-9p uses the 9pnet protocol.
Originally developed at Bell Labs, support for this is available in all modern Linux kernels.
If you share a directory with a VM, you can then mount it.
Below is an extract of my `/etc/fstab` to automatically mount the directory:
```text
data /mnt/data 9p trans=virtio,rw 0 0
```
The first argument (`data`) refers to the name you gave this share from the host.
With the `trans` option we specify that this is a virtio share.
# Problems with virtio-9p
At first I had no problems with my setup, but I am now contemplating just moving to a network storage based setup because of two problems.
The first problem is that some files have suddenly changed ownership from `libvirt-qemu` to `root`.
If the file is owned by `root`, the guest OS can still see it, but cannot access it.
I am not entirely sure the problem lies with virtio, but I suspect it is.
For anyone experiencing this problem, I wrote a small shell script to revert ownership to the `libvirt-qemu` user:
```shell
find -printf "%h/%f %u\n" | grep root | cut -d ' ' -f1 | xargs chown libvirt-qemu:libvirt-qemu
```
Another problem that I have experienced, is guests being unable to mount the directory at all.
I have only experienced this problem once, but it was highly annoying.
To fix it, I had to reboot the whole physical machine.
# Alternatives
virtio-9p seemed like a good idea, but as discussed, I had some problems with it.
It seems [virtioFS](https://virtio-fs.gitlab.io/) might be an interesting alternative as it is designed specifically for sharing directories with VMs.
As for me, I will probably finally look into deploying network storage either with NFS or SSHFS.

Binary file not shown.

After

Width:  |  Height:  |  Size: 16 KiB

368
src/_sass/klise/_base.scss Normal file
View file

@ -0,0 +1,368 @@
// Reset some basic elements
* {
-webkit-transition: background-color 75ms ease-in, border-color 75ms ease-in;
-moz-transition: background-color 75ms ease-in, border-color 75ms ease-in;
-ms-transition: background-color 75ms ease-in, border-color 75ms ease-in;
-o-transition: background-color 75ms ease-in, border-color 75ms ease-in;
transition: background-color 75ms ease-in, border-color 75ms ease-in;
}
.notransition {
-webkit-transition: none;
-moz-transition: none;
-ms-transition: none;
-o-transition: none;
transition: none;
}
html {
overflow-x: hidden;
width: 100%;
}
body,
h1,
h2,
h3,
h4,
h5,
h6,
p,
blockquote,
pre,
hr,
dl,
dd,
ol,
ul,
figure {
margin: 0;
padding: 0;
}
// Basic styling
body {
min-height: 100vh;
overflow-x: hidden;
position: relative;
color: $text-base-color;
background-color: $white;
font: $normal-weight #{$base-font-size}/#{$base-line-height} $sans-family;
-webkit-text-size-adjust: 100%;
-webkit-font-smoothing: antialiased;
-webkit-font-feature-settings: "kern" 1;
-moz-font-feature-settings: "kern" 1;
-o-font-feature-settings: "kern" 1;
font-feature-settings: "kern" 1;
font-kerning: normal;
box-sizing: border-box;
}
// Set `margin-bottom` to maintain vertical rhythm
h1,
h2,
h3,
h4,
h5,
h6,
p,
blockquote,
pre,
ul,
ol,
dl,
figure,
%vertical-rhythm {
margin-top: $spacing-full - 20;
margin-bottom: $spacing-full - 20;
}
// strong | bold
strong,
b {
font-weight: $bold-weight;
color: $black;
}
// horizontal rule
hr {
border-bottom: 0;
border-style: solid;
border-color: $light;
}
// kbd tag
kbd {
-moz-border-radius: 3px;
-webkit-border-radius: 3px;
border: 1px solid $light;
border-radius: 2px;
color: $black;
display: inline-block;
font-size: $small-font-size;
line-height: 1.4;
font-family: $mono-family;
margin: 0 0.1em;
font-weight: $bold-weight;
padding: 0.01em 0.4em;
text-shadow: 0 1px 0 $white;
}
// Image
img {
max-width: 100%;
vertical-align: middle;
-webkit-user-drag: none;
margin: 0 auto;
text-align: center;
}
// Figure
figure {
position: relative;
}
// Image inside Figure tag
figure > img {
display: block;
position: relative;
}
// Image caption
figcaption {
font-size: 13px;
text-align: center;
}
// List
ul {
list-style: none;
li {
display: list-item;
text-align: -webkit-match-parent;
}
li::before {
content: "\FE63";
display: inline-block;
top: -1px;
width: 1.2em;
position: relative;
margin-left: -1.3em;
font-weight: 700;
}
}
ol {
list-style: none;
counter-reset: li;
li {
position: relative;
counter-increment: li;
&::before {
content: counter(li);
display: inline-block;
width: 1em;
margin-right: 0.5em;
margin-left: -1.6em;
text-align: right;
direction: rtl;
font-weight: $bold-weight;
font-size: $small-font-size;
}
}
}
ul,
ol {
margin-top: 0;
margin-left: $spacing-full;
}
li {
padding-bottom: 1px;
padding-top: 1px;
&:before {
color: $black;
}
> ul,
> ol {
margin-bottom: 2px;
margin-top: 0;
}
}
// Headings
h1,
h2,
h3,
h4,
h5,
h6 {
color: $black;
font-weight: $bold-weight;
& + ul,
& + ol {
margin-top: 10px;
}
@include media-query($on-mobile) {
scroll-margin-top: 65px;
}
}
// Headings with link
h1 > a,
h2 > a,
h3 > a,
h4 > a,
h5 > a,
h6 > a {
text-decoration: none;
border: none;
&:hover {
text-decoration: none;
border: none;
}
}
// Link
a {
color: inherit;
text-decoration-color: $smoke;
&:hover {
color: $text-link-blue;
}
&:focus {
outline: 3px solid rgba(0, 54, 199, 0.6);
outline-offset: 2px;
}
}
// Del
del {
color: inherit;
}
// Em
em {
color: inherit;
}
// Blockquotes
blockquote {
color: $gray;
font-style: italic;
text-align: center;
opacity: 0.9;
border-top: 1px solid $light;
border-bottom: 1px solid $light;
padding: 10px;
margin-left: 10px;
margin-right: 10px;
font-size: 1em;
> :last-child {
margin-bottom: 0;
margin-top: 0;
}
}
// Wrapper
.wrapper {
max-width: -webkit-calc(#{$narrow-size} - (#{$spacing-full} * 2));
max-width: calc(#{$narrow-size} - (#{$spacing-full} * 2));
position: relative;
margin-right: auto;
margin-left: auto;
padding-right: $spacing-full;
padding-left: $spacing-full;
@extend %clearfix;
@include media-query($on-mobile) {
max-width: -webkit-calc(#{$narrow-size} - (#{$spacing-full}));
max-width: calc(#{$narrow-size} - (#{$spacing-full}));
padding-right: $spacing-full - 10;
padding-left: $spacing-full - 10;
&.blurry {
animation: 0.2s ease-in forwards blur;
-webkit-animation: 0.2s ease-in forwards blur;
}
}
}
// Underline
u {
text-decoration-color: #d2c7c7;
}
// Small
small {
font-size: $small-font-size;
}
// Superscript
sup {
border-radius: 10%;
top: -3px;
left: 2px;
font-size: small;
position: relative;
margin-right: 2px;
}
// Table
.overflow-table {
overflow-x: auto;
}
table {
width: 100%;
margin-top: $spacing-half;
border-collapse: collapse;
font-size: $small-font-size;
thead {
font-weight: $bold-weight;
color: $black;
border-bottom: 1px solid $light;
}
th,
td,
tr {
border: 1px solid $light;
padding: 2px 7px;
}
}
// Clearfix
%clearfix:after {
content: "";
display: table;
clear: both;
}
// When mouse block a text set this color
mark,
::selection {
background: #fffba0;
color: $black;
}
// Github Gist clear border
.gist {
table {
border: 0;
tr,
td {
border: 0;
}
}
}

247
src/_sass/klise/_dark.scss Normal file
View file

@ -0,0 +1,247 @@
body[data-theme="dark"] {
color: $dark-text-base-color;
background-color: $dark-black;
// Heading
h1,
h2,
h3,
h4,
h5,
h6 {
color: $dark-white;
}
// Table
table {
thead {
color: $dark-white;
border-color: $dark-light;
}
th,
td,
tr {
border-color: $dark-light;
}
}
// Post
.page-content {
a {
color: $dark-text-link-blue;
&:hover,
&:active,
&:focus {
color: $dark-text-link-blue-active;
}
}
h3 {
border-color: $dark-light;
}
h1,
h2,
h3,
h4,
h5,
h6 {
.anchor-head {
color: $dark-text-link-blue;
}
}
}
// Syntax
code {
&.highlighter-rouge {
background-color: $dark-light;
}
}
// kbd tag
kbd {
border-color: $dark-light;
color: $dark-white;
text-shadow: 0 1px 0 $dark-black;
}
// horizontal rule
hr {
border-color: $dark-light;
}
// Post Meta
.post-meta {
color: $dark-gray;
time {
&::after {
background-color: $dark-light;
}
}
span[itemprop="author"] {
border-color: $dark-light;
}
}
// Link
a {
color: inherit;
text-decoration-color: $dark-smoke;
&:hover {
color: $dark-text-link-blue;
}
&:focus {
outline-color: rgba(255, 82, 119, 0.6);
}
}
// List
li {
&:before {
color: $dark-white;
}
}
// Blockquote
blockquote {
color: $dark-gray;
border-color: $dark-light;
}
// Strong, Bold
strong,
b {
color: $dark-white;
}
// Navbar
.navbar {
border-color: $dark-light;
.menu {
a#mode {
.mode-sunny {
display: block;
}
.mode-moon {
display: none;
}
}
.menu-link {
color: $dark-white;
}
@include media-query($on-mobile) {
background-color: $dark-black;
border-color: $dark-light;
.menu-icon {
> svg {
fill: $dark-white;
}
}
input[type="checkbox"]:checked ~ .trigger {
background: $dark-black;
}
}
}
}
// Post Item
.post-item {
&:not(:first-child) {
border-color: $dark-light;
}
.post-item-date {
color: $dark-white;
}
.post-item-title {
a {
color: $dark-text-base-color;
&:hover,
&focus {
color: $dark-white;
}
}
}
}
// Post Navigation
.post-nav {
border-color: $dark-light;
.post-nav-item {
font-weight: $bold-weight;
.post-title {
color: $dark-white;
opacity: 0.9;
}
&:hover,
&:focus {
.post-title {
color: $dark-text-link-blue-active;
}
}
.nav-arrow {
color: $dark-gray;
}
}
@include media-query($on-mobile) {
.post-nav-item:nth-child(even) {
border-color: $dark-light;
}
}
}
// Footer
.footer {
span.footer_item {
color: $dark-white;
}
a.footer_item:not(:last-child) {
color: $dark-white;
}
.footer_copyright {
color: $dark-gray;
opacity: 1;
}
}
// 404 Page
.not-found {
.title {
color: $dark-white;
text-shadow: 1px 0px 0px $dark-text-link-blue;
}
.phrase {
color: $dark-text-base-color;
}
.solution {
color: $dark-text-link-blue;
}
.solution:hover {
color: $dark-text-link-blue-active;
}
}
.search-article {
input[type="search"] {
color: $dark-text-base-color;
&::-webkit-input-placeholder {
color: rgba(128,128,128,0.8);
}
}
}
}

View file

@ -0,0 +1 @@
@charset "utf-8";

View file

@ -0,0 +1,380 @@
// Navbar
.navbar {
height: auto;
max-width: calc(#{$wide-size} - (#{$spacing-full} * 2));
max-width: -webkit-calc(#{$wide-size} - (#{$spacing-full} * 2));
position: relative;
margin-right: auto;
margin-left: auto;
border-bottom: 1px solid $light;
padding: $spacing-full - 15px $spacing-full;
@extend %clearfix;
}
// Navigation
.menu {
user-select: none;
-ms-user-select: none;
-webkit-user-select: none;
a#mode {
float: left;
left: 8px;
top: 6px;
position: relative;
clear: both;
-webkit-transform: scale(1, 1);
transform: scale(1, 1);
opacity: 0.7;
z-index: 1;
&:hover {
cursor: pointer;
opacity: 1;
}
&:active {
-webkit-transform: scale(0.9, 0.9);
transform: scale(0.9, 0.9);
}
.mode-moon {
display: block;
line {
stroke: $black;
fill: none;
}
circle {
fill: $black;
stroke: $black;
}
}
.mode-sunny {
display: none;
line {
stroke: $dark-white;
fill: none;
}
circle {
fill: none;
stroke: $dark-white;
}
}
}
.trigger {
float: right;
}
.menu-trigger {
display: none;
}
.menu-icon {
display: none;
}
.menu-link {
color: $black;
line-height: $base-line-height + 0.4;
text-decoration: none;
padding: 5px 8px;
opacity: 0.7;
letter-spacing: 0.3px;
&:hover {
opacity: 1;
}
&:not(:last-child) {
margin-right: 5px;
}
&.rss {
position: relative;
bottom: -3px;
outline: none;
}
@include media-query($on-mobile) {
opacity: 0.8;
}
}
.menu-link.active {
opacity: 1;
font-weight: 600;
}
@include media-query($on-mobile) {
position: fixed;
top: 0;
left: 0;
right: 0;
z-index: 2;
text-align: center;
height: 50px;
background-color: $white;
border-bottom: 1px solid $light;
a#mode {
left: 10px;
top: 12px;
}
.menu-icon {
display: block;
position: absolute;
right: 0;
width: 50px;
height: 23px;
line-height: 0;
padding-top: 13px;
padding-bottom: 15px;
cursor: pointer;
text-align: center;
z-index: 1;
> svg {
fill: $black;
opacity: 0.7;
}
&:hover {
> svg {
opacity: 1;
}
}
&:active {
-webkit-transform: scale(0.9, 0.9);
transform: scale(0.9, 0.9);
}
}
input[type="checkbox"]:not(:checked) ~ .trigger {
clear: both;
visibility: hidden;
}
input[type="checkbox"]:checked ~ .trigger {
position: fixed;
animation: 0.2s ease-in forwards fadein;
-webkit-animation: 0.2s ease-in forwards fadein;
display: flex;
flex-direction: row;
justify-content: center;
align-items: center;
background-color: $white;
height: 100vh;
width: 100%;
top: 0;
}
.menu-link {
display: block;
box-sizing: border-box;
font-size: 1.1em;
&:not(:last-child) {
margin: 0;
padding: 2px 0;
}
}
}
}
// Author
.author {
margin-top: 6.3rem;
margin-bottom: 7.2rem;
text-align: center;
@include media-query($on-mobile) {
margin-bottom: 3em;
}
.author-avatar {
width: 70px;
height: 70px;
border-radius: 100%;
user-select: none;
// background-color: $black;
-ms-user-select: none;
-webkit-user-select: none;
-webkit-animation: 0.5s ease-in forwards fadein;
animation: 0.5s ease-in forwards fadein;
opacity: 1;
}
.author-name {
font-size: 1.7em;
margin-bottom: 2px;
}
.author-bio {
margin: 0 auto;
opacity: 0.9;
max-width: 393px;
line-height: 1.688;
}
}
// Content
.posts-item-note {
font-size: $base-font-size;
font-weight: 700;
margin-bottom: 5px;
color: $black;
}
// List of posts
.post-item {
display: flex;
padding-top: 5px;
padding-bottom: 6px;
@extend %clearfix;
&:not(:first-child) {
border-top: 1px solid $light;
}
.post-item-date {
min-width: 96px;
color: $black;
font-weight: 700;
padding-right: 10px;
@include media-query($on-mobile) {
font-size: 16px;
}
}
.post-item-title {
margin: 0;
border: 0;
padding: 0;
font-size: $base-font-size;
font-weight: normal;
letter-spacing: 0.1px;
a {
color: $text-base-color;
&:hover,
&focus {
color: $black;
}
}
}
}
// Footer
.footer {
margin-top: 8em;
margin-bottom: 2em;
text-align: center;
@include media-query($on-mobile) {
margin-top: 3em;
}
span.footer_item {
color: $black;
opacity: 0.8;
font-weight: $bold-weight;
font-size: $small-font-size;
}
a.footer_item {
color: $black;
opacity: 0.8;
text-decoration: none;
&:not(:last-child) {
margin-right: 10px;
&:hover {
opacity: 1;
}
}
}
.footer_copyright {
font-size: $small-font-size - 1;
margin-top: 3px;
display: block;
color: $gray;
opacity: 0.8;
}
}
.not-found {
text-align: center;
display: flex;
justify-content: center;
flex-direction: column;
height: 75vh;
.title {
font-size: 5em;
font-weight: $bold-weight;
line-height: 1.1;
color: $black;
text-shadow: 1px 0px 0px $text-link-blue;
}
.phrase {
color: $text-base-color;
}
.solution {
color: $text-link-blue;
letter-spacing: 0.5px;
}
.solution:hover {
color: $text-link-blue-active;
}
}
.search-article {
position: relative;
margin-bottom: 50px;
label[for="search-input"] {
position: relative;
top: 10px;
left: 11px;
}
input[type="search"] {
top: 0;
left: 0;
border: 0;
width: 100%;
height: 40px;
outline: none;
position: absolute;
border-radius: 5px;
padding: 10px 10px 10px 35px;
color: $text-base-color;
-webkit-appearance: none;
font-size: $base-font-size;
background-color: rgba(128, 128, 128, 0.1);
border: 1px solid rgba(128, 128, 128, 0.1);
&::-webkit-input-placeholder {
color: #808080;
}
&::-webkit-search-decoration,
&::-webkit-search-results-decoration {
display: none;
}
}
}
#search-results {
text-align: center;
li {
text-align: left;
}
}
.archive-tags {
height: auto;
.tag-item {
padding: 1px 3px;
border-radius: 2px;
border: 1px solid rgba(128, 128, 128, 0.1);
background-color: rgba(128, 128, 128, 0.1);
}
}

View file

@ -0,0 +1,41 @@
// Animation fade-in
// Fades an element in from 20% to 80% opacity.
@keyframes fadein {
0% {
opacity: 0.2;
}
100% {
opacity: 0.8;
}
}
// Animation blur
// Progressively blurs an element up to 4px; applied via the `.blurry`
// class on the page wrapper while the mobile menu is open.
@keyframes blur {
0% {
filter: blur(0px);
}
100% {
filter: blur(4px);
}
}
// Responsive embed video
// Classic 16:9 aspect-ratio trick: the container itself has zero height
// and reserves space with padding-bottom: 56.25% (9/16), while the
// embedded frame is absolutely positioned to fill that reserved box.
.embed-responsive {
height: 0;
max-width: 100%;
overflow: hidden;
position: relative;
padding-bottom: 56.25%;
margin-top: 20px;
iframe,
object,
embed {
top: 0;
left: 0;
width: 100%;
height: 100%;
position: absolute;
}
}

261
src/_sass/klise/_post.scss Normal file
View file

@ -0,0 +1,261 @@
// Post wrapper
.wrapper.post {
@include media-query($on-mobile) {
padding-left: $spacing-half;
padding-right: $spacing-half;
}
}
// Post title
.header {
margin-top: 7.8em;
margin-bottom: 3em;
.tags {
margin-left: 3px;
letter-spacing: 0.5px;
.tag {
font-weight: $bold-weight;
font-size: $small-font-size - 2;
&:hover {
text-decoration: none;
}
}
}
.header-title {
font-size: 2em;
line-height: 1.2;
margin-top: 10px;
margin-bottom: 20px;
&.center {
text-align: center;
}
@include media-query($on-mobile) {
font-size: 1.9em;
}
}
}
// Post meta
.post-meta {
padding-top: 3px;
line-height: 1.3;
color: $gray;
time {
position: relative;
margin-right: 1.5em;
&::after {
background: $light;
bottom: 1px;
content: " ";
height: 2px;
position: absolute;
right: -20px;
width: 12px;
}
}
span[itemprop="author"] {
border-bottom: 1px dotted $light;
}
}
// Post content
.page-content {
padding-top: 8px;
iframe {
text-align: center;
}
figure {
img {
border-radius: 2px;
}
figcaption {
margin-top: 5px;
font-style: italic;
font-size: $small-font-size;
}
}
a {
color: $text-link-blue;
text-decoration: none;
&[target="_blank"]::after {
content: " \2197";
font-size: $small-font-size;
line-height: 0;
position: relative;
bottom: 5px;
vertical-align: baseline;
}
&:hover {
color: $text-link-blue-active;
}
&:focus {
color: $text-link-blue;
}
}
> p {
margin: 0;
padding-top: $spacing-full - 15;
padding-bottom: $spacing-full - 15;
}
ul.task-list {
list-style: none;
margin: 0;
li::before {
content: "";
}
li input[type="checkbox"] {
margin-right: 10px;
}
}
dl dt {
font-weight: $bold-weight;
}
h1,
h2,
h3,
h4,
h5,
h6 {
color: $black;
font-weight: $bold-weight;
margin-top: $spacing-full;
margin-bottom: 0;
&:hover {
.anchor-head {
color: $text-link-blue;
opacity: 1;
}
}
.anchor-head {
position: relative;
opacity: 0;
outline: none;
&::before {
content: "#";
position: absolute;
right: -3px;
width: 1em;
font-weight: $bold-weight;
}
}
}
h1 {
@include relative-font-size(1.5);
}
h2 {
@include relative-font-size(1.375);
}
h3 {
@include relative-font-size(1.25);
border-bottom: 1px solid $light;
padding-bottom: 4px;
}
h4 {
@include relative-font-size(1.25);
}
h5 {
@include relative-font-size(1);
}
h6 {
@include relative-font-size(0.875);
}
}
.post-nav {
display: flex;
position: relative;
margin-top: 5em;
border-top: 1px solid $light;
line-height: 1.4;
.post-nav-item {
border-bottom: 0;
font-weight: $bold-weight;
padding-bottom: 10px;
.post-title {
color: $black;
}
&:hover,
&:focus {
.post-title {
color: $text-link-blue-active;
opacity: 0.9;
}
}
.nav-arrow {
font-weight: $normal-weight;
font-size: $small-font-size;
color: $gray;
margin-bottom: 3px;
}
width: 50%;
padding-top: 10px;
text-decoration: none;
box-sizing: border-box;
&:nth-child(odd) {
padding-left: 0;
padding-right: 20px;
}
&:nth-child(even) {
text-align: right;
padding-right: 0;
padding-left: 20px;
}
}
@include media-query($on-mobile) {
display: block;
font-size: $small-font-size;
.post-nav-item {
display: block;
width: 100%;
}
.post-nav-item:nth-child(even) {
border-left: 0;
padding-left: 0;
border-top: 1px solid $light;
}
}
}
.post-updated-at {
font-family: "Ubuntu mono", "monospace";
}

View file

@ -0,0 +1,185 @@
// Code
code {
font-family: $mono-family;
text-rendering: optimizeLegibility;
font-feature-settings: "calt" 1;
font-variant-ligatures: normal;
white-space: pre;
word-spacing: normal;
word-break: normal;
word-wrap: normal;
font-size: inherit;
&.highlighter-rouge {
padding: 1px 3px;
position: relative;
top: -1px;
background-color: #f6f6f6;
border-radius: 2px;
border: 1px solid rgba(128,128,128,0.1);
}
}
// Codeblock Theme
pre.highlight, pre {
margin: 0 -27px;
@include media-query($on-mobile) {
margin: 0 calc(51% - 51vw);
padding-left: 20px;
}
border: 1px solid rgba(128,128,128,0.1);
background-color: #1a1b21;
border-radius: 2px;
padding: 10px;
display: block;
overflow-x: auto;
> code {
width: 100%;
max-width: 50rem;
margin-left: auto;
margin-right: auto;
line-height: 1.5;
display: block;
border: 0;
}
}
.highlight table td {
padding: 5px;
}
.highlight table pre {
margin: 0;
}
.highlight,
.highlight .w {
color: #fbf1c7;
// background-color: #1a1b21;
}
.highlight .err {
color: #fb4934;
// background-color: #1a1b21;
font-weight: bold;
}
.highlight .c,
.highlight .cd,
.highlight .cm,
.highlight .c1,
.highlight .cs {
color: #928374;
font-style: italic;
}
.highlight .cp {
color: #8ec07c;
}
.highlight .nt {
color: #fb4934;
}
.highlight .o,
.highlight .ow {
color: #fbf1c7;
}
.highlight .p,
.highlight .pi {
color: #fbf1c7;
}
.highlight .gi {
color: #b8bb26;
background-color: #282828;
}
.highlight .gd {
color: #fb4934;
background-color: #282828;
}
.highlight .gh {
color: #b8bb26;
font-weight: bold;
}
.highlight .k,
.highlight .kn,
.highlight .kp,
.highlight .kr,
.highlight .kv {
color: #fb4934;
}
.highlight .kc {
color: #d3869b;
}
.highlight .kt {
color: #fabd2f;
}
.highlight .kd {
color: #fe8019;
}
.highlight .s,
.highlight .sb,
.highlight .sc,
.highlight .sd,
.highlight .s2,
.highlight .sh,
.highlight .sx,
.highlight .s1 {
color: #b8bb26;
font-style: italic;
}
.highlight .si {
color: #b8bb26;
font-style: italic;
}
.highlight .sr {
color: #b8bb26;
font-style: italic;
}
.highlight .se {
color: #fe8019;
}
.highlight .nn {
color: #8ec07c;
}
.highlight .nc {
color: #8ec07c;
}
.highlight .no {
color: #d3869b;
}
.highlight .na {
color: #b8bb26;
}
.highlight .m,
.highlight .mf,
.highlight .mh,
.highlight .mi,
.highlight .il,
.highlight .mo,
.highlight .mb,
.highlight .mx {
color: #d3869b;
}
.highlight .ss {
color: #83a598;
}

63
src/_sass/main.scss Normal file
View file

@ -0,0 +1,63 @@
// Fonts preferences
// Base typography: Roboto for body text, Consolas for code blocks.
$sans-family: Roboto, sans-serif;
$mono-family: Consolas, monospace;
$base-font-size: 16px;
$medium-font-size: $base-font-size * 0.938;
$small-font-size: $base-font-size * 0.875;
$base-line-height: 1.85;
// Font weight
// $light-weight: 300; // uncomment if necessary
$normal-weight: 400;
$bold-weight: 700;
// $black-weight: 900; // uncomment if necessary
//Light Colors
$text-base-color: #434648;
$text-link-blue: #003fff;
$text-link-blue-active: #0036c7;
$black: #0d122b;
$light: #ececec;
$smoke: #d2c7c7;
$gray: #6b7886;
$white: #fff;
// Dark Colors
// Counterparts of the light palette, consumed by klise/_dark.scss
// under body[data-theme="dark"].
$dark-text-base-color: #c7bebe;
$dark-text-link-blue: #ff5277;
$dark-text-link-blue-active: #ff2957;
$dark-black: #131418;
$dark-white: #eaeaea;
$dark-light: #1b1d25;
$dark-smoke: #4a4d56;
$dark-gray: #767f87;
// Width of the content area
$wide-size: 890px;
$narrow-size: 720px;
// Padding unit
// NOTE(review): `/` as division is deprecated in Dart Sass (math.div is
// the replacement), but libsass/sassc-based Jekyll pipelines do not
// support math.div — confirm which Sass implementation builds this site
// before migrating.
$spacing-full: 30px;
$spacing-half: $spacing-full / 2;
// State of devices
// Breakpoint widths; media-query() below emits max-width queries.
$on-mobile: 768px;
$on-tablet: 769px;
$on-desktop: 1024px;
$on-widescreen: 1152px;
// Wraps the passed @content in a max-width media query for $device.
@mixin media-query($device) {
@media screen and (max-width: $device) {
@content;
}
}
// Emits a font-size scaled from $base-font-size by $ratio.
@mixin relative-font-size($ratio) {
font-size: $base-font-size * $ratio;
}
// Import sass files
@import "klise/fonts", "klise/base", "klise/layout", "klise/post",
"klise/miscellaneous", "klise/syntax", "klise/dark";

1
src/_site/404.html Normal file

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

Binary file not shown.

After

Width:  |  Height:  |  Size: 75 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 217 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 415 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 67 KiB

View file

@ -0,0 +1,9 @@
<?xml version="1.0" encoding="utf-8"?>
<browserconfig>
<msapplication>
<tile>
<square150x150logo src="/mstile-150x150.png"/>
<TileColor>#603cba</TileColor>
</tile>
</msapplication>
</browserconfig>

Binary file not shown.

After

Width:  |  Height:  |  Size: 940 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 34 KiB

View file

@ -0,0 +1,25 @@
<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN"
"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
<svg version="1.0" xmlns="http://www.w3.org/2000/svg"
width="500.000000pt" height="500.000000pt" viewBox="0 0 500.000000 500.000000"
preserveAspectRatio="xMidYMid meet">
<metadata>
Created by potrace 1.11, written by Peter Selinger 2001-2013
</metadata>
<g transform="translate(0.000000,500.000000) scale(0.100000,-0.100000)"
fill="#000000" stroke="none">
<path d="M2230 4984 c-106 -12 -129 -16 -230 -35 -89 -17 -92 -17 -201 -50
-296 -87 -565 -225 -829 -423 -108 -81 -329 -297 -412 -401 -198 -252 -372
-577 -448 -840 -26 -93 -59 -223 -65 -260 -4 -22 -9 -52 -11 -66 -22 -127 -31
-269 -29 -454 2 -115 5 -219 8 -230 2 -11 8 -42 11 -70 12 -82 16 -106 37
-200 12 -49 22 -94 23 -100 10 -45 76 -238 107 -314 125 -299 301 -562 539
-801 358 -361 775 -585 1280 -691 97 -20 128 -25 260 -39 80 -8 475 -6 500 3
8 3 34 8 58 11 109 15 134 19 234 41 276 62 582 193 820 353 474 318 826 783
998 1319 31 97 36 114 46 163 3 14 9 41 14 60 39 151 55 312 55 550 -1 242
-22 415 -79 635 -16 62 -28 103 -36 120 -5 11 -13 36 -19 55 -54 203 -267 576
-452 795 -37 44 -224 232 -281 282 -68 60 -234 180 -326 237 -97 59 -352 186
-375 186 -7 0 -17 4 -22 9 -25 22 -319 108 -435 126 -25 4 -52 9 -60 10 -8 1
-37 6 -65 9 -27 4 -66 9 -85 12 -75 12 -429 11 -530 -2z"/>
</g>
</svg>

After

Width:  |  Height:  |  Size: 1.4 KiB

View file

@ -0,0 +1 @@
{"name":"","short_name":"","icons":[{"src":"/android-chrome-192x192.png","sizes":"192x192","type":"image/png"},{"src":"/android-chrome-512x512.png","sizes":"512x512","type":"image/png"}],"theme_color":"#ffffff","background_color":"#ffffff","display":"standalone"}

Binary file not shown.

After

Width:  |  Height:  |  Size: 63 KiB

View file

@ -0,0 +1 @@
// Vendored, minified lazy-loader for Disqus comments; exposes
// window.disqusLoader(selector, options). Defaults: laziness:1,
// throttle:250, scriptUrl:!1, disqusConfig:!1. It listens on throttled
// window scroll/resize events and only injects the embed <script> once the
// target element is within `laziness` viewport-heights of the viewport;
// on later targets it re-renders via DISQUS.reset() and moves the
// id="disqus_thread" attribute to the new element.
// NOTE(review): appears to be "disqus-loader" by Osvaldas Valutis —
// confirm upstream source/version before hand-editing this minified code.
!function(t,e,n){"use strict";var o=function(t,e){var n,o;return function(){var i=this,r=arguments,d=+new Date;n&&d<n+t?(clearTimeout(o),o=setTimeout(function(){n=d,e.apply(i,r)},t)):(n=d,e.apply(i,r))}},i=!1,r=!1,d=!1,s=!1,a="unloaded",u=!1,l=function(){if(!u||!e.body.contains(u)||"loaded"==u.disqusLoaderStatus)return!0;var n,o,i=t.pageYOffset,l=(n=u,o=n.getBoundingClientRect(),{top:o.top+e.body.scrollTop,left:o.left+e.body.scrollLeft}).top;if(l-i>t.innerHeight*r||i-l-u.offsetHeight-t.innerHeight*r>0)return!0;var c,f,p,y=e.getElementById("disqus_thread");y&&y.removeAttribute("id"),u.setAttribute("id","disqus_thread"),u.disqusLoaderStatus="loaded","loaded"==a?DISQUS.reset({reload:!0,config:d}):(t.disqus_config=d,"unloaded"==a&&(a="loading",c=s,f=function(){a="loaded"},(p=e.createElement("script")).src=c,p.async=!0,p.setAttribute("data-timestamp",+new Date),p.addEventListener("load",function(){"function"==typeof f&&f()}),(e.head||e.body).appendChild(p)))};t.addEventListener("scroll",o(i,l)),t.addEventListener("resize",o(i,l)),t.disqusLoader=function(t,n){n=function(t,e){var n,o={};for(n in t)Object.prototype.hasOwnProperty.call(t,n)&&(o[n]=t[n]);for(n in e)Object.prototype.hasOwnProperty.call(e,n)&&(o[n]=e[n]);return o}({laziness:1,throttle:250,scriptUrl:!1,disqusConfig:!1},n),r=n.laziness+1,i=n.throttle,d=n.disqusConfig,s=!1===s?n.scriptUrl:s,(u="string"==typeof t?e.querySelector(t):"number"==typeof t.length?t[0]:t)&&(u.disqusLoaderStatus="unloaded"),l()}}(window,document);

View file

@ -0,0 +1,31 @@
(() => {
  // Dark/light theme switch, persisted in localStorage under "theme".
  const pageBody = document.body;
  const themeButton = document.getElementById("mode");

  // Flip the stored theme and mirror it on <body data-theme="dark">.
  const toggleTheme = (state) => {
    switch (state) {
      case "dark":
        localStorage.setItem("theme", "light");
        pageBody.removeAttribute("data-theme");
        break;
      case "light":
        localStorage.setItem("theme", "dark");
        pageBody.setAttribute("data-theme", "dark");
        break;
      default:
        // No stored theme yet: defer to initTheme (defined elsewhere in
        // this file — presumably applies the initial theme; TODO confirm).
        initTheme(state);
    }
  };

  themeButton.addEventListener("click", () => {
    toggleTheme(localStorage.getItem("theme"));
  });

  // Blur the page content while the mobile menu checkbox is checked.
  const menuCheckbox = document.getElementById("menu-trigger");
  menuCheckbox.addEventListener("change", function () {
    const content = document.querySelector(".wrapper");
    if (this.checked) {
      content.classList.add("blurry");
    } else {
      content.classList.remove("blurry");
    }
  });
})();

6
src/_site/assets/js/search.min.js vendored Normal file
View file

@ -0,0 +1,6 @@
/*!
* Simple-Jekyll-Search
* Copyright 2015-2020, Christian Fei
* Licensed under the MIT License.
*/
!function(){"use strict";var i={compile:function(r){return o.template.replace(o.pattern,function(t,e){var n=o.middleware(e,r[e],o.template);return void 0!==n?n:r[e]||t})},setOptions:function(t){o.pattern=t.pattern||o.pattern,o.template=t.template||o.template,"function"==typeof t.middleware&&(o.middleware=t.middleware)}},o={};o.pattern=/\{(.*?)\}/g,o.template="",o.middleware=function(){};var n=function(t,e){var n=e.length,r=t.length;if(n<r)return!1;if(r===n)return t===e;t:for(var i=0,o=0;i<r;i++){for(var u=t.charCodeAt(i);o<n;)if(e.charCodeAt(o++)===u)continue t;return!1}return!0},e=new function(){this.matches=function(t,e){return n(e.toLowerCase(),t.toLowerCase())}};var r=new function(){this.matches=function(e,t){return!!e&&(e=e.trim().toLowerCase(),(t=t.trim().toLowerCase()).split(" ").filter(function(t){return 0<=e.indexOf(t)}).length===t.split(" ").length)}};var u={put:function(t){if(f(t))return p(t);if(function(t){return Boolean(t)&&"[object Array]"===Object.prototype.toString.call(t)}(t))return function(t){var e=[];l();for(var n=0,r=t.length;n<r;n++)f(t[n])&&e.push(p(t[n]));return e}(t);return undefined},clear:l,search:function(t){return t?function(t,e,n,r){for(var i=[],o=0;o<t.length&&i.length<r.limit;o++){var u=function(t,e,n,r){for(var i in t)if(!function(t,e){for(var n=!1,r=0,i=(e=e||[]).length;r<i;r++){var o=e[r];!n&&new RegExp(t).test(o)&&(n=!0)}return n}(t[i],r.exclude)&&n.matches(t[i],e))return t}(t[o],e,n,r);u&&i.push(u)}return i}(s,t,c.searchStrategy,c).sort(c.sort):[]},setOptions:function(t){(c=t||{}).fuzzy=t.fuzzy||!1,c.limit=t.limit||10,c.searchStrategy=t.fuzzy?e:r,c.sort=t.sort||a}};function a(){return 0}var s=[],c={};function l(){return s.length=0,s}function f(t){return Boolean(t)&&"[object Object]"===Object.prototype.toString.call(t)}function p(t){return s.push(t),s}c.fuzzy=!1,c.limit=10,c.searchStrategy=c.fuzzy?e:r,c.sort=a;var d={load:function(t,e){var n=window.XMLHttpRequest?new window.XMLHttpRequest:new 
ActiveXObject("Microsoft.XMLHTTP");n.open("GET",t,!0),n.onreadystatechange=function(e,n){return function(){if(4===e.readyState&&200===e.status)try{n(null,JSON.parse(e.responseText))}catch(t){n(t,null)}}}(n,e),n.send()}};var h={merge:function(t,e){var n={};for(var r in t)n[r]=t[r],"undefined"!=typeof e[r]&&(n[r]=e[r]);return n},isJSON:function(t){try{return t instanceof Object&&JSON.parse(JSON.stringify(t))?!0:!1}catch(e){return!1}}};var t,m,v,w;function y(t){u.put(t),m.searchInput.addEventListener("input",function(t){-1===[13,16,20,37,38,39,40,91].indexOf(t.which)&&(g(),z(t.target.value))})}function g(){m.resultsContainer.innerHTML=""}function O(t){m.resultsContainer.innerHTML+=t}function z(t){var e;(e=t)&&0<e.length&&(g(),function(t,e){var n=t.length;if(0===n)return O(m.noResultsText);for(var r=0;r<n;r++)t[r].query=e,O(i.compile(t[r]))}(u.search(t),t))}function S(t){throw new Error("SimpleJekyllSearch --- "+t)}t=window,m={searchInput:null,resultsContainer:null,json:[],success:Function.prototype,searchResultTemplate:'<li><a href="{url}" title="{description}">{title}</a></li>',templateMiddleware:Function.prototype,sortMiddleware:function(){return 0},noResultsText:"No results found",limit:10,fuzzy:!1,exclude:[]},w=function j(t){if(!((e=t)&&"undefined"!=typeof e.required&&e.required instanceof Array))throw new Error("-- OptionsValidator: required options missing");var e;if(!(this instanceof j))return new j(t);var r=t.required;this.getRequiredOptions=function(){return r},this.validate=function(e){var n=[];return r.forEach(function(t){"undefined"==typeof e[t]&&n.push(t)}),n}}({required:v=["searchInput","resultsContainer","json"]}),t.SimpleJekyllSearch=function(t){var n;0<w.validate(t).length&&S("You must specify the following required options: 
"+v),m=h.merge(m,t),i.setOptions({template:m.searchResultTemplate,middleware:m.templateMiddleware}),u.setOptions({fuzzy:m.fuzzy,limit:m.limit,sort:m.sortMiddleware}),h.isJSON(m.json)?y(m.json):(n=m.json,d.load(n,function(t,e){t&&S("failed to get JSON ("+n+")"),y(e)}));var e={search:z};return"function"==typeof m.success&&m.success.call(e),e}}();

View file

@ -0,0 +1,75 @@
[
{
"title" : "It&#39;s alive!",
"description" : "",
"tags" : "",
"url" : "//its-alive/",
"date" : "2024-04-21 10:02:00 +0200"
} ,
{
"title" : "Home Lab Infrastructure Snapshot August 2023",
"description" : "",
"tags" : "",
"url" : "//infrastructure-snapshot/",
"date" : "2023-08-27 22:23:00 +0200"
} ,
{
"title" : "Hashicorp&#39;s License Change and my Home Lab - Update",
"description" : "",
"tags" : "",
"url" : "//hashicorp-license-change/",
"date" : "2023-08-17 18:15:00 +0200"
} ,
{
"title" : "Monitoring Correct Memory Usage in Fluent Bit",
"description" : "",
"tags" : "",
"url" : "//fluent-bit-memory/",
"date" : "2023-08-09 16:19:00 +0200"
} ,
{
"title" : "Error Handling in Borgmatic",
"description" : "",
"tags" : "",
"url" : "//backup-failure/",
"date" : "2023-08-08 11:51:00 +0200"
} ,
{
"title" : "Using Ansible to alter Kernel Parameters",
"description" : "",
"tags" : "",
"url" : "//ansible-edit-grub/",
"date" : "2023-06-19 09:31:00 +0200"
} ,
{
"title" : "Sending Apprise Notifications from Concourse CI",
"description" : "",
"tags" : "",
"url" : "//concourse-apprise-notifier/",
"date" : "2023-06-14 23:39:00 +0200"
} ,
{
"title" : "My Experiences with virtio-9p",
"description" : "",
"tags" : "",
"url" : "//virtio-9p-experiences/",
"date" : "2023-05-31 14:18:00 +0200"
} ,
{
"title" : "Homebrew SSH Certificate Authority for the Terraform Libvirt Provider",
"description" : "",
"tags" : "",
"url" : "//homebrew-ssh-ca/",
"date" : "2023-05-23 11:14:00 +0200"
}
]

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Some files were not shown because too many files have changed in this diff Show more