about summary refs log tree commit diff
path: root/nix
diff options
context:
space:
mode:
Diffstat (limited to 'nix')
-rw-r--r--nix/hosts/caladan/darwin-configuration.nix4
-rw-r--r--nix/hosts/caladan/home_emile.nix6
-rw-r--r--nix/hosts/corrino/configuration.nix14
-rwxr-xr-xnix/hosts/corrino/hetzner-dedicated-wipe-and-install-nixos-luks-raid-lvm.sh347
-rw-r--r--nix/hosts/corrino/ports.nix10
-rw-r--r--nix/hosts/corrino/www/ctf.emile.space.nix2
-rw-r--r--nix/hosts/corrino/www/emile.space.nix4
-rw-r--r--nix/hosts/corrino/www/git/cgit.nix8
-rw-r--r--nix/hosts/corrino/www/md.emile.space.nix8
-rw-r--r--nix/hosts/corrino/www/photo/default.nix8
-rw-r--r--nix/hosts/corrino/www/photo/photoprism.nix (renamed from nix/hosts/corrino/www/photo.emile.space.nix)2
-rw-r--r--nix/hosts/corrino/www/r2wa.rs.nix29
-rw-r--r--nix/hosts/corrino/www/social.emile.space.nix7
-rw-r--r--nix/hosts/corrino/www/sso.emile.space.nix2
-rw-r--r--nix/hosts/gamont/README.md3
-rw-r--r--nix/hosts/gamont/configuration.nix127
-rw-r--r--nix/hosts/lampadas/configuration.nix5
-rw-r--r--nix/hosts/lernaeus/configuration.nix4
l---------nix/hosts/pi1/result1
-rw-r--r--nix/hosts/pi2/default.nix47
l---------nix/hosts/pi2/result1
-rw-r--r--nix/modules/default.nix1
-rw-r--r--nix/modules/r2wars-web/default.nix73
-rw-r--r--nix/pkgs/overlay.nix3
-rw-r--r--nix/pkgs/r2wars-web/default.nix31
-rw-r--r--nix/pkgs/vokobe/.gitignore7
-rw-r--r--nix/pkgs/vokobe/Cargo.lock270
-rw-r--r--nix/pkgs/vokobe/Cargo.toml10
-rw-r--r--nix/pkgs/vokobe/LICENSE21
-rw-r--r--nix/pkgs/vokobe/README.md101
-rw-r--r--nix/pkgs/vokobe/default.nix16
-rw-r--r--nix/pkgs/vokobe/flaaaaake.nix44
-rw-r--r--nix/pkgs/vokobe/src/main.rs922
33 files changed, 2117 insertions, 21 deletions
diff --git a/nix/hosts/caladan/darwin-configuration.nix b/nix/hosts/caladan/darwin-configuration.nix
index c681b35..a181b35 100644
--- a/nix/hosts/caladan/darwin-configuration.nix
+++ b/nix/hosts/caladan/darwin-configuration.nix
@@ -42,12 +42,14 @@
       trusted-users = [ "root" "hydra" "emile" ];
 
       trusted-public-keys = [
-        "nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs="
         "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="
+        "nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs="
+        "cache.garnix.io:CTFPyKSLcx5RMJKfLo5EEPUObbA78b0YQ2DTCJXqr9g="
       ];
       substituters = [
         "https://cache.nixos.org"
         "https://nix-community.cachix.org"
+        "https://cache.garnix.io"
       ];
 
       experimental-features = [ "nix-command" "flakes" ];
diff --git a/nix/hosts/caladan/home_emile.nix b/nix/hosts/caladan/home_emile.nix
index a7009ab..f57daaf 100644
--- a/nix/hosts/caladan/home_emile.nix
+++ b/nix/hosts/caladan/home_emile.nix
@@ -165,6 +165,7 @@
 
     # c foo
     cmake
+    pkg-config
 
     # iot hack foo
     minicom
@@ -180,13 +181,18 @@
     virt-manager
 
     # lisp foo
+    #unstable.sbcl
     # sbcl
+    #clasp-common-lisp
+    clisp
 
     # infrastructure as code foo
     terraform ansible
 
     portmidi
 
+    tiny # irc
+
     # blender
 
   # ] ++ lib.optionals stdenv.isDarwin [
diff --git a/nix/hosts/corrino/configuration.nix b/nix/hosts/corrino/configuration.nix
index d23b6bf..1f054ff 100644
--- a/nix/hosts/corrino/configuration.nix
+++ b/nix/hosts/corrino/configuration.nix
@@ -25,7 +25,10 @@ in {
       ./www/hydra.emile.space.nix
       ./www/netbox.emile.space.nix
       ./www/grafana.emile.space.nix
-      ./www/photo.emile.space.nix
+
+      # ./www/photo.emile.space.nix
+      # ./www/photo
+
       ./www/tickets.emile.space.nix
       ./www/talks.emile.space.nix
       ./www/stream.emile.space.nix
@@ -34,6 +37,9 @@ in {
       ./www/sso.emile.space.nix
       ./www/s3.emile.space.nix
 
+      # ./www/irc.emile.space.nix
+      # ./www/irc
+
       ./www/ctf.emile.space.nix
       # ./www/magic-hash.emile.space.nix
 
@@ -450,8 +456,12 @@ in {
     };
   };
 
+  virtualisation.podman = {
+    enable = true;
+    autoPrune.enable = true;
+  };
   virtualisation = {
-    docker.enable = true;
+    # docker.enable = true;
     libvirtd = {
       enable = true;
       qemu = {
diff --git a/nix/hosts/corrino/hetzner-dedicated-wipe-and-install-nixos-luks-raid-lvm.sh b/nix/hosts/corrino/hetzner-dedicated-wipe-and-install-nixos-luks-raid-lvm.sh
new file mode 100755
index 0000000..de42261
--- /dev/null
+++ b/nix/hosts/corrino/hetzner-dedicated-wipe-and-install-nixos-luks-raid-lvm.sh
@@ -0,0 +1,347 @@
+#!/usr/bin/env bash
+
+# Installs NixOS on a Hetzner server, wiping the server.
+#
+# This is for a specific server configuration; adjust where needed.
+#
+# Prerequisites:
+#   * Update the script wherever FIXME is present
+#
+# Usage:
+#     ssh root@YOUR_SERVERS_IP bash -s < hetzner-dedicated-wipe-and-install-nixos.sh
+#
+# When the script is done, make sure to boot the server from HD, not rescue mode again.
+
+# Explanations:
+#
+# * Adapted from https://gist.github.com/nh2/78d1c65e33806e7728622dbe748c2b6a
+# * Following largely https://nixos.org/nixos/manual/index.html#sec-installing-from-other-distro.
+# * **Important:** We boot in legacy-BIOS mode, not UEFI, because that's what Hetzner uses.
+#   * NVMe devices aren't supported for booting (those require EFI boot)
+# * We set a custom `configuration.nix` so that we can connect to the machine afterwards,
+#   inspired by https://nixos.wiki/wiki/Install_NixOS_on_Hetzner_Online
+# * This server has 2 HDDs.
+#   We put everything on RAID1.
+#   Storage scheme: `partitions -> RAID -> LVM -> ext4`.
+# * A root user with empty password is created, so that you can just login
+#   as root and press enter when using the Hetzner spider KVM.
+#   Of course that empty-password login isn't exposed to the Internet.
+#   Change the password afterwards to avoid anyone with physical access
+#   being able to login without any authentication.
+# * The script reboots at the end.
+
+NIXOS_VERSION="22.11"
+
+echo "Enter New Hostname"
+HOSTNAME="corrino"
+
+echo "Enter LUKS Password"
+LUKS_PASSWORD="FIXME"
+
+set -eu
+set -o pipefail
+
+set -x
+
+# Inspect existing disks
+lsblk
+
+# Undo existing setups to allow running the script multiple times to iterate on it.
+# We allow these operations to fail for the case the script runs the first time.
+set +e
+umount /mnt/boot /mnt/dev /mnt/proc /mnt/run /mnt/sys /mnt
+vgchange -an
+cryptsetup close luks0
+rm initrd_ssh_host_ecdsa_key
+set -e
+
+# Stop all mdadm arrays that the boot may have activated.
+mdadm --stop --scan
+
+# Prevent mdadm from auto-assembling arrays.
+# Otherwise, as soon as we create the partition tables below, it will try to
+# re-assemble a previous RAID if any remaining RAID signatures are present,
+# before we even get the chance to wipe them.
+# From:
+#     https://unix.stackexchange.com/questions/166688/prevent-debian-from-auto-assembling-raid-at-boot/504035#504035
+# We use `>` because the file may already contain some detected RAID arrays,
+# which would take precedence over our `<ignore>`.
+echo 'AUTO -all
+ARRAY <ignore> UUID=00000000:00000000:00000000:00000000' > /etc/mdadm/mdadm.conf
+
+# Create partition tables (--script to not ask)
+parted --script /dev/nvme0n1 mklabel gpt
+parted --script /dev/nvme1n1 mklabel gpt
+
+# Create partitions (--script to not ask)
+#
+# We create the 1MB BIOS boot partition at the front.
+#
+# Note we use "MB" instead of "MiB" because otherwise `--align optimal` has no effect;
+# as per documentation https://www.gnu.org/software/parted/manual/html_node/unit.html#unit:
+# > Note that as of parted-2.4, when you specify start and/or end values using IEC
+# > binary units like "MiB", "GiB", "TiB", etc., parted treats those values as exact
+#
+# Note: When using `mkpart` on GPT, as per
+#   https://www.gnu.org/software/parted/manual/html_node/mkpart.html#mkpart
+# the first argument to `mkpart` is not a `part-type`, but the GPT partition name:
+#   ... part-type is one of 'primary', 'extended' or 'logical', and may be specified only with 'msdos' or 'dvh' partition tables.
+#   A name must be specified for a 'gpt' partition table.
+# GPT partition names are limited to 36 UTF-16 chars, see https://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_entries_(LBA_2-33).
+parted --script --align optimal /dev/nvme0n1 -- mklabel gpt mkpart 'bios' 1MB 2MB set 1 bios_grub on mkpart 'boot' 2MB 1000MB mkpart 'root' 1000MB '100%'
+parted --script --align optimal /dev/nvme1n1 -- mklabel gpt mkpart 'bios' 1MB 2MB set 1 bios_grub on mkpart 'boot' 2MB 1000MB mkpart 'root' 1000MB '100%'
+
+# Reload partitions
+partprobe
+
+# Wait for all devices to exist
+udevadm settle --timeout=5 --exit-if-exists=/dev/nvme0n1p1
+udevadm settle --timeout=5 --exit-if-exists=/dev/nvme0n1p2
+udevadm settle --timeout=5 --exit-if-exists=/dev/nvme0n1p3
+
+udevadm settle --timeout=5 --exit-if-exists=/dev/nvme1n1p1
+udevadm settle --timeout=5 --exit-if-exists=/dev/nvme1n1p2
+udevadm settle --timeout=5 --exit-if-exists=/dev/nvme1n1p3
+
+# Wipe any previous RAID signatures
+mdadm --zero-superblock --force /dev/nvme0n1p2
+mdadm --zero-superblock --force /dev/nvme0n1p3
+mdadm --zero-superblock --force /dev/nvme1n1p2
+mdadm --zero-superblock --force /dev/nvme1n1p3
+
+# Create RAIDs
+# Note that during creating and boot-time assembly, mdadm cares about the
+# host name, and the existence and contents of `mdadm.conf`!
+# This also affects the names appearing in /dev/md/ being different
+# before and after reboot in general (but we take extra care here
+# to pass explicit names, and set HOMEHOST for the rebooting system further
+# down, so that the names appear the same).
+# Almost all details of this are explained in
+#   https://bugzilla.redhat.com/show_bug.cgi?id=606481#c14
+# and the followup comments by Doug Ledford.
+#mdadm --create --run --verbose /dev/md0 --level=1 --raid-devices=2 --homehost=lxc11 --name=root0 /dev/nvme0n1p2 /dev/nvme1n1p2
+mdadm --create --run --verbose /dev/md0 --level=1 --raid-devices=2 --homehost=$HOSTNAME --name=md0 /dev/nvme0n1p2 /dev/nvme1n1p2
+mdadm --create --run --verbose /dev/md1 --level=1 --raid-devices=2 --homehost=$HOSTNAME --name=md1 /dev/nvme0n1p3 /dev/nvme1n1p3
+
+# Assembling the RAID can result in auto-activation of previously-existing LVM
+# groups, preventing the RAID block device wiping below with
+# `Device or resource busy`. So disable all VGs first.
+vgchange -an
+
+# Wipe filesystem signatures that might be on the RAID from some
+# possibly existing older use of the disks (RAID creation does not do that).
+# See https://serverfault.com/questions/911370/why-does-mdadm-zero-superblock-preserve-file-system-information
+wipefs -a /dev/md0
+wipefs -a /dev/md1
+
+# Disable RAID recovery. We don't want this to slow down machine provisioning
+# in the rescue mode. It can run in normal operation after reboot.
+echo 0 > /proc/sys/dev/raid/speed_limit_max
+
+# LUKS
+echo "$LUKS_PASSWORD" | cryptsetup luksFormat --type luks2 -h sha512 /dev/md1
+echo "$LUKS_PASSWORD" | cryptsetup luksOpen /dev/md1 luks0
+
+# LVM
+# PVs
+pvcreate /dev/mapper/luks0
+#pvcreate /dev/md0
+
+# VGs
+#vgcreate vg0 /dev/md0
+vgcreate vg0 /dev/mapper/luks0
+
+# LVs (--yes to automatically wipe detected file system signatures)
+lvcreate --yes --extents 95%FREE -n root vg0  # 5% slack space
+
+# Filesystems (-F to not ask on preexisting FS)
+mkfs.ext4 -F -L boot /dev/md0
+mkfs.ext4 -F -L root /dev/vg0/root
+
+# Creating file systems changes their UUIDs.
+# Trigger udev so that the entries in /dev/disk/by-uuid get refreshed.
+# `nixos-generate-config` depends on those being up-to-date.
+# See https://github.com/NixOS/nixpkgs/issues/62444
+udevadm trigger
+
+# Wait for FS labels to appear
+udevadm settle --timeout=5 --exit-if-exists=/dev/disk/by-label/boot
+udevadm settle --timeout=5 --exit-if-exists=/dev/disk/by-label/root
+
+# NixOS pre-installation mounts
+
+# Mount target root partition
+mount /dev/disk/by-label/root /mnt
+mkdir /mnt/boot
+mount /dev/disk/by-label/boot /mnt/boot
+
+# Installing nix
+
+# Installing nix requires `sudo`; the Hetzner rescue mode doesn't have it.
+apt-get install -y sudo
+
+# Allow installing nix as root, see
+#   https://github.com/NixOS/nix/issues/936#issuecomment-475795730
+mkdir -p /etc/nix
+echo "build-users-group =" > /etc/nix/nix.conf
+
+curl -L https://nixos.org/nix/install | sh
+set +u +x # sourcing this may refer to unset variables that we have no control over
+. $HOME/.nix-profile/etc/profile.d/nix.sh
+set -u -x
+
+# FIXME Keep in sync with `system.stateVersion` set below!
+nix-channel --add https://nixos.org/channels/nixos-$NIXOS_VERSION nixpkgs
+nix-channel --update
+
+# Getting NixOS installation tools
+nix-env -iE "_: with import <nixpkgs/nixos> { configuration = {}; }; with config.system.build; [ nixos-generate-config nixos-install nixos-enter manual.manpages ]"
+
+nixos-generate-config --root /mnt
+
+# Find the name of the network interface that connects us to the Internet.
+# Inspired by https://unix.stackexchange.com/questions/14961/how-to-find-out-which-interface-am-i-using-for-connecting-to-the-internet/302613#302613
+RESCUE_INTERFACE=$(ip route get 8.8.8.8 | grep -Po '(?<=dev )(\S+)')
+
+# Find what its name will be under NixOS, which uses stable interface names.
+# See https://major.io/2015/08/21/understanding-systemds-predictable-network-device-names/#comment-545626
+# NICs for most Hetzner servers are not onboard, which is why we use
+# `ID_NET_NAME_PATH`, otherwise it would be `ID_NET_NAME_ONBOARD`.
+INTERFACE_DEVICE_PATH=$(udevadm info -e | grep -Po "(?<=^P: )(.*${RESCUE_INTERFACE})")
+UDEVADM_PROPERTIES_FOR_INTERFACE=$(udevadm info --query=property "--path=$INTERFACE_DEVICE_PATH")
+NIXOS_INTERFACE=$(echo "$UDEVADM_PROPERTIES_FOR_INTERFACE" | grep -o -E 'ID_NET_NAME_PATH=\w+' | cut -d= -f2)
+echo "Determined NIXOS_INTERFACE as '$NIXOS_INTERFACE'"
+# DOESNT WORK on PX server there it was eno1
+
+IP_V4=$(ip route get 8.8.8.8 | grep -Po '(?<=src )(\S+)')
+echo "Determined IP_V4 as $IP_V4"
+
+# Determine Internet IPv6 by checking route, and using ::1
+# (because Hetzner rescue mode uses ::2 by default).
+# The `ip -6 route get` output on Hetzner looks like:
+#   # ip -6 route get 2001:4860:4860:0:0:0:0:8888
+#   2001:4860:4860::8888 via fe80::1 dev eth0 src 2a01:4f8:151:62aa::2 metric 1024  pref medium
+IP_V6="$(ip route get 2001:4860:4860:0:0:0:0:8888 | head -1 | cut -d' ' -f7 | cut -d: -f1-4)::1"
+echo "Determined IP_V6 as $IP_V6"
+
+
+# From https://stackoverflow.com/questions/1204629/how-do-i-get-the-default-gateway-in-linux-given-the-destination/15973156#15973156
+read _ _ DEFAULT_GATEWAY _ < <(ip route list match 0/0); echo "$DEFAULT_GATEWAY"
+echo "Determined DEFAULT_GATEWAY as $DEFAULT_GATEWAY"
+
+# Generate `configuration.nix`. Note that we splice in shell variables.
+cat > /mnt/etc/nixos/configuration.nix <<EOF
+{ config, pkgs, lib, ... }:
+{
+  imports =
+    [ # Include the results of the hardware scan.
+      ./hardware-configuration.nix
+    ];
+  # Use GRUB2 as the boot loader.
+  # We don't use systemd-boot because Hetzner uses BIOS legacy boot.
+  boot.loader.systemd-boot.enable = false;
+  
+  boot.loader.grub = {
+    enable = true;
+    efiSupport = false;
+    version = 2;
+    enableCryptodisk = true;
+    device = "nodev";
+    devices = [ "/dev/nvme0n1" "/dev/nvme1n1"];
+  };
+  networking.hostName = "$HOSTNAME";
+  boot.initrd.kernelModules = [ "dm-snapshot" ];
+  boot.initrd.availableKernelModules = [ "cryptd" "aesni_intel" "igb" ];#"FIXME Your network driver" ];
+  boot.initrd.network = {
+    enable = true;
+    ssh = {
+      enable = true;
+      
+      # ssh port during boot for luks decryption
+      port = 2222;
+      authorizedKeys = config.users.users.root.openssh.authorizedKeys.keys;
+      hostKeys = [ "/initrd_ssh_host_ecdsa_key" ];
+    };
+    postCommands = ''
+      echo 'cryptsetup-askpass' >> /root/.profile
+    '';
+  };
+  boot.kernelParams = [ "ip=$IP_V4::$DEFAULT_GATEWAY:255.255.255.192:$HOSTNAME:$NIXOS_INTERFACE:off:8.8.8.8:8.8.4.4:" ];
+  boot.loader.supportsInitrdSecrets = true;
+  boot.initrd.luks.forceLuksSupportInInitrd = true;
+  boot.initrd.luks.devices = {
+    root = {
+      preLVM = true;
+      device = "/dev/md1";
+      allowDiscards = true;
+    };
+  };
+                  
+  boot.initrd.secrets = {
+    "/initrd_ssh_host_ecdsa_key" = "/initrd_ssh_host_ecdsa_key";
+  };
+  # The mdadm RAID1s were created with 'mdadm --create ... --homehost=hetzner',
+  # but the hostname for each machine may be different, and mdadm's HOMEHOST
+  # setting defaults to '<system>' (using the system hostname).
+  # This results mdadm considering such disks as "foreign" as opposed to
+  # "local", and showing them as e.g. '/dev/md/hetzner:root0'
+  # instead of '/dev/md/root0'.
+  # This is mdadm's protection against accidentally putting a RAID disk
+  # into the wrong machine and corrupting data by accidental sync, see
+  # https://bugzilla.redhat.com/show_bug.cgi?id=606481#c14 and onward.
+  # We do not worry about plugging disks into the wrong machine because
+  # we will never exchange disks between machines, so we tell mdadm to
+  # ignore the homehost entirely.
+  environment.etc."mdadm.conf".text = ''
+    HOMEHOST <ignore>
+  '';
+  # The RAIDs are assembled in stage1, so we need to make the config
+  # available there.
+  boot.initrd.services.swraid.mdadmConf = config.environment.etc."mdadm.conf".text;
+  # Network (Hetzner uses static IP assignments, and we don't use DHCP here)
+  networking.useDHCP = false;
+  networking.interfaces."$NIXOS_INTERFACE".ipv4.addresses = [
+    {
+      address = "$IP_V4";
+      
+      # FIXME Lookup for right netmask prefix length within rescue system
+      prefixLength = 26;
+    }
+  ];
+  networking.interfaces."$NIXOS_INTERFACE".ipv6.addresses = [
+    {
+      address = "$IP_V6";
+      prefixLength = 64;
+    }
+  ];
+  networking.defaultGateway = "$DEFAULT_GATEWAY";
+  networking.defaultGateway6 = { address = "fe80::1"; interface = "$NIXOS_INTERFACE"; };
+  networking.nameservers = [ "8.8.8.8" "8.8.4.4" ];
+  # Initial empty root password for easy login:
+  users.users.root.initialHashedPassword = "";
+  services.openssh.permitRootLogin = "prohibit-password";
+  users.users.root.openssh.authorizedKeys.keys = [
+    # FIXME Replace this by your SSH pubkey!
+    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPZi43zHEsoWaQomLGaftPE5k0RqVrZyiTtGqZlpWsew"
+  ];
+  services.openssh.enable = true;
+  
+  # FIXME
+  # This value determines the NixOS release with which your system is to be
+  # compatible, in order to avoid breaking some software such as database
+  # servers. You should change this only after NixOS release notes say you
+  # should.
+  system.stateVersion = "$NIXOS_VERSION"; # Did you read the comment?
+}
+EOF
+
+ssh-keygen -t ecdsa -N "" -f initrd_ssh_host_ecdsa_key;
+cp initrd_ssh_host_ecdsa_key /mnt/initrd_ssh_host_ecdsa_key;
+
+# Install NixOS
+PATH="$PATH" `which nixos-install` --no-root-passwd --root /mnt --max-jobs 40
+
+umount /mnt/boot
+umount /mnt
+
+echo "DONE"
diff --git a/nix/hosts/corrino/ports.nix b/nix/hosts/corrino/ports.nix
index 6be514d..2d7ba06 100644
--- a/nix/hosts/corrino/ports.nix
+++ b/nix/hosts/corrino/ports.nix
@@ -2,15 +2,23 @@
 	emile.ports = {
 		stream_rtmp = 1935;
 		initrd_ssh = 2222;
-		photo = 2342;
+		photo = {
+			photoprism = 2342;
+			immich = 2343;
+		};
 		git = 3000;
 		hydra = 3001;
 		grafana = 3002;
 		md = 3003;
 		gotosocial = 3004;
+		irc = {
+			 clear = 6667;
+			 ssl = 6697;
+		};
 		stream = 8080;
 		netbox = 8001;
 		restic = 8002;
+		r2wars-web = 8089;
 		ctf = 8338;
 		magic-hash = 8339;
 		tickets = 8349;
diff --git a/nix/hosts/corrino/www/ctf.emile.space.nix b/nix/hosts/corrino/www/ctf.emile.space.nix
index 1d8b382..6eee75f 100644
--- a/nix/hosts/corrino/www/ctf.emile.space.nix
+++ b/nix/hosts/corrino/www/ctf.emile.space.nix
@@ -13,7 +13,7 @@
   };
 
   virtualisation.oci-containers = {
-    backend = "docker";
+    # backend = "docker";
     containers = {
       "ctfd" = {
         image = "ctfd/ctfd";
diff --git a/nix/hosts/corrino/www/emile.space.nix b/nix/hosts/corrino/www/emile.space.nix
index 9cca880..c39ca31 100644
--- a/nix/hosts/corrino/www/emile.space.nix
+++ b/nix/hosts/corrino/www/emile.space.nix
@@ -13,6 +13,10 @@
         ''; 
       };
 
+      "/@hanemile".extraConfig = ''
+        return 301 https://social.emile.space/@hanemile;
+      '';
+
       #"/.well-known" = {
       #  root = "/var/www/emile.space";
       #  extraConfig = ''
diff --git a/nix/hosts/corrino/www/git/cgit.nix b/nix/hosts/corrino/www/git/cgit.nix
index e6983e5..1e63dfc 100644
--- a/nix/hosts/corrino/www/git/cgit.nix
+++ b/nix/hosts/corrino/www/git/cgit.nix
@@ -72,6 +72,12 @@
 					section = "Radare2";
 					owner = "emile";
 				};
+				r2wars-web = {
+					desc = "The software behind https://r2wa.rs";	
+					path = "/var/lib/git/repositories/r2wars-web.git";
+					section = "Radare2";
+					owner = "emile";
+				};
 				r2wars-rs = {
 					desc = "A rust implementation of radare2";	
 					path = "/var/lib/git/repositories/r2wars-rs.git";
@@ -538,7 +544,7 @@
 
 		# exposing stuff
 		gitDaemon = {
-			enable = false;
+			enable = true;
 
 			user = "git";
 			group = "git";
diff --git a/nix/hosts/corrino/www/md.emile.space.nix b/nix/hosts/corrino/www/md.emile.space.nix
index 7ad7a94..52b4a53 100644
--- a/nix/hosts/corrino/www/md.emile.space.nix
+++ b/nix/hosts/corrino/www/md.emile.space.nix
@@ -4,17 +4,9 @@
 	services.nginx.virtualHosts."md.emile.space" = {
 		forceSSL = true;
 		enableACME = true;
-
-		# TODO(emile): figure out why this doesn't work when enabled, has to do with authelia
-		# extraConfig = authelia-location;
-
 		locations = {
 			"/" = {
-				# proxyPass = "http://127.0.0.1:3003";
         proxyPass = "http://127.0.0.1:${toString config.services.hedgedoc.settings.port}";
-
-				# TODO(emile): figure out why this doesn't work when enabled, has to do with authelia
-				# extraConfig = authelia-authrequest;
 			};
 		};
 	};
diff --git a/nix/hosts/corrino/www/photo/default.nix b/nix/hosts/corrino/www/photo/default.nix
new file mode 100644
index 0000000..dd555e4
--- /dev/null
+++ b/nix/hosts/corrino/www/photo/default.nix
@@ -0,0 +1,8 @@
+{ ... }:
+
+{
+	imports = [
+		./photoprism.nix
+		# ./immich.nix
+	];
+}
diff --git a/nix/hosts/corrino/www/photo.emile.space.nix b/nix/hosts/corrino/www/photo/photoprism.nix
index 9c1e97a..c1cbbf8 100644
--- a/nix/hosts/corrino/www/photo.emile.space.nix
+++ b/nix/hosts/corrino/www/photo/photoprism.nix
@@ -17,7 +17,7 @@
     enable = true;
 
     address = "127.0.0.1";
-    port = config.emile.ports.photo;
+    port = config.emile.ports.photo.photoprism;
 
     passwordFile = config.age.secrets.photoprism_password.path;
 
diff --git a/nix/hosts/corrino/www/r2wa.rs.nix b/nix/hosts/corrino/www/r2wa.rs.nix
index f7a0a7e..7da11e1 100644
--- a/nix/hosts/corrino/www/r2wa.rs.nix
+++ b/nix/hosts/corrino/www/r2wa.rs.nix
@@ -1,16 +1,37 @@
-{ ... }:
+{ config, pkgs, ... }:
 
 {
   services.nginx.virtualHosts."r2wa.rs" = {
     forceSSL = true;
     enableACME = true;
 
-    # kTLS = true;
-
     locations = {
       "/" = {
-				return = "301 http://emile.space/blog/2020/r2wars/";
+        proxyPass = "http://127.0.0.1:${toString config.emile.ports.r2wars-web}";
       };
 		};
 	};
+
+  environment.systemPackages = with pkgs; [ radare2 ];
+
+  # deploy:
+  # - push code
+  # - build in order to get the new hash (nix build .#r2wars-web)
+  # - update hash in the package (//nix/pkgs/r2wars-web/default.nix)
+  # - deploy
+
+  services.emile.r2wars-web = {
+    enable = true;
+
+    host = "127.0.0.1";
+    port = config.emile.ports.r2wars-web;
+
+    # TODO(emile): change these when going live
+    sessionKey = "insecuretmpkey";
+    salt = "insecuresalt";
+
+    logfilePath = "/var/lib/r2wars/r2wars.log";
+    databasePath = "/var/lib/r2wars/main.db";
+    sessiondbPath = "/var/lib/r2wars/session.db";
+  };
 }
diff --git a/nix/hosts/corrino/www/social.emile.space.nix b/nix/hosts/corrino/www/social.emile.space.nix
index 62e1933..9f9a6f1 100644
--- a/nix/hosts/corrino/www/social.emile.space.nix
+++ b/nix/hosts/corrino/www/social.emile.space.nix
@@ -91,4 +91,11 @@
 		};
 		environmentFile = config.age.secrets.gotosocial_environment_file.path;
 	};
+
+  systemd.services.gotosocial = {
+    after = [ "authelia-main.service" ];
+    serviceConfig = {
+      Restart = "on-failure";
+    };
+  };
 }
diff --git a/nix/hosts/corrino/www/sso.emile.space.nix b/nix/hosts/corrino/www/sso.emile.space.nix
index 0f77197..27988fa 100644
--- a/nix/hosts/corrino/www/sso.emile.space.nix
+++ b/nix/hosts/corrino/www/sso.emile.space.nix
@@ -192,7 +192,7 @@ in {
 				};
 
 				totp = {
-				  disable = false;
+				  disable = true;
 				  issuer = "sso.emile.space";
 				  algorithm = "sha1";
 				  digits = 6;
diff --git a/nix/hosts/gamont/README.md b/nix/hosts/gamont/README.md
new file mode 100644
index 0000000..dc77dc0
--- /dev/null
+++ b/nix/hosts/gamont/README.md
@@ -0,0 +1,3 @@
+# gamont
+
+The WIFI Cable™
diff --git a/nix/hosts/gamont/configuration.nix b/nix/hosts/gamont/configuration.nix
new file mode 100644
index 0000000..4ea1678
--- /dev/null
+++ b/nix/hosts/gamont/configuration.nix
@@ -0,0 +1,127 @@
+{ config, pkgs, lib, ... }:
+
+let
+  user = "nixos";
+  password = "";
+  SSID = "%p%p%p";
+  SSIDpassword = "";
+  interface = "wlan0";
+  hostname = "gamont";
+  keys = [
+    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPZi43zHEsoWaQomLGaftPE5k0RqVrZyiTtGqZlpWsew emile@caladan"
+    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEzLZ56SEgwZZ0OusTdSDDhpMlxSg1zPNdRLuxKOfrR5 emile@chusuk"
+  ];
+in {
+
+  boot = {
+    kernelPackages = pkgs.linuxKernel.packages.linux_rpi4;
+    kernel.sysctl = {
+      "net.ipv4.conf.all.forwarding" = true;
+    };
+    initrd.availableKernelModules = [ "xhci_pci" "usbhid" "usb_storage" ];
+    loader = {
+      grub.enable = false;
+      generic-extlinux-compatible.enable = true;
+    };
+  };
+
+  fileSystems = {
+    "/" = {
+      device = "/dev/disk/by-label/NIXOS_SD";
+      fsType = "ext4";
+      options = [ "noatime" ];
+    };
+  };
+
+  networking = {
+    hostName = hostname;
+    wireless = {
+      enable = true;
+      networks."${SSID}".psk = SSIDpassword;
+      interfaces = [ interface ];
+    };
+
+    firewall = {
+      allowedTCPPorts = [ 53 ];
+      allowedUDPPorts = [ 53 ];
+    };
+
+    interfaces.end0 = {
+      ipv4.addresses = [ { address = "192.168.1.1"; prefixLength = 24; } ];
+    };
+
+    nftables = {
+      enable = true;
+      ruleset = ''
+        table inet filter {
+          chain input {
+            type filter hook input priority 0;            
+            accept
+          }
+
+          chain output {
+            type filter hook output priority 0;
+            accept
+          }
+          
+          chain forward {
+            type filter hook forward priority 0;
+            accept
+          }
+        }
+
+        table ip nat {
+        	chain postrouting {
+        		type nat hook postrouting priority srcnat; policy accept;
+        		masquerade
+        	}
+        }
+      '';
+    };
+  };
+
+  environment.systemPackages = with pkgs; [ 
+    helix
+    vim 
+    dnsmasq
+    tcpdump
+    curl
+    iptables nftables
+  ];
+
+  services = {
+    openssh.enable = true;
+    dnsmasq = {
+      enable = true;
+      settings = {
+        server = [
+          "8.8.8.8"
+          "8.8.4.4"
+        ];
+        dhcp-authoritative = true;
+        domain-needed = true;
+        dhcp-range = [ "192.168.1.10,192.168.1.254" ];
+
+        interface = [ "end0" ];
+
+      };
+    };
+  };
+
+  users = {
+    mutableUsers = false;
+    users."${user}" = {
+      isNormalUser = true;
+      password = password;
+      extraGroups = [ "wheel" ];
+      openssh.authorizedKeys.keys = keys;
+    };
+
+    users.root = {
+      openssh.authorizedKeys.keys = keys;
+    };
+  };
+
+  hardware.enableRedistributableFirmware = true;
+  system.stateVersion = "23.11";
+}
diff --git a/nix/hosts/lampadas/configuration.nix b/nix/hosts/lampadas/configuration.nix
index ae3af87..2453a88 100644
--- a/nix/hosts/lampadas/configuration.nix
+++ b/nix/hosts/lampadas/configuration.nix
@@ -54,6 +54,11 @@ in {
   networking = {
     hostName = "lampadas";
     firewall.enable = true;
+
+    # iperf
+    firewall.allowedTCPPorts = [ 5201 ];
+    firewall.allowedUDPPorts = [ 5201 ];
+
     nameservers = [ "8.8.8.8" "8.8.4.4" "1.1.1.1"];
   };
 
diff --git a/nix/hosts/lernaeus/configuration.nix b/nix/hosts/lernaeus/configuration.nix
index 9522b76..cfbc35f 100644
--- a/nix/hosts/lernaeus/configuration.nix
+++ b/nix/hosts/lernaeus/configuration.nix
@@ -44,6 +44,10 @@ in {
   networking = {
     hostName = "lernaeus";
     firewall.enable = true;
+
+    # iperf
+    firewall.allowedTCPPorts = [ 5201 ];
+    firewall.allowedUDPPorts = [ 5201 ];
   };
 
   time.timeZone = "Europe/Berlin";
diff --git a/nix/hosts/pi1/result b/nix/hosts/pi1/result
new file mode 120000
index 0000000..8da9c2f
--- /dev/null
+++ b/nix/hosts/pi1/result
@@ -0,0 +1 @@
+/nix/store/k4i56ilirmnfdg0izgpq40hwz45x2lmw-nixos-sd-image-23.05pre482756.12ba1a5f90b-armv6l-linux.img-armv6l-unknown-linux-gnueabihf
\ No newline at end of file
diff --git a/nix/hosts/pi2/default.nix b/nix/hosts/pi2/default.nix
new file mode 100644
index 0000000..939027f
--- /dev/null
+++ b/nix/hosts/pi2/default.nix
@@ -0,0 +1,47 @@
+# build the sd image for the pi using
+# ; nix-build '<nixpkgs/nixos>' -A config.system.build.sdImage -I nixos-config='./default.nix'
+
+# after booting
+# ; nix-channel --list
+# ; nix-channel --remove nixos
+# ; nix-channel --add https://channels.nixos.org/nixos-unstable nixos
+# ; nix-channel --update nixos
+# (this takes quite some time)
+# ; nixos-rebuild switch
+
+{ lib, pkgs, ... }:
+
+{
+  imports = [
+    <nixpkgs/nixos/modules/installer/sd-card/sd-image-raspberrypi.nix>
+    # <nixpkgs/nixos/modules/installer/sd-card/sd-image-armv7l-multiplatform.nix>
+    # <nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-installer.nix>
+
+    # For nixpkgs cache
+    # <nixpkgs/nixos/modules/installer/cd-dvd/channel.nix>
+  ];
+
+  users.users = {
+    emile = {
+      isNormalUser = true;
+      hashedPassword = "$y$j9T$gKt6Iovrn.SAkMxnTCqqV1$55.sKRrjWTbe7Z6Xi17G0e3G7GbAGc65YXtX9zD5AR3";
+      extraGroups = [ "wheel" ];
+    };
+  };
+
+  nixpkgs = {
+    # crossSystem = lib.systems.examples.raspberryPi;
+    crossSystem = lib.systems.examples.armv7l-hf-multiplatform;
+    # localSystem = { system = "x86_64-linux"; };
+    localSystem = { system = "aarch64-darwin"; };
+    overlays = [
+      (final: super: {
+        # Due to https://github.com/NixOS/nixpkgs/issues/154163#issuecomment-1350599022
+        makeModulesClosure = x:
+          super.makeModulesClosure (x // { allowMissing = true; });
+      })
+    ];
+  };
+  system.stateVersion = "24.05";
+}
+
diff --git a/nix/hosts/pi2/result b/nix/hosts/pi2/result
new file mode 120000
index 0000000..8da9c2f
--- /dev/null
+++ b/nix/hosts/pi2/result
@@ -0,0 +1 @@
+/nix/store/k4i56ilirmnfdg0izgpq40hwz45x2lmw-nixos-sd-image-23.05pre482756.12ba1a5f90b-armv6l-linux.img-armv6l-unknown-linux-gnueabihf
\ No newline at end of file
diff --git a/nix/modules/default.nix b/nix/modules/default.nix
index 767e466..6e6faae 100644
--- a/nix/modules/default.nix
+++ b/nix/modules/default.nix
@@ -3,5 +3,6 @@
 {
   imports = [
     ./ports
+    ./r2wars-web
   ];
 }
diff --git a/nix/modules/r2wars-web/default.nix b/nix/modules/r2wars-web/default.nix
new file mode 100644
index 0000000..7e37b26
--- /dev/null
+++ b/nix/modules/r2wars-web/default.nix
@@ -0,0 +1,73 @@
+{ config, lib, pkgs, ... }: 
+
+let
+  cfg = config.services.emile.r2wars-web;
+in with lib; {
+
+  options.services.emile.r2wars-web = {
+    enable = mkEnableOption "Enable r2wars-web";
+
+    # ip and port to listen on
+    host = mkOption {
+      type = types.str;
+      default = "127.0.0.1";
+      example = "0.0.0.0";
+      description = "The host the service listens on";
+    };
+
+    port = mkOption {
+      type = types.int;
+      default = 8080;
+      example = 8080;
+      description = "The port the service listens on";
+    };
+
+    # env vars with secrets to set
+    sessionKey = mkOption {
+      type = types.str;
+      default = "";
+      example = "abc1Itheich4aeQu9Ouz7ahcaiVoogh9";
+      description = "The sessionKey passed to the bin as an env var";
+    };
+
+    salt = mkOption {
+      type = types.str;
+      default = "";
+      example = "OhD0ki5aLieMoowah8Eemaim2beaf2Na";
+      description = "The salt passed to the bin as an env var";
+    };
+
+    # paths to files
+    logfilePath = mkOption {
+      type = types.str;
+      default = "/var/lib/r2wars.log";
+      example = "/var/lib/r2wars.log";
+      description = "The path to the logfile";
+    };
+
+    databasePath = mkOption {
+      type = types.str;
+      default = "/var/lib/main.db";
+      example = "/var/lib/main.db";
+      description = "The path to the main database";
+    };
+
+    sessiondbPath = mkOption {
+      type = types.str;
+      default = "/var/lib/sessions.db";
+      example = "/var/lib/sessions.db";
+      description = "The path to the sessions database";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.r2wars-web = {
+      wantedBy = [ "multi-user.target" ];
+      environment = {
+        SESSION_KEY = cfg.sessionKey;
+        SALT = cfg.salt;
+      };
+      serviceConfig.ExecStart = "${pkgs.r2wars-web}/bin/r2wars-web -h ${cfg.host} -p ${toString cfg.port} --logfilepath ${cfg.logfilePath} --databasepath ${cfg.databasePath} --sessiondbpath ${cfg.sessiondbPath} --templates ${pkgs.r2wars-web}/templates";
+    };
+  };
+}
diff --git a/nix/pkgs/overlay.nix b/nix/pkgs/overlay.nix
index 11531f2..d9e3999 100644
--- a/nix/pkgs/overlay.nix
+++ b/nix/pkgs/overlay.nix
@@ -1,4 +1,5 @@
 final: prev: {
-	vokobe = final.callPackage ../../web/vokobe { inherit (final) naersk; };
+	vokobe = final.callPackage ./vokobe { inherit (final) naersk; };
+	r2wars-web = final.callPackage ./r2wars-web { };
 }
 
diff --git a/nix/pkgs/r2wars-web/default.nix b/nix/pkgs/r2wars-web/default.nix
new file mode 100644
index 0000000..2e46665
--- /dev/null
+++ b/nix/pkgs/r2wars-web/default.nix
@@ -0,0 +1,31 @@
+{ pkgs, lib, fetchgit }:
+
+pkgs.buildGoModule rec {
+  name = "r2wars-web-${version}";
+  version = "0.1.0";
+
+  src = fetchgit {
+    url = "git://git.emile.space/r2wars-web.git";
+    hash = "sha256-n+La+C1diNCkxlGIxLu9nGQ//tJ5eDUjvXvdGP4Mdnk=";
+  };
+
+	vendorHash = null;
+  CGO_ENABLED=0;
+  subPackages = [ "src" ];
+
+  postInstall = ''
+    mkdir -p $out
+    cp -r templates $out
+
+    mv $out/bin/src $out/bin/r2wars-web
+  '';
+  
+  doCheck = false;
+
+  meta = {
+    description = "A golang implementation of r2wars";
+    homepage = "https://r2wa.rs";
+    license = lib.licenses.mit;
+    maintainers = with lib.maintainers; [ hanemile ];
+  };
+}
diff --git a/nix/pkgs/vokobe/.gitignore b/nix/pkgs/vokobe/.gitignore
new file mode 100644
index 0000000..b774d54
--- /dev/null
+++ b/nix/pkgs/vokobe/.gitignore
@@ -0,0 +1,7 @@
+# the cargo build artefacts
+debug/
+target/
+
+# the nix result symlink
+result
+
diff --git a/nix/pkgs/vokobe/Cargo.lock b/nix/pkgs/vokobe/Cargo.lock
new file mode 100644
index 0000000..cdf64e1
--- /dev/null
+++ b/nix/pkgs/vokobe/Cargo.lock
@@ -0,0 +1,270 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "aho-corasick"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "ansi_term"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "atty"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
+dependencies = [
+ "hermit-abi",
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "clap"
+version = "2.34.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
+dependencies = [
+ "ansi_term",
+ "atty",
+ "bitflags",
+ "strsim",
+ "textwrap",
+ "unicode-width",
+ "vec_map",
+]
+
+[[package]]
+name = "heck"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c"
+dependencies = [
+ "unicode-segmentation",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "lazy_static"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+
+[[package]]
+name = "libc"
+version = "0.2.119"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1bf2e165bb3457c8e098ea76f3e3bc9db55f87aa90d52d0e6be741470916aaa4"
+
+[[package]]
+name = "memchr"
+version = "2.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149"
+
+[[package]]
+name = "proc-macro-error"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
+dependencies = [
+ "proc-macro-error-attr",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "version_check",
+]
+
+[[package]]
+name = "proc-macro-error-attr"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "version_check",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.36"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029"
+dependencies = [
+ "unicode-xid",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "864d3e96a899863136fc6e99f3d7cae289dafe43bf2c5ac19b70df7210c0a145"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "regex"
+version = "1.10.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-automata",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f"
+
+[[package]]
+name = "strsim"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
+
+[[package]]
+name = "structopt"
+version = "0.3.26"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c6b5c64445ba8094a6ab0c3cd2ad323e07171012d9c98b0b15651daf1787a10"
+dependencies = [
+ "clap",
+ "lazy_static",
+ "structopt-derive",
+]
+
+[[package]]
+name = "structopt-derive"
+version = "0.4.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0"
+dependencies = [
+ "heck",
+ "proc-macro-error",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "syn"
+version = "1.0.86"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a65b3f4ffa0092e9887669db0eae07941f023991ab58ea44da8fe8e2d511c6b"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-xid",
+]
+
+[[package]]
+name = "textwrap"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
+dependencies = [
+ "unicode-width",
+]
+
+[[package]]
+name = "unicode-segmentation"
+version = "1.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7e8820f5d777f6224dc4be3632222971ac30164d4a258d595640799554ebfd99"
+
+[[package]]
+name = "unicode-width"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"
+
+[[package]]
+name = "unicode-xid"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
+
+[[package]]
+name = "vec_map"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
+
+[[package]]
+name = "version_check"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
+
+[[package]]
+name = "vokobe"
+version = "0.1.3"
+dependencies = [
+ "regex",
+ "structopt",
+]
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
diff --git a/nix/pkgs/vokobe/Cargo.toml b/nix/pkgs/vokobe/Cargo.toml
new file mode 100644
index 0000000..9c01d4e
--- /dev/null
+++ b/nix/pkgs/vokobe/Cargo.toml
@@ -0,0 +1,10 @@
+[package]
+name = "vokobe"
+version = "0.1.3"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+regex = "1.10.3"
+structopt = "0.3"
diff --git a/nix/pkgs/vokobe/LICENSE b/nix/pkgs/vokobe/LICENSE
new file mode 100644
index 0000000..cb5d6ff
--- /dev/null
+++ b/nix/pkgs/vokobe/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 Emile Hansmaennel
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/nix/pkgs/vokobe/README.md b/nix/pkgs/vokobe/README.md
new file mode 100644
index 0000000..c2c54a9
--- /dev/null
+++ b/nix/pkgs/vokobe/README.md
@@ -0,0 +1,101 @@
+# Vokobe
+
+A minimal static site generator tailored to my needs.
+
+CI: [https://hydra.emile.space/project/vokobe](https://hydra.emile.space/project/vokobe)
+
+## Build
+
+```bash
+; cargo build --release
+```
+    
+## Usage/Examples
+
+```bash
+; ./target/release/vokobe --help
+vokobe 0.1.0
+A static site generator
+
+USAGE:
+    vokobe [FLAGS] <in-path> <out-path> <site-name>
+
+FLAGS:
+    -a, --analytics    Activate sending analytics to stats.emile.space
+    -h, --help         Prints help information
+    -V, --version      Prints version information
+
+ARGS:
+    <in-path>      Input path
+    <out-path>     Output path
+    <site-name>    Site name (e.g. emile.space)
+```
+
+
+## Deployment
+
+The following subsections contain some examples of small shell scripts that might be useful for deployment.
+
+### build.sh
+
+Remove the output dir, build it from scratch and update the perms.
+
+I'm actually considering rebuilding vokobe with incremental builds in mind, as it can take a bit to create some really large projects.
+
+```bash
+rm -rf out/
+vokobe -a ./in ./out emile.space
+chmod -R +r out/
+```
+
+### sync.sh
+
+Synchronize the generated output to the remote host for hosting it.
+
+```bash
+rsync -avz --delete <out-path>/* <user>@<host>:<path>
+```
+
+### publish.sh
+
+Build and synchronize.
+
+```bash
+./build.sh
+./sync.sh
+```
+
+### host.sh
+
+Host the local version
+
+```bash
+python3 -m http.server 8081 -d <outpath>/ -b 0.0.0.0
+```
+
+### watchbuild.sh
+
+rebuild on changes
+
+```bash
+#! /usr/bin/env nix-shell
+#! nix-shell -i bash -p fd entr
+
+while sleep 0.5; do
+  fd . in | entr -d ./build.sh
+done
+```
+
+### local.sh
+
+Run a script that rebuilds on changes and another that hosts the output.
+
+```bash
+sh ./watchbuild.sh &
+sh ./host.sh
+```
+
+
+## Contributing
+
+Send patches!
diff --git a/nix/pkgs/vokobe/default.nix b/nix/pkgs/vokobe/default.nix
new file mode 100644
index 0000000..7257962
--- /dev/null
+++ b/nix/pkgs/vokobe/default.nix
@@ -0,0 +1,16 @@
+{ pkgs, naersk, ... }:
+
+let
+	naersk' = pkgs.callPackage naersk {};
+in naersk'.buildPackage {
+	src = ./.;
+
+	meta = with pkgs.lib; {
+		description = "A minimal static site generator tailored to my needs.";
+		homepage    = "https://git.emile.space/hanemile/vokobe";
+		license     = licenses.mit;
+		platforms   = platforms.all;
+		maintainers = with maintainers; [ hanemile ];
+	};
+}
+
diff --git a/nix/pkgs/vokobe/flaaaaake.nix b/nix/pkgs/vokobe/flaaaaake.nix
new file mode 100644
index 0000000..7cf2f03
--- /dev/null
+++ b/nix/pkgs/vokobe/flaaaaake.nix
@@ -0,0 +1,44 @@
+{
+  inputs = {
+    flake-utils.url = "github:numtide/flake-utils";
+    naersk.url = "github:nix-community/naersk";
+    nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
+  };
+
+  outputs = { self, flake-utils, naersk, nixpkgs }:
+    let
+      pkgs = (import nixpkgs) {
+        system = "x86_64-linux";
+      };
+
+      naersk' = pkgs.callPackage naersk {};
+      
+    in rec {
+      packages."x86_64-linux".vokobe = naersk'.buildPackage {
+        src = ./.;
+
+        meta = with pkgs.lib; {
+          description = "A minimal static site generator tailored to my needs.";
+          homepage    = "https://git.emile.space/hanemile/vokobe";
+          license     = licenses.mit;
+          platforms   = platforms.all;
+          maintainers = with maintainers; [
+            hanemile
+          ];
+        };
+      };
+    
+      # For `nix build` & `nix run`:
+      defaultPackage = packages."x86_64-linux".vokobe;
+
+      # For `nix develop` (optional, can be skipped):
+      devShell = pkgs.mkShell {
+        nativeBuildInputs = with pkgs; [ rustc cargo ];
+      };
+
+      # hydraJobs."<attr>"."<system>" = derivation;
+      hydraJobs = {
+        build."x86_64-linux" = packages."x86_64-linux".vokobe;
+      };
+    };
+}
\ No newline at end of file
diff --git a/nix/pkgs/vokobe/src/main.rs b/nix/pkgs/vokobe/src/main.rs
new file mode 100644
index 0000000..ab26457
--- /dev/null
+++ b/nix/pkgs/vokobe/src/main.rs
@@ -0,0 +1,922 @@
+/*
+pull the std into scope and inline it so that we get documentation for it,
+even when running offline
+*/
+#[doc(inline)]
+pub use std;
+
+use std::path::{Path, PathBuf};
+use std::io::{self, Read, Write, BufRead, BufReader};
+use std::fs::{self, File};
+use std::time;
+use std::collections::HashMap;
+
+use structopt::StructOpt;
+use regex::Regex;
+
+#[derive(Debug, StructOpt)]
+#[structopt(name = "vokobe", about = "A static site generator")]
+struct Opt {
+    /// Input path 
+    #[structopt(parse(from_os_str))]
+    input_path: PathBuf,
+
+    /// Output path
+    #[structopt(parse(from_os_str))]
+    output_path: PathBuf,
+
+    /// Site name (e.g. emile.space)
+    site_name: String,
+
+    /// Activate sending analytics to stats.emile.space
+    // -a and --analytics will be generated
+    // analytics are sent to stats.emile.space
+    #[structopt(short, long)]
+    analytics: bool,
+}
+
+fn main() -> std::io::Result<()> {
+
+    let mut internal_links: HashMap<String, Vec<String>> = HashMap::new();
+
+    let opt = Opt::from_args();
+
+    let in_path = opt.input_path;
+    let output_path = opt.output_path;
+
+    // read the style
+    let style_path = Path::new(&in_path).join("style.css");
+    let mut style_file = File::open(style_path)
+        .expect("could not open style file");
+    let mut style = String::new();
+    style_file.read_to_string(&mut style)
+        .expect("could not read style file to string");
+
+    // read all dirs in the input path
+    let pathes = recursive_read_dir(&in_path, false)?;
+
+    // pass 1: store the backlinks
+
+    for path in &pathes {
+        if path.ends_with("README.md") {
+            // open the file and read it as a string
+            let mut readme_file = File::open(path)?;
+            let mut readme = String::new();
+            readme_file.read_to_string(&mut readme)?;
+
+            let internal_links_in_file
+                = parse_internal_links(readme.as_str());
+
+            for link in internal_links_in_file {
+
+                internal_links.entry(link).or_insert_with(Vec::new).push(path.to_string_lossy().into_owned())
+            }
+        }
+    }
+
+
+    // for each markdown_file in markdown_files {
+    //     let internal_links_in_file = parse_internal_links(markdown_file);
+    //     internal_links.insert(markdown_file, internal_links_in_file);
+    // }
+
+    // pass 2: create the html
+
+    println!("Got {} files", pathes.len());
+    let mut readme_counter = 0;
+
+    for path in pathes {
+        let stripped_path = path.strip_prefix(&in_path)
+            .expect(format!(
+                "could not strip the in_path prefix: {:?}", in_path).as_str());
+
+        // copy images and other files to the output folder
+        if path.is_file() {
+
+            // define the source and destination
+            let src = Path::new(&in_path).join(stripped_path);
+            let dst = Path::new(&output_path).join(stripped_path);
+
+            // define the destination folder (the dst path without the file) and create it
+            let mut dst_folder = dst.clone();
+            dst_folder.pop(); // remove the file itself from the path
+            fs::create_dir_all(dst_folder)?;
+
+            // copy the file to the destination
+            std::fs::copy(src, dst.as_path())?;
+        }
+
+        if stripped_path.ends_with("README.md") {
+            readme_counter += 1;
+
+            // define the "raw" path (no infile prefix, no file)
+            let mut ancestors = stripped_path.ancestors();
+            ancestors.next();
+
+            let raw_path = ancestors.next()
+                .expect("could not extract next ancestor");
+
+            // out + rawpath
+            let index_path = output_path.join(raw_path);
+
+            // (out + rawpath) + "index.html"
+            let index_file = index_path.join("index.html");
+
+            // - create the dir for the index.html as well as the index.html
+            // itself
+            fs::create_dir_all(index_path)?;
+            let mut file = File::create(&index_file)?;
+
+            // this is the main block calling all other smaller functions. The
+            // whole output is composed here
+            write_header(&mut file, &opt.site_name, &style)?;
+            write_body_start(&mut file, &opt.site_name)?;
+            write_nav(&mut file, in_path.as_path(), raw_path, opt.analytics)?;
+            write_same_level(&mut file, in_path.as_path(), raw_path)?;
+            write_readme_content(&mut file, in_path.as_path(), raw_path)?;
+            write_footer(&mut file, raw_path, &internal_links)?;
+
+            file.write_all("".as_bytes())?;
+        }
+    }
+
+    println!("Got {readme_counter} README.md files");
+
+    Ok(())
+}
+
+fn parse_internal_links(markdown_file: &str) -> Vec<String> {
+    // Define a regular expression to match markdown-style links
+    let link_regex = Regex::new(r"\[([^\]]+)\]\(([^)]+)\)").unwrap();
+
+    // Initialize a vector to store internal links found in the markdown file
+    let mut internal_links = Vec::new();
+
+    // Iterate over each match of the regular expression in the markdown content
+    for capture in link_regex.captures_iter(&markdown_file) {
+        // Extract the link text and URL from the capture groups
+        // let link_text = &capture[1];
+        let mut link_url = &capture[2];
+
+        // Check if the link is an internal link (e.g., relative URL)
+        // You can customize this condition based on your site's URL structure
+        if link_url.starts_with('/') || link_url.starts_with("../") {
+            if link_url.ends_with('/') {
+                link_url = link_url.trim_end_matches('/');
+            }
+            internal_links.push(link_url.to_string());
+        }
+    }
+
+    internal_links
+}
+
+/// Write the html header including the style file
+/// TODO: Don't add the style file into each compiled html output, as the
+/// style can be included allowing the user to cache the style file in their
+/// browser.
+fn write_header(file: &mut File, site_name: &String, style: &String) -> std::io::Result<()>{
+
+    // write the header including the style file
+    file.write_all(format!(r#"<!DOCTYPE html>
+<html lang="en">
+<head>
+  <meta charset="UTF-8">
+  <meta http-equiv="X-UA-Compatible" content="IE=edge">
+  <meta name="viewport" content="width=device-width, initial-scale=1.0">
+  <title>{}</title>
+
+  <style>
+  {}
+  </style>
+</head>
+    "#, site_name, style).as_bytes())?;
+
+    Ok(())
+}
+
+/// write the start of the html body tag and the header linking back to the
+/// site itself.
+fn write_body_start(file: &mut File, site_name: &String) -> std::io::Result<()>{
+    file.write_all(format!(r#"
+<body>
+  <header>
+    <a href="/">{}</a>
+  </header>"#, site_name).as_bytes())?;
+
+    Ok(())
+}
+
+/// Write the navigation section to the given file
+fn write_nav(file: &mut File, in_path: &Path, raw_path: &Path, analytics: bool)
+    -> std::io::Result<()> {
+
+    if analytics == true {
+        /*
+        file.write_all(format!(r#"
+  <img src="https://stats.emile.space/count?p=/{}">
+  <nav>
+    <ul>"#, raw_path.to_str().unwrap()).as_bytes())?;
+        */
+        file.write_all(format!(r#"
+  <nav>
+    <ul>"#,).as_bytes())?;
+    } else {
+        file.write_all(format!(r#"
+  <nav>
+    <ul>"#).as_bytes())?;
+    }
+
+    // get the nav bar components
+    let components = raw_path.components().collect::<Vec<_>>();
+    
+    // for each list of components (["a"], ["a", "b"], ["a", "b", "c"]), create
+    // the path for the list, view all other dirs at that path and write the
+    // result to the file
+    let mut i = 0;
+    let slice = components.as_slice();
+
+    // for each navbar component
+    for component in slice {
+
+        // get the items belonging to that navbar item
+        // (["a"], ["a", "b"], ["a", "b", "c"])
+        let subpath_components = &slice[..i+1];
+        i += 1;
+
+        let mut subpath_path = PathBuf::new();
+
+        // push the inpath, so we've got a basis from where we can read the
+        // subpath items
+        // subpath_path = inpath + ???
+        subpath_path.push(in_path);
+
+        let mut nav_breadcrumb_link = PathBuf::new();
+
+        // for each item in the subpath, push it into the subpath_path so that
+        // in the end, we've got something like this:
+        // "inpath" + "a" + "b" + "c"
+        for subpath_component in subpath_components {
+            subpath_path.push(subpath_component);
+            nav_breadcrumb_link.push(subpath_component);
+        }
+
+        // make the nav_breadcrumb_link absolute by prefixing it with a /
+        // (this is in the scope of the web page, so this is fine) and make it
+        // a string
+        let nav_breadcrumb_link_absolute 
+            = Path::new("/")
+                .join(nav_breadcrumb_link);
+
+        let nav_breadcrumb_link
+            = nav_breadcrumb_link_absolute.to_str().unwrap();
+
+        // define the name of the breadcrumb
+        let nav_breadcrumb_name = component.as_os_str().to_str().unwrap();
+
+        ////////////////////////////////////////////////////////////////////////
+        file.write_all(format!(r#"
+        <li>
+            <a href="{}">{}</a>
+            <ul>"#, nav_breadcrumb_link, nav_breadcrumb_name).as_bytes())?;
+        ////////////////////////////////////////////////////////////////////////
+
+        // as we don't want to get the items for the individual entry, but on
+        // the same level, we push a ".."
+        // the subpath_path is now: inpath + subpath + ../
+        subpath_path.push("..");
+
+        // read all dirs in the subpath_path, add them to the dirs vector, so
+        // that we get a vector containing all the dirs we want
+        let mut dirs = Vec::new();
+        for entry in fs::read_dir(subpath_path)? {
+            let path = &entry?.path();
+            if path.is_dir() {
+                dirs.push(path.to_path_buf());
+            }
+        }
+
+        dirs.sort();
+
+        // DROPDOWN
+        // extract the link and name for each directory found
+        for dir in dirs {
+            let d = dir.canonicalize()?;
+            let abs_inpath = in_path.canonicalize()?;
+
+            let name = d.file_name().unwrap().to_str().unwrap();
+            let rel_link 
+                = d.strip_prefix(abs_inpath)
+                    .expect(format!(
+                        "could not strip the in_path prefix: {:?}",
+                        d).as_str());
+
+            let link = Path::new("/").join(rel_link);
+            let link = link.as_path().to_str().unwrap();
+
+            // don't add the current page to the dropdown, we're on it already!
+            if name == nav_breadcrumb_name {
+                continue
+            }
+
+            // don't add items starting with a dot to the dropdown, they're
+            // hidden!
+            if name.starts_with(".") {
+                continue
+            }
+
+            ////////////////////////////////////////////////////////////////////
+            file.write_all(format!(r#"
+                <li><a href="{}">{}/</a></li>"#, link, name).as_bytes())?;
+            ////////////////////////////////////////////////////////////////////
+        }
+
+        ////////////////////////////////////////////////////////////////////////
+        file.write_all(r#"
+            </ul>
+        </li>"#.as_bytes())?;
+        ////////////////////////////////////////////////////////////////////////
+    }
+
+    ////////////////////////////////////////////////////////////////////////////
+    file.write_all(format!(r#"
+    </ul>
+    <ul style="float: right">
+        <li>{:?}</li>
+        <li>
+            <a href="README.md">.md</a>
+        </li>
+    </ul>
+  </nav>"#, in_path.metadata()?.modified()?.duration_since(std::time::UNIX_EPOCH).unwrap().as_secs()).as_bytes())?;
+    ////////////////////////////////////////////////////////////////////////////
+
+    Ok(())
+}
+
+
+fn write_same_level(file: &mut File, in_path: &Path, raw_path: &Path)
+    -> std::io::Result<()> {
+
+    let search_path = Path::new(in_path).join(raw_path);
+
+    let mut dirs: Vec<PathBuf> = Vec::new();
+    let mut files: Vec<PathBuf> = Vec::new();
+
+    let mut vertical: bool = false;
+    let mut show_files: bool = false;
+
+    for entry in fs::read_dir(search_path)? {
+        let path = &entry?.path();
+
+        if path.is_dir() {
+            dirs.push(path.to_path_buf());
+        }
+        if path.is_file() {
+            files.push(path.to_path_buf());
+            if path.file_name().unwrap() == "vertical" {
+                vertical = true;
+            }
+            if path.file_name().unwrap() == "show_files" {
+                show_files = true;
+            }
+        }
+    }
+
+    dirs.sort();
+    files.sort();
+
+    let in_path = in_path.canonicalize()?;
+
+    if vertical == true {
+        file.write_all(format!(r#"
+  <ul class="vert">"#).as_bytes())?;
+    } else {
+        file.write_all(format!(r#"
+  <ul>"#).as_bytes())?;
+    }
+
+    for dir in dirs {
+        let dir = dir.canonicalize()?;
+        let dir = dir.strip_prefix(&in_path)
+            .expect("could not strip in_path prefix");
+
+        let link = Path::new("/").join(dir);
+        let link_str = link.as_path().to_str().unwrap();
+        let name = link.file_name().unwrap().to_str().unwrap();
+
+        if name.starts_with(".") {
+            continue
+        }
+
+        file.write_all(format!(r#"
+    <li><a href="{}">{}/</a></li>"#, link_str, name).as_bytes())?;
+    }
+
+    file.write_all(format!(r#"
+  </ul>"#).as_bytes())?;
+
+    if files.len() >= 1 && show_files == true {
+        file.write_all(format!(r#"<br>
+    <ul>"#).as_bytes())?;
+
+        for f in files {
+            let f = f.canonicalize()?;
+            let f = f.strip_prefix(&in_path)
+                .expect("could not strip in_path prefix");
+
+            let link = Path::new("/").join(f);
+            let link_str = link.as_path().to_str().unwrap();
+            let name = link.file_name().unwrap().to_str().unwrap();
+
+            if name == "README.md"
+                || name == "show_files"
+                || name.starts_with(".")
+                {
+                continue
+            };
+
+            file.write_all(format!(r#"
+        <li><a href="{}">{}</a></li>"#, link_str, name).as_bytes())?;
+        }
+
+        file.write_all(format!(r#"
+    </ul>"#).as_bytes())?;
+    }
+
+
+    Ok(())
+}
+
/// Render `<in_path>/<raw_path>/README.md` as HTML into `file`.
///
/// Markdown-style links are rewritten to anchors up front, then a
/// line-oriented pass converts `#`..`#####` headings into numbered,
/// self-linking <h1>..<h5> elements, `> ` lines into code blocks,
/// `:::tree` into a recursive file listing, and `:::toc` into a table
/// of contents derived from the headings. All other lines are copied
/// verbatim inside a surrounding <pre>.
fn write_readme_content(file: &mut File, in_path: &Path, raw_path: &Path) 
    -> std::io::Result<()> {

    // define the path of the README.md file
    let readme_file_path 
        = Path::new(in_path).join(raw_path).join("README.md");

    // open the file and read it as a string
    let mut readme_file = File::open(readme_file_path)?;
    let mut readme = String::new();
    readme_file.read_to_string(&mut readme)?;

    // replace all "markdown" style links with HTML links
    // let re = Regex::new(r"\[([^\[]+)\]\(([^\(]+)\)").unwrap();
    let re = Regex::new(r"\[([^]]+)\]\(([^)]+)\)").unwrap();
    let readme = re.replace_all(&readme, "<a href=\"$2\">$1</a>");

    // all body text is emitted inside this <pre>; the heading branches
    // below temporarily close and reopen it around the <h*> elements
    file.write_all(format!("<pre>").as_bytes())?;

    // counting the occurrence of `---`
    let mut hrule_count = 0;
    let mut in_yaml_metadata_block= false;

    // hierarchical counters feeding the "1.2.3." heading numbering;
    // entering a heading resets the counters of all deeper levels
    let mut level_1_heading_num = 0;
    let mut level_2_heading_num = 0;
    let mut level_3_heading_num = 0;
    let mut level_4_heading_num = 0;
    let mut level_5_heading_num = 0;

    // cheap markdown 2 html converter
    for line in readme.split('\n') {

        // 1 == 2, as I'm not sure how to comment out the file write 5 lines or so below
        // NOTE(review): the `&& 1 == 2` makes this branch unreachable,
        // so yaml metadata lines are never echoed and
        // `in_yaml_metadata_block` is never reset to false here
        if in_yaml_metadata_block && 1 == 2 {
            // if we find the end of the yaml metadata block, break this
            if line.starts_with("---") {
                in_yaml_metadata_block = false;
                continue
            } else {
                file.write_all(format!(r##"yaml_line: {}
"##, line).as_bytes())?;
                continue
            }
        }

        // if we've got a horizontal rule, it can be two things: the start and
        // end of a yaml-metadata block or an actual horizontal rule.
        //
        // If it's yaml metadata, read it all, but don't print it, store it
        // for later
        // If it's a horizontal rule, print the horizontal rule
        if line.starts_with("---") {

            // store the yaml metadata
            if hrule_count == 0 {
                in_yaml_metadata_block = true;
                continue
            }
            // NOTE(review): hrule_count is only incremented here, after
            // the `== 0` early-continue above, so it can never leave 0
            // and the <hr> below is unreachable — every `---` line is
            // treated as a yaml block start
            hrule_count += 1;

            // print the horizontal rule
            file.write_all(format!(r##"
            <hr>"##).as_bytes())?;

        } else if line.starts_with("#####") {
            // assumes "##### " (hashes plus one space); a bare "#####"
            // line would make this `get(6..)` return None and panic —
            // TODO confirm inputs never do that
            let heading = line.get(6..).unwrap();
            let heading_sanitized = sanitize(heading.to_string());

            level_5_heading_num += 1;

            // NOTE(review): closing tag is </h3> although this opens
            // <h5> — should presumably be </h5>
            file.write_all(format!(r##"</pre>
            <span id="{a}"></span>
            <h5><a href="#{a}">{h1}.{h2}.{h3}.{h4}.{h5}. {b}</a></h3>
            <pre>"##,
                a = heading_sanitized,
                b = heading,
                h1 = level_1_heading_num,
                h2 = level_2_heading_num,
                h3 = level_3_heading_num,
                h4 = level_4_heading_num,
                h5 = level_5_heading_num,
            ).as_bytes())?;

        } else if line.starts_with("####") {
            let heading = line.get(5..).unwrap();
            let heading_sanitized = sanitize(heading.to_string());

            level_4_heading_num += 1;
            level_5_heading_num = 0;

            // NOTE(review): closing tag is </h3> although this opens
            // <h4> — should presumably be </h4>
            file.write_all(format!(r##"</pre>
            <span id="{a}"></span>
            <h4><a href="#{a}">{h1}.{h2}.{h3}.{h4}. {b}</a></h3>
            <pre>"##,
                a = heading_sanitized,
                b = heading,
                h1 = level_1_heading_num,
                h2 = level_2_heading_num,
                h3 = level_3_heading_num,
                h4 = level_4_heading_num,
            ).as_bytes())?;

        } else if line.starts_with("###") {
            let heading = line.get(4..).unwrap();
            let heading_sanitized = sanitize(heading.to_string());

            level_3_heading_num += 1;
            level_4_heading_num = 0;
            level_5_heading_num = 0;

            file.write_all(format!(r##"</pre>
            <span id="{a}"></span>
            <h3><a href="#{a}">{h1}.{h2}.{h3}. {b}</a></h3>
            <pre>"##,
                a = heading_sanitized,
                b = heading,
                h1 = level_1_heading_num,
                h2 = level_2_heading_num,
                h3 = level_3_heading_num,
            ).as_bytes())?;

        } else if line.starts_with("##") {
            let heading = line.get(3..).unwrap();
            let heading_sanitized = sanitize(heading.to_string());

            level_2_heading_num += 1;
            level_3_heading_num = 0;
            level_4_heading_num = 0;
            level_5_heading_num = 0;

            file.write_all(format!(r##"</pre>
            <span id="{a}"></span>
            <h2><a href="#{a}">{h1}.{h2}. {b}</a></h2>
            <pre>"##,
                a = heading_sanitized,
                b = heading,
                h1 = level_1_heading_num,
                h2 = level_2_heading_num,
            ).as_bytes())?;

        } else if line.starts_with("#") {
            let heading = line.get(2..).unwrap();
            let heading_sanitized = sanitize(heading.to_string());

            level_1_heading_num += 1;
            level_2_heading_num = 0;
            level_3_heading_num = 0;
            level_4_heading_num = 0;
            level_5_heading_num = 0;

            file.write_all(format!(r##"</pre>
            <span id="{a}"></span>
            <h1><a href="#{a}">{h1}. {b}</a></h1>
            <pre>"##,
                a = heading_sanitized,
                b = heading,
                h1 = level_1_heading_num
            ).as_bytes())?;

        } else if line.starts_with("> ") {
            // blockquote lines are rendered as a code block; only '<'
            // is escaped (NOTE(review): as "&lt" without the trailing
            // ';' — browsers tolerate it, but it is not valid HTML)
            let line = line.replace("<", "&lt");
            let line = line.get(2..).unwrap();
            file.write_all(format!("</pre><pre class=\"code\">{}</pre><pre>\n", line).as_bytes())?;

        } else if line.starts_with(":::tree") {

            // TODO: add some parameter controlling if the list is ascending or descending (reverse the list before writing)

            // get all dirs in the current dir recursively
            let tree_files_path = Path::new(in_path).join(raw_path);
            let mut tree_files
                = recursive_read_dir(&tree_files_path, true)?;

            // sort them, otherwise we'll get complete chaos
            tree_files.sort();

            for path in tree_files {

                // strip the inpath prefix and raw_path prefix, as we don't need
                // them
                let path 
                    = path.strip_prefix(in_path)
                        .expect("could not strip in_file prefix")
                        .strip_prefix(raw_path)
                        .expect("could not strip raw_path prefix");

                // convert the path to a string, check if it contains a hidden
                // path by checking if it contains a `/.`, if so, skip this one
                if String::from(path.to_str().unwrap()).contains("/.") {
                    continue
                }
                if String::from(path.to_str().unwrap()).starts_with(".") {
                    continue
                }

                // write the link and the entry name to the file
                let link = Path::new(raw_path).join(path);
                let name = path.file_name().unwrap().to_str().unwrap();

                // count the amount of segments in the path and write spaces for
                // each (indentation mirrors the directory depth)
                let segments = path.iter().count();
                for _ in 0..(segments-1) {
                    file.write_all(r#"    "#.as_bytes())?;
                }

                file.write_all(
                    format!("<a href=\"/{}\">{}</a>\n",
                        link.display(), name, 
                        ).as_bytes()
                )?;
            }

        } else if line.starts_with(":::toc") {

            // TODO: depth parameter for controlling the depth of the table of contents

            // a second scan over the whole readme; these counters must
            // mirror the numbering of the heading branches above so
            // the ToC labels match the rendered headings
            let mut level_1_num = 0;
            let mut level_2_num = 0;
            let mut level_3_num = 0;
            let mut level_4_num = 0;
            let mut level_5_num = 0;

            for line in readme.split('\n') {
                if line.starts_with("#####") {
                    let line = line.get(6..).unwrap();
                    // trim the line to remove the trailing whitespace
                    let line = line.trim();
                    level_5_num += 1;
                    file.write_all(
                        format!(
                            r##"           <a href="#{}">{}.{}.{}.{}.{}. {}</a>
"##,
                            sanitize(line.to_string()),
                            level_1_num,
                            level_2_num,
                            level_3_num,
                            level_4_num,
                            level_5_num,
                            line
                        ).as_bytes()
                    )?;
                } else if line.starts_with("####") {
                    let line = line.get(5..).unwrap();
                    // trim the line to remove the trailing whitespace
                    let line = line.trim();
                    level_4_num += 1;
                    level_5_num = 0;
                    file.write_all(
                        format!(
                            r##"         <a href="#{}">{}.{}.{}.{}. {}</a>
"##,
                            sanitize(line.to_string()),
                            level_1_num,
                            level_2_num,
                            level_3_num,
                            level_4_num,
                            line
                        ).as_bytes()
                    )?;
                } else if line.starts_with("###") {
                    let line = line.get(4..).unwrap();
                    // trim the line to remove the trailing whitespace
                    let line = line.trim();
                    level_3_num += 1;
                    level_4_num = 0;
                    level_5_num = 0;
                    file.write_all(
                        format!(
                            r##"       <a href="#{}">{}.{}.{}. {}</a>
"##,
                            sanitize(line.to_string()),
                            level_1_num,
                            level_2_num,
                            level_3_num,
                            line
                        ).as_bytes()
                    )?;
                } else if line.starts_with("##") {
                    let line = line.get(3..).unwrap();
                    let line = line.trim();
                    level_2_num += 1;
                    level_3_num = 0;
                    level_4_num = 0;
                    level_5_num = 0;

                    file.write_all(
                        format!(
                            //r##"    <a href="#{}">{}.{}. {}</a>
                            r##"    <a href="#{}">{}.{}. {}</a>
"##,
                            sanitize(line.to_string()),
                            level_1_num,
                            level_2_num,
                            line
                        ).as_bytes()
                    )?;
                } else if line.starts_with("#") {
                    let line = line.get(2..).unwrap();
                    let line = line.trim();
                    level_1_num += 1;
                    level_2_num = 0;
                    level_3_num = 0;
                    level_4_num = 0;
                    level_5_num = 0;

                    file.write_all(
                        format!(
                            r##"<a href="#{}">{}. {}</a>
"##,
                            sanitize(line.to_string()),
                            level_1_num,
                            line
                        ).as_bytes()
                    )?;
                }
            }

        } else {

            // for the case that nothing of the above matches, just write the
            // content into the html body as it is
            file.write_all(format!("{}\n", line).as_bytes())?;
        }
    }

    Ok(())
}
+
+fn write_footer(file: &mut File, raw_path: &Path, internal_links: &HashMap<String, Vec<String>>) -> std::io::Result<()> {
+
+    // add some padding before the whole footer stuff
+    file.write_all(b"<br><br><br>")?;
+
+    // Backlinks
+
+    let search_path = Path::new("/").join(raw_path).into_os_string().into_string().unwrap();
+
+    match internal_links.get(&search_path) {
+        Some(values) => {
+
+            // only write "backlinks" if we've actually got some
+            file.write_all(b"backlinks:\n")?;
+
+            for link in values {
+
+                // strip the "in" prefix 
+                // strip the "README.md" suffix
+                // TODO: do all this magic by parsing it as a path and removing the unneeded parts, bonus by creating a function doing this and removing the horrible string mashing in this codebase
+                let a = link
+                    .strip_prefix("in")
+                    .expect("no prefix to strip")
+                    .strip_suffix("README.md")
+                    .expect("no README.md suffix to remove");
+
+                file.write_all(format!(r#"- <a href="{a}">{a}</a>
+"#).as_bytes())?;
+            }
+        }
+        None => (),
+    }
+
+    // The actual footer
+
+    file.write_all(format!(r#"
+    </pre>
+<a href="https://chaos.social/@hanemile.rss" target="_blank" rel="noopener" class="icon"><img class="webring" src="/rss.svg" alt="rss feed of @hanemile@chaos.social mastodon" height="32px"/></a>
+<a href="https://lieu.cblgh.org/" target="_blank" rel="noopener" class="icon"><img class="webring" src="/lieu.svg" alt="lieu webring search engine" height="32px"/></a>
+<a href="https://webring.xxiivv.com/#emile" target="_blank" rel="noopener" class="icon"><img class="webring" src="/webring.svg" alt="XXIIVV webring" height="32px"/></a>
+<a rel="me" href="https://chaos.social/@hanemile" target="_blank" class="icon"><img class="webring" src="/mastodon.svg" alt="mastodon" height="32px"/></a>
+    <pre>emile - {:?} - generated using <a href="https://github.com/hanemile/vokobe">vokobe {:?}</a><pre>
+</body>
+</html>
+"#,
+    time::SystemTime::now().duration_since(time::SystemTime::UNIX_EPOCH).unwrap(),
+    env!("CARGO_PKG_VERSION")
+    ).as_bytes())?;
+
+    Ok(())
+}
+
/// Normalize a heading into an anchor id: spaces become hyphens, only
/// ASCII alphanumerics and hyphens survive, and the result is
/// lowercased.
fn sanitize(input: String) -> String {
    input
        .chars()
        .map(|c| if c == ' ' { '-' } else { c })
        .filter(|c| c.is_ascii_alphanumeric() || *c == '-')
        .map(|c| c.to_ascii_lowercase())
        .collect()
}
+
+/// Return a list of all files in the directory, recursively.
+fn recursive_read_dir(dir: &PathBuf, dir_only: bool) -> io::Result<Vec<PathBuf>> {
+
+    // return an empty vec if the given path is not a directory
+    if dir.is_dir() == false {
+        return Ok(vec![]);
+    }
+
+    if dir.starts_with(".") {
+       return Ok(vec![]); 
+    }
+
+        // get all entries in the gitignore file, if it exists
+    let gitignore_entries: Vec<PathBuf> = gitignore_entries(&dir)?;
+
+    // store the child pathes
+    let mut entries: Vec<PathBuf> = Vec::new();
+    
+    // iterate over all items in the dir, pushing the dirs pathes to the dirs
+    // vector for returning it
+    'outer: for entry in fs::read_dir(dir)? {
+        let dir_entry = &entry?;
+        let path = dir_entry.path();
+
+        // skip hidden folders
+        if path.starts_with(".") {
+            //continue 'outer;
+            break 'outer;
+        }
+        if dir.starts_with(".") {
+            //continue 'outer;
+            break 'outer;
+        }
+
+        // check if the current entry is part of the gitignore, if so, skip it
+        for gitignore_entry in &gitignore_entries {
+            if gitignore_entry.to_str() == Some("") {
+                continue;
+            }
+            if path.ends_with(gitignore_entry) {
+                continue 'outer;
+            }
+        }
+
+        if dir_only == true {
+            if path.is_dir() {
+                entries.push(path.to_path_buf());
+            }
+        } else {
+            entries.push(path.to_path_buf());
+        }
+
+        // recursively push all dirs from all children to the dirs vector
+        let subdirs = recursive_read_dir(&path, dir_only)?;
+
+        for subdir in subdirs {
+            entries.push(subdir)
+        }
+    }
+
+    // return the dirs, the ones from this folder and the ones from all child folders
+    Ok(entries)
+}
+
// Collect the entries of `dir`'s `.gitignore`, one path per line.
// A missing (or unopenable) file simply yields an empty list.
fn gitignore_entries(dir: &PathBuf) -> io::Result<Vec<PathBuf>> {
    let gitignore_path = dir.join(".gitignore");

    match File::open(&gitignore_path) {
        Ok(gitignore) => BufReader::new(gitignore)
            .lines()
            .map(|line| line.map(PathBuf::from))
            .collect(),
        Err(_) => Ok(Vec::new()),
    }
}