Begin moving openbao and authentik server to new setup

Former-commit-id: 8cd2737aca
This commit is contained in:
Kaare Hoff Skovgaard 2025-07-14 23:34:02 +02:00
parent 3a42f156f7
commit 7a995baca4
43 changed files with 1006 additions and 481 deletions

488
flake.lock generated
View file

@ -16,6 +16,58 @@
"type": "github"
}
},
"authentik-nix": {
"inputs": {
"authentik-src": "authentik-src",
"flake-compat": "flake-compat",
"flake-parts": [
"flake-parts"
],
"flake-utils": [
"flake-utils"
],
"napalm": "napalm",
"nixpkgs": [
"nixpkgs"
],
"pyproject-build-systems": "pyproject-build-systems",
"pyproject-nix": "pyproject-nix",
"systems": [
"systems"
],
"uv2nix": "uv2nix"
},
"locked": {
"lastModified": 1751033152,
"narHash": "sha256-0ANu9OLQJszcEyvnfDB7G957uqskZwCrTzRXz/yfAmE=",
"owner": "nix-community",
"repo": "authentik-nix",
"rev": "1a4d6a5dd6fef39b99eb7ea4db79c5d5c7d7f1bf",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "authentik-nix",
"type": "github"
}
},
"authentik-src": {
"flake": false,
"locked": {
"lastModified": 1751031262,
"narHash": "sha256-SNgRMQUjL3DTlWkMyRMan+pY1FfIV+DMeq5BiTM0N0k=",
"owner": "goauthentik",
"repo": "authentik",
"rev": "b34665fabd8d938d81ce871a4e86ca528c5f253b",
"type": "github"
},
"original": {
"owner": "goauthentik",
"ref": "version/2025.4.3",
"repo": "authentik",
"type": "github"
}
},
"base16": {
"inputs": {
"fromYaml": "fromYaml"
@ -83,41 +135,11 @@
"type": "github"
}
},
"bats-assert": {
"flake": false,
"locked": {
"lastModified": 1636059754,
"narHash": "sha256-ewME0l27ZqfmAwJO4h5biTALc9bDLv7Bl3ftBzBuZwk=",
"owner": "bats-core",
"repo": "bats-assert",
"rev": "34551b1d7f8c7b677c1a66fc0ac140d6223409e5",
"type": "github"
},
"original": {
"owner": "bats-core",
"repo": "bats-assert",
"type": "github"
}
},
"bats-support": {
"flake": false,
"locked": {
"lastModified": 1548869839,
"narHash": "sha256-Gr4ntadr42F2Ks8Pte2D4wNDbijhujuoJi4OPZnTAZU=",
"owner": "bats-core",
"repo": "bats-support",
"rev": "d140a65044b2d6810381935ae7f0c94c7023c8c3",
"type": "github"
},
"original": {
"owner": "bats-core",
"repo": "bats-support",
"type": "github"
}
},
"cosmic-manager": {
"inputs": {
"flake-parts": "flake-parts",
"flake-parts": [
"flake-parts"
],
"home-manager": [
"home-manager"
],
@ -174,28 +196,6 @@
"type": "github"
}
},
"disko_2": {
"inputs": {
"nixpkgs": [
"nixos-anywhere",
"nixpkgs"
]
},
"locked": {
"lastModified": 1748225455,
"narHash": "sha256-AzlJCKaM4wbEyEpV3I/PUq5mHnib2ryEy32c+qfj6xk=",
"owner": "nix-community",
"repo": "disko",
"rev": "a894f2811e1ee8d10c50560551e50d6ab3c392ba",
"type": "github"
},
"original": {
"owner": "nix-community",
"ref": "master",
"repo": "disko",
"type": "github"
}
},
"firefox-gnome-theme": {
"flake": false,
"locked": {
@ -218,7 +218,9 @@
"nixpkgs"
],
"snowfall-lib": "snowfall-lib",
"treefmt-nix": "treefmt-nix"
"treefmt-nix": [
"treefmt-nix"
]
},
"locked": {
"lastModified": 1751834884,
@ -235,6 +237,22 @@
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1747046372,
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1650374568,
@ -252,80 +270,14 @@
},
"flake-parts": {
"inputs": {
"nixpkgs-lib": [
"cosmic-manager",
"nixpkgs"
]
"nixpkgs-lib": "nixpkgs-lib"
},
"locked": {
"lastModified": 1738453229,
"narHash": "sha256-7H9XgNiGLKN1G1CgRh0vUL4AheZSYzPm+zmZ7vxbJdo=",
"lastModified": 1751413152,
"narHash": "sha256-Tyw1RjYEsp5scoigs1384gIg6e0GoBVjms4aXFfRssQ=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "32ea77a06711b758da0ad9bd6a844c5740a87abd",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-parts_2": {
"inputs": {
"nixpkgs-lib": [
"nixos-anywhere",
"nixpkgs"
]
},
"locked": {
"lastModified": 1743550720,
"narHash": "sha256-hIshGgKZCgWh6AYJpJmRgFdR3WUbkY04o82X05xqQiY=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "c621e8422220273271f52058f618c94e405bb0f5",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-parts_3": {
"inputs": {
"nixpkgs-lib": [
"stylix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1749398372,
"narHash": "sha256-tYBdgS56eXYaWVW3fsnPQ/nFlgWi/Z2Ymhyu21zVM98=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "9305fe4e5c2a6fcf5ba6a3ff155720fbe4076569",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-parts_4": {
"inputs": {
"nixpkgs-lib": [
"terranix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1736143030,
"narHash": "sha256-+hu54pAoLDEZT9pjHlqL9DNzWz0NbUn8NEAHP7PQPzU=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "b905f6fc23a9051a6e1b741e1438dbfc0634c6de",
"rev": "77826244401ea9de6e3bac47c2db46005e1f30b5",
"type": "github"
},
"original": {
@ -372,27 +324,17 @@
}
},
"flake-utils_2": {
"locked": {
"lastModified": 1634851050,
"narHash": "sha256-N83GlSGPJJdcqhUxSCS/WwW5pksYf3VP1M13cDRTSVA=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "c91f3de5adaf1de973b797ef7485e441a65b8935",
"type": "github"
"inputs": {
"systems": [
"systems"
]
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_3": {
"locked": {
"lastModified": 1634851050,
"narHash": "sha256-N83GlSGPJJdcqhUxSCS/WwW5pksYf3VP1M13cDRTSVA=",
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "c91f3de5adaf1de973b797ef7485e441a65b8935",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
@ -455,6 +397,32 @@
"type": "github"
}
},
"napalm": {
"inputs": {
"flake-utils": [
"authentik-nix",
"flake-utils"
],
"nixpkgs": [
"authentik-nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1725806412,
"narHash": "sha256-lGZjkjds0p924QEhm/r0BhAxbHBJE1xMOldB/HmQH04=",
"owner": "willibutz",
"repo": "napalm",
"rev": "b492440d9e64ae20736d3bec5c7715ffcbde83f5",
"type": "github"
},
"original": {
"owner": "willibutz",
"ref": "avoid-foldl-stack-overflow",
"repo": "napalm",
"type": "github"
}
},
"nix-vm-test": {
"inputs": {
"nixpkgs": [
@ -478,15 +446,21 @@
},
"nixos-anywhere": {
"inputs": {
"disko": "disko_2",
"flake-parts": "flake-parts_2",
"disko": [
"disko"
],
"flake-parts": [
"flake-parts"
],
"nix-vm-test": "nix-vm-test",
"nixos-images": "nixos-images",
"nixos-stable": "nixos-stable",
"nixpkgs": [
"nixpkgs"
],
"treefmt-nix": "treefmt-nix_2"
"treefmt-nix": [
"treefmt-nix"
]
},
"locked": {
"lastModified": 1749105224,
@ -560,18 +534,18 @@
"type": "github"
}
},
"nixpkgs_2": {
"nixpkgs-lib": {
"locked": {
"lastModified": 1636273007,
"narHash": "sha256-eb6HcZNacO9vIP/KcJ5CoCRYSGfD+VxzYs2cCafEo4Y=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "c69c6533c820c55c3f1d924b399d8f6925a1e41a",
"lastModified": 1751159883,
"narHash": "sha256-urW/Ylk9FIfvXfliA1ywh75yszAbiTEVgpPeinFyVZo=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "14a40a1d7fb9afa4739275ac642ed7301a9ba1ab",
"type": "github"
},
"original": {
"owner": "nixos",
"repo": "nixpkgs",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"type": "github"
}
},
@ -585,7 +559,7 @@
"stylix",
"nixpkgs"
],
"treefmt-nix": "treefmt-nix_3"
"treefmt-nix": "treefmt-nix"
},
"locked": {
"lastModified": 1751320053,
@ -601,20 +575,75 @@
"type": "github"
}
},
"pyproject-build-systems": {
"inputs": {
"nixpkgs": [
"authentik-nix",
"nixpkgs"
],
"pyproject-nix": [
"authentik-nix",
"pyproject-nix"
],
"uv2nix": [
"authentik-nix",
"uv2nix"
]
},
"locked": {
"lastModified": 1749519371,
"narHash": "sha256-UJONN7mA2stweZCoRcry2aa1XTTBL0AfUOY84Lmqhos=",
"owner": "pyproject-nix",
"repo": "build-system-pkgs",
"rev": "7c06967eca687f3482624250428cc12f43c92523",
"type": "github"
},
"original": {
"owner": "pyproject-nix",
"repo": "build-system-pkgs",
"type": "github"
}
},
"pyproject-nix": {
"inputs": {
"nixpkgs": [
"authentik-nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1750499893,
"narHash": "sha256-ThKBd8XSvITAh2JqU7enOp8AfKeQgf9u7zYC41cnBE4=",
"owner": "pyproject-nix",
"repo": "pyproject.nix",
"rev": "e824458bd917b44bf4c38795dea2650336b2f55d",
"type": "github"
},
"original": {
"owner": "pyproject-nix",
"repo": "pyproject.nix",
"type": "github"
}
},
"root": {
"inputs": {
"advisory-db": "advisory-db",
"authentik-nix": "authentik-nix",
"cosmic-manager": "cosmic-manager",
"crane": "crane",
"disko": "disko",
"flake-base": "flake-base",
"flake-parts": "flake-parts",
"flake-utils": "flake-utils_2",
"home-manager": "home-manager",
"nixos-anywhere": "nixos-anywhere",
"nixpkgs": "nixpkgs",
"rust-overlay": "rust-overlay",
"stylix": "stylix",
"systems": "systems_2",
"terranix": "terranix",
"terranix-hcloud": "terranix-hcloud"
"terranix-hcloud": "terranix-hcloud",
"treefmt-nix": "treefmt-nix_2"
}
},
"rust-overlay": {
@ -639,7 +668,7 @@
},
"snowfall-lib": {
"inputs": {
"flake-compat": "flake-compat",
"flake-compat": "flake-compat_2",
"flake-utils-plus": "flake-utils-plus",
"nixpkgs": [
"flake-base",
@ -667,13 +696,17 @@
"base16-helix": "base16-helix",
"base16-vim": "base16-vim",
"firefox-gnome-theme": "firefox-gnome-theme",
"flake-parts": "flake-parts_3",
"flake-parts": [
"flake-parts"
],
"gnome-shell": "gnome-shell",
"nixpkgs": [
"nixpkgs"
],
"nur": "nur",
"systems": "systems_2",
"systems": [
"systems"
],
"tinted-foot": "tinted-foot",
"tinted-kitty": "tinted-kitty",
"tinted-schemes": "tinted-schemes",
@ -742,7 +775,9 @@
},
"terranix": {
"inputs": {
"flake-parts": "flake-parts_4",
"flake-parts": [
"flake-parts"
],
"nixpkgs": [
"nixpkgs"
],
@ -762,28 +797,17 @@
"type": "github"
}
},
"terranix-examples": {
"locked": {
"lastModified": 1633465925,
"narHash": "sha256-BfXRW1ZHpK5jh5CVcw7eFpGsWE1CyVxL8R+V7uXemaU=",
"owner": "terranix",
"repo": "terranix-examples",
"rev": "70bf5d5a1ad4eabef1e4e71c1eb101021decd5a4",
"type": "github"
},
"original": {
"owner": "terranix",
"repo": "terranix-examples",
"type": "github"
}
},
"terranix-hcloud": {
"inputs": {
"flake-utils": "flake-utils_2",
"flake-utils": [
"flake-utils"
],
"nixpkgs": [
"nixpkgs"
],
"terranix": "terranix_2"
"terranix": [
"terranix"
]
},
"locked": {
"lastModified": 1745572802,
@ -799,29 +823,6 @@
"type": "github"
}
},
"terranix_2": {
"inputs": {
"bats-assert": "bats-assert",
"bats-support": "bats-support",
"flake-utils": "flake-utils_3",
"nixpkgs": "nixpkgs_2",
"terranix-examples": "terranix-examples"
},
"locked": {
"lastModified": 1636274023,
"narHash": "sha256-HDiyJGgyDUoLnpL8N+wDm3cM/vEfYYc/p4N1kKH/kLk=",
"owner": "terranix",
"repo": "terranix",
"rev": "342ec8490bc948c8589414eb89f26b265cbfd62a",
"type": "github"
},
"original": {
"owner": "terranix",
"ref": "develop",
"repo": "terranix",
"type": "github"
}
},
"tinted-foot": {
"flake": false,
"locked": {
@ -904,48 +905,6 @@
}
},
"treefmt-nix": {
"inputs": {
"nixpkgs": [
"flake-base",
"nixpkgs"
]
},
"locked": {
"lastModified": 1750931469,
"narHash": "sha256-0IEdQB1nS+uViQw4k3VGUXntjkDp7aAlqcxdewb/hAc=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "ac8e6f32e11e9c7f153823abc3ab007f2a65d3e1",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "treefmt-nix",
"type": "github"
}
},
"treefmt-nix_2": {
"inputs": {
"nixpkgs": [
"nixos-anywhere",
"nixpkgs"
]
},
"locked": {
"lastModified": 1748243702,
"narHash": "sha256-9YzfeN8CB6SzNPyPm2XjRRqSixDopTapaRsnTpXUEY8=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "1f3f7b784643d488ba4bf315638b2b0a4c5fb007",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "treefmt-nix",
"type": "github"
}
},
"treefmt-nix_3": {
"inputs": {
"nixpkgs": [
"stylix",
@ -966,6 +925,51 @@
"repo": "treefmt-nix",
"type": "github"
}
},
"treefmt-nix_2": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1752055615,
"narHash": "sha256-19m7P4O/Aw/6+CzncWMAJu89JaKeMh3aMle1CNQSIwM=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "c9d477b5d5bd7f26adddd3f96cfd6a904768d4f9",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "treefmt-nix",
"type": "github"
}
},
"uv2nix": {
"inputs": {
"nixpkgs": [
"authentik-nix",
"nixpkgs"
],
"pyproject-nix": [
"authentik-nix",
"pyproject-nix"
]
},
"locked": {
"lastModified": 1750987094,
"narHash": "sha256-GujDElxLgYatnNvuL1U6qd18lcuG6anJMjpfYRScV08=",
"owner": "pyproject-nix",
"repo": "uv2nix",
"rev": "4b703d851b61e664a70238711a8ff0efa1aa2f52",
"type": "github"
},
"original": {
"owner": "pyproject-nix",
"repo": "uv2nix",
"type": "github"
}
}
},
"root": "root",

View file

@ -2,26 +2,61 @@
description = "A very basic flake";
inputs = {
authentik-nix = {
url = "github:nix-community/authentik-nix";
inputs = {
flake-utils.follows = "flake-utils";
nixpkgs.follows = "nixpkgs";
flake-parts.follows = "flake-parts";
systems.follows = "systems";
};
};
nixpkgs.url = "github:nixos/nixpkgs/nixos-25.05";
flake-base = {
url = "git+https://khs.codes/nix/flake-base";
inputs.nixpkgs.follows = "nixpkgs";
inputs = {
nixpkgs.follows = "nixpkgs";
treefmt-nix.follows = "treefmt-nix";
};
};
flake-utils = {
url = "github:numtide/flake-utils";
inputs = {
systems.follows = "systems";
};
};
flake-parts = {
url = "github:hercules-ci/flake-parts";
};
disko = {
url = "github:nix-community/disko";
inputs.nixpkgs.follows = "nixpkgs";
inputs = {
nixpkgs.follows = "nixpkgs";
};
};
terranix = {
url = "github:terranix/terranix";
inputs.nixpkgs.follows = "nixpkgs";
inputs = {
nixpkgs.follows = "nixpkgs";
flake-parts.follows = "flake-parts";
};
};
home-manager = {
url = "github:nix-community/home-manager/release-25.05";
inputs.nixpkgs.follows = "nixpkgs";
inputs = {
nixpkgs.follows = "nixpkgs";
};
};
terranix-hcloud = {
url = "github:terranix/terranix-hcloud";
inputs.nixpkgs.follows = "nixpkgs";
inputs = {
flake-utils.follows = "flake-utils";
nixpkgs.follows = "nixpkgs";
terranix.follows = "terranix";
};
};
systems = {
url = "github:nix-systems/default";
};
crane.url = "github:ipetkov/crane";
advisory-db = {
@ -34,16 +69,27 @@
nixpkgs.follows = "nixpkgs";
};
};
treefmt-nix = {
url = "github:numtide/treefmt-nix";
inputs = {
nixpkgs.follows = "nixpkgs";
};
};
nixos-anywhere = {
url = "github:nix-community/nixos-anywhere/1.11.0";
inputs = {
nixpkgs.follows = "nixpkgs";
flake-parts.follows = "flake-parts";
treefmt-nix.follows = "treefmt-nix";
disko.follows = "disko";
};
};
stylix = {
url = "github:nix-community/stylix/release-25.05";
inputs = {
nixpkgs.follows = "nixpkgs";
flake-parts.follows = "flake-parts";
systems.follows = "systems";
};
};
cosmic-manager = {
@ -51,6 +97,7 @@
inputs = {
nixpkgs.follows = "nixpkgs";
home-manager.follows = "home-manager";
flake-parts.follows = "flake-parts";
};
};
};
@ -61,6 +108,7 @@
inputNixosModules = [
inputs.disko.nixosModules.disko
inputs.stylix.nixosModules.stylix
inputs.authentik-nix.nixosModules.default
];
inputHomeModules = [
inputs.cosmic-manager.homeManagerModules.cosmic-manager

View file

@ -1,3 +1,6 @@
{
...
}:
{
khscodes.khs.enable = true;
khscodes.khs.shell.oh-my-posh.enable = true;

View file

@ -0,0 +1,7 @@
{
...
}:
{
khscodes.khs.enable = true;
khscodes.khs.shell.oh-my-posh.enable = true;
}

View file

@ -0,0 +1,10 @@
{
lib,
config,
...
}:
{
khscodes.khs.enable = true;
khscodes.khs.shell.oh-my-posh.enable = true;
imports = lib.lists.optional config.khscodes.desktop.enable ./desktop.nix;
}

View file

@ -0,0 +1,9 @@
{ pkgs, lib, ... }:
{
imports = lib.lists.optional (lib.strings.hasSuffix "-linux" pkgs.system) ./linux-desktop.nix;
home.packages = [
pkgs.bitwarden-cli
pkgs.nerd-fonts.inconsolata
pkgs.google-chrome
];
}

View file

@ -1,5 +1,4 @@
{ pkgs, ... }:
{
imports = [ ./khs-desktop.nix ];
home.packages = [ pkgs.spotify ];
}

View file

@ -1,4 +0,0 @@
{ inputs, ... }:
{
imports = [ "${inputs.self}/nix/profiles/home/khs-linux-desktop.nix" ];
}

View file

@ -53,7 +53,7 @@ in
dnsNames = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "DNS names for the server";
default = lib.lists.unique ([ fqdn ] ++ config.networking.aliases);
default = lib.lists.unique ([ fqdn ] ++ config.khscodes.networking.aliases);
};
bucket = {
key = lib.mkOption {
@ -62,14 +62,6 @@ in
default = "${fqdn}.tfstate";
};
};
secretsSource = lib.mkOption {
type = lib.types.enum [
"bitwarden"
"vault"
];
description = "Whether to load opentofu secrets from Bitwarden or Vault";
default = "vault";
};
datacenter = lib.mkOption {
type = lib.types.str;
description = "The Hetzner datacenter to create a server in";
@ -159,7 +151,7 @@ in
inherit labels;
name = fqdn;
initial_image = "debian-12";
rdns = fqdn;
rdns = lib.mkIf cfg.mapRdns fqdn;
ssh_keys = [ config.khscodes.hcloud.output.data.ssh_key.khs.id ];
user_data = provisioningUserData;
};
@ -211,7 +203,6 @@ in
khscodes.services.read-vault-auth-from-userdata.url = "http://169.254.169.254/latest/user-data";
khscodes.infrastructure.provisioning.pre = {
modules = modules;
secretsSource = cfg.secretsSource;
};
}
);

View file

@ -0,0 +1,9 @@
{ lib, ... }:
{
options.khscodes.infrastructure.openbao = {
domain = lib.mkOption {
type = lib.types.str;
default = "vault.kaareskovgaard.net";
};
};
}

View file

@ -7,6 +7,8 @@ let
cfg = config.khscodes.infrastructure.vault-loki-sender;
fqdn = config.khscodes.networking.fqdn;
vaultRoleName = config.khscodes.infrastructure.vault-server-approle.role_name;
client_key = "/var/lib/alloy/loki.key";
client_cert = "/var/lib/alloy/loki.cert";
in
{
options.khscodes.infrastructure.vault-loki-sender = {
@ -56,8 +58,8 @@ in
{{ .Key }}
{{ .Cert }}
{{ .CA }}
{{ .Key | writeToFile "${config.khscodes.services.alloy.loki.client_key}" "${config.khscodes.services.alloy.user}" "${config.khscodes.services.alloy.group}" "0600" }}
{{ .Cert | writeToFile "${config.khscodes.services.alloy.loki.client_cert}" "${config.khscodes.services.alloy.user}" "${config.khscodes.services.alloy.group}" "0644" }}
{{ .Key | writeToFile "${client_key}" "${config.khscodes.services.alloy.user}" "${config.khscodes.services.alloy.group}" "0600" }}
{{ .Cert | writeToFile "${client_cert}" "${config.khscodes.services.alloy.user}" "${config.khscodes.services.alloy.group}" "0644" }}
{{- end -}}
'';
destination = "/var/lib/alloy/cache.key";
@ -69,10 +71,13 @@ in
];
khscodes.services.alloy = {
enable = true;
loki = {
client_key = "/var/lib/alloy/loki_cert.key";
client_cert = "/var/lib/alloy/loki_cert.pem";
environment = {
LOKI_CLIENT_KEY = client_key;
LOKI_CLIENT_CERT = client_cert;
};
};
environment.etc."alloy/loki.alloy" = {
source = ./loki.alloy;
};
};
}

View file

@ -0,0 +1,78 @@
// Collect logs from systemd journal for node_exporter integration
loki.source.journal "logs_integrations_integrations_node_exporter_journal_scrape" {
// Only collect logs from the last 24 hours
max_age = "24h0m0s"
// Apply relabeling rules to the logs
relabel_rules = discovery.relabel.logs_integrations_integrations_node_exporter_journal_scrape.rules
// Send logs to the local Loki instance
forward_to = [loki.write.local.receiver]
}
// Define which log files to collect for node_exporter
local.file_match "logs_integrations_integrations_node_exporter_direct_scrape" {
path_targets = [{
// Target localhost for log collection
__address__ = "localhost",
// Collect standard system logs
__path__ = "/var/log/{syslog,messages,*.log}",
// Add instance label with hostname
instance = constants.hostname,
// Add job label for logs
job = "integrations/node_exporter",
}]
}
// Define relabeling rules for systemd journal logs
discovery.relabel "logs_integrations_integrations_node_exporter_journal_scrape" {
targets = []
rule {
// Extract systemd unit information into a label
source_labels = ["__journal__systemd_unit"]
target_label = "unit"
}
rule {
// Extract boot ID information into a label
source_labels = ["__journal__boot_id"]
target_label = "boot_id"
}
rule {
// Extract transport information into a label
source_labels = ["__journal__transport"]
target_label = "transport"
}
rule {
// Extract log priority into a level label
source_labels = ["__journal_priority_keyword"]
target_label = "level"
}
rule {
// Set the instance label to the hostname of the machine
target_label = "instance"
replacement = constants.hostname
}
}
// Collect logs from files for node_exporter
loki.source.file "logs_integrations_integrations_node_exporter_direct_scrape" {
// Use targets defined in local.file_match
targets = local.file_match.logs_integrations_integrations_node_exporter_direct_scrape.targets
// Send logs to the local Loki instance
forward_to = [loki.write.local.receiver]
}
// Define where to send logs for storage
loki.write "local" {
endpoint {
// Send logs to a locally running Loki instance
url = "https://loki.kaareskovgaard.net/loki/api/v1/push"
tls_config {
cert_file = sys.env("LOKI_CLIENT_CERT")
key_file = sys.env("LOKI_CLIENT_KEY")
}
}
}

View file

@ -7,13 +7,15 @@ let
cfg = config.khscodes.infrastructure.vault-prometheus-sender;
fqdn = config.khscodes.networking.fqdn;
vaultRoleName = config.khscodes.infrastructure.vault-server-approle.role_name;
client_key = "/var/lib/alloy/prometheus.key";
client_cert = "/var/lib/alloy/prometheus.cert";
in
{
options.khscodes.infrastructure.vault-prometheus-sender = {
enable = lib.mkEnableOption "Configures the server approle to allow sending data to prometheus";
terranixBackendName = lib.mkOption {
type = lib.types.str;
description = "This should only be configured for the server hosting prometheus, to allow setting up dependencies in terraform";
description = "This should only be configured for the server hosting vault, to allow setting up dependencies in terraform";
default = "prometheus-mtls";
};
};
@ -56,8 +58,8 @@ in
{{ .Key }}
{{ .Cert }}
{{ .CA }}
{{ .Key | writeToFile "${config.khscodes.services.alloy.prometheus.client_key}" "${config.khscodes.services.alloy.user}" "${config.khscodes.services.alloy.group}" "0600" }}
{{ .Cert | writeToFile "${config.khscodes.services.alloy.prometheus.client_cert}" "${config.khscodes.services.alloy.user}" "${config.khscodes.services.alloy.group}" "0644" }}
{{ .Key | writeToFile "${client_key}" "${config.khscodes.services.alloy.user}" "${config.khscodes.services.alloy.group}" "0600" }}
{{ .Cert | writeToFile "${client_cert}" "${config.khscodes.services.alloy.user}" "${config.khscodes.services.alloy.group}" "0644" }}
{{- end -}}
'';
destination = "/var/lib/alloy/cache.key";
@ -69,10 +71,13 @@ in
];
khscodes.services.alloy = {
enable = true;
prometheus = {
client_key = "/var/lib/alloy/prometheus_cert.key";
client_cert = "/var/lib/alloy/prometheus_cert.pem";
environment = {
PROMETHEUS_CLIENT_KEY = client_key;
PROMETHEUS_CLIENT_CERT = client_cert;
};
};
environment.etc."alloy/prometheus.alloy" = {
source = ./prometheus.alloy;
};
};
}

View file

@ -0,0 +1,72 @@
// This block relabels metrics coming from node_exporter to add standard labels
discovery.relabel "integrations_node_exporter" {
targets = prometheus.exporter.unix.integrations_node_exporter.targets
rule {
// Set the instance label to the hostname of the machine
target_label = "instance"
replacement = constants.hostname
}
rule {
// Set a standard job name for all node_exporter metrics
target_label = "job"
replacement = "integrations/node_exporter"
}
}
//
// Configure the node_exporter integration to collect system metrics
prometheus.exporter.unix "integrations_node_exporter" {
// Disable unnecessary collectors to reduce overhead
disable_collectors = ["ipvs", "btrfs", "infiniband", "xfs", "zfs"]
enable_collectors = ["meminfo"]
filesystem {
// Exclude filesystem types that aren't relevant for monitoring
fs_types_exclude = "^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|tmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$"
// Exclude mount points that aren't relevant for monitoring
mount_points_exclude = "^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+)($|/)"
// Timeout for filesystem operations
mount_timeout = "5s"
}
netclass {
// Ignore virtual and container network interfaces
ignored_devices = "^(veth.*|cali.*|[a-f0-9]{15})$"
}
netdev {
// Exclude virtual and container network interfaces from device metrics
device_exclude = "^(veth.*|cali.*|[a-f0-9]{15})$"
}
}
// Define how to scrape metrics from the node_exporter
prometheus.scrape "integrations_node_exporter" {
scrape_interval = "15s"
// Use the targets with labels from the discovery.relabel component
targets = discovery.relabel.integrations_node_exporter.output
// Send the scraped metrics to the relabeling component
forward_to = [otelcol.receiver.prometheus.default.receiver]
}
otelcol.receiver.prometheus "default" {
output {
metrics = [otelcol.exporter.otlphttp.default.input]
}
}
// Define where to send the metrics for storage
otelcol.exporter.otlphttp "default" {
client {
endpoint = "https://prometheus.kaareskovgaard.net/api/v1/otlp/"
tls {
cert_file = sys.env("PROMETHEUS_CLIENT_CERT")
key_file = sys.env("PROMETHEUS_CLIENT_KEY")
}
}
encoding = "proto"
}

View file

@ -6,6 +6,7 @@
}:
let
cfg = config.khscodes.infrastructure.vault-server-approle;
vaultDomain = config.khscodes.infrastructure.openbao.domain;
in
{
options.khscodes.infrastructure.vault-server-approle = {
@ -18,6 +19,11 @@ in
description = "The provisioning stage that should include the provisioning. This should be pre for every server except the OpenBAO server itself";
default = "pre";
};
path = lib.mkOption {
type = lib.types.str;
default = "approle";
description = "Sets the path, as a terraform expression, for the approle to get created in. Not useful for most instances, but useful when doing bootstrapping, to establish a dependency.";
};
role_name = lib.mkOption {
type = lib.types.str;
description = "Name of the role being created";
@ -52,18 +58,30 @@ in
};
config = lib.mkIf cfg.enable {
khscodes.services.read-vault-auth-from-userdata.enable = true;
khscodes.services.read-vault-auth-from-userdata.enable = cfg.stage == "pre";
khscodes.services.vault-agent.enable = true;
khscodes.infrastructure.provisioning.${cfg.stage} = {
modules = [
(
{ config, ... }:
{ config, lib, ... }:
{
imports = [ inputs.self.terranixModules.vault ];
output = lib.mkIf (cfg.stage == "post") {
role-id = {
value = config.khscodes.vault.output.approle_auth_backend_role.${cfg.role_name}.role_id;
sensitive = false;
};
secret-id-wrapped = {
value =
config.khscodes.vault.output.approle_auth_backend_role_secret_id.${cfg.role_name}.wrapping_token;
sensitive = true;
};
};
khscodes.vault = {
enable = true;
domain = vaultDomain;
approle_auth_backend_role.${cfg.role_name} = {
backend = "approle";
backend = cfg.path;
role_name = cfg.role_name;
# Secret IDs never expire, to allow vault agent to restart without issues.
# TODO: Look into doing this in a better way going forward, such that this won't
@ -76,7 +94,7 @@ in
token_policies = [ cfg.role_name ];
};
approle_auth_backend_role_secret_id.${cfg.role_name} = {
backend = "approle";
backend = cfg.path;
# Not hardcoding the role name here, as reading it like this will create a dependency
# on the role being created first, which is needed.
role_name = config.khscodes.vault.output.approle_auth_backend_role.${cfg.role_name}.role_name;

View file

@ -0,0 +1,25 @@
{ config, lib, ... }:
let
cfg = config.khscodes.machine;
in
rec {
options.khscodes.machine = {
type = lib.mkOption {
type = lib.types.enum [
"server"
"desktop"
];
description = "The kind of machine that is running";
};
};
config = {
home-manager.sharedModules = [
{
inherit options;
config = {
khscodes.desktop.enable = cfg.type == "desktop";
};
}
];
};
}

View file

@ -0,0 +1,15 @@
{ config, lib, ... }:
let
cfg = config.khscodes.nix;
in
{
options.khscodes.nix = {
nix-community.enable = lib.mkEnableOption "Enables nix-community substituters";
};
config = {
nix.settings = lib.mkIf cfg.nix-community.enable {
substituters = [ "https://nix-community.cachix.org" ];
trusted-public-keys = [ "nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" ];
};
};
}

View file

@ -0,0 +1,55 @@
{
config,
lib,
inputs,
pkgs,
...
}:
let
cfg = config.khscodes.os.auto-update;
upgradePath = "/var/lib/system-upgrade";
upgradeVersion = "/var/lib/system-upgrade.version";
prepareUpgrade = pkgs.writeShellApplication {
runtimeInputs = [
pkgs.uutils-coreutils-noprefix
pkgs.nix
];
name = "nixos-prepare-upgrade";
text = ''
current_version=""
if [[ -f ${upgradeVersion} ]]; then
current_version="$(cat ${upgradeVersion})"
fi
if [[ "$current_version" != "${inputs.self.outPath}" ]]; then
rm -rf ${upgradePath}
cp -r ${inputs.self.outPath} ${upgradePath}
echo -n ${inputs.self.outPath} > ${upgradeVersion}
fi
cd ${upgradePath}
NIX_CONFIG="extra-experimental-features=flake nix-command" nix flake update
'';
};
in
{
options.khscodes.os.auto-update = {
enable = lib.mkEnableOption "Enables automatic OS updates";
dates = "02:00";
randomizedDelaySec = "45min";
};
config = lib.mkIf cfg.enable {
system.autoUpgrade = {
enable = true;
flake = upgradePath;
};
systemd.services.nixos-upgrade-prepare-flake = {
wantedBy = [ "nixos-upgrade.service" ];
before = [ "nixos-upgrade.service" ];
serviceConfig = {
Type = "oneshot";
ExecStart = lib.getExe prepareUpgrade;
};
};
};
}

View file

@ -1,151 +0,0 @@
// This block relabels metrics coming from node_exporter to add standard labels
discovery.relabel "integrations_node_exporter" {
targets = prometheus.exporter.unix.integrations_node_exporter.targets
rule {
// Set the instance label to the hostname of the machine
target_label = "instance"
replacement = constants.hostname
}
rule {
// Set a standard job name for all node_exporter metrics
target_label = "job"
replacement = "integrations/node_exporter"
}
}
//
// Configure the node_exporter integration to collect system metrics
prometheus.exporter.unix "integrations_node_exporter" {
// Disable unnecessary collectors to reduce overhead
disable_collectors = ["ipvs", "btrfs", "infiniband", "xfs", "zfs"]
enable_collectors = ["meminfo"]
filesystem {
// Exclude filesystem types that aren't relevant for monitoring
fs_types_exclude = "^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|tmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$"
// Exclude mount points that aren't relevant for monitoring
mount_points_exclude = "^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+)($|/)"
// Timeout for filesystem operations
mount_timeout = "5s"
}
netclass {
// Ignore virtual and container network interfaces
ignored_devices = "^(veth.*|cali.*|[a-f0-9]{15})$"
}
netdev {
// Exclude virtual and container network interfaces from device metrics
device_exclude = "^(veth.*|cali.*|[a-f0-9]{15})$"
}
}
// Define how to scrape metrics from the node_exporter
prometheus.scrape "integrations_node_exporter" {
scrape_interval = "15s"
// Use the targets with labels from the discovery.relabel component
targets = discovery.relabel.integrations_node_exporter.output
// Send the scraped metrics to the relabeling component
forward_to = [otelcol.receiver.prometheus.default.receiver]
}
// Convert the scraped Prometheus metrics into OTLP and hand them to the exporter
otelcol.receiver.prometheus "default" {
output {
metrics = [otelcol.exporter.otlphttp.default.input]
}
}
// Define where to send the metrics for storage
otelcol.exporter.otlphttp "default" {
client {
endpoint = "https://prometheus.kaareskovgaard.net/api/v1/otlp/"
// mTLS client credentials; the $PROMETHEUS_CLIENT_* tokens are placeholders
// substituted with real file paths when this config file is rendered
tls {
cert_file = "$PROMETHEUS_CLIENT_CERT"
key_file = "$PROMETHEUS_CLIENT_KEY"
}
}
// Use protobuf payloads for OTLP over HTTP
encoding = "proto"
}
// Collect logs from systemd journal for node_exporter integration
loki.source.journal "logs_integrations_integrations_node_exporter_journal_scrape" {
// Only collect logs from the last 24 hours
max_age = "24h0m0s"
// Apply relabeling rules to the logs
relabel_rules = discovery.relabel.logs_integrations_integrations_node_exporter_journal_scrape.rules
// Forward journal entries to the loki.write "local" component below
forward_to = [loki.write.local.receiver]
}
// Define which log files to collect for node_exporter
local.file_match "logs_integrations_integrations_node_exporter_direct_scrape" {
path_targets = [{
// Target localhost for log collection
__address__ = "localhost",
// Collect standard system logs
__path__ = "/var/log/{syslog,messages,*.log}",
// Add instance label with hostname
instance = constants.hostname,
// Add job label for logs
job = "integrations/node_exporter",
}]
}
// Define relabeling rules for systemd journal logs
discovery.relabel "logs_integrations_integrations_node_exporter_journal_scrape" {
targets = []
rule {
// Extract systemd unit information into a label
source_labels = ["__journal__systemd_unit"]
target_label = "unit"
}
rule {
// Extract boot ID information into a label
source_labels = ["__journal__boot_id"]
target_label = "boot_id"
}
rule {
// Extract transport information into a label
source_labels = ["__journal__transport"]
target_label = "transport"
}
rule {
// Extract log priority into a level label
source_labels = ["__journal_priority_keyword"]
target_label = "level"
}
rule {
// Set the instance label to the hostname of the machine
target_label = "instance"
replacement = constants.hostname
}
}
// Collect logs from files for node_exporter
loki.source.file "logs_integrations_integrations_node_exporter_direct_scrape" {
// Use targets defined in local.file_match
targets = local.file_match.logs_integrations_integrations_node_exporter_direct_scrape.targets
// Forward file log lines to the loki.write "local" component below
forward_to = [loki.write.local.receiver]
}
// Define where to send logs for storage
loki.write "local" {
endpoint {
// NOTE(review): despite the component name "local", this pushes to the
// remote Loki at loki.kaareskovgaard.net over mTLS; the $LOKI_CLIENT_*
// tokens are placeholders substituted when this config is rendered
url = "https://loki.kaareskovgaard.net/loki/api/v1/push"
tls_config {
cert_file = "$LOKI_CLIENT_CERT"
key_file = "$LOKI_CLIENT_KEY"
}
}
}

View file

@ -1,31 +1,10 @@
{ config, lib, ... }:
let
cfg = config.khscodes.services.alloy;
configFile =
lib.strings.replaceStrings
[ "$LOKI_CLIENT_KEY" "$LOKI_CLIENT_CERT" "$PROMETHEUS_CLIENT_KEY" "$PROMETHEUS_CLIENT_CERT" ]
[ cfg.loki.client_key cfg.loki.client_cert cfg.prometheus.client_key cfg.prometheus.client_cert ]
(builtins.readFile ./config.alloy);
in
{
options.khscodes.services.alloy = {
enable = lib.mkEnableOption "Enables alloy";
loki = {
client_key = lib.mkOption {
type = lib.types.str;
};
client_cert = lib.mkOption {
type = lib.types.str;
};
};
prometheus = {
client_key = lib.mkOption {
type = lib.types.str;
};
client_cert = lib.mkOption {
type = lib.types.str;
};
};
user = lib.mkOption {
type = lib.types.str;
default = "alloy";
@ -34,6 +13,10 @@ in
type = lib.types.str;
default = "alloy";
};
environment = lib.mkOption {
type = lib.types.attrsOf lib.types.str;
default = { };
};
};
config = lib.mkIf cfg.enable {
@ -44,6 +27,7 @@ in
User = "${cfg.user}";
Group = "${cfg.group}";
};
environment = cfg.environment;
};
users.users.${cfg.user} = {
description = "Alloy service user";
@ -51,8 +35,5 @@ in
group = cfg.group;
};
users.groups.${cfg.group} = { };
environment.etc."alloy/config.alloy" = {
text = configFile;
};
};
}

View file

@ -7,6 +7,10 @@ in
enable = lib.mkEnableOption "Enables openssh service for the instance";
hostCertificate = {
enable = lib.mkEnableOption "Enables getting host certificates from OpenBAO";
path = lib.mkOption {
type = lib.types.str;
default = "ssh-host";
};
hostNames = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "The list of host names to get certificates for";
@ -48,7 +52,7 @@ in
{
khscodes.vault.ssh_secret_backend_role.${vaultRoleName} = {
name = fqdn;
backend = sshHostBackend;
backend = cfg.hostCertificate.path;
key_type = "ca";
allow_host_certificates = true;
allow_bare_domains = true;

View file

@ -93,7 +93,7 @@ in
address = lib.mkOption {
type = lib.types.str;
description = "Address of the Vault/OpenBAO service";
default = "https://vault.kaareskovgaard.net";
default = "https://${config.khscodes.infrastructure.openbao.domain}";
};
roleIdFilePath = lib.mkOption {
type = lib.types.str;

View file

@ -5,6 +5,9 @@ in
{
options.khscodes.vault = {
enable = lib.mkEnableOption "Enables the openbao provider";
domain = lib.mkOption {
type = lib.types.str;
};
policy = lib.mkOption {
type = lib.types.attrsOf (
lib.khscodes.mkSubmodule {
@ -34,7 +37,7 @@ in
config = lib.mkIf cfg.enable {
provider.vault = {
address = "https://vault.kaareskovgaard.net";
address = "https://${cfg.domain}";
};
terraform.required_providers.vault = {
source = "hashicorp/vault";

View file

@ -18,15 +18,25 @@ in
default = null;
};
default_lease_ttl_seconds = lib.mkOption {
type = lib.types.int;
type = lib.types.nullOr lib.types.int;
description = "Default lease ttl in seconds";
default = null;
};
max_lease_ttl_seconds = lib.mkOption {
type = lib.types.int;
type = lib.types.nullOr lib.types.int;
description = "Max lease ttl in seconds";
default = null;
};
options = lib.mkOption {
type = lib.types.nullOr lib.types.attrs;
description = "Options for the mount";
default = null;
};
description = lib.mkOption {
type = lib.types.nullOr lib.types.str;
description = "Usage description for the mount";
default = null;
};
};
description = "vault_mount";
}

View file

@ -13,11 +13,28 @@ in
type = lib.types.str;
description = "The name of the role. Can be used instead of hardcoding the role, to create a dependency in OpenTofu";
};
role_id = lib.mkOption {
type = lib.types.str;
description = "ID of the role";
};
};
description = "vault_approle_auth_backend_role output";
}
);
};
approle_auth_backend_role_secret_id = lib.mkOption {
type = lib.types.attrsOf (
lib.khscodes.mkSubmodule {
options = {
wrapping_token = lib.mkOption {
type = lib.types.str;
description = "The generated wrapping token";
};
};
description = "vault_approle_auth_backend_role_secret_id";
}
);
};
mount = lib.mkOption {
type = lib.types.attrsOf (
lib.khscodes.mkSubmodule {
@ -41,8 +58,18 @@ in
in
{
role_name = "\${ vault_approle_auth_backend_role.${sanitizedName}.role_name }";
role_id = "\${ vault_approle_auth_backend_role.${sanitizedName}.role_id }";
}
) cfg.approle_auth_backend_role;
khscodes.vault.output.approle_auth_backend_role_secret_id = lib.mapAttrs (
name: value:
let
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
wrapping_token = "\${ vault_approle_auth_backend_role_secret_id.${sanitizedName}.wrapping_token }";
}
) cfg.approle_auth_backend_role_secret_id;
khscodes.vault.output.mount = lib.mapAttrs (
name: value:
let

View file

@ -25,7 +25,7 @@
"AUTHENTIK_TOKEN" = "Admin API Token";
"TF_VAR_authentik_username" = "login.username";
};
"vault.kaareskovgaard.net" = {
"vault-test.kaareskovgaard.net" = {
"VAULT_TOKEN" = "Initial root token";
};
}

View file

@ -0,0 +1,33 @@
{
  inputs,
  pkgs,
}:
# Shell wrapper that runs the OpenTofu provisioning config for one host.
# Usage: pre-provisioning <hostname> [apply|plan|destroy|...] (default: apply)
#
# NOTE(review): the script is named "pre-provisioning" but evaluates the
# `postConfig` / `post.secretsSource` / `postEndpoints` attributes — confirm
# whether the name or the attribute paths are the intended ones.
pkgs.writeShellApplication {
  name = "pre-provisioning";
  runtimeInputs = [
    pkgs.nix
    pkgs.khscodes.bw-opentofu
    pkgs.khscodes.instance-opentofu
    pkgs.khscodes.openbao-helper
    pkgs.jq
  ];
  # TODO: Use secret source and required secrets to set up the correct env variables
  text = ''
    hostname="$1"
    cmd="''${2:-apply}"
    baseAttr='${inputs.self}#nixosConfigurations."'"$hostname"'".config.khscodes.infrastructure.provisioning'
    config="$(nix build --no-link --print-out-paths "''${baseAttr}.postConfig")"
    # Bail out before evaluating the remaining attributes; they are only
    # needed when there is actually something to provision.
    if [[ "$config" == "null" ]]; then
      echo "No preprovisioning needed"
      exit 0
    fi
    secretsSource="$(nix eval --raw "''${baseAttr}.post.secretsSource")"
    if [[ "$secretsSource" == "vault" ]]; then
      # Endpoints are only consumed by the vault path, so only evaluate them here.
      endpoints="$(nix eval --show-trace --json "''${baseAttr}.postEndpoints")"
      # Turn the JSON array of endpoints into repeated "-e <endpoint>" arguments.
      readarray -t endpoints_args < <(jq -cr 'map(["-e", .])[][]' <<< "$endpoints")
      openbao-helper wrap-program "''${endpoints_args[@]}" -- instance-opentofu "$hostname" "$config" "$cmd"
      exit 0
    fi
    # Fallback: secrets come from Bitwarden.
    bw-opentofu "$hostname" "$config" "$cmd"
  '';
}

View file

@ -1,12 +0,0 @@
{ pkgs, ... }:
{
imports = [ ./khs-base.nix ];
home.packages = [
pkgs.bitwarden-cli
pkgs.nerd-fonts.inconsolata
pkgs.google-chrome
];
khscodes = {
desktop.enable = true;
};
}

View file

@ -1,8 +1,7 @@
{ ... }:
{
imports = [ ./nix-base.nix ];
imports = [ ./khs-server.nix ];
config.khscodes = {
hetzner.enable = true;
services.openssh.enable = true;
};
}

View file

@ -1,4 +1,4 @@
{ pkgs, ... }:
{ pkgs, config, ... }:
{
imports = [ ./nix-base.nix ];
snowfallorg.users.khs.admin = true;
@ -12,7 +12,7 @@
environment = {
systemPackages = [ pkgs.openbao ];
variables = {
BAO_ADDR = "https://vault.kaareskovgaard.net";
BAO_ADDR = "https://${config.khscodes.infrastructure.openbao.domain}";
};
};
}

View file

@ -7,6 +7,7 @@
{
imports = [ ./khs-base.nix ];
khscodes.virtualisation.qemu-guest.enableWhenVmTarget = true;
khscodes.machine.type = "desktop";
services.desktopManager.cosmic.enable = true;
services.displayManager.cosmic-greeter.enable = true;

View file

@ -1,11 +1,21 @@
{ lib, ... }:
{ lib, pkgs, ... }:
{
imports = [ ./nix-base.nix ];
config.khscodes = {
config = {
khscodes = {
services.openssh.enable = true;
machine.type = "server";
os.auto-update.enable = true;
infrastructure = {
vault-server-approle.enable = lib.mkDefault true;
vault-loki-sender = lib.mkDefault true;
vault-loki-sender.enable = lib.mkDefault true;
};
};
stylix = {
enable = true;
autoEnable = false;
base16Scheme = lib.mkDefault "${pkgs.base16-schemes}/share/themes/solarized-dark.yaml";
targets.console.enable = true;
};
};
}

View file

@ -0,0 +1,5 @@
# After creating the instance
Open https://vault.kaareskovgaard.net and initialize OpenBAO. Remember to set up some sort of auto unsealing afterwards; currently this is implemented with a cronjob on TrueNAS. Doing it this way allows various certificates to continue getting issued, even as OpenBAO gets sealed (due to auto updates).
After this, run the post provisioning script to initialize the various OpenBAO parts needed. Then `nix run '.#bitwarden-to-vault'` can transfer the needed Bitwarden secrets to vault.

View file

@ -0,0 +1,61 @@
# NOTE(review): the entire authentik host configuration below is commented
# out, so this module currently evaluates to the empty attribute set at the
# bottom and contributes nothing. Presumably it will be re-enabled once the
# new security host migration is further along — confirm before deleting.
# { config, ... }:
# let
# secretsFile = "/var/lib/authentik/authentik-env";
# domain = "auth-test.kaareskovgaard.net";
# in
# {
# config = {
# khscodes.nix.nix-community.enable = true;
# services.authentik = {
# enable = true;
# environmentFile = secretsFile;
# settings = {
# email = {
# host = "smtp.soverin.net";
# port = 587;
# username = "kaare@kaareskovgaard.net";
# use_tls = true;
# use_ssl = false;
# from = "kaare@kaareskovgaard.net";
# };
# disable_startup_analytics = true;
# avatars = "initials";
# };
# };
# khscodes.services.nginx.virtualHosts.${domain} = {
# locations."/" = {
# proxyPass = "https://localhost:9443";
# recommendedProxySettings = true;
# };
# };
# services.postgresqlBackup = {
# enable = true;
# databases = [ "authentik" ];
# };
# systemd.services = {
# authentik-migrate = {
# unitConfig = {
# ConditionPathExists = secretsFile;
# };
# };
# authentik-worker = {
# unitConfig = {
# ConditionPathExists = secretsFile;
# };
# serviceConfig = {
# LoadCredential = [
# "${domain}.pem:${config.security.acme.certs.${domain}.directory}/fullchain.pem"
# "${domain}.key:${config.security.acme.certs.${domain}.directory}/key.pem"
# ];
# };
# };
# authentik = {
# unitConfig = {
# ConditionPathExists = secretsFile;
# };
# };
# };
# };
# }
{ }

View file

@ -0,0 +1,33 @@
# Host definition for the new security server (OpenBAO + authentik),
# provisioned as a Hetzner cloud instance.
{
inputs,
...
}:
{
imports = [
"${inputs.self}/nix/profiles/nixos/hetzner-server.nix"
./authentik.nix
./openbao.nix
./post/openbao
];
khscodes.services.nginx.enable = true;
khscodes.infrastructure.hetzner-instance = {
enable = true;
# Smallest ARM (aarch64) Hetzner cloud server type.
server_type = "cax11";
};
# Build the VM variant with a host package set for aarch64-darwin
# (i.e. run the test VM from an Apple Silicon machine).
virtualisation.vmVariant.virtualisation.host.pkgs = import inputs.nixpkgs {
system = "aarch64-darwin";
};
# Cannot use vault for secrets source, as this is the server containing vault.
khscodes.infrastructure.provisioning.pre.secretsSource = "bitwarden";
khscodes.infrastructure.provisioning.post.secretsSource = "bitwarden";
khscodes.infrastructure.vault-server-approle.stage = "post";
khscodes.networking.fqdn = "security.kaareskovgaard.net";
users.users.khs = {
# Only used when the account is first created; expected to be changed on
# first login.
initialPassword = "changeme";
openssh.authorizedKeys.keys = [
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCqY0FHnWFKfLG2yfgr4qka5sR9CK+EMAhzlHUkaQyWHTKD+G0/vC/fNPyL1VV3Dxc/ajxGuPzVE+mBMoyxazL3EtuCDOVvHJ5CR+MUSEckg/DDwcGHqy6rC8BvVVpTAVL04ByQdwFnpE1qNSBaQLkxaFVdtriGKkgMkc7+UNeYX/bv7yn+APqfP1a3xr6wdkSSdO8x4N2jsSygOIMx10hLyCV4Ueu7Kp8Ww4rGY8j5o7lKJhbgfItBfSOuQHdppHVF/GKYRhdnK6Y2fZVYbhq4KipUtclbZ6O/VYd8/sOO98+LMm7cOX+K35PQjUpYgcoNy5+Sw3CNS/NHn4JvOtTaUEYP7fK6c9LhMULOO3T7Cm6TMdiFjUKHkyG+s2Mu/LXJJoilw571zwuh6chkeitW8+Ht7k0aPV96kNEvTdoXwLhBifVEaChlAsLAzSUjUq+YYCiXVk0VIXCZQWKj8LoVNTmaqDksWwbcT64fw/FpVC0N18WHbKcFUEIW/O4spJMa30CQwf9FeqpoWoaF1oRClCSDPvX0AauCu0JcmRinz1/JmlXljnXWbSfm20/V+WyvktlI0wTD0cdpNuSasT9vS77YfJ8nutcWWZKSkCj4R4uHeCNpDTX5YXzapy7FxpM9ANCXLIvoGX7Yafba2Po+er7SSsUIY1AsnBBr8ZoDVw=="
];
};
khscodes.infrastructure.openbao.domain = "vault-test.kaareskovgaard.net";
system.stateVersion = "25.05";
}

View file

@ -0,0 +1,51 @@
# OpenBAO (Vault fork) service behind nginx, backed by PostgreSQL, with TLS
# certificates from the host's ACME setup.
{ pkgs, config, ... }:
let
# NOTE(review): this duplicates khscodes.infrastructure.openbao.domain set in
# the host's default.nix — keep the two in sync (or derive one from the other).
domain = "vault-test.kaareskovgaard.net";
in
{
config = {
services.openbao = {
enable = true;
package = pkgs.openbao;
settings = {
ui = true;
listener.tcp = {
type = "tcp";
# Serve the same ACME certificate nginx uses for this domain.
tls_cert_file = "${config.security.acme.certs.${domain}.directory}/fullchain.pem";
tls_key_file = "${config.security.acme.certs.${domain}.directory}/key.pem";
};
api_addr = "https://${domain}";
# Connect over the local UNIX socket; peer auth, no password.
storage.postgresql.connection_url = "postgres://openbao?host=/run/postgresql";
};
};
# Restart OpenBAO when the certificate is renewed.
security.acme.certs.${domain}.reloadServices = [ "openbao.service" ];
# The PostgreSQL storage backend must be up before OpenBAO starts.
systemd.services.openbao.after = [ "postgresql.service" ];
# Allow openbao to read the certificate file
users.groups.nginx.members = [ "openbao" ];
services.postgresql = {
enable = true;
ensureDatabases = [ "openbao" ];
ensureUsers = [
{
name = "openbao";
ensureDBOwnership = true;
}
];
};
services.postgresqlBackup = {
enable = true;
databases = [ "openbao" ];
};
khscodes.services.nginx.virtualHosts.${domain} = {
locations."/" = {
# NOTE(review): listener.tcp.address is not set in this file, so this
# relies on the openbao module's default listen address — confirm.
proxyPass = "https://${config.services.openbao.settings.listener.tcp.address}/";
recommendedProxySettings = true;
};
};
};
}

View file

@ -0,0 +1,10 @@
# Creates the AppRole auth backend on the OpenBAO server during post
# provisioning, and points the vault-server-approle module at its path.
{
# Raw terraform interpolation: resolved by OpenTofu, not by Nix.
# NOTE(review): the same option is also assigned (with an identical value) in
# post/openbao/default.nix — one of the two definitions is redundant.
khscodes.infrastructure.vault-server-approle.path = "\${ vault_auth_backend.approle.path }";
khscodes.infrastructure.provisioning.post.modules = [
{
# Plain terranix resource; enables AppRole auth at the default path.
resource.vault_auth_backend.approle = {
type = "approle";
};
}
];
}

View file

@ -0,0 +1,29 @@
# Post-provisioning setup for the OpenBAO server itself: auth backend,
# secret mounts, and the kv store used by the provisioning tooling.
{
imports = [
./approle.nix
./ssh-host.nix
./loki-mtls.nix
./prometheus-mtls.nix
];
# NOTE(review): also defined (identically) in ./approle.nix — redundant here.
khscodes.infrastructure.vault-server-approle.path = "\${ vault_auth_backend.approle.path }";
khscodes.infrastructure.provisioning.post.modules = [
(
{ config, ... }:
{
# kv version 2 mount holding secrets consumed by OpenTofu runs.
khscodes.vault.mount.opentofu = {
path = "opentofu";
type = "kv";
options = {
version = "2";
};
description = "Secrets used during provisioning";
};
# Configure the kv-v2 backend itself (version retention, no
# check-and-set requirement).
resource.vault_kv_secret_backend_v2.opentofu = {
mount = config.khscodes.vault.output.mount.opentofu.path;
max_versions = 5;
cas_required = false;
};
}
)
];
}

View file

@ -0,0 +1,26 @@
# This should go into the setup of the vault server itself, as the vault server also needs stuff that depends on this.
{
khscodes.infrastructure.vault-loki-sender = {
# This is quite ugly, but should get the job done. Sadly the mount output
# cannot be referenced from here, so use a raw terraform interpolation.
terranixBackendName = "\${ vault_mount.loki-mtls.path }";
};
khscodes.infrastructure.provisioning.post.modules = [
(
{ config, ... }:
{
khscodes.vault.enable = true;
# PKI mount issuing client certificates for Loki mTLS.
khscodes.vault.mount.loki-mtls = {
type = "pki";
path = "loki-mtls";
# Root CA may live for 10 years; issued leaf certs default to 1 hour.
max_lease_ttl_seconds = 10 * 365 * 24 * 60 * 60;
default_lease_ttl_seconds = 60 * 60;
};
# Internal root CA (private key never leaves OpenBAO).
khscodes.vault.pki_secret_backend_root_cert.loki-mtls = {
backend = config.khscodes.vault.output.mount.loki-mtls.path;
type = "internal";
common_name = "loki.kaareskovgaard.net";
issuer_name = "loki-mtls-root-ca";
};
}
)
];
}

View file

@ -0,0 +1,27 @@
# This should go into the setup of the vault server itself, as the vault server also needs stuff that depends on this.
{
khscodes.infrastructure.vault-prometheus-sender = {
# This is quite ugly, but should get the job done. Sadly I cannot reference the output from here.
terranixBackendName = "\${ vault_mount.prometheus-mtls.path }";
};
khscodes.infrastructure.provisioning.post.modules = [
(
{ config, ... }:
{
khscodes.vault.enable = true;
# PKI mount issuing client certificates for Prometheus mTLS.
khscodes.vault.mount.prometheus-mtls = {
type = "pki";
path = "prometheus-mtls";
# Root CA may live for 10 years; issued leaf certs default to 1 hour.
max_lease_ttl_seconds = 10 * 365 * 24 * 60 * 60;
default_lease_ttl_seconds = 60 * 60;
};
# Internal root CA (private key never leaves OpenBAO).
khscodes.vault.pki_secret_backend_root_cert.prometheus-mtls = {
backend = config.khscodes.vault.output.mount.prometheus-mtls.path;
type = "internal";
common_name = "prometheus.kaareskovgaard.net";
issuer_name = "prometheus-mtls-root-ca";
};
}
)
];
}

View file

@ -0,0 +1,22 @@
# SSH secret engine used to sign host certificates, created during post
# provisioning on the OpenBAO server.
{
  # Raw terraform interpolation: the openssh module cannot reference the
  # mount's output directly from here.
  khscodes.services.openssh.hostCertificate.path = "\${ vault_mount.ssh-host.path }";
  khscodes.infrastructure.provisioning.post.modules = [
    (
      { config, ... }:
      {
        # Enable the vault provider explicitly, consistent with the sibling
        # loki-mtls/prometheus-mtls provisioning modules.
        khscodes.vault.enable = true;
        khscodes.vault.mount.ssh-host = {
          type = "ssh";
          path = "ssh-host";
          # 24-hour leases for signed host certificates.
          default_lease_ttl_seconds = 24 * 60 * 60;
          max_lease_ttl_seconds = 24 * 60 * 60;
        };
        # CA used to sign host keys; OpenBAO generates and keeps the key.
        resource.vault_ssh_secret_backend_ca.ssh-host = {
          backend = config.khscodes.vault.output.mount.ssh-host.path;
          generate_signing_key = true;
          key_type = "ed25519";
        };
      }
    )
  ];
}

View file

@ -10,5 +10,11 @@
device = "/dev/sda";
diskName = "nixos";
};
users.users.khs = {
initialPassword = "changeme";
openssh.authorizedKeys.keys = [
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCqY0FHnWFKfLG2yfgr4qka5sR9CK+EMAhzlHUkaQyWHTKD+G0/vC/fNPyL1VV3Dxc/ajxGuPzVE+mBMoyxazL3EtuCDOVvHJ5CR+MUSEckg/DDwcGHqy6rC8BvVVpTAVL04ByQdwFnpE1qNSBaQLkxaFVdtriGKkgMkc7+UNeYX/bv7yn+APqfP1a3xr6wdkSSdO8x4N2jsSygOIMx10hLyCV4Ueu7Kp8Ww4rGY8j5o7lKJhbgfItBfSOuQHdppHVF/GKYRhdnK6Y2fZVYbhq4KipUtclbZ6O/VYd8/sOO98+LMm7cOX+K35PQjUpYgcoNy5+Sw3CNS/NHn4JvOtTaUEYP7fK6c9LhMULOO3T7Cm6TMdiFjUKHkyG+s2Mu/LXJJoilw571zwuh6chkeitW8+Ht7k0aPV96kNEvTdoXwLhBifVEaChlAsLAzSUjUq+YYCiXVk0VIXCZQWKj8LoVNTmaqDksWwbcT64fw/FpVC0N18WHbKcFUEIW/O4spJMa30CQwf9FeqpoWoaF1oRClCSDPvX0AauCu0JcmRinz1/JmlXljnXWbSfm20/V+WyvktlI0wTD0cdpNuSasT9vS77YfJ8nutcWWZKSkCj4R4uHeCNpDTX5YXzapy7FxpM9ANCXLIvoGX7Yafba2Po+er7SSsUIY1AsnBBr8ZoDVw=="
];
};
system.stateVersion = "25.05";
}

1
result Symbolic link
View file

@ -0,0 +1 @@
/nix/store/v800qb1c26k2hplgc35af8xnffkax9y5-env-20250714222538