Actually get zfs mounting working
Some checks failed
/ dev-shell (push) Successful in 3m50s
/ check (push) Failing after 6m55s
/ terraform-providers (push) Successful in 14m1s
/ systems (push) Successful in 48m20s
/ rust-packages (push) Successful in 6m46s

This is done by not using the built-in mounting, but by
relying on ZFS to mount correctly after importing the pool
and loading the encryption key.
Kaare Hoff Skovgaard 2025-08-04 01:07:05 +02:00
parent fa8320b805
commit f410517ffa
Signed by: khs
GPG key ID: C7D890804F01E9F0
5 changed files with 112 additions and 69 deletions
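In NixOS terms the change boils down to the following pattern. This is a condensed, partly hypothetical sketch (the unit name and the `fetch-zroot-key` step are placeholders), not the actual module from the diff below: the stock ZFS import/mount units and the declarative `fileSystems` entries are disabled, and a single oneshot service imports the pool, loads the key, and then lets ZFS mount its own datasets.

```nix
# Sketch only: pool "zroot" and dataset "zroot/mailserver" match the commit,
# but the service name and fetch-zroot-key are stand-ins.
{ pkgs, ... }:
{
  boot.supportedFilesystems.zfs = true;
  boot.zfs = {
    forceImportRoot = false;
    requestEncryptionCredentials = false;
  };

  # Hand the import/mount job over to the custom service below.
  systemd.services.zfs-mount.enable = false;
  systemd.services.zfs-import-zroot.enable = false;

  systemd.services.zfs-setup-zroot = {
    after = [ "network-online.target" ];
    wants = [ "network-online.target" ];
    wantedBy = [ "multi-user.target" ];
    path = [ pkgs.zfs ];
    serviceConfig = {
      Type = "oneshot";
      RemainAfterExit = true;
    };
    script = ''
      # Import without mounting (-N), load the key, then let ZFS mount everything.
      # fetch-zroot-key is a stand-in for the OpenBAO retrieval in the real module.
      zpool list zroot >/dev/null 2>&1 || zpool import -d /dev/disk/by-id -N zroot
      fetch-zroot-key | zfs load-key -L file:///dev/stdin zroot/mailserver
      zfs mount -a
    '';
  };
}
```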

@@ -19,8 +19,7 @@ in
config = lib.mkIf cfg.enable {
security.acme = {
acceptTerms = true;
defaults =
{
defaults = {
email = "kaare@kaareskovgaard.net";
}
// lib.attrsets.optionalAttrs cfg.dns01Enabled {

@@ -74,8 +74,8 @@ in
echo -n "$secret_id_wrapped" > ${cacheFilePath}
chmod 0600 ${cacheFilePath}
chown root:root ${cacheFilePath}
echo "Role id and secret id copied, restarting vault-agent"
systemctl restart vault-agent-openbao.service
echo "Role id and secret id copied, rebooting system"
reboot
'';
}
);

@@ -0,0 +1,16 @@
# mx.kaareskovgaard.net
This is a setup of a Mail eXchange server. The goal is to have a fast, secure, hassle-free mail server running.
The server uses a 2-disk setup in Hetzner, with the data disk (storing the mails) being a ZFS zpool. It is done this way to:
1. Be encrypted. I don't want my mails lying around freely on other people's hardware.
2. Be easy to back up daily on my TrueNAS server. I want to be reasonably sure that my emails don't just disappear on me. It is not _critical_, but I would still like to avoid losing them. They should also compress nicely, so this might give a slight boost.
## Add new domains
To add new domains to the MX, simply add them to `domains` in `default.nix`. This will set up Cloudflare DNS records for the domain as well as enable addresses on it to be used. Accounts are specified in `users.nix`. Each account is expected to have a matching user in Kanidm on `security.kaareskovgaard.net`, and that user needs a POSIX account with a POSIX password set there in order to authenticate over IMAP and SMTP. A future version of Kanidm may change this, as application-specific passwords over LDAP should become a thing.
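
For illustration, a hypothetical sketch of the list in `default.nix` after adding a new domain (`example.org` is a placeholder; the real list is visible in the diff further down in this commit):

```nix
{
  # Placeholder example: appending a new domain to the existing list.
  domains = [
    "agerlin-skovgaard.dk"
    "agerlinskovgaard.dk"
    "k.agerlin-skovgaard.dk"
    "k.agerlinskovgaard.dk"
    "kas.codes"
    "example.org"
  ];
}
```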
## Loading of encryption key for the zpool
The encryption key for the zpool is stored in OpenBAO and is loaded during boot; this should just work. The key is never stored on the server's own disks and should never touch them at all.
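
Conceptually, the boot-time loading boils down to piping the secret straight into `zfs load-key`. A minimal sketch, assuming an OpenBAO secret at a placeholder path (the real script in this commit also imports the pool, retries, and authenticates via the vault-agent credentials):

```nix
{ pkgs }:
pkgs.writeShellApplication {
  name = "zfs-load-zroot-key";
  runtimeInputs = [
    pkgs.openbao
    pkgs.zfs
  ];
  text = ''
    # "secret/mx" and the field name are placeholders for the real OpenBAO layout.
    # The key goes over stdin, so it never touches the disk.
    bao kv get -field=zroot_encryption_key secret/mx \
      | zfs load-key -L file:///dev/stdin zroot/mailserver
  '';
}
```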

@@ -11,6 +11,14 @@ let
in
assert (lib.lists.length split) == 2;
lib.lists.head split;
domains = [
"agerlin-skovgaard.dk"
"agerlinskovgaard.dk"
"k.agerlin-skovgaard.dk"
"k.agerlinskovgaard.dk"
"kas.codes"
];
in
{
imports = [
@@ -31,7 +39,7 @@
{
resource.hcloud_volume.zroot-disk1 = {
name = "mx.kaareskovgaard.net-zroot-disk1";
size = 100;
size = 10;
location = locationFromDatacenter config.khscodes.hcloud.server.compute.datacenter;
};
resource.hcloud_volume_attachment.zroot-disk1 = {
@@ -60,15 +68,9 @@ in
];
};
"mx.kaareskovgaard.net" = {
inherit domains;
postmaster = "kaare+postmaster@agerlin-skovgaard.dk";
abuse = "kaare+abuse@agerlin-skovgaard.dk";
domains = [
"agerlin-skovgaard.dk"
"agerlinskovgaard.dk"
"k.agerlin-skovgaard.dk"
"k.agerlinskovgaard.dk"
"kas.codes"
];
accounts = import ./users.nix;
};
};

@@ -11,12 +11,9 @@ let
rootPartName = "primary";
volumeGroupName = "mainpool";
rootLvName = "root";
zrootKey = "/run/secret/zroot.key";
# Don't ask me why this changes when there's more than one volume attached.
nixosDisk = "/dev/sdb";
zrootDisk1Disk = "/dev/sda";
vmailUser = config.mailserver.vmailUserName;
vmailGroup = config.mailserver.vmailGroupName;
downloadZrootKey = pkgs.writeShellApplication {
name = "zfs-download-zroot-key";
@@ -25,12 +22,41 @@
pkgs.zfs
pkgs.uutils-coreutils-noprefix
pkgs.jq
pkgs.gawk
];
text = ''
if [[ "$(zfs list -j -o keystatus zroot/mailserver | jq --raw-output '.datasets."zroot/mailserver".properties.keystatus.value')" == "available" ]]; then
>&2 echo "Key already loaded, exiting"
exit 0
poolReady() {
pool="$1"
state="$(zpool import -d "/dev/disk/by-id" 2>/dev/null | awk "/pool: $pool/ { found = 1 }; /state:/ { if (found == 1) { print \$2; exit } }; END { if (found == 0) { print \"MISSING\" } }")"
if [[ "$state" = "ONLINE" ]]; then
return 0
else
echo "Pool $pool in state $state, waiting"
return 1
fi
}
poolImported() {
pool="$1"
zpool list "$pool" >/dev/null 2>/dev/null
}
poolImport() {
pool="$1"
zpool import -d "/dev/disk/by-id" -N "$pool"
}
if ! poolImported "zroot"; then
echo -n "importing ZFS pool \"zroot\"..."
# Retry the import until it succeeds, because the devices needed may not have been discovered yet.
for _ in $(seq 1 60); do
poolReady "zroot" && poolImport "zroot" && break
sleep 1
done
poolImported "zroot" || poolImport "zroot" # Try one last time, e.g. to import a degraded pool.
fi
if ! poolImported "zroot"; then
echo "Could not import zroot"
exit 1
fi
if [[ "$(zfs list -j -o keystatus zroot/mailserver | jq --raw-output '.datasets."zroot/mailserver".properties.keystatus.value')" == "unavailable" ]]; then
# The vault CLI insists on a token helper; point HOME at a throwaway directory to disable it
HOME="$(mktemp -d)"
export HOME
@@ -45,12 +71,16 @@ let
rm -rf "$HOME"
echo "$encryption_key" | zfs load-key -L file:///dev/stdin zroot/mailserver
fi
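# With the key loaded, have ZFS mount all its datasets itself rather than relying on fileSystems entries.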
zfs mount -a
'';
};
in
{
systemd.services = {
dovecot2 = {
after = [ "zfs-download-zroot-key.service" ];
wants = [ "zfs-download-zroot-key.service" ];
unitConfig.RequiresMountsFor = [
"/var/mailserver/vmail"
"/var/mailserver/indices"
@@ -68,19 +98,33 @@ in
INSTALL_ARGS+=("/run/secret/zroot.key")
INSTALL_ARGS+=("$tmpfile")
'';
boot.supportedFilesystems = {
zfs = true;
};
boot.zfs = {
forceImportRoot = false;
requestEncryptionCredentials = false;
};
systemd.services.zfs-mount.enable = false;
systemd.services.zfs-import-zroot.enable = false;
fileSystems = {
"/var/mailserver/vmail" = {
enable = lib.mkForce false;
};
"/var/mailserver/indices" = {
enable = lib.mkForce false;
};
};
systemd.services.zfs-download-zroot-key = {
after = [
"network-online.target"
"zfs-import-zroot.service"
"read-vault-auth-from-userdata.service"
];
wants = [
"network-online.target"
"zfs-import-zroot.service"
"read-vault-auth-from-userdata.service"
];
wantedBy = [ "zfs-mount.target" ];
before = [ "zfs-mount.target" ];
wantedBy = [
"multi-user.target"
];
environment = {
BAO_ADDR = config.khscodes.services.vault-agent.vault.address;
};
@@ -89,32 +133,11 @@ in
RemainAfterExit = true;
ExecStart = lib.getExe downloadZrootKey;
};
unitConfig.ConditionPathExists = [
"/var/lib/vault-agent/role-id"
"/var/lib/vault-agent/secret-id"
];
};
khscodes.services.vault-agent.templates = [
{
contents = ''
{{- with secret "opentofu/data/mx.kaareskovgaard.net" -}}
{{ .Data.data.MX_KAARESKOVGAARD_NET_ZROOT_ENCRYPTION_KEY }}
{{- end -}}
'';
destination = zrootKey;
owner = "root";
group = "root";
perms = "0600";
exec = ''
chown ${lib.escapeShellArg vmailUser}:${lib.escapeShellArg vmailGroup} /var/mailserver/vmail
chmod 2770 /var/mailserver/vmail
chown ${lib.escapeShellArg vmailUser}:${lib.escapeShellArg vmailGroup} /var/mailserver/indices
chmod 0700 /var/mailserver/indices
'';
restartUnits = [
"zfs-mount.service"
"postfix.service"
"dovecot2.service"
"rspamd.service"
];
}
];
mailserver.mailDirectory = "/var/mailserver/vmail";
mailserver.indexDir = "/var/mailserver/indices";
khscodes.infrastructure.vault-server-approle.policy = {
@@ -194,7 +217,10 @@ in
xattr = "sa";
"com.sun:auto-snapshot" = "true";
};
options.ashift = "12";
options = {
ashift = "12";
autoexpand = "on";
};
datasets = {
"mailserver" = {
type = "zfs_fs";