Add some automatic backups of postgresql databases when using zfs volume

Some checks failed
/ dev-shell (push) Successful in 4m10s
/ rust-packages (push) Successful in 10m52s
/ check (push) Failing after 11m2s
/ systems (push) Successful in 44m21s
/ terraform-providers (push) Successful in 7m21s
This commit is contained in:
Kaare Hoff Skovgaard 2025-08-10 22:56:36 +02:00
parent 457eb3f6b0
commit 1ca3a407f2
Signed by: khs
GPG key ID: C7D890804F01E9F0
5 changed files with 263 additions and 197 deletions
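
For orientation, a host would opt into the new behaviour roughly as in the sketch below. This is a minimal illustration only: the database name is hypothetical, and it assumes the option paths visible in this diff (khscodes.fs.zfs.enable, khscodes.fs.zfs.services.postgresql.*, services.postgresqlBackup) with their defaults left in place.

{ ... }:
{
  # Hypothetical host module, not part of this commit.
  khscodes.fs.zfs.enable = true; # the main zpool is created by khscodes-zpool-setup
  services.postgresql = {
    enable = true; # with zfs enabled, khscodes.fs.zfs.services.postgresql.enable defaults to true
    ensureDatabases = [ "exampledb" ]; # hypothetical; backupDatabases defaults to ensureDatabases
  };
  # With the defaults from this commit the result is:
  #  - dataset <mainPool>/database/postgresql mounted at /var/lib/postgresql for the data dir
  #  - dataset <mainPool>/backup/database/postgresql mounted at /var/backup/postgresql
  #  - services.postgresqlBackup enabled for "exampledb"
}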


@@ -59,8 +59,10 @@ pkgs.nixosTest {
   inputs.self.nixosModules."fs/zfs"
   inputs.self.nixosModules."networking/fqdn"
   inputs.self.nixosModules."infrastructure/vault-server-approle"
+  inputs.self.nixosModules."infrastructure/vault-prometheus-sender"
   inputs.self.nixosModules."infrastructure/provisioning"
   inputs.self.nixosModules."infrastructure/openbao"
+  inputs.self.nixosModules."services/alloy"
   inputs.self.nixosModules."services/vault-agent"
   inputs.self.nixosModules."services/read-vault-auth-from-userdata"
   inputs.self.nixosModules."services/openssh"


@@ -5,6 +5,7 @@
   ...
 }:
 let
+  inherit (import ./options.nix { inherit lib config; }) zpoolModule;
   cfg = config.khscodes.fs.zfs;
   isTest = cfg.test;
   zpoolSetup = lib.getExe pkgs.khscodes.zpool-setup;
@@ -26,79 +27,6 @@ let
       ${lib.escapeShellArg name}
   '';
   setupZpools = lib.lists.map setupZpool (lib.attrsToList cfg.zpools);
-  vdevModule = lib.khscodes.mkSubmodule {
-    description = "vdev";
-    options = {
-      mode = lib.mkOption {
-        type = lib.types.enum [
-          "mirror"
-          "raidz"
-          "raidz1"
-          "raidz2"
-          "raidz3"
-        ];
-        description = "Mode of the vdev";
-        default = "mirror";
-      };
-      members = lib.mkOption {
-        type = lib.types.listOf lib.types.str;
-        description = "Member disks of the vdev. Given as symbolic names, expected to be mapped to actual disks elsewhere.";
-      };
-    };
-  };
-  datasetModule = lib.khscodes.mkSubmodule {
-    description = "dataset";
-    options = {
-      options = lib.mkOption {
-        description = "Options for the dataset";
-        type = lib.types.attrsOf lib.types.str;
-        default = { };
-      };
-      mountpoint = lib.mkOption {
-        type = lib.types.nullOr lib.types.str;
-        default = null;
-        description = "Path to mount the dataset to";
-      };
-    };
-  };
-  zpoolModule = lib.khscodes.mkSubmodule {
-    description = "zpool";
-    options = {
-      vdevs = lib.mkOption {
-        type = lib.types.listOf vdevModule;
-        default = [ ];
-      };
-      encryptionKeyOpenbao = {
-        mount = lib.mkOption {
-          type = lib.types.str;
-          default = "opentofu";
-          description = "The mountpoint of the encryption key";
-        };
-        name = lib.mkOption {
-          type = lib.types.str;
-          description = "The name of the encryption key in the mount";
-          default = config.khscodes.networking.fqdn;
-        };
-        field = lib.mkOption {
-          type = lib.types.str;
-          description = "Field name of the encryption key";
-        };
-      };
-      rootFsOptions = lib.mkOption {
-        type = lib.types.attrsOf lib.types.str;
-        default = { };
-      };
-      zpoolOptions = lib.mkOption {
-        type = lib.types.attrsOf lib.types.str;
-        default = { };
-      };
-      datasets = lib.mkOption {
-        type = lib.types.attrsOf datasetModule;
-        description = "Datasets for the zpool";
-        default = { };
-      };
-    };
-  };
 in
 {
   options.khscodes.fs.zfs = {
@@ -120,126 +48,85 @@ in
         "${cfg.mainPoolName}" = { };
       };
     };
-    services = {
-      postgresql = {
-        enable = lib.option {
-          description = "Enables storing postgresql data on a zfs zpool";
-          type = lib.types.bool;
-          default = cfg.enable && config.services.postgresql.enable;
-        };
-        pool = lib.mkOption {
-          type = lib.types.str;
-          default = cfg.mainPoolName;
-        };
-        datasetName = lib.mkOption {
-          type = lib.types.str;
-          default = "database/postgresql";
-        };
-        datasetConfig = lib.mkOption {
-          type = datasetModule;
-          default = {
-            mountpoint = config.services.postgresql.dataDir;
-          };
-        };
-      };
-    };
   };
-  config = lib.mkMerge [
-    (lib.mkIf cfg.enable {
-      # TODO: Verify that each member disk is uniquely named, and exists somewhere?
-      assertions = lib.lists.map (
-        { name, value }:
-        {
-          assertion = (lib.lists.length value.vdevs) > 0;
-          message = "Zpool ${name} contains no vdevs";
-        }
-      ) (lib.attrsToList cfg.zpools);
-      boot.supportedFilesystems = {
-        zfs = true;
-      };
-      # On servers, we handle importing, creating and mounting of the pool manually.
-      boot.zfs = {
-        forceImportRoot = false;
-        requestEncryptionCredentials = false;
-      };
-      services.zfs.autoScrub.enable = true;
-      systemd.services.zfs-mount.enable = false;
-      systemd.services.zfs-import-zroot.enable = false;
-      systemd.services.khscodes-zpool-setup = {
-        after = [
-          "network-online.target"
-        ];
-        wants = [
-          "network-online.target"
-        ];
-        wantedBy = [
-          "multi-user.target"
-        ];
-        environment = {
-          BAO_ADDR = config.khscodes.services.vault-agent.vault.address;
-          VAULT_ROLE_ID_FILE = "/var/lib/vault-agent/role-id";
-          VAULT_SECRET_ID_FILE = "/var/lib/vault-agent/secret-id";
-          DISK_MAPPING_FILE = "/run/secret/disk-mapping.json";
-          LOGLEVEL = "trace";
-        }
-        // (lib.attrsets.optionalAttrs isTest {
-          ZFS_TEST = "true";
-        });
-        unitConfig.ConditionPathExists = [
-          "/run/secret/disk-mapping.json"
-        ]
-        ++ lib.lists.optionals (!isTest) [
-          "/var/lib/vault-agent/role-id"
-          "/var/lib/vault-agent/secret-id"
-        ];
-        serviceConfig = {
-          Type = "oneshot";
-          RemainAfterExit = true;
-          ExecStart = ''
-            ${lib.strings.concatStringsSep "\n" setupZpools}
-          '';
-        };
-      };
-      khscodes.infrastructure.vault-server-approle.policy = lib.mapAttrs' (name: value: {
-        name = "${value.encryptionKeyOpenbao.mount}/data/${value.encryptionKeyOpenbao.name}";
-        value = {
-          capabilities = [ "read" ];
-        };
-      }) cfg.zpools;
-      # Reading the disk setup through anopenbao secret allows
-      # the service to be restarted when adding new disks, or resizing existing disks.
-      khscodes.services.vault-agent.templates = [
-        {
-          contents = ''
-            {{- with secret "data-disks/data/${config.khscodes.networking.fqdn}" -}}
-            {{ .Data.data | toUnescapedJSON }}
-            {{- end -}}
-          '';
-          destination = "/run/secret/disk-mapping.json";
-          owner = "root";
-          group = "root";
-          perms = "0644";
-          restartUnits = [ "khscodes-zpool-setup.service" ];
-        }
-      ];
-      services.prometheus.exporters.zfs.enable = true;
-      khscodes.infrastructure.vault-prometheus-sender.exporters.enabled = [ "zfs" ];
-    })
-    (lib.mkIf (cfg.enable && cfg.services.postgresql.enable) {
-      khscodes.fs.zfs.zpools."${cfg.services.postgresql.pool
-      }".datasets."${cfg.services.postgresql.datasetName}" =
-        cfg.services.postgresql.datasetConfig;
-      systemd.services.postgresql = {
-        after = [ "khscodes-zpool-setup.service" ];
-        unitConfig = {
-          RequiresMountsFor = cfg.services.postgresql.datasetConfig.mountpoint;
-        };
-      };
-      systemd.services.khscodes-zpool-setup = {
-        ExecStartPost = ''
-          chown ${config.services.postgresql.user}:${config.services.postgresql.group} ${lib.escapeShellArg cfg.services.postgresql.datasetConfig.mountpoint}
-        '';
-      };
-    })
-  ];
+  config = lib.mkIf cfg.enable {
+    # TODO: Verify that each member disk is uniquely named, and exists somewhere?
+    assertions = lib.lists.map (
+      { name, value }:
+      {
+        assertion = (lib.lists.length value.vdevs) > 0;
+        message = "Zpool ${name} contains no vdevs";
+      }
+    ) (lib.attrsToList cfg.zpools);
+    boot.supportedFilesystems = {
+      zfs = true;
+    };
+    # On servers, we handle importing, creating and mounting of the pool manually.
+    boot.zfs = {
+      forceImportRoot = false;
+      requestEncryptionCredentials = false;
+    };
+    services.zfs.autoScrub.enable = true;
+    systemd.services.zfs-mount.enable = false;
+    systemd.services.zfs-import-zroot.enable = false;
+    systemd.services.khscodes-zpool-setup = {
+      after = [
+        "network-online.target"
+      ];
+      wants = [
+        "network-online.target"
+      ];
+      wantedBy = [
+        "multi-user.target"
+      ];
+      environment = {
+        BAO_ADDR = config.khscodes.services.vault-agent.vault.address;
+        VAULT_ROLE_ID_FILE = "/var/lib/vault-agent/role-id";
+        VAULT_SECRET_ID_FILE = "/var/lib/vault-agent/secret-id";
+        DISK_MAPPING_FILE = "/run/secret/disk-mapping.json";
+        LOGLEVEL = "trace";
+      }
+      // (lib.attrsets.optionalAttrs isTest {
+        ZFS_TEST = "true";
+      });
+      unitConfig.ConditionPathExists = [
+        "/run/secret/disk-mapping.json"
+      ]
+      ++ lib.lists.optionals (!isTest) [
+        "/var/lib/vault-agent/role-id"
+        "/var/lib/vault-agent/secret-id"
+      ];
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        ExecStart = ''
+          ${lib.strings.concatStringsSep "\n" setupZpools}
+        '';
+      };
+    };
+    khscodes.infrastructure.vault-server-approle.policy = lib.mapAttrs' (name: value: {
+      name = "${value.encryptionKeyOpenbao.mount}/data/${value.encryptionKeyOpenbao.name}";
+      value = {
+        capabilities = [ "read" ];
+      };
+    }) cfg.zpools;
+    # Reading the disk setup through anopenbao secret allows
+    # the service to be restarted when adding new disks, or resizing existing disks.
+    khscodes.services.vault-agent.templates = [
+      {
+        contents = ''
+          {{- with secret "data-disks/data/${config.khscodes.networking.fqdn}" -}}
+          {{ .Data.data | toUnescapedJSON }}
+          {{- end -}}
+        '';
+        destination = "/run/secret/disk-mapping.json";
+        owner = "root";
+        group = "root";
+        perms = "0644";
+        restartUnits = [ "khscodes-zpool-setup.service" ];
+      }
+    ];
+    services.prometheus.exporters.zfs.enable = true;
+    khscodes.infrastructure.vault-prometheus-sender.exporters.enabled = [ "zfs" ];
+  };
 }
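
The postgresql wiring added in this commit leans on the ordering contract of khscodes-zpool-setup: anything stored on a managed dataset is started after that unit and declares its mountpoint. A minimal sketch of the same pattern for some other service (the dataset name, path and unit name are hypothetical):

{ config, ... }:
{
  # Hypothetical consumer of a zfs-backed dataset; mirrors the wiring used for postgresql below.
  khscodes.fs.zfs.zpools."${config.khscodes.fs.zfs.mainPoolName}".datasets."data/myservice" = {
    mountpoint = "/var/lib/myservice"; # hypothetical
  };
  systemd.services.myservice = {
    after = [ "khscodes-zpool-setup.service" ];
    unitConfig.RequiresMountsFor = [ "/var/lib/myservice" ];
  };
}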


@@ -0,0 +1,76 @@
{ lib, config, ... }:
rec {
vdevModule = lib.khscodes.mkSubmodule {
description = "vdev";
options = {
mode = lib.mkOption {
type = lib.types.enum [
"mirror"
"raidz"
"raidz1"
"raidz2"
"raidz3"
];
description = "Mode of the vdev";
default = "mirror";
};
members = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "Member disks of the vdev. Given as symbolic names, expected to be mapped to actual disks elsewhere.";
};
};
};
datasetModule = lib.khscodes.mkSubmodule {
description = "dataset";
options = {
options = lib.mkOption {
description = "Options for the dataset";
type = lib.types.attrsOf lib.types.str;
default = { };
};
mountpoint = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "Path to mount the dataset to";
};
};
};
zpoolModule = lib.khscodes.mkSubmodule {
description = "zpool";
options = {
vdevs = lib.mkOption {
type = lib.types.listOf vdevModule;
default = [ ];
};
encryptionKeyOpenbao = {
mount = lib.mkOption {
type = lib.types.str;
default = "opentofu";
description = "The mountpoint of the encryption key";
};
name = lib.mkOption {
type = lib.types.str;
description = "The name of the encryption key in the mount";
default = config.khscodes.networking.fqdn;
};
field = lib.mkOption {
type = lib.types.str;
description = "Field name of the encryption key";
};
};
rootFsOptions = lib.mkOption {
type = lib.types.attrsOf lib.types.str;
default = { };
};
zpoolOptions = lib.mkOption {
type = lib.types.attrsOf lib.types.str;
default = { };
};
datasets = lib.mkOption {
type = lib.types.attrsOf datasetModule;
description = "Datasets for the zpool";
default = { };
};
};
};
}
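
To make the submodule shapes above concrete, a zpool declared against these options might look like the sketch below; the pool name, disk labels, secret field and zfs properties are hypothetical.

{
  khscodes.fs.zfs.zpools."tank" = {
    vdevs = [
      {
        mode = "mirror"; # one of mirror/raidz/raidz1/raidz2/raidz3
        members = [
          "disk-a" # symbolic names, resolved to real devices through the disk-mapping secret
          "disk-b"
        ];
      }
    ];
    encryptionKeyOpenbao.field = "zfs_encryption_key"; # hypothetical field in the openbao secret
    rootFsOptions = {
      compression = "zstd"; # hypothetical
    };
    datasets."data/example" = {
      mountpoint = "/srv/example"; # hypothetical
      options = {
        recordsize = "16k"; # hypothetical
      };
    };
  };
}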


@@ -0,0 +1,88 @@
{
config,
lib,
pkgs,
...
}:
let
inherit (import ../../options.nix { inherit config lib; }) datasetModule;
zfsCfg = config.khscodes.fs.zfs;
cfg = zfsCfg.services.postgresql;
pgCfg = config.services.postgresql;
in
{
options.khscodes.fs.zfs.services.postgresql = {
enable = lib.mkOption {
description = "Enables storing postgresql data on a zfs zpool";
type = lib.types.bool;
default = zfsCfg.enable && pgCfg.enable;
};
pool = lib.mkOption {
type = lib.types.str;
default = zfsCfg.mainPoolName;
};
datasetName = lib.mkOption {
type = lib.types.str;
default = "database/postgresql";
};
backupDatasetName = lib.mkOption {
type = lib.types.str;
default = "backup/database/postgresql";
};
datasetConfig = lib.mkOption {
type = datasetModule;
default = {
mountpoint = "/var/lib/postgresql";
};
};
backupDatasetConfig = lib.mkOption {
type = datasetModule;
default = {
mountpoint = "/var/backup/postgresql";
};
};
backupDatabases = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = pgCfg.ensureDatabases;
};
};
config = lib.mkMerge [
(lib.mkIf (zfsCfg.enable && cfg.enable) {
khscodes.fs.zfs.zpools."${cfg.pool}".datasets."${cfg.datasetName}" = cfg.datasetConfig;
systemd.services.postgresql = {
after = [ "khscodes-zpool-setup.service" ];
unitConfig = {
RequiresMountsFor = [ cfg.datasetConfig.mountpoint ];
};
};
services.postgresql.dataDir = "${cfg.datasetConfig.mountpoint}/${pgCfg.package.psqlSchema}";
})
(lib.mkIf (zfsCfg.enable && cfg.enable) {
khscodes.fs.zfs.zpools."${cfg.pool}".datasets."${cfg.backupDatasetName}" = cfg.backupDatasetConfig;
services.postgresqlBackup = {
enable = true;
databases = cfg.backupDatabases;
};
systemd.services =
(lib.listToAttrs (
lib.lists.map (db: {
name = "postgresqlBackup-${db}";
value = {
after = [ "khscodes-zpool-setup.service" ];
unitConfig = {
RequiresMountsFor = [ cfg.backupDatasetConfig.mountpoint ];
};
};
}) cfg.backupDatabases
))
// {
khscodes-zpool-setup.serviceConfig = {
ExecStartPost = [
"${lib.getExe' pkgs.uutils-coreutils-noprefix "chown"} ${config.systemd.services.postgresql.serviceConfig.User}:${config.systemd.services.postgresql.serviceConfig.Group} ${lib.escapeShellArg cfg.backupDatasetConfig.mountpoint}"
"${lib.getExe' pkgs.uutils-coreutils-noprefix "chmod"} 0700 ${lib.escapeShellArg cfg.backupDatasetConfig.mountpoint}"
];
};
};
})
];
}
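
Because backupDatabases defaults to config.services.postgresql.ensureDatabases, a host normally only declares its databases and the backup list follows. A short sketch with hypothetical database names:

{
  services.postgresql = {
    enable = true;
    ensureDatabases = [
      "gitea" # hypothetical
      "nextcloud" # hypothetical
    ];
  };
  # With khscodes.fs.zfs enabled, the module above then sets:
  #  - services.postgresqlBackup.enable = true
  #  - services.postgresqlBackup.databases = [ "gitea" "nextcloud" ]
  # and the dumps land on the zfs-backed /var/backup/postgresql mountpoint,
  # chown'ed to the postgres service user by khscodes-zpool-setup's ExecStartPost.
}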


@@ -1,6 +1,8 @@
 use serde::Deserialize;
 use std::{
     collections::{BTreeMap, HashMap},
+    ffi::OsStr,
+    os::unix::ffi::OsStrExt,
     path::{Path, PathBuf},
 };
 
@@ -151,13 +153,13 @@ struct ZpoolStatusPool {
     vdevs: HashMap<String, ZpoolStatusVdev>,
 }
 
-#[derive(Clone, Copy, Deserialize, PartialEq)]
+#[derive(Clone, Copy, Debug, Deserialize, PartialEq)]
 enum ZpoolState {
     #[serde(rename = "ONLINE")]
     Online,
 }
 
-#[derive(Deserialize)]
+#[derive(Deserialize, Debug)]
 #[serde(tag = "vdev_type")]
 enum ZpoolStatusVdev {
     #[serde(rename = "root")]
@@ -177,11 +179,22 @@
         }
     }
 
     pub fn is_vdev_for_disk(&self, disk_path: &Path) -> bool {
-        matches!(self, Self::Disk(disk) if disk.path == disk_path)
+        // Zpool status returns the partition 1 as the path to the device, even if zfs was given the entire disk to work with during pool creation
+        // Depending on whether we use a device like /dev/vdb or /dev/disk/by-id/XXXX this will be reported as /dev/vdb1 or /dev/disk/by-id/XXXX-part1
+        matches!(self, Self::Disk(disk) if strip_path_inline_suffix(&disk.path, OsStr::from_bytes(b"-part1")) == disk_path || strip_path_inline_suffix(&disk.path, OsStr::from_bytes(b"1")) == disk_path)
     }
 }
 
-#[derive(Deserialize)]
+fn strip_path_inline_suffix<'a>(path: &'a Path, suffix: &OsStr) -> &'a Path {
+    Path::new(OsStr::from_bytes(
+        path.as_os_str()
+            .as_encoded_bytes()
+            .strip_suffix(suffix.as_encoded_bytes())
+            .unwrap_or(path.as_os_str().as_encoded_bytes()),
+    ))
+}
+
+#[derive(Deserialize, Debug)]
 struct ZpoolStatusVdevRoot {
     #[allow(dead_code)]
     name: String,
@@ -190,7 +203,7 @@ struct ZpoolStatusVdevRoot {
     vdevs: HashMap<String, ZpoolStatusVdev>,
 }
 
-#[derive(Deserialize)]
+#[derive(Deserialize, Debug)]
 struct ZpoolStatusVdevDisk {
     #[allow(dead_code)]
     name: String,
@@ -199,7 +212,7 @@ struct ZpoolStatusVdevDisk {
     path: PathBuf,
 }
 
-#[derive(Deserialize)]
+#[derive(Deserialize, Debug)]
 struct ZpoolStatusVdevMirror {
     #[allow(dead_code)]
     name: String,