Add automatic backups of PostgreSQL databases when using a ZFS volume
This commit is contained in:
parent 457eb3f6b0
commit 1ca3a407f2
5 changed files with 263 additions and 197 deletions
@@ -59,8 +59,10 @@ pkgs.nixosTest {
        inputs.self.nixosModules."fs/zfs"
        inputs.self.nixosModules."networking/fqdn"
        inputs.self.nixosModules."infrastructure/vault-server-approle"
        inputs.self.nixosModules."infrastructure/vault-prometheus-sender"
        inputs.self.nixosModules."infrastructure/provisioning"
        inputs.self.nixosModules."infrastructure/openbao"
        inputs.self.nixosModules."services/alloy"
        inputs.self.nixosModules."services/vault-agent"
        inputs.self.nixosModules."services/read-vault-auth-from-userdata"
        inputs.self.nixosModules."services/openssh"
@@ -5,6 +5,7 @@
  ...
}:
let
  inherit (import ./options.nix { inherit lib config; }) zpoolModule;
  cfg = config.khscodes.fs.zfs;
  isTest = cfg.test;
  zpoolSetup = lib.getExe pkgs.khscodes.zpool-setup;
@@ -26,79 +27,6 @@ let
      ${lib.escapeShellArg name}
  '';
  setupZpools = lib.lists.map setupZpool (lib.attrsToList cfg.zpools);
  vdevModule = lib.khscodes.mkSubmodule {
    description = "vdev";
    options = {
      mode = lib.mkOption {
        type = lib.types.enum [
          "mirror"
          "raidz"
          "raidz1"
          "raidz2"
          "raidz3"
        ];
        description = "Mode of the vdev";
        default = "mirror";
      };
      members = lib.mkOption {
        type = lib.types.listOf lib.types.str;
        description = "Member disks of the vdev. Given as symbolic names, expected to be mapped to actual disks elsewhere.";
      };
    };
  };
  datasetModule = lib.khscodes.mkSubmodule {
    description = "dataset";
    options = {
      options = lib.mkOption {
        description = "Options for the dataset";
        type = lib.types.attrsOf lib.types.str;
        default = { };
      };
      mountpoint = lib.mkOption {
        type = lib.types.nullOr lib.types.str;
        default = null;
        description = "Path to mount the dataset to";
      };
    };
  };
  zpoolModule = lib.khscodes.mkSubmodule {
    description = "zpool";
    options = {
      vdevs = lib.mkOption {
        type = lib.types.listOf vdevModule;
        default = [ ];
      };
      encryptionKeyOpenbao = {
        mount = lib.mkOption {
          type = lib.types.str;
          default = "opentofu";
          description = "The mountpoint of the encryption key";
        };
        name = lib.mkOption {
          type = lib.types.str;
          description = "The name of the encryption key in the mount";
          default = config.khscodes.networking.fqdn;
        };
        field = lib.mkOption {
          type = lib.types.str;
          description = "Field name of the encryption key";
        };
      };
      rootFsOptions = lib.mkOption {
        type = lib.types.attrsOf lib.types.str;
        default = { };
      };
      zpoolOptions = lib.mkOption {
        type = lib.types.attrsOf lib.types.str;
        default = { };
      };
      datasets = lib.mkOption {
        type = lib.types.attrsOf datasetModule;
        description = "Datasets for the zpool";
        default = { };
      };
    };
  };
in
{
  options.khscodes.fs.zfs = {
@@ -120,126 +48,85 @@ in
      "${cfg.mainPoolName}" = { };
    };
  };
  services = {
    postgresql = {
      enable = lib.mkOption {
        description = "Enables storing postgresql data on a zfs zpool";
        type = lib.types.bool;
        default = cfg.enable && config.services.postgresql.enable;
      };
      pool = lib.mkOption {
        type = lib.types.str;
        default = cfg.mainPoolName;
      };
      datasetName = lib.mkOption {
        type = lib.types.str;
        default = "database/postgresql";
      };
      datasetConfig = lib.mkOption {
        type = datasetModule;
        default = {
          mountpoint = config.services.postgresql.dataDir;
        };
      };
    };
  };
};
config = lib.mkMerge [
  (lib.mkIf cfg.enable {
    # TODO: Verify that each member disk is uniquely named, and exists somewhere?
    assertions = lib.lists.map (
      { name, value }:
      {
        assertion = (lib.lists.length value.vdevs) > 0;
        message = "Zpool ${name} contains no vdevs";
      }
    ) (lib.attrsToList cfg.zpools);
    boot.supportedFilesystems = {
      zfs = true;
    };
    # On servers, we handle importing, creating and mounting of the pool manually.
    boot.zfs = {
      forceImportRoot = false;
      requestEncryptionCredentials = false;
    };
    services.zfs.autoScrub.enable = true;
    systemd.services.zfs-mount.enable = false;
    systemd.services.zfs-import-zroot.enable = false;
    systemd.services.khscodes-zpool-setup = {
      after = [
        "network-online.target"
      ];
      wants = [
        "network-online.target"
      ];
      wantedBy = [
        "multi-user.target"
      ];
      environment = {
        BAO_ADDR = config.khscodes.services.vault-agent.vault.address;
        VAULT_ROLE_ID_FILE = "/var/lib/vault-agent/role-id";
        VAULT_SECRET_ID_FILE = "/var/lib/vault-agent/secret-id";
        DISK_MAPPING_FILE = "/run/secret/disk-mapping.json";
        LOGLEVEL = "trace";
      }
      // (lib.attrsets.optionalAttrs isTest {
        ZFS_TEST = "true";
      });
      unitConfig.ConditionPathExists = [
        "/run/secret/disk-mapping.json"
      ]
      ++ lib.lists.optionals (!isTest) [
        "/var/lib/vault-agent/role-id"
        "/var/lib/vault-agent/secret-id"
      ];
      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = true;
        ExecStart = ''
          ${lib.strings.concatStringsSep "\n" setupZpools}
        '';
      };
    };
    khscodes.infrastructure.vault-server-approle.policy = lib.mapAttrs' (name: value: {
      name = "${value.encryptionKeyOpenbao.mount}/data/${value.encryptionKeyOpenbao.name}";
      value = {
        capabilities = [ "read" ];
      };
    }) cfg.zpools;
    # Reading the disk setup through an openbao secret allows
    # the service to be restarted when adding new disks, or resizing existing disks.
    khscodes.services.vault-agent.templates = [
      {
        contents = ''
          {{- with secret "data-disks/data/${config.khscodes.networking.fqdn}" -}}
          {{ .Data.data | toUnescapedJSON }}
          {{- end -}}
        '';
        destination = "/run/secret/disk-mapping.json";
        owner = "root";
        group = "root";
        perms = "0644";
        restartUnits = [ "khscodes-zpool-setup.service" ];
      }
config = lib.mkIf cfg.enable {
  # TODO: Verify that each member disk is uniquely named, and exists somewhere?
  assertions = lib.lists.map (
    { name, value }:
    {
      assertion = (lib.lists.length value.vdevs) > 0;
      message = "Zpool ${name} contains no vdevs";
    }
  ) (lib.attrsToList cfg.zpools);
  boot.supportedFilesystems = {
    zfs = true;
  };
  # On servers, we handle importing, creating and mounting of the pool manually.
  boot.zfs = {
    forceImportRoot = false;
    requestEncryptionCredentials = false;
  };
  services.zfs.autoScrub.enable = true;
  systemd.services.zfs-mount.enable = false;
  systemd.services.zfs-import-zroot.enable = false;
  systemd.services.khscodes-zpool-setup = {
    after = [
      "network-online.target"
    ];
    services.prometheus.exporters.zfs.enable = true;
    khscodes.infrastructure.vault-prometheus-sender.exporters.enabled = [ "zfs" ];
  })
  (lib.mkIf (cfg.enable && cfg.services.postgresql.enable) {
    khscodes.fs.zfs.zpools."${cfg.services.postgresql.pool}".datasets."${cfg.services.postgresql.datasetName}" =
      cfg.services.postgresql.datasetConfig;
    systemd.services.postgresql = {
      after = [ "khscodes-zpool-setup.service" ];
      unitConfig = {
        RequiresMountsFor = cfg.services.postgresql.datasetConfig.mountpoint;
      };
    };
    systemd.services.khscodes-zpool-setup = {
      ExecStartPost = ''
        chown ${config.services.postgresql.user}:${config.services.postgresql.group} ${lib.escapeShellArg cfg.services.postgresql.datasetConfig.mountpoint}
      wants = [
        "network-online.target"
      ];
      wantedBy = [
        "multi-user.target"
      ];
      environment = {
        BAO_ADDR = config.khscodes.services.vault-agent.vault.address;
        VAULT_ROLE_ID_FILE = "/var/lib/vault-agent/role-id";
        VAULT_SECRET_ID_FILE = "/var/lib/vault-agent/secret-id";
        DISK_MAPPING_FILE = "/run/secret/disk-mapping.json";
        LOGLEVEL = "trace";
      }
      // (lib.attrsets.optionalAttrs isTest {
        ZFS_TEST = "true";
      });
      unitConfig.ConditionPathExists = [
        "/run/secret/disk-mapping.json"
      ]
      ++ lib.lists.optionals (!isTest) [
        "/var/lib/vault-agent/role-id"
        "/var/lib/vault-agent/secret-id"
      ];
      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = true;
        ExecStart = ''
          ${lib.strings.concatStringsSep "\n" setupZpools}
        '';
      };
    };
  })
];
};
khscodes.infrastructure.vault-server-approle.policy = lib.mapAttrs' (name: value: {
  name = "${value.encryptionKeyOpenbao.mount}/data/${value.encryptionKeyOpenbao.name}";
  value = {
    capabilities = [ "read" ];
  };
}) cfg.zpools;
# Reading the disk setup through an openbao secret allows
# the service to be restarted when adding new disks, or resizing existing disks.
khscodes.services.vault-agent.templates = [
  {
    contents = ''
      {{- with secret "data-disks/data/${config.khscodes.networking.fqdn}" -}}
      {{ .Data.data | toUnescapedJSON }}
      {{- end -}}
    '';
    destination = "/run/secret/disk-mapping.json";
    owner = "root";
    group = "root";
    perms = "0644";
    restartUnits = [ "khscodes-zpool-setup.service" ];
  }
];
services.prometheus.exporters.zfs.enable = true;
khscodes.infrastructure.vault-prometheus-sender.exporters.enabled = [ "zfs" ];
};
}
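For orientation, a hypothetical rendering of /run/secret/disk-mapping.json produced by the template above, assuming the data-disks secret simply maps the symbolic vdev member names to device paths (the actual schema is owned by zpool-setup and is not part of this diff; names and paths below are invented):

{
  "data0": "/dev/disk/by-id/virtio-data0",
  "data1": "/dev/disk/by-id/virtio-data1"
}

Because vault-agent writes this file with restartUnits set, re-provisioning a disk in openbao re-renders the file and restarts khscodes-zpool-setup.service, which is what lets pools pick up new or resized disks without a rebuild.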
nix/modules/nixos/fs/zfs/options.nix (new file, 76 lines)
@@ -0,0 +1,76 @@
{ lib, config, ... }:
rec {
  vdevModule = lib.khscodes.mkSubmodule {
    description = "vdev";
    options = {
      mode = lib.mkOption {
        type = lib.types.enum [
          "mirror"
          "raidz"
          "raidz1"
          "raidz2"
          "raidz3"
        ];
        description = "Mode of the vdev";
        default = "mirror";
      };
      members = lib.mkOption {
        type = lib.types.listOf lib.types.str;
        description = "Member disks of the vdev. Given as symbolic names, expected to be mapped to actual disks elsewhere.";
      };
    };
  };
  datasetModule = lib.khscodes.mkSubmodule {
    description = "dataset";
    options = {
      options = lib.mkOption {
        description = "Options for the dataset";
        type = lib.types.attrsOf lib.types.str;
        default = { };
      };
      mountpoint = lib.mkOption {
        type = lib.types.nullOr lib.types.str;
        default = null;
        description = "Path to mount the dataset to";
      };
    };
  };
  zpoolModule = lib.khscodes.mkSubmodule {
    description = "zpool";
    options = {
      vdevs = lib.mkOption {
        type = lib.types.listOf vdevModule;
        default = [ ];
      };
      encryptionKeyOpenbao = {
        mount = lib.mkOption {
          type = lib.types.str;
          default = "opentofu";
          description = "The mountpoint of the encryption key";
        };
        name = lib.mkOption {
          type = lib.types.str;
          description = "The name of the encryption key in the mount";
          default = config.khscodes.networking.fqdn;
        };
        field = lib.mkOption {
          type = lib.types.str;
          description = "Field name of the encryption key";
        };
      };
      rootFsOptions = lib.mkOption {
        type = lib.types.attrsOf lib.types.str;
        default = { };
      };
      zpoolOptions = lib.mkOption {
        type = lib.types.attrsOf lib.types.str;
        default = { };
      };
      datasets = lib.mkOption {
        type = lib.types.attrsOf datasetModule;
        description = "Datasets for the zpool";
        default = { };
      };
    };
  };
}
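As a sketch of how these submodules compose, a hypothetical pool definition (pool name, member names, and the dataset option are illustrative, not from this commit):

{
  khscodes.fs.zfs.zpools.tank = {
    vdevs = [
      {
        mode = "mirror";
        members = [
          "data0"
          "data1"
        ];
      }
    ];
    encryptionKeyOpenbao.field = "encryption-key";
    datasets."database/postgresql" = {
      mountpoint = "/var/lib/postgresql";
      options."compression" = "zstd";
    };
  };
}

Note that members are symbolic names rather than device paths; the mapping to real disks comes from the disk-mapping.json secret consumed by zpool-setup.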
nix/modules/nixos/fs/zfs/services/postgresql/default.nix (new file, 88 lines)
@@ -0,0 +1,88 @@
{
  config,
  lib,
  pkgs,
  ...
}:
let
  inherit (import ../../options.nix { inherit config lib; }) datasetModule;
  zfsCfg = config.khscodes.fs.zfs;
  cfg = zfsCfg.services.postgresql;
  pgCfg = config.services.postgresql;
in
{
  options.khscodes.fs.zfs.services.postgresql = {
    enable = lib.mkOption {
      description = "Enables storing postgresql data on a zfs zpool";
      type = lib.types.bool;
      default = zfsCfg.enable && pgCfg.enable;
    };
    pool = lib.mkOption {
      type = lib.types.str;
      default = zfsCfg.mainPoolName;
    };
    datasetName = lib.mkOption {
      type = lib.types.str;
      default = "database/postgresql";
    };
    backupDatasetName = lib.mkOption {
      type = lib.types.str;
      default = "backup/database/postgresql";
    };
    datasetConfig = lib.mkOption {
      type = datasetModule;
      default = {
        mountpoint = "/var/lib/postgresql";
      };
    };
    backupDatasetConfig = lib.mkOption {
      type = datasetModule;
      default = {
        mountpoint = "/var/backup/postgresql";
      };
    };
    backupDatabases = lib.mkOption {
      type = lib.types.listOf lib.types.str;
      default = pgCfg.ensureDatabases;
    };
  };
  config = lib.mkMerge [
    (lib.mkIf (zfsCfg.enable && cfg.enable) {
      khscodes.fs.zfs.zpools."${cfg.pool}".datasets."${cfg.datasetName}" = cfg.datasetConfig;
      systemd.services.postgresql = {
        after = [ "khscodes-zpool-setup.service" ];
        unitConfig = {
          RequiresMountsFor = [ cfg.datasetConfig.mountpoint ];
        };
      };
      services.postgresql.dataDir = "${cfg.datasetConfig.mountpoint}/${pgCfg.package.psqlSchema}";
    })
    (lib.mkIf (zfsCfg.enable && cfg.enable) {
      khscodes.fs.zfs.zpools."${cfg.pool}".datasets."${cfg.backupDatasetName}" = cfg.backupDatasetConfig;
      services.postgresqlBackup = {
        enable = true;
        databases = cfg.backupDatabases;
      };
      systemd.services =
        (lib.listToAttrs (
          lib.lists.map (db: {
            name = "postgresqlBackup-${db}";
            value = {
              after = [ "khscodes-zpool-setup.service" ];
              unitConfig = {
                RequiresMountsFor = [ cfg.backupDatasetConfig.mountpoint ];
              };
            };
          }) cfg.backupDatabases
        ))
        // {
          khscodes-zpool-setup.serviceConfig = {
            ExecStartPost = [
              "${lib.getExe' pkgs.uutils-coreutils-noprefix "chown"} ${config.systemd.services.postgresql.serviceConfig.User}:${config.systemd.services.postgresql.serviceConfig.Group} ${lib.escapeShellArg cfg.backupDatasetConfig.mountpoint}"
              "${lib.getExe' pkgs.uutils-coreutils-noprefix "chmod"} 0700 ${lib.escapeShellArg cfg.backupDatasetConfig.mountpoint}"
            ];
          };
        };
    })
  ];
}
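A minimal sketch of a host configuration that exercises this module (the database name is invented): because enable defaults to zfsCfg.enable && pgCfg.enable, turning both on is enough to get the data and backup datasets plus scheduled dumps.

{
  khscodes.fs.zfs.enable = true;
  services.postgresql = {
    enable = true;
    ensureDatabases = [ "app" ];
  };
  # With the defaults above this creates the datasets database/postgresql
  # and backup/database/postgresql on the main pool, points
  # services.postgresql.dataDir at the mounted dataset, and orders
  # postgresqlBackup-app.service after khscodes-zpool-setup.service.
}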
@@ -1,6 +1,8 @@
use serde::Deserialize;
use std::{
    collections::{BTreeMap, HashMap},
    ffi::OsStr,
    os::unix::ffi::OsStrExt,
    path::{Path, PathBuf},
};
@@ -151,13 +153,13 @@ struct ZpoolStatusPool {
    vdevs: HashMap<String, ZpoolStatusVdev>,
}

#[derive(Clone, Copy, Deserialize, PartialEq)]
#[derive(Clone, Copy, Debug, Deserialize, PartialEq)]
enum ZpoolState {
    #[serde(rename = "ONLINE")]
    Online,
}

#[derive(Deserialize)]
#[derive(Deserialize, Debug)]
#[serde(tag = "vdev_type")]
enum ZpoolStatusVdev {
    #[serde(rename = "root")]
@@ -177,11 +179,22 @@ impl ZpoolStatusVdev {
        }
    }
    pub fn is_vdev_for_disk(&self, disk_path: &Path) -> bool {
        matches!(self, Self::Disk(disk) if disk.path == disk_path)
        // `zpool status` reports partition 1 as the device path, even if zfs was given the entire disk during pool creation.
        // Depending on whether we use a device like /dev/vdb or /dev/disk/by-id/XXXX, this is reported as /dev/vdb1 or /dev/disk/by-id/XXXX-part1.
        matches!(self, Self::Disk(disk) if strip_path_inline_suffix(&disk.path, OsStr::from_bytes(b"-part1")) == disk_path || strip_path_inline_suffix(&disk.path, OsStr::from_bytes(b"1")) == disk_path)
    }
}

#[derive(Deserialize)]
fn strip_path_inline_suffix<'a>(path: &'a Path, suffix: &OsStr) -> &'a Path {
    Path::new(OsStr::from_bytes(
        path.as_os_str()
            .as_encoded_bytes()
            .strip_suffix(suffix.as_encoded_bytes())
            .unwrap_or(path.as_os_str().as_encoded_bytes()),
    ))
}

#[derive(Deserialize, Debug)]
struct ZpoolStatusVdevRoot {
    #[allow(dead_code)]
    name: String,
@@ -190,7 +203,7 @@ struct ZpoolStatusVdevRoot {
    vdevs: HashMap<String, ZpoolStatusVdev>,
}

#[derive(Deserialize)]
#[derive(Deserialize, Debug)]
struct ZpoolStatusVdevDisk {
    #[allow(dead_code)]
    name: String,
@@ -199,7 +212,7 @@ struct ZpoolStatusVdevDisk {
    path: PathBuf,
}

#[derive(Deserialize)]
#[derive(Deserialize, Debug)]
struct ZpoolStatusVdevMirror {
    #[allow(dead_code)]
    name: String,
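To illustrate the partition-suffix handling, a standalone sketch (not part of the commit; the device paths are invented) that exercises strip_path_inline_suffix the same way is_vdev_for_disk does:

use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::Path;

fn strip_path_inline_suffix<'a>(path: &'a Path, suffix: &OsStr) -> &'a Path {
    Path::new(OsStr::from_bytes(
        path.as_os_str()
            .as_encoded_bytes()
            .strip_suffix(suffix.as_encoded_bytes())
            .unwrap_or(path.as_os_str().as_encoded_bytes()),
    ))
}

fn main() {
    // A member configured as /dev/disk/by-id/virtio-data0 is reported by
    // `zpool status` as /dev/disk/by-id/virtio-data0-part1.
    let reported = Path::new("/dev/disk/by-id/virtio-data0-part1");
    assert_eq!(
        strip_path_inline_suffix(reported, OsStr::from_bytes(b"-part1")),
        Path::new("/dev/disk/by-id/virtio-data0")
    );

    // A member configured as /dev/vdb is reported as /dev/vdb1.
    let reported = Path::new("/dev/vdb1");
    assert_eq!(
        strip_path_inline_suffix(reported, OsStr::from_bytes(b"1")),
        Path::new("/dev/vdb")
    );
}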