Compare commits
No commits in common. "1ca3a407f2c7b5c99ad76171ae236346ed8f5776" and "9fbfd0ce02e3331859c765ca6df248651d789b16" have entirely different histories.
1ca3a407f2...9fbfd0ce02
6 changed files with 222 additions and 288 deletions
flake.lock (generated, 48 changed lines)
@@ -3,11 +3,11 @@
     "advisory-db": {
       "flake": false,
       "locked": {
-        "lastModified": 1754472784,
-        "narHash": "sha256-b390kY06Sm+gzwGiaXrVzIg4mjxwt/oONlDu49260lM=",
+        "lastModified": 1753275806,
+        "narHash": "sha256-E+Cu/AFVGwoQo4KPgcWmFS9zU7fJgXoK0o25EP3j48g=",
         "owner": "rustsec",
         "repo": "advisory-db",
-        "rev": "388a3128c3cda69c6f466de2015aadfae9f9bc75",
+        "rev": "c62e71ad8c5256ffa3cafbb1a8c687db60869e98",
         "type": "github"
       },
       "original": {
@@ -127,11 +127,11 @@
     },
     "crane": {
       "locked": {
-        "lastModified": 1754269165,
-        "narHash": "sha256-0tcS8FHd4QjbCVoxN9jI+PjHgA4vc/IjkUSp+N3zy0U=",
+        "lastModified": 1753316655,
+        "narHash": "sha256-tzWa2kmTEN69OEMhxFy+J2oWSvZP5QhEgXp3TROOzl0=",
         "owner": "ipetkov",
         "repo": "crane",
-        "rev": "444e81206df3f7d92780680e45858e31d2f07a08",
+        "rev": "f35a3372d070c9e9ccb63ba7ce347f0634ddf3d2",
         "type": "github"
       },
       "original": {
@@ -237,11 +237,11 @@
         "nixpkgs-lib": "nixpkgs-lib"
       },
       "locked": {
-        "lastModified": 1754487366,
-        "narHash": "sha256-pHYj8gUBapuUzKV/kN/tR3Zvqc7o6gdFB9XKXIp1SQ8=",
+        "lastModified": 1753121425,
+        "narHash": "sha256-TVcTNvOeWWk1DXljFxVRp+E0tzG1LhrVjOGGoMHuXio=",
         "owner": "hercules-ci",
         "repo": "flake-parts",
-        "rev": "af66ad14b28a127c5c0f3bbb298218fc63528a18",
+        "rev": "644e0fc48951a860279da645ba77fe4a6e814c5e",
         "type": "github"
       },
       "original": {
@@ -506,11 +506,11 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1754689972,
-        "narHash": "sha256-eogqv6FqZXHgqrbZzHnq43GalnRbLTkbBbFtEfm1RSc=",
+        "lastModified": 1753749649,
+        "narHash": "sha256-+jkEZxs7bfOKfBIk430K+tK9IvXlwzqQQnppC2ZKFj4=",
         "owner": "nixos",
         "repo": "nixpkgs",
-        "rev": "fc756aa6f5d3e2e5666efcf865d190701fef150a",
+        "rev": "1f08a4df998e21f4e8be8fb6fbf61d11a1a5076a",
         "type": "github"
       },
       "original": {
@@ -538,11 +538,11 @@
     },
     "nixpkgs-lib": {
       "locked": {
-        "lastModified": 1753579242,
-        "narHash": "sha256-zvaMGVn14/Zz8hnp4VWT9xVnhc8vuL3TStRqwk22biA=",
+        "lastModified": 1751159883,
+        "narHash": "sha256-urW/Ylk9FIfvXfliA1ywh75yszAbiTEVgpPeinFyVZo=",
         "owner": "nix-community",
         "repo": "nixpkgs.lib",
-        "rev": "0f36c44e01a6129be94e3ade315a5883f0228a6e",
+        "rev": "14a40a1d7fb9afa4739275ac642ed7301a9ba1ab",
         "type": "github"
       },
       "original": {
@@ -605,11 +605,11 @@
         ]
       },
       "locked": {
-        "lastModified": 1754794262,
-        "narHash": "sha256-5SEz135CaJ0LfHILi+CzWMXQmcvD2QeIf4FKwXAxtxA=",
+        "lastModified": 1754016903,
+        "narHash": "sha256-mRB5OOx7H5kFwW8Qtc/7dO3qHsBQtZ/eYQEj93/Noo8=",
         "owner": "oxalica",
         "repo": "rust-overlay",
-        "rev": "d754da7c068c6e122f84d84c3e6bcd353ee48635",
+        "rev": "ddd488184f01603b712ddbb6dc9fe0b8447eb7fc",
         "type": "github"
       },
       "original": {
@@ -691,11 +691,11 @@
         "tinted-zed": "tinted-zed"
       },
      "locked": {
-        "lastModified": 1754852587,
-        "narHash": "sha256-M+CDFvZ4ZuKK3mlbxv+37yAwL6X3tIklYgurqbhO7Q4=",
+        "lastModified": 1753979771,
+        "narHash": "sha256-MdMdQymbivEWWkC5HqeLYtP8FYu0SqiSpiRlyw9Fm3Y=",
         "owner": "nix-community",
         "repo": "stylix",
-        "rev": "61ffae2453d00cb63a133b750232804b209db4d1",
+        "rev": "5b81b0c4fbab3517b39d63f493760d33287150ad",
         "type": "github"
       },
       "original": {
@@ -910,11 +910,11 @@
         ]
       },
       "locked": {
-        "lastModified": 1754847726,
-        "narHash": "sha256-2vX8QjO5lRsDbNYvN9hVHXLU6oMl+V/PsmIiJREG4rE=",
+        "lastModified": 1754061284,
+        "narHash": "sha256-ONcNxdSiPyJ9qavMPJYAXDNBzYobHRxw0WbT38lKbwU=",
         "owner": "numtide",
         "repo": "treefmt-nix",
-        "rev": "7d81f6fb2e19bf84f1c65135d1060d829fae2408",
+        "rev": "58bd4da459f0a39e506847109a2a5cfceb837796",
         "type": "github"
       },
       "original": {
@@ -59,10 +59,8 @@ pkgs.nixosTest {
       inputs.self.nixosModules."fs/zfs"
       inputs.self.nixosModules."networking/fqdn"
       inputs.self.nixosModules."infrastructure/vault-server-approle"
       inputs.self.nixosModules."infrastructure/vault-prometheus-sender"
       inputs.self.nixosModules."infrastructure/provisioning"
       inputs.self.nixosModules."infrastructure/openbao"
       inputs.self.nixosModules."services/alloy"
       inputs.self.nixosModules."services/vault-agent"
       inputs.self.nixosModules."services/read-vault-auth-from-userdata"
       inputs.self.nixosModules."services/openssh"
@@ -5,7 +5,6 @@
   ...
 }:
 let
-  inherit (import ./options.nix { inherit lib config; }) zpoolModule;
   cfg = config.khscodes.fs.zfs;
   isTest = cfg.test;
   zpoolSetup = lib.getExe pkgs.khscodes.zpool-setup;
@@ -27,6 +26,79 @@ let
       ${lib.escapeShellArg name}
   '';
   setupZpools = lib.lists.map setupZpool (lib.attrsToList cfg.zpools);
+  vdevModule = lib.khscodes.mkSubmodule {
+    description = "vdev";
+    options = {
+      mode = lib.mkOption {
+        type = lib.types.enum [
+          "mirror"
+          "raidz"
+          "raidz1"
+          "raidz2"
+          "raidz3"
+        ];
+        description = "Mode of the vdev";
+        default = "mirror";
+      };
+      members = lib.mkOption {
+        type = lib.types.listOf lib.types.str;
+        description = "Member disks of the vdev. Given as symbolic names, expected to be mapped to actual disks elsewhere.";
+      };
+    };
+  };
+  datasetModule = lib.khscodes.mkSubmodule {
+    description = "dataset";
+    options = {
+      options = lib.mkOption {
+        description = "Options for the dataset";
+        type = lib.types.attrsOf lib.types.str;
+        default = { };
+      };
+      mountpoint = lib.mkOption {
+        type = lib.types.nullOr lib.types.str;
+        default = null;
+        description = "Path to mount the dataset to";
+      };
+    };
+  };
+  zpoolModule = lib.khscodes.mkSubmodule {
+    description = "zpool";
+    options = {
+      vdevs = lib.mkOption {
+        type = lib.types.listOf vdevModule;
+        default = [ ];
+      };
+      encryptionKeyOpenbao = {
+        mount = lib.mkOption {
+          type = lib.types.str;
+          default = "opentofu";
+          description = "The mountpoint of the encryption key";
+        };
+        name = lib.mkOption {
+          type = lib.types.str;
+          description = "The name of the encryption key in the mount";
+          default = config.khscodes.networking.fqdn;
+        };
+        field = lib.mkOption {
+          type = lib.types.str;
+          description = "Field name of the encryption key";
+        };
+      };
+      rootFsOptions = lib.mkOption {
+        type = lib.types.attrsOf lib.types.str;
+        default = { };
+      };
+      zpoolOptions = lib.mkOption {
+        type = lib.types.attrsOf lib.types.str;
+        default = { };
+      };
+      datasets = lib.mkOption {
+        type = lib.types.attrsOf datasetModule;
+        description = "Datasets for the zpool";
+        default = { };
+      };
+    };
+  };
 in
 {
   options.khscodes.fs.zfs = {
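Taken together, these submodules let a host declare a pool roughly like the following hypothetical sketch. Only the option names come from the module above; the pool name, member-disk labels, dataset, and secret field are invented for illustration:

khscodes.fs.zfs.zpools.zroot = {
  vdevs = [
    {
      mode = "mirror";
      # Symbolic names, mapped to real devices via the disk-mapping secret.
      members = [
        "disk-a"
        "disk-b"
      ];
    }
  ];
  encryptionKeyOpenbao.field = "encryption-key";
  datasets."database/postgresql" = {
    mountpoint = "/var/lib/postgresql";
    options.compression = "zstd";
  };
};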
@@ -48,85 +120,126 @@ in
         "${cfg.mainPoolName}" = { };
       };
     };
-  };
-  config = lib.mkIf cfg.enable {
-    # TODO: Verify that each member disk is uniquely named, and exists somewhere?
-    assertions = lib.lists.map (
-      { name, value }:
-      {
-        assertion = (lib.lists.length value.vdevs) > 0;
-        message = "Zpool ${name} contains no vdevs";
-      }
-    ) (lib.attrsToList cfg.zpools);
-    boot.supportedFilesystems = {
-      zfs = true;
-    };
-    # On servers, we handle importing, creating and mounting of the pool manually.
-    boot.zfs = {
-      forceImportRoot = false;
-      requestEncryptionCredentials = false;
-    };
-    services.zfs.autoScrub.enable = true;
-    systemd.services.zfs-mount.enable = false;
-    systemd.services.zfs-import-zroot.enable = false;
-    systemd.services.khscodes-zpool-setup = {
-      after = [
-        "network-online.target"
-      ];
-      wants = [
-        "network-online.target"
-      ];
-      wantedBy = [
-        "multi-user.target"
-      ];
-      environment = {
-        BAO_ADDR = config.khscodes.services.vault-agent.vault.address;
-        VAULT_ROLE_ID_FILE = "/var/lib/vault-agent/role-id";
-        VAULT_SECRET_ID_FILE = "/var/lib/vault-agent/secret-id";
-        DISK_MAPPING_FILE = "/run/secret/disk-mapping.json";
-        LOGLEVEL = "trace";
-      }
-      // (lib.attrsets.optionalAttrs isTest {
-        ZFS_TEST = "true";
-      });
-      unitConfig.ConditionPathExists = [
-        "/run/secret/disk-mapping.json"
-      ]
-      ++ lib.lists.optionals (!isTest) [
-        "/var/lib/vault-agent/role-id"
-        "/var/lib/vault-agent/secret-id"
-      ];
-      serviceConfig = {
-        Type = "oneshot";
-        RemainAfterExit = true;
-        ExecStart = ''
-          ${lib.strings.concatStringsSep "\n" setupZpools}
-        '';
+    services = {
+      postgresql = {
+        enable = lib.mkOption {
+          description = "Enables storing postgresql data on a zfs zpool";
+          type = lib.types.bool;
+          default = cfg.enable && config.services.postgresql.enable;
+        };
+        pool = lib.mkOption {
+          type = lib.types.str;
+          default = cfg.mainPoolName;
+        };
+        datasetName = lib.mkOption {
+          type = lib.types.str;
+          default = "database/postgresql";
+        };
+        datasetConfig = lib.mkOption {
+          type = datasetModule;
+          default = {
+            mountpoint = config.services.postgresql.dataDir;
+          };
+        };
       };
     };
-    khscodes.infrastructure.vault-server-approle.policy = lib.mapAttrs' (name: value: {
-      name = "${value.encryptionKeyOpenbao.mount}/data/${value.encryptionKeyOpenbao.name}";
-      value = {
-        capabilities = [ "read" ];
-      };
-    }) cfg.zpools;
-    # Reading the disk setup through an openbao secret allows
-    # the service to be restarted when adding new disks, or resizing existing disks.
-    khscodes.services.vault-agent.templates = [
-      {
-        contents = ''
-          {{- with secret "data-disks/data/${config.khscodes.networking.fqdn}" -}}
-          {{ .Data.data | toUnescapedJSON }}
-          {{- end -}}
-        '';
-        destination = "/run/secret/disk-mapping.json";
-        owner = "root";
-        group = "root";
-        perms = "0644";
-        restartUnits = [ "khscodes-zpool-setup.service" ];
-      }
-    ];
-    services.prometheus.exporters.zfs.enable = true;
-    khscodes.infrastructure.vault-prometheus-sender.exporters.enabled = [ "zfs" ];
   };
+  config = lib.mkMerge [
+    (lib.mkIf cfg.enable {
+      # TODO: Verify that each member disk is uniquely named, and exists somewhere?
+      assertions = lib.lists.map (
+        { name, value }:
+        {
+          assertion = (lib.lists.length value.vdevs) > 0;
+          message = "Zpool ${name} contains no vdevs";
+        }
+      ) (lib.attrsToList cfg.zpools);
+      boot.supportedFilesystems = {
+        zfs = true;
+      };
+      # On servers, we handle importing, creating and mounting of the pool manually.
+      boot.zfs = {
+        forceImportRoot = false;
+        requestEncryptionCredentials = false;
+      };
+      services.zfs.autoScrub.enable = true;
+      systemd.services.zfs-mount.enable = false;
+      systemd.services.zfs-import-zroot.enable = false;
+      systemd.services.khscodes-zpool-setup = {
+        after = [
+          "network-online.target"
+        ];
+        wants = [
+          "network-online.target"
+        ];
+        wantedBy = [
+          "multi-user.target"
+        ];
+        environment = {
+          BAO_ADDR = config.khscodes.services.vault-agent.vault.address;
+          VAULT_ROLE_ID_FILE = "/var/lib/vault-agent/role-id";
+          VAULT_SECRET_ID_FILE = "/var/lib/vault-agent/secret-id";
+          DISK_MAPPING_FILE = "/run/secret/disk-mapping.json";
+          LOGLEVEL = "trace";
+        }
+        // (lib.attrsets.optionalAttrs isTest {
+          ZFS_TEST = "true";
+        });
+        unitConfig.ConditionPathExists = [
+          "/run/secret/disk-mapping.json"
+        ]
+        ++ lib.lists.optionals (!isTest) [
+          "/var/lib/vault-agent/role-id"
+          "/var/lib/vault-agent/secret-id"
+        ];
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = true;
+          ExecStart = ''
+            ${lib.strings.concatStringsSep "\n" setupZpools}
+          '';
+        };
+      };
+      khscodes.infrastructure.vault-server-approle.policy = lib.mapAttrs' (name: value: {
+        name = "${value.encryptionKeyOpenbao.mount}/data/${value.encryptionKeyOpenbao.name}";
+        value = {
+          capabilities = [ "read" ];
+        };
+      }) cfg.zpools;
+      # Reading the disk setup through an openbao secret allows
+      # the service to be restarted when adding new disks, or resizing existing disks.
+      khscodes.services.vault-agent.templates = [
+        {
+          contents = ''
+            {{- with secret "data-disks/data/${config.khscodes.networking.fqdn}" -}}
+            {{ .Data.data | toUnescapedJSON }}
+            {{- end -}}
+          '';
+          destination = "/run/secret/disk-mapping.json";
+          owner = "root";
+          group = "root";
+          perms = "0644";
+          restartUnits = [ "khscodes-zpool-setup.service" ];
+        }
+      ];
+      services.prometheus.exporters.zfs.enable = true;
+      khscodes.infrastructure.vault-prometheus-sender.exporters.enabled = [ "zfs" ];
+    })
+    (lib.mkIf (cfg.enable && cfg.services.postgresql.enable) {
+      khscodes.fs.zfs.zpools."${cfg.services.postgresql.pool
+      }".datasets."${cfg.services.postgresql.datasetName}" =
+        cfg.services.postgresql.datasetConfig;
+      systemd.services.postgresql = {
+        after = [ "khscodes-zpool-setup.service" ];
+        unitConfig = {
+          RequiresMountsFor = cfg.services.postgresql.datasetConfig.mountpoint;
+        };
+      };
+      systemd.services.khscodes-zpool-setup = {
+        serviceConfig.ExecStartPost = ''
+          chown ${config.services.postgresql.user}:${config.services.postgresql.group} ${lib.escapeShellArg cfg.services.postgresql.datasetConfig.mountpoint}
+        '';
+      };
+    })
+  ];
 }
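For reference, with the defaults above (mount "opentofu", name falling back to the host's FQDN), the vault-server-approle policy computed by the mapAttrs' call comes out roughly as the following sketch, using an invented FQDN:

{
  "opentofu/data/host1.example.com" = {
    capabilities = [ "read" ];
  };
}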
@@ -1,76 +0,0 @@
-{ lib, config, ... }:
-rec {
-  vdevModule = lib.khscodes.mkSubmodule {
-    description = "vdev";
-    options = {
-      mode = lib.mkOption {
-        type = lib.types.enum [
-          "mirror"
-          "raidz"
-          "raidz1"
-          "raidz2"
-          "raidz3"
-        ];
-        description = "Mode of the vdev";
-        default = "mirror";
-      };
-      members = lib.mkOption {
-        type = lib.types.listOf lib.types.str;
-        description = "Member disks of the vdev. Given as symbolic names, expected to be mapped to actual disks elsewhere.";
-      };
-    };
-  };
-  datasetModule = lib.khscodes.mkSubmodule {
-    description = "dataset";
-    options = {
-      options = lib.mkOption {
-        description = "Options for the dataset";
-        type = lib.types.attrsOf lib.types.str;
-        default = { };
-      };
-      mountpoint = lib.mkOption {
-        type = lib.types.nullOr lib.types.str;
-        default = null;
-        description = "Path to mount the dataset to";
-      };
-    };
-  };
-  zpoolModule = lib.khscodes.mkSubmodule {
-    description = "zpool";
-    options = {
-      vdevs = lib.mkOption {
-        type = lib.types.listOf vdevModule;
-        default = [ ];
-      };
-      encryptionKeyOpenbao = {
-        mount = lib.mkOption {
-          type = lib.types.str;
-          default = "opentofu";
-          description = "The mountpoint of the encryption key";
-        };
-        name = lib.mkOption {
-          type = lib.types.str;
-          description = "The name of the encryption key in the mount";
-          default = config.khscodes.networking.fqdn;
-        };
-        field = lib.mkOption {
-          type = lib.types.str;
-          description = "Field name of the encryption key";
-        };
-      };
-      rootFsOptions = lib.mkOption {
-        type = lib.types.attrsOf lib.types.str;
-        default = { };
-      };
-      zpoolOptions = lib.mkOption {
-        type = lib.types.attrsOf lib.types.str;
-        default = { };
-      };
-      datasets = lib.mkOption {
-        type = lib.types.attrsOf datasetModule;
-        description = "Datasets for the zpool";
-        default = { };
-      };
-    };
-  };
-}
@@ -1,88 +0,0 @@
-{
-  config,
-  lib,
-  pkgs,
-  ...
-}:
-let
-  inherit (import ../../options.nix { inherit config lib; }) datasetModule;
-  zfsCfg = config.khscodes.fs.zfs;
-  cfg = zfsCfg.services.postgresql;
-  pgCfg = config.services.postgresql;
-in
-{
-  options.khscodes.fs.zfs.services.postgresql = {
-    enable = lib.mkOption {
-      description = "Enables storing postgresql data on a zfs zpool";
-      type = lib.types.bool;
-      default = zfsCfg.enable && pgCfg.enable;
-    };
-    pool = lib.mkOption {
-      type = lib.types.str;
-      default = zfsCfg.mainPoolName;
-    };
-    datasetName = lib.mkOption {
-      type = lib.types.str;
-      default = "database/postgresql";
-    };
-    backupDatasetName = lib.mkOption {
-      type = lib.types.str;
-      default = "backup/database/postgresql";
-    };
-    datasetConfig = lib.mkOption {
-      type = datasetModule;
-      default = {
-        mountpoint = "/var/lib/postgresql";
-      };
-    };
-    backupDatasetConfig = lib.mkOption {
-      type = datasetModule;
-      default = {
-        mountpoint = "/var/backup/postgresql";
-      };
-    };
-    backupDatabases = lib.mkOption {
-      type = lib.types.listOf lib.types.str;
-      default = pgCfg.ensureDatabases;
-    };
-  };
-  config = lib.mkMerge [
-    (lib.mkIf (zfsCfg.enable && cfg.enable) {
-      khscodes.fs.zfs.zpools."${cfg.pool}".datasets."${cfg.datasetName}" = cfg.datasetConfig;
-      systemd.services.postgresql = {
-        after = [ "khscodes-zpool-setup.service" ];
-        unitConfig = {
-          RequiresMountsFor = [ cfg.datasetConfig.mountpoint ];
-        };
-      };
-      services.postgresql.dataDir = "${cfg.datasetConfig.mountpoint}/${pgCfg.package.psqlSchema}";
-    })
-    (lib.mkIf (zfsCfg.enable && cfg.enable) {
-      khscodes.fs.zfs.zpools."${cfg.pool}".datasets."${cfg.backupDatasetName}" = cfg.backupDatasetConfig;
-      services.postgresqlBackup = {
-        enable = true;
-        databases = cfg.backupDatabases;
-      };
-      systemd.services =
-        (lib.listToAttrs (
-          lib.lists.map (db: {
-            name = "postgresqlBackup-${db}";
-            value = {
-              after = [ "khscodes-zpool-setup.service" ];
-              unitConfig = {
-                RequiresMountsFor = [ cfg.backupDatasetConfig.mountpoint ];
-              };
-            };
-          }) cfg.backupDatabases
-        ))
-        // {
-          khscodes-zpool-setup.serviceConfig = {
-            ExecStartPost = [
-              "${lib.getExe' pkgs.uutils-coreutils-noprefix "chown"} ${config.systemd.services.postgresql.serviceConfig.User}:${config.systemd.services.postgresql.serviceConfig.Group} ${lib.escapeShellArg cfg.backupDatasetConfig.mountpoint}"
-              "${lib.getExe' pkgs.uutils-coreutils-noprefix "chmod"} 0700 ${lib.escapeShellArg cfg.backupDatasetConfig.mountpoint}"
-            ];
-          };
-        };
-    })
-  ];
-}
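For a concrete sense of the listToAttrs/map step in the removed file: with backupDatabases = [ "grafana" ] (an invented example), it evaluates to an attrset equivalent to the following, ordering each per-database backup unit after pool setup and tying it to the backup mountpoint:

systemd.services."postgresqlBackup-grafana" = {
  after = [ "khscodes-zpool-setup.service" ];
  unitConfig = {
    RequiresMountsFor = [ "/var/backup/postgresql" ];
  };
};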
@@ -1,8 +1,6 @@
 use serde::Deserialize;
 use std::{
     collections::{BTreeMap, HashMap},
-    ffi::OsStr,
-    os::unix::ffi::OsStrExt,
     path::{Path, PathBuf},
 };
 
@@ -153,13 +151,13 @@ struct ZpoolStatusPool {
     vdevs: HashMap<String, ZpoolStatusVdev>,
 }
 
-#[derive(Clone, Copy, Debug, Deserialize, PartialEq)]
+#[derive(Clone, Copy, Deserialize, PartialEq)]
 enum ZpoolState {
     #[serde(rename = "ONLINE")]
     Online,
 }
 
-#[derive(Deserialize, Debug)]
+#[derive(Deserialize)]
 #[serde(tag = "vdev_type")]
 enum ZpoolStatusVdev {
     #[serde(rename = "root")]
@@ -179,22 +177,11 @@ impl ZpoolStatusVdev {
         }
     }
     pub fn is_vdev_for_disk(&self, disk_path: &Path) -> bool {
-        // Zpool status returns partition 1 as the path to the device, even if zfs was given the entire disk to work with during pool creation.
-        // Depending on whether we use a device like /dev/vdb or /dev/disk/by-id/XXXX, this will be reported as /dev/vdb1 or /dev/disk/by-id/XXXX-part1.
-        matches!(self, Self::Disk(disk) if strip_path_inline_suffix(&disk.path, OsStr::from_bytes(b"-part1")) == disk_path || strip_path_inline_suffix(&disk.path, OsStr::from_bytes(b"1")) == disk_path)
+        matches!(self, Self::Disk(disk) if disk.path == disk_path)
     }
 }
 
-fn strip_path_inline_suffix<'a>(path: &'a Path, suffix: &OsStr) -> &'a Path {
-    Path::new(OsStr::from_bytes(
-        path.as_os_str()
-            .as_encoded_bytes()
-            .strip_suffix(suffix.as_encoded_bytes())
-            .unwrap_or(path.as_os_str().as_encoded_bytes()),
-    ))
-}
-
-#[derive(Deserialize, Debug)]
+#[derive(Deserialize)]
 struct ZpoolStatusVdevRoot {
     #[allow(dead_code)]
     name: String,
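As the removed comment explains, `zpool status` reports the first partition's path even when ZFS was handed the whole disk, so /dev/vdb comes back as /dev/vdb1 and /dev/disk/by-id/XXXX as /dev/disk/by-id/XXXX-part1. A self-contained sketch of the suffix-stripping comparison being dropped here (the helper body is taken verbatim from the removed code; the device path is an invented placeholder):

use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::Path;

// Strip `suffix` from the end of `path`, returning `path` unchanged when absent.
fn strip_path_inline_suffix<'a>(path: &'a Path, suffix: &OsStr) -> &'a Path {
    Path::new(OsStr::from_bytes(
        path.as_os_str()
            .as_encoded_bytes()
            .strip_suffix(suffix.as_encoded_bytes())
            .unwrap_or(path.as_os_str().as_encoded_bytes()),
    ))
}

fn main() {
    // `zpool status` reports the partition; the disk mapping names the whole disk.
    let reported = Path::new("/dev/disk/by-id/XXXX-part1");
    let configured = Path::new("/dev/disk/by-id/XXXX");
    assert_eq!(
        strip_path_inline_suffix(reported, OsStr::from_bytes(b"-part1")),
        configured,
    );
}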
@@ -203,7 +190,7 @@ struct ZpoolStatusVdevRoot {
     vdevs: HashMap<String, ZpoolStatusVdev>,
 }
 
-#[derive(Deserialize, Debug)]
+#[derive(Deserialize)]
 struct ZpoolStatusVdevDisk {
     #[allow(dead_code)]
     name: String,
@@ -212,7 +199,7 @@ struct ZpoolStatusVdevDisk {
     path: PathBuf,
 }
 
-#[derive(Deserialize, Debug)]
+#[derive(Deserialize)]
 struct ZpoolStatusVdevMirror {
     #[allow(dead_code)]
     name: String,