Attempt to improve zfs setup a bit

commit 9fbfd0ce02, parent dbe31fd176
3 changed files with 211 additions and 102 deletions
@@ -120,85 +120,126 @@ in
         "${cfg.mainPoolName}" = { };
       };
     };
-  };
-  config = lib.mkIf cfg.enable {
-    # TODO: Verify that each member disk is uniquely named, and exists somewhere?
-    assertions = lib.lists.map (
-      { name, value }:
-      {
-        assertion = (lib.lists.length value.vdevs) > 0;
-        message = "Zpool ${name} contains no vdevs";
-      }
-    ) (lib.attrsToList cfg.zpools);
-    boot.supportedFilesystems = {
-      zfs = true;
-    };
-    # On servers, we handle importing, creating and mounting of the pool manually.
-    boot.zfs = {
-      forceImportRoot = false;
-      requestEncryptionCredentials = false;
-    };
-    services.zfs.autoScrub.enable = true;
-    systemd.services.zfs-mount.enable = false;
-    systemd.services.zfs-import-zroot.enable = false;
-    systemd.services.khscodes-zpool-setup = {
-      after = [
-        "network-online.target"
-      ];
-      wants = [
-        "network-online.target"
-      ];
-      wantedBy = [
-        "multi-user.target"
-      ];
-      environment = {
-        BAO_ADDR = config.khscodes.services.vault-agent.vault.address;
-        VAULT_ROLE_ID_FILE = "/var/lib/vault-agent/role-id";
-        VAULT_SECRET_ID_FILE = "/var/lib/vault-agent/secret-id";
-        DISK_MAPPING_FILE = "/run/secret/disk-mapping.json";
-        LOGLEVEL = "trace";
-      }
-      // (lib.attrsets.optionalAttrs isTest {
-        ZFS_TEST = "true";
-      });
-      unitConfig.ConditionPathExists = [
-        "/run/secret/disk-mapping.json"
-      ]
-      ++ lib.lists.optionals (!isTest) [
-        "/var/lib/vault-agent/role-id"
-        "/var/lib/vault-agent/secret-id"
-      ];
-      serviceConfig = {
-        Type = "oneshot";
-        RemainAfterExit = true;
-        ExecStart = ''
-          ${lib.strings.concatStringsSep "\n" setupZpools}
-        '';
+    services = {
+      postgresql = {
+        enable = lib.option {
+          description = "Enables storing postgresql data on a zfs zpool";
+          type = lib.types.bool;
+          default = cfg.enable && config.services.postgresql.enable;
+        };
+        pool = lib.mkOption {
+          type = lib.types.str;
+          default = cfg.mainPoolName;
+        };
+        datasetName = lib.mkOption {
+          type = lib.types.str;
+          default = "database/postgresql";
+        };
+        datasetConfig = lib.mkOption {
+          type = datasetModule;
+          default = {
+            mountpoint = config.services.postgresql.dataDir;
+          };
+        };
       };
     };
-    khscodes.infrastructure.vault-server-approle.policy = lib.mapAttrs' (name: value: {
-      name = "${value.encryptionKeyOpenbao.mount}/data/${value.encryptionKeyOpenbao.name}";
-      value = {
-        capabilities = [ "read" ];
-      };
-    }) cfg.zpools;
-    # Reading the disk setup through anopenbao secret allows
-    # the service to be restarted when adding new disks, or resizing existing disks.
-    khscodes.services.vault-agent.templates = [
-      {
-        contents = ''
-          {{- with secret "data-disks/data/${config.khscodes.networking.fqdn}" -}}
-          {{ .Data.data | toUnescapedJSON }}
-          {{- end -}}
-        '';
-        destination = "/run/secret/disk-mapping.json";
-        owner = "root";
-        group = "root";
-        perms = "0644";
-        restartUnits = [ "khscodes-zpool-setup.service" ];
-      }
-    ];
-    services.prometheus.exporters.zfs.enable = true;
-    khscodes.infrastructure.vault-prometheus-sender.exporters.enabled = [ "zfs" ];
   };
+  config = lib.mkMerge [
+    (lib.mkIf cfg.enable {
+      # TODO: Verify that each member disk is uniquely named, and exists somewhere?
+      assertions = lib.lists.map (
+        { name, value }:
+        {
+          assertion = (lib.lists.length value.vdevs) > 0;
+          message = "Zpool ${name} contains no vdevs";
+        }
+      ) (lib.attrsToList cfg.zpools);
+      boot.supportedFilesystems = {
+        zfs = true;
+      };
+      # On servers, we handle importing, creating and mounting of the pool manually.
+      boot.zfs = {
+        forceImportRoot = false;
+        requestEncryptionCredentials = false;
+      };
+      services.zfs.autoScrub.enable = true;
+      systemd.services.zfs-mount.enable = false;
+      systemd.services.zfs-import-zroot.enable = false;
+      systemd.services.khscodes-zpool-setup = {
+        after = [
+          "network-online.target"
+        ];
+        wants = [
+          "network-online.target"
+        ];
+        wantedBy = [
+          "multi-user.target"
+        ];
+        environment = {
+          BAO_ADDR = config.khscodes.services.vault-agent.vault.address;
+          VAULT_ROLE_ID_FILE = "/var/lib/vault-agent/role-id";
+          VAULT_SECRET_ID_FILE = "/var/lib/vault-agent/secret-id";
+          DISK_MAPPING_FILE = "/run/secret/disk-mapping.json";
+          LOGLEVEL = "trace";
+        }
+        // (lib.attrsets.optionalAttrs isTest {
+          ZFS_TEST = "true";
+        });
+        unitConfig.ConditionPathExists = [
+          "/run/secret/disk-mapping.json"
+        ]
+        ++ lib.lists.optionals (!isTest) [
+          "/var/lib/vault-agent/role-id"
+          "/var/lib/vault-agent/secret-id"
+        ];
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = true;
+          ExecStart = ''
+            ${lib.strings.concatStringsSep "\n" setupZpools}
+          '';
+        };
+      };
+      khscodes.infrastructure.vault-server-approle.policy = lib.mapAttrs' (name: value: {
+        name = "${value.encryptionKeyOpenbao.mount}/data/${value.encryptionKeyOpenbao.name}";
+        value = {
+          capabilities = [ "read" ];
+        };
+      }) cfg.zpools;
+      # Reading the disk setup through anopenbao secret allows
+      # the service to be restarted when adding new disks, or resizing existing disks.
+      khscodes.services.vault-agent.templates = [
+        {
+          contents = ''
+            {{- with secret "data-disks/data/${config.khscodes.networking.fqdn}" -}}
+            {{ .Data.data | toUnescapedJSON }}
+            {{- end -}}
+          '';
+          destination = "/run/secret/disk-mapping.json";
+          owner = "root";
+          group = "root";
+          perms = "0644";
+          restartUnits = [ "khscodes-zpool-setup.service" ];
+        }
+      ];
+      services.prometheus.exporters.zfs.enable = true;
+      khscodes.infrastructure.vault-prometheus-sender.exporters.enabled = [ "zfs" ];
+    })
+    (lib.mkIf (cfg.enable && cfg.services.postgresql.enable) {
+      khscodes.fs.zfs.zpools."${cfg.services.postgresql.pool
+      }".datasets."${cfg.services.postgresql.datasetName}" =
+        cfg.services.postgresql.datasetConfig;
+      systemd.services.postgresql = {
+        after = [ "khscodes-zpool-setup.service" ];
+        unitConfig = {
+          RequiresMountsFor = cfg.services.postgresql.datasetConfig.mountpoint;
+        };
+      };
+      systemd.services.khscodes-zpool-setup = {
+        ExecStartPost = ''
+          chown ${config.services.postgresql.user}:${config.services.postgresql.group} ${lib.escapeShellArg cfg.services.postgresql.datasetConfig.mountpoint}
+        '';
+      };
+    })
+  ];
 }
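The vault-agent template above renders the data-disks secret to /run/secret/disk-mapping.json, which khscodes-zpool-setup then reads through DISK_MAPPING_FILE. The real DiskMapping type lives in the tool's disk_mapping module and is not part of this diff; the sketch below is a purely hypothetical illustration of the kind of name-to-device lookup its resolve step performs, assuming a flat JSON object and the serde, serde_json and anyhow crates.

use std::collections::BTreeMap;

use anyhow::Context as _;
use serde::Deserialize;

// Hypothetical shape: logical member name -> resolved device path.
#[derive(Deserialize)]
struct DiskMappingSketch(BTreeMap<String, String>);

impl DiskMappingSketch {
    fn resolve(&self, member: &str) -> anyhow::Result<&str> {
        self.0
            .get(member)
            .map(String::as_str)
            .with_context(|| format!("no disk mapping entry for {member}"))
    }
}

fn main() -> anyhow::Result<()> {
    // Stand-in for the JSON rendered to /run/secret/disk-mapping.json.
    let mapping: DiskMappingSketch =
        serde_json::from_str(r#"{ "data-disk-0": "/dev/disk/by-id/scsi-0EXAMPLE" }"#)?;
    assert_eq!(mapping.resolve("data-disk-0")?, "/dev/disk/by-id/scsi-0EXAMPLE");
    Ok(())
}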
@@ -1,10 +1,13 @@
 use serde::Deserialize;
-use std::{collections::BTreeMap, path::PathBuf};
+use std::{
+    collections::{BTreeMap, HashMap},
+    path::{Path, PathBuf},
+};

 use anyhow::Context as _;
 use clap::{Parser, Subcommand};

-use crate::cli::Dataset;
+use crate::cli::{Dataset, VdevMode};

 mod cli;
 mod disk_mapping;
@@ -158,23 +161,52 @@ enum ZpoolState
 #[serde(tag = "vdev_type")]
 enum ZpoolStatusVdev {
     #[serde(rename = "root")]
-    Root {
-        name: String,
-        state: ZpoolState,
-        vdevs: HashMap<String, ZpoolStatusVdev>,
-    },
+    Root(ZpoolStatusVdevRoot),
     #[serde(rename = "disk")]
-    Disk {
-        name: String,
-        state: ZpoolState,
-        path: PathBuf,
-    },
+    Disk(ZpoolStatusVdevDisk),
     #[serde(rename = "mirror")]
-    Mirror {
-        name: String,
-        state: ZpoolState,
-        vdevs: HashMap<String, ZpoolStatusVdev>,
+    #[allow(dead_code)]
+    Mirror(ZpoolStatusVdevMirror),
+}
+
+impl ZpoolStatusVdev {
+    pub fn as_root(&self) -> anyhow::Result<&ZpoolStatusVdevRoot> {
+        match self {
+            Self::Root(root) => Ok(root),
+            _ => Err(anyhow::format_err!("VDev was not a root vdev")),
+        }
     }
+
+    pub fn is_vdev_for_disk(&self, disk_path: &Path) -> bool {
+        matches!(self, Self::Disk(disk) if disk.path == disk_path)
+    }
+}
+
+#[derive(Deserialize)]
+struct ZpoolStatusVdevRoot {
+    #[allow(dead_code)]
+    name: String,
+    #[allow(dead_code)]
+    state: ZpoolState,
+    vdevs: HashMap<String, ZpoolStatusVdev>,
+}
+
+#[derive(Deserialize)]
+struct ZpoolStatusVdevDisk {
+    #[allow(dead_code)]
+    name: String,
+    #[allow(dead_code)]
+    state: ZpoolState,
+    path: PathBuf,
+}
+
+#[derive(Deserialize)]
+struct ZpoolStatusVdevMirror {
+    #[allow(dead_code)]
+    name: String,
+    #[allow(dead_code)]
+    state: ZpoolState,
+    #[allow(dead_code)]
+    vdevs: HashMap<String, ZpoolStatusVdev>,
 }

 fn setup_zpool(p: SetupZpool) -> anyhow::Result<()> {
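The hunk above switches ZpoolStatusVdev to newtype variants wrapping named structs, which is what lets the new as_root and is_vdev_for_disk helpers hand out references to the inner data. As a minimal, self-contained sketch of how serde's internally tagged representation (tag = "vdev_type") drives that deserialization: the type names and JSON below are illustrative stand-ins rather than the real zpool status -j output, and the example assumes serde, serde_json and anyhow as dependencies.

use std::{
    collections::HashMap,
    path::{Path, PathBuf},
};

use serde::Deserialize;

// Illustrative stand-ins for the vdev types in the diff above.
#[derive(Deserialize)]
#[serde(tag = "vdev_type")]
enum VdevSketch {
    #[serde(rename = "root")]
    Root(RootSketch),
    #[serde(rename = "disk")]
    Disk(DiskSketch),
}

#[derive(Deserialize)]
struct RootSketch {
    name: String,
    vdevs: HashMap<String, VdevSketch>,
}

#[derive(Deserialize)]
struct DiskSketch {
    #[allow(dead_code)]
    name: String,
    path: PathBuf,
}

fn main() -> anyhow::Result<()> {
    // The "vdev_type" field picks the enum variant; the remaining fields
    // fill the struct carried inside that variant.
    let json = r#"{
        "vdev_type": "root",
        "name": "zdata",
        "vdevs": {
            "disk-0": { "vdev_type": "disk", "name": "disk-0", "path": "/dev/disk/by-id/example" }
        }
    }"#;
    let parsed: VdevSketch = serde_json::from_str(json)?;
    let root = match parsed {
        VdevSketch::Root(root) => root,
        _ => anyhow::bail!("expected a root vdev"),
    };
    // The same kind of membership test setup_zpool performs against the real types.
    let wanted = Path::new("/dev/disk/by-id/example");
    assert!(root
        .vdevs
        .values()
        .any(|v| matches!(v, VdevSketch::Disk(d) if d.path.as_path() == wanted)));
    println!("pool {} already contains {}", root.name, wanted.display());
    Ok(())
}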
@@ -208,15 +240,34 @@ fn setup_zpool(p: SetupZpool) -> anyhow::Result<()> {
     {
         return Err(anyhow::format_err!("Zpool {} is not online", p.pool_name));
     }

-    // TODO: Run through the existing VDevs and add any missing vdevs, and add any missing disks
-    // as needed to any vdevs. Not exactly sure how this should be coded, but I guess we can utilize
-    // the fact we cannot really change vdev type beyond turning a disk vdev into a mirror vdev,
-    // and any single disk can only belong to one vdev. So we can simply not support moving disks between vdevs.
-    // Also, to begin with, we can simply not support any vdev other than disk vdevs, as it doesn't make much
-    // sense for my use case.
+    let root_vdev = pool
+        .vdevs
+        .get(&p.pool_name)
+        .ok_or_else(|| anyhow::format_err!("Root vdev of pool not found"))?;
+    let root_vdev = root_vdev.as_root()?;

     for vdev in p.vdevs.iter() {
+        if vdev.mode != VdevMode::Mirror {
+            return Err(anyhow::format_err!(
+                "Vdev contains non mirror mode vdev, this is currently not supported"
+            ));
+        }
+        if vdev.members.len() != 1 {
+            return Err(anyhow::format_err!(
+                "Vdev contains more than one member, this is currently not supported"
+            ));
+        }
+        let main_member = &vdev.members[0];
+        let resolved_main_member = disk_mapping.resolve(main_member)?;
+        let resolved_main_member = PathBuf::from(resolved_main_member);
+        if !root_vdev
+            .vdevs
+            .iter()
+            .any(|(_, vdev)| vdev.is_vdev_for_disk(&resolved_main_member))
+        {
+            zfs::add_vdev_to_pool(&p.pool_name, &disk_mapping, vdev)?;
+        }
         for member in vdev.members.iter() {
             let resolved = disk_mapping.resolve(member)?;
             zfs::resize_disk(&p.pool_name, &resolved)?;
@@ -4,7 +4,11 @@ use anyhow::Context as _;
 use common::proc::Command;
 use serde::Deserialize;

-use crate::{SetupZpool, cli::Dataset, disk_mapping::DiskMapping};
+use crate::{
+    SetupZpool,
+    cli::{Dataset, Vdev},
+    disk_mapping::DiskMapping,
+};

 #[derive(Debug, Deserialize, PartialEq)]
 enum ZpoolState {
@@ -88,6 +92,19 @@ pub fn create_pool(
     Ok(())
 }

+pub fn add_vdev_to_pool(
+    pool_name: &str,
+    disk_mapping: &DiskMapping,
+    vdev: &Vdev,
+) -> anyhow::Result<()> {
+    let mut proc = Command::new("zpool");
+    proc.args(["add", pool_name]);
+    proc.args(vdev.cli_args(disk_mapping)?);
+
+    proc.try_spawn_to_bytes()?;
+    Ok(())
+}
+
 pub fn create_dataset_recursive(
     pool_name: &str,
     dataset_name: &str,
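The new add_vdev_to_pool shells out to zpool add with whatever Vdev::cli_args produces, and that method is not part of this diff. As a hedged sketch of the resulting command line for the single-member, mirror-mode vdevs that setup_zpool currently accepts, one plausible argument shape is shown below; a single disk is added as a plain disk vdev, since the explicit mirror keyword only applies once at least two devices are given.

// Hypothetical helper, not the repo's Vdev::cli_args: builds the argument list
// for `zpool add <pool> [mirror] <device>...` from already-resolved device paths.
fn zpool_add_args(pool_name: &str, resolved_members: &[&str]) -> Vec<String> {
    let mut args = vec!["add".to_string(), pool_name.to_string()];
    if resolved_members.len() > 1 {
        // `zpool` rejects a mirror vdev with fewer than two devices.
        args.push("mirror".to_string());
    }
    args.extend(resolved_members.iter().map(|m| m.to_string()));
    args
}

fn main() {
    assert_eq!(
        zpool_add_args("zdata", &["/dev/disk/by-id/scsi-0EXAMPLE"]),
        ["add", "zdata", "/dev/disk/by-id/scsi-0EXAMPLE"]
    );
}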