Attempt to get merging of zfs options in zpool setup working

I have not yet tested the addition of new datasets, or the removal/unmounting of newly disappeared datasets.
This commit is contained in:
parent 89a3e16ab7
commit 5abaa9322e

5 changed files with 224 additions and 12 deletions
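The gist of the change, as a minimal sketch before the diff (illustrative code, not part of this commit; `plan` and its names are invented): options whose desired value differs from what is currently set get applied with `zfs set -u`, while options that have disappeared from the desired map get reverted with `zfs inherit`.

use std::collections::BTreeMap;

// Sketch of the merge semantics implemented below: compute which properties
// to (re)set and which to revert to their inherited values.
fn plan(
    desired: &BTreeMap<String, String>,
    actual: &BTreeMap<String, String>,
) -> (Vec<String>, Vec<String>) {
    // Properties to pass to `zfs set -u` as key=value pairs.
    let set: Vec<String> = desired
        .iter()
        .filter(|(k, v)| actual.get(*k) != Some(*v))
        .map(|(k, v)| format!("{k}={v}"))
        .collect();
    // Properties to revert with `zfs inherit`.
    let inherit: Vec<String> = actual
        .keys()
        .filter(|k| !desired.contains_key(*k))
        .cloned()
        .collect();
    (set, inherit)
}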
@@ -155,6 +155,7 @@ in
       VAULT_ROLE_ID_FILE = "/var/lib/vault-agent/role-id";
       VAULT_SECRET_ID_FILE = "/var/lib/vault-agent/secret-id";
       DISK_MAPPING_FILE = "/run/secret/disk-mapping.json";
+      LOGLEVEL = "trace";
     }
     // (lib.attrsets.optionalAttrs isTest {
       ZFS_TEST = "true";
@@ -26,7 +26,7 @@ in
   dataDisks = [
     {
       name = "mx.kaareskovgaard.net-zroot-disk1";
-      size = 10;
+      size = 15;
       zfs = true;
     }
   ];
@@ -1,6 +1,6 @@
-use std::{borrow::Cow, collections::BTreeMap, path::PathBuf, str::FromStr};
+use std::{borrow::Cow, collections::BTreeMap, ops::Deref, path::PathBuf, str::FromStr};
 
-use serde::Deserialize;
+use serde::{Deserialize, Serialize};
 
 use crate::disk_mapping::DiskMapping;
 
@@ -52,7 +52,7 @@ impl Vdev {
 
 #[derive(Clone, Debug, Deserialize)]
 #[serde(transparent)]
-pub struct Vdevs(pub Vec<Vdev>);
+pub struct Vdevs(Vec<Vdev>);
 
 impl FromStr for Vdevs {
     type Err = anyhow::Error;
@@ -62,9 +62,17 @@ impl FromStr for Vdevs {
     }
 }
 
-#[derive(Clone, Debug, Deserialize)]
+impl Deref for Vdevs {
+    type Target = Vec<Vdev>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)]
 #[serde(transparent)]
-pub struct Options(pub BTreeMap<String, String>);
+pub struct Options(BTreeMap<String, String>);
 
 impl FromStr for Options {
     type Err = anyhow::Error;
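The tuple fields of `Vdevs` and `Options` become private here, so the `Deref` impls are what keep callers working without reaching through `.0`. A hypothetical caller (not in this commit; `count_members` is invented, `Vdev::members` appears later in the diff):

// Deref coercion lets Vec<Vdev> methods be called on the wrapper even though
// the inner field is now private.
fn count_members(vdevs: &Vdevs) -> usize {
    // `iter()` resolves through Deref<Target = Vec<Vdev>>.
    vdevs.iter().map(|vdev| vdev.members.len()).sum()
}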
@@ -73,13 +81,36 @@ impl FromStr for Options {
         common::json::from_str(s)
     }
 }
 
-#[derive(Clone, Debug, Deserialize)]
+impl Deref for Options {
+    type Target = BTreeMap<String, String>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)]
 pub struct Dataset {
     pub options: Options,
     pub mountpoint: Option<PathBuf>,
 }
 
+impl Dataset {
+    pub fn without_mountpoint(&self) -> Dataset {
+        Self {
+            mountpoint: None,
+            options: self.options.clone(),
+        }
+    }
+
+    pub fn with_options(options: &Options) -> Self {
+        Self {
+            mountpoint: None,
+            options: options.clone(),
+        }
+    }
+}
+
 #[derive(Clone, Debug, Deserialize)]
 #[serde(transparent)]
 pub struct Datasets(pub BTreeMap<String, Dataset>);
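The new `Serialize` and `PartialEq` derives are what make the merge possible: the desired `Dataset` can be stored as JSON in a ZFS user property and compared against on the next run. A sketch of that round trip (not in this commit; option and mountpoint values are invented):

fn roundtrip() -> anyhow::Result<()> {
    let desired = Dataset {
        // Options implements FromStr by parsing JSON (see above).
        options: r#"{"compression":"zstd"}"#.parse()?,
        mountpoint: Some(PathBuf::from("/srv/data")),
    };
    // Serialize for storage in the khscodes:dataset user property...
    let stored = common::json::to_string(&desired)?;
    // ...and parse it back when listing datasets on a later run.
    let recovered: Dataset = common::json::from_str(&stored)?;
    // PartialEq is what lets update_dataset skip no-op updates.
    assert_eq!(desired, recovered);
    Ok(())
}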
@@ -4,6 +4,8 @@ use std::{collections::BTreeMap, path::PathBuf};
 use anyhow::Context as _;
 use clap::{Parser, Subcommand};
 
+use crate::cli::Dataset;
+
 mod cli;
 mod disk_mapping;
 mod zfs;
@@ -183,12 +185,57 @@ fn setup_zpool(p: SetupZpool) -> anyhow::Result<()> {
         return Err(anyhow::format_err!("Zpool {} is not online", p.pool_name));
     }
 
-    for vdev in p.vdevs.0.iter() {
+    for vdev in p.vdevs.iter() {
         for member in vdev.members.iter() {
             let resolved = disk_mapping.resolve(member)?;
             zfs::resize_disk(&p.pool_name, &resolved)?;
         }
     }
 
+    let mut existing_datasets = zfs::list_datasets(&p.pool_name)?;
+    log::info!("Existing datasets found: {existing_datasets:?}");
+
+    let rootfs_dataset = existing_datasets
+        .get(&p.pool_name)
+        .context("No root dataset found in existing pool")?;
+    zfs::update_dataset(
+        &p.pool_name,
+        Some(&Dataset::with_options(&p.root_fs_options)),
+        rootfs_dataset.as_ref(),
+    )?;
+    for (key, dataset) in p.datasets.0.iter() {
+        let full_path = format!("{}/{key}", p.pool_name);
+        let existing_dataset = existing_datasets.get(&full_path);
+        if let Some(existing_dataset) = existing_dataset {
+            log::info!("Updating {key} as {full_path}");
+            zfs::update_dataset(&full_path, Some(dataset), existing_dataset.as_ref())?;
+        } else {
+            log::info!("Creating new dataset {key} as {full_path}");
+            zfs::create_dataset_recursive(&p.pool_name, key.as_str(), dataset)?;
+            // Creating a dataset might create more than one dataset. For now
+            // just be lazy and recalculate the world.
+            existing_datasets = zfs::list_datasets(&p.pool_name)?;
+        }
+    }
+    for (path, dataset) in existing_datasets.iter().filter_map(|(s, d)| {
+        if s == &p.pool_name {
+            return None;
+        }
+        if let Some(d) = d {
+            let non_full_path = s
+                .strip_prefix(&p.pool_name)
+                .and_then(|s| s.strip_prefix("/"))
+                .expect("Non root fs dataset should start with 'pool_name/'");
+            if !p.datasets.0.keys().any(|k| k.starts_with(non_full_path)) {
+                return Some((s, d));
+            }
+        }
+        None
+    }) {
+        log::info!("Removing mountpoint for dataset {path}, as it was removed from the mapping");
+        // Don't delete datasets when they are removed from the map, just unset
+        // the mountpoint.
+        zfs::update_dataset(path, Some(&dataset.without_mountpoint()), Some(dataset))?;
+    }
+
     if zfs::encryption_key_needs_load(&p.pool_name)? {
         let encryption_key = p.encryption_key()?;
         zfs::load_key(&p.pool_name, &encryption_key)?;
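Worth spelling out: the `starts_with` filter above keeps not only configured datasets but also their parents, since a parent's pool-relative path is a prefix of its children's keys. An invented illustration of that behavior:

// Mirrors the removal filter: a dataset keeps its mountpoint if any
// configured key starts with its pool-relative path.
fn is_still_configured(configured_keys: &[&str], non_full_path: &str) -> bool {
    configured_keys.iter().any(|k| k.starts_with(non_full_path))
}

fn main() {
    assert!(is_still_configured(&["srv/www"], "srv")); // parent of a configured dataset
    assert!(is_still_configured(&["srv/www"], "srv/www")); // configured itself
    assert!(!is_still_configured(&["srv/www"], "old")); // stale: mountpoint gets cleared
}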
@@ -61,11 +61,11 @@ pub fn create_pool(
         "cachefile=none",
     ]);
 
-    for (key, value) in zpool.zpool_options.0.iter() {
+    for (key, value) in zpool.zpool_options.iter() {
         proc.args(["-o", &format!("{key}={value}")]);
     }
 
-    for (key, value) in zpool.root_fs_options.0.iter() {
+    for (key, value) in zpool.root_fs_options.iter() {
         proc.args(["-O", &format!("{key}={value}")]);
     }
 
@@ -78,7 +78,7 @@ pub fn create_pool(
         "keylocation=prompt",
     ]);
 
-    for vdev in zpool.vdevs.0.iter() {
+    for vdev in zpool.vdevs.iter() {
         proc.args(vdev.cli_args(disk_mapping)?.into_iter());
     }
 
@@ -100,10 +100,15 @@ pub fn create_dataset_recursive(
         proc.arg("-o");
         proc.arg(format!("mountpoint={}", mountpoint.display()));
     }
-    for (key, value) in dataset.options.0.iter() {
+    for (key, value) in dataset.options.iter() {
         proc.arg("-o");
         proc.arg(format!("{key}={value}"));
     }
+    proc.arg("-o");
+    proc.arg(format!(
+        "khscodes:dataset={}",
+        common::json::to_string(&dataset)?
+    ));
 
     proc.arg(name);
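ZFS requires user property names to contain a colon, which is why the definition is stashed under the `khscodes:` namespace. Roughly what the stored value would look like (a sketch; the shape follows the serde derives above, the concrete values are invented):

// The khscodes:dataset value is just the serialized Dataset, e.g. for one
// option plus a mountpoint (invented values):
//
//   khscodes:dataset={"options":{"compression":"zstd"},"mountpoint":"/srv/data"}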
@@ -111,6 +116,134 @@ pub fn create_dataset_recursive(
     Ok(())
 }
 
+pub fn list_datasets(pool_name: &str) -> anyhow::Result<BTreeMap<String, Option<Dataset>>> {
+    #[derive(Deserialize)]
+    struct ZfsList {
+        datasets: BTreeMap<String, ZfsListDataset>,
+    }
+
+    #[derive(Deserialize)]
+    struct ZfsListDataset {
+        name: String,
+        properties: ZfsListDatasetProperties,
+    }
+
+    #[derive(Deserialize)]
+    struct ZfsListDatasetProperties {
+        #[serde(rename = "khscodes:dataset")]
+        dataset: ZfsListDatasetPropertiesDataset,
+    }
+
+    #[derive(Deserialize)]
+    struct ZfsListDatasetPropertiesDataset {
+        value: String,
+        source: ZfsListDatasetPropertySource,
+    }
+
+    #[derive(Deserialize)]
+    struct ZfsListDatasetPropertySource {
+        r#type: String,
+    }
+    let mut proc = Command::new("zfs");
+    proc.args(["list", "-r", "-j", "-o", "name,khscodes:dataset", pool_name]);
+
+    let output: ZfsList = proc.try_spawn_to_json()?;
+
+    let mut result = BTreeMap::new();
+
+    for set in output.datasets.into_values() {
+        let _ = result.insert(
+            set.name,
+            if set.properties.dataset.source.r#type == "LOCAL"
+                && set.properties.dataset.value != "-"
+            {
+                Some(common::json::from_str(&set.properties.dataset.value)?)
+            } else {
+                None
+            },
+        );
+    }
+    Ok(result)
+}
+
+pub fn update_dataset(
+    dataset_path: &str,
+    dataset: Option<&Dataset>,
+    existing_dataset: Option<&Dataset>,
+) -> anyhow::Result<()> {
+    if dataset == existing_dataset {
+        return Ok(());
+    }
+    let empty_dataset = Dataset::default();
+    let existing_dataset = existing_dataset.unwrap_or(&empty_dataset);
+    if let Some(dataset) = dataset {
+        let mut proc = Command::new("zfs");
+        proc.args(["set", "-u"]);
+        let mut any_props_set = false;
+        for (key, value) in dataset.options.iter().filter(|(k, v)| {
+            if let Some(ex) = existing_dataset.options.get(k.as_str()) {
+                v.as_str() != ex.as_str()
+            } else {
+                true
+            }
+        }) {
+            any_props_set = true;
+            proc.arg(format!("{key}={value}"));
+        }
+        match (
+            dataset.mountpoint.as_deref(),
+            existing_dataset.mountpoint.as_deref(),
+        ) {
+            (Some(n), Some(e)) => {
+                if n != e {
+                    any_props_set = true;
+                    proc.arg(format!("mountpoint={}", n.display()));
+                }
+            }
+            (Some(n), None) => {
+                any_props_set = true;
+                proc.arg(format!("mountpoint={}", n.display()));
+            }
+            (None, Some(_)) => {
+                // This will unmount the dataset at this point in time.
+                let mut proc = Command::new("zfs");
+                proc.args(["inherit", "mountpoint", dataset_path]);
+                proc.try_spawn_to_bytes()?;
+            }
+            (None, None) => {}
+        }
+
+        if any_props_set {
+            proc.arg(dataset_path);
+            proc.try_spawn_to_bytes()?;
+        }
+    } else if existing_dataset.mountpoint.as_deref().is_some() {
+        let mut proc = Command::new("zfs");
+        proc.args(["inherit", "mountpoint", dataset_path]);
+        proc.try_spawn_to_bytes()?;
+    }
+
+    for (key, _) in existing_dataset
+        .options
+        .iter()
+        .filter(|(k, _)| !dataset.is_some_and(|d| d.options.contains_key(k.as_str())))
+    {
+        let mut proc = Command::new("zfs");
+        proc.args(["inherit", key.as_str(), dataset_path]);
+        proc.try_spawn_to_bytes()?;
+    }
+    let mut custom_prop = Command::new("zfs");
+    custom_prop.arg("set");
+    custom_prop.arg(format!(
+        "khscodes:dataset={}",
+        common::json::to_string(&dataset)?
+    ));
+    custom_prop.arg(dataset_path);
+    custom_prop.try_spawn_to_bytes()?;
+
+    Ok(())
+}
+
 pub fn mount_all(pool: &str) -> anyhow::Result<()> {
     let mut proc = Command::new("zfs");
     proc.args(["mount", "-R", pool]);
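For reference, the JSON shape `list_datasets` expects back from `zfs list -r -j -o name,khscodes:dataset` (a sketch with invented values; the real output carries extra fields, which the function-local serde structs above simply ignore):

// Invented sample of the assumed `zfs list -j` output. The property value is
// itself JSON: the serialized Dataset written by create_dataset_recursive.
const SAMPLE: &str = r#"{
  "datasets": {
    "zroot": {
      "name": "zroot",
      "properties": {
        "khscodes:dataset": {
          "value": "-",
          "source": { "type": "-" }
        }
      }
    },
    "zroot/srv": {
      "name": "zroot/srv",
      "properties": {
        "khscodes:dataset": {
          "value": "{\"options\":{},\"mountpoint\":\"/srv\"}",
          "source": { "type": "LOCAL" }
        }
      }
    }
  }
}"#;
// list_datasets would map "zroot" to None (no locally set property, value "-")
// and "zroot/srv" to Some(Dataset { mountpoint: Some("/srv"), .. }).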