Attempt to improve zfs setup a bit

parent dbe31fd176
commit 9fbfd0ce02

3 changed files with 211 additions and 102 deletions
@@ -1,10 +1,13 @@
 use serde::Deserialize;
-use std::{collections::BTreeMap, path::PathBuf};
+use std::{
+    collections::{BTreeMap, HashMap},
+    path::{Path, PathBuf},
+};
 
 use anyhow::Context as _;
 use clap::{Parser, Subcommand};
 
-use crate::cli::Dataset;
+use crate::cli::{Dataset, VdevMode};
 
 mod cli;
 mod disk_mapping;
@@ -158,23 +161,52 @@ enum ZpoolState {
 #[serde(tag = "vdev_type")]
 enum ZpoolStatusVdev {
     #[serde(rename = "root")]
-    Root {
-        name: String,
-        state: ZpoolState,
-        vdevs: HashMap<String, ZpoolStatusVdev>,
-    },
+    Root(ZpoolStatusVdevRoot),
     #[serde(rename = "disk")]
-    Disk {
-        name: String,
-        state: ZpoolState,
-        path: PathBuf,
-    },
+    Disk(ZpoolStatusVdevDisk),
     #[serde(rename = "mirror")]
-    Mirror {
-        name: String,
-        state: ZpoolState,
-        vdevs: HashMap<String, ZpoolStatusVdev>,
-    },
+    #[allow(dead_code)]
+    Mirror(ZpoolStatusVdevMirror),
 }
 
+impl ZpoolStatusVdev {
+    pub fn as_root(&self) -> anyhow::Result<&ZpoolStatusVdevRoot> {
+        match self {
+            Self::Root(root) => Ok(root),
+            _ => Err(anyhow::format_err!("VDev was not a root vdev")),
+        }
+    }
+    pub fn is_vdev_for_disk(&self, disk_path: &Path) -> bool {
+        matches!(self, Self::Disk(disk) if disk.path == disk_path)
+    }
+}
+
+#[derive(Deserialize)]
+struct ZpoolStatusVdevRoot {
+    #[allow(dead_code)]
+    name: String,
+    #[allow(dead_code)]
+    state: ZpoolState,
+    vdevs: HashMap<String, ZpoolStatusVdev>,
+}
+
+#[derive(Deserialize)]
+struct ZpoolStatusVdevDisk {
+    #[allow(dead_code)]
+    name: String,
+    #[allow(dead_code)]
+    state: ZpoolState,
+    path: PathBuf,
+}
+
+#[derive(Deserialize)]
+struct ZpoolStatusVdevMirror {
+    #[allow(dead_code)]
+    name: String,
+    #[allow(dead_code)]
+    state: ZpoolState,
+    #[allow(dead_code)]
+    vdevs: HashMap<String, ZpoolStatusVdev>,
+}
+
 fn setup_zpool(p: SetupZpool) -> anyhow::Result<()> {
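
A minimal sketch (not from this commit) of how the tagged enum above deserializes a vdev tree: `vdev_type` selects the variant, and the remaining keys fill the matching struct. The field names follow the struct definitions in the diff; the sample values, the use of serde_json, and the assumption that ZpoolState accepts the string "ONLINE" are illustrative only, not taken from real `zpool status` JSON output.

    fn parse_vdev_tree_example() -> anyhow::Result<()> {
        // Hand-written JSON shaped like the structs above.
        let json = r#"{
            "vdev_type": "root",
            "name": "tank",
            "state": "ONLINE",
            "vdevs": {
                "mirror-0": {
                    "vdev_type": "mirror",
                    "name": "mirror-0",
                    "state": "ONLINE",
                    "vdevs": {
                        "sda": {
                            "vdev_type": "disk",
                            "name": "sda",
                            "state": "ONLINE",
                            "path": "/dev/sda"
                        }
                    }
                }
            }
        }"#;
        let vdev: ZpoolStatusVdev = serde_json::from_str(json)?;
        // as_root errors on anything that is not the "root" variant.
        let root = vdev.as_root()?;
        assert!(root.vdevs.contains_key("mirror-0"));
        Ok(())
    }
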
@@ -208,15 +240,34 @@ fn setup_zpool(p: SetupZpool) -> anyhow::Result<()> {
     {
         return Err(anyhow::format_err!("Zpool {} is not online", p.pool_name));
     }
 
+    // TODO: Run through the existing VDevs and add any missing vdevs, and add any missing disks
+    // as needed to any vdevs. Not exactly sure how this should be coded, but I guess we can utilize
+    // the fact we cannot really change vdev type beyond turning a disk vdev into a mirror vdev,
+    // and any single disk can only belong to one vdev. So we can simply not support moving disks between vdevs.
+    // Also, to begin with, we can simply not support any vdev other than disk vdevs, as it doesn't make much
+    // sense for my use case.
+
+    let root_vdev = pool
+        .vdevs
+        .get(&p.pool_name)
+        .ok_or_else(|| anyhow::format_err!("Root vdev of pool not found"))?;
+    let root_vdev = root_vdev.as_root()?;
+
     for vdev in p.vdevs.iter() {
+        if vdev.mode != VdevMode::Mirror {
+            return Err(anyhow::format_err!(
+                "Vdev contains non mirror mode vdev, this is currently not supported"
+            ));
+        }
+        if vdev.members.len() != 1 {
+            return Err(anyhow::format_err!(
+                "Vdev contains more than one member, this is currently not supported"
+            ));
+        }
+        let main_member = &vdev.members[0];
+        let resolved_main_member = disk_mapping.resolve(main_member)?;
+        let resolved_main_member = PathBuf::from(resolved_main_member);
+        if !root_vdev
+            .vdevs
+            .iter()
+            .any(|(_, vdev)| vdev.is_vdev_for_disk(&resolved_main_member))
+        {
+            zfs::add_vdev_to_pool(&p.pool_name, &disk_mapping, vdev)?;
+        }
         for member in vdev.members.iter() {
            let resolved = disk_mapping.resolve(member)?;
            zfs::resize_disk(&p.pool_name, &resolved)?;
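
The skip-if-present check in the loop above, pulled out for clarity (a sketch, not code from the commit): a member only triggers `zpool add` when no disk vdev directly under the pool's root vdev already points at its resolved path.

    use std::path::Path;

    // Sketch: true when the resolved member path is not yet backed by a disk
    // vdev directly under the root vdev, i.e. `zpool add` is still needed.
    // Only the root vdev's direct children are inspected; disks nested inside
    // existing mirror vdevs would not be found by this check.
    fn needs_add(root: &ZpoolStatusVdevRoot, resolved_member: &Path) -> bool {
        !root
            .vdevs
            .values()
            .any(|vdev| vdev.is_vdev_for_disk(resolved_member))
    }
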
@@ -4,7 +4,11 @@ use anyhow::Context as _;
 use common::proc::Command;
 use serde::Deserialize;
 
-use crate::{SetupZpool, cli::Dataset, disk_mapping::DiskMapping};
+use crate::{
+    SetupZpool,
+    cli::{Dataset, Vdev},
+    disk_mapping::DiskMapping,
+};
 
 #[derive(Debug, Deserialize, PartialEq)]
 enum ZpoolState {
@@ -88,6 +92,19 @@ pub fn create_pool(
     Ok(())
 }
 
+pub fn add_vdev_to_pool(
+    pool_name: &str,
+    disk_mapping: &DiskMapping,
+    vdev: &Vdev,
+) -> anyhow::Result<()> {
+    let mut proc = Command::new("zpool");
+    proc.args(["add", pool_name]);
+    proc.args(vdev.cli_args(disk_mapping)?);
+
+    proc.try_spawn_to_bytes()?;
+    Ok(())
+}
+
 pub fn create_dataset_recursive(
     pool_name: &str,
     dataset_name: &str,
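
For context, a call site for the new helper might look like the sketch below (illustrative only, not code from the commit). The actual arguments passed to `zpool add` come from Vdev::cli_args, which lives in the cli module and is not part of this diff.

    use anyhow::Context as _;

    // Illustrative usage: add a vdev to an existing pool and attach extra error
    // context. `disk_mapping` and `vdev` come from the parsed setup configuration.
    fn ensure_vdev(pool: &str, disk_mapping: &DiskMapping, vdev: &Vdev) -> anyhow::Result<()> {
        zfs::add_vdev_to_pool(pool, disk_mapping, vdev)
            .with_context(|| format!("failed to add vdev to pool {pool}"))
    }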