Fix a bug with opentofu credentials

and add ZFS support to OpenStack instances, and enable it on the monitoring host
Kaare Hoff Skovgaard 2025-08-14 00:26:01 +02:00
parent 973eb085c4
commit 1bf63cc735
Signed by: khs
GPG key ID: C7D890804F01E9F0
6 changed files with 204 additions and 7 deletions
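
In practice, the option this commit introduces is consumed as in the following minimal sketch (hypothetical host and disk name; the real usage for the monitoring host appears in its profile diff further down):

# Hypothetical consumer of the new dataDisks option; name and size are examples.
khscodes.infrastructure.khs-openstack-instance.dataDisks = [
  {
    name = "example-host-zroot-disk1"; # volume name as created in OpenStack
    size = 15; # volume size (GiB)
    zfs = true; # attach the disk to the host's main zpool as its own vdev
  }
];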

View file

@@ -6,6 +6,18 @@
 }:
 let
   cfg = config.khscodes.infrastructure.khs-openstack-instance;
+  mainConfig = config;
+  hasDisks = cfg.dataDisks != [ ];
+  hasZfsDisk = lib.lists.foldl (acc: d: acc || d.zfs) false cfg.dataDisks;
+  diskZpools = lib.mkMerge (
+    lib.lists.map (d: {
+      "${d.zpoolName}".vdevs = [
+        {
+          members = [ d.name ];
+        }
+      ];
+    }) (lib.lists.filter (d: d.zfs) cfg.dataDisks)
+  );
   fqdn = config.khscodes.networking.fqdn;
   provisioningUserData = config.khscodes.infrastructure.provisioning.instanceUserData;
   firewallTcpRules = lib.lists.flatten (
@@ -58,6 +70,30 @@ let
       remote_subnet = "::/0";
     }
   ];
+  diskModule = lib.khscodes.mkSubmodule' (
+    { config }:
+    {
+      description = "Persistent disk";
+      options = {
+        name = lib.mkOption {
+          type = lib.types.str;
+        };
+        nameSanitized = lib.mkOption {
+          type = lib.types.str;
+          readOnly = true;
+          default = lib.khscodes.sanitize-terraform-name config.name;
+        };
+        zfs = lib.mkEnableOption "Enables adding the disk to a zpool as its own vdev";
+        zpoolName = lib.mkOption {
+          type = lib.types.str;
+          default = mainConfig.khscodes.fs.zfs.mainPoolName;
+        };
+        size = lib.mkOption {
+          type = lib.types.int;
+        };
+      };
+    }
+  );
   firewallRules = firewallTcpRules ++ firewallUdpRules ++ firewallIcmpRules ++ cfg.extraFirewallRules;
 in
 {
@@ -73,6 +109,11 @@
         lib.lists.filter (alias: alias != cfg.dnsName) config.khscodes.networking.aliases
       );
     };
+    dataDisks = lib.mkOption {
+      type = lib.types.listOf diskModule;
+      description = "Extra data disks to add to the instance, these will be added in the persistence phase";
+      default = [ ];
+    };
     bucket = {
       key = lib.mkOption {
         type = lib.types.str;
@@ -118,7 +159,7 @@
   config = lib.mkIf cfg.enable (
     let
       tags = [ fqdn ];
-      modules = [
+      computeModules = [
         (
           { config, ... }:
           {
@@ -187,6 +228,73 @@
           }
         )
       ];
+      persistenceModules = lib.lists.optional hasDisks (
+        { ... }:
+        {
+          imports = [
+            inputs.self.terranixModules.openstack
+            inputs.self.terranixModules.s3
+          ];
+          khscodes.s3 = {
+            enable = true;
+            bucket.key = "persistence-" + cfg.bucket.key;
+          };
+          khscodes.openstack.enable = true;
+          resource.openstack_blockstorage_volume_v3 = lib.listToAttrs (
+            lib.lists.map (disk: {
+              name = disk.nameSanitized;
+              value = {
+                inherit (disk) name size;
+                enable_online_resize = true;
+                volume_type = "__DEFAULT__";
+              };
+            }) cfg.dataDisks
+          );
+        }
+      );
+      persistenceAttachModules = lib.lists.optional hasDisks (
+        { config, ... }:
+        {
+          config = {
+            data.openstack_blockstorage_volume_v3 = lib.listToAttrs (
+              lib.lists.map (disk: {
+                name = disk.nameSanitized;
+                value = {
+                  name = disk.name;
+                };
+              }) cfg.dataDisks
+            );
+            resource.openstack_compute_volume_attach_v2 = lib.listToAttrs (
+              lib.lists.map (disk: {
+                name = disk.nameSanitized;
+                value = {
+                  volume_id = "\${ data.openstack_blockstorage_volume_v3.${disk.nameSanitized}.id }";
+                  instance_id = config.khscodes.openstack.output.compute_instance.compute.id;
+                };
+              }) cfg.dataDisks
+            );
+            resource.vault_kv_secret_v2.data_disks = {
+              mount = "data-disks";
+              name = fqdn;
+              data_json = ''
+                {
+                  "template": "{id}",
+                  "disks": ''${ jsonencode({ ${
+                    lib.strings.concatStringsSep ", " (
+                      lib.lists.map (disk: ''
+                        ${builtins.toJSON disk.name} = {
+                          "linuxDevice" = resource.openstack_compute_volume_attach_v2.${disk.nameSanitized}.device,
+                          "size" = ${builtins.toString disk.size}
+                        }
+                      '') cfg.dataDisks
+                    )
+                  } }) }
+                }
+              '';
+            };
+          };
+        }
+      );
     in
     {
       assertions = [
@@ -207,10 +315,23 @@
       khscodes.security.acme.dns01Enabled = true;
       khscodes.infrastructure.provisioning = {
         compute = {
-          modules = modules;
+          modules = computeModules;
         };
+        persistence = {
+          modules = persistenceModules;
+        };
+        persistenceAttach = {
+          modules = persistenceAttachModules;
+        };
         imageUsername = "debian";
       };
+      khscodes.infrastructure.vault-server-approle.policy = lib.mkIf hasDisks {
+        "data-disks/data/${fqdn}" = {
+          capabilities = [ "read" ];
+        };
+      };
+      khscodes.fs.zfs.enable = lib.mkIf hasZfsDisk true;
+      khscodes.fs.zfs.zpools = diskZpools;
     }
   );
 }
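
To make the interpolation in data_json above concrete: for a single disk, the secret written to the data-disks mount should render to JSON equivalent to the following Nix value (a sketch; the linuxDevice path is hypothetical and is only known once OpenTofu applies the volume attachment):

# builtins.fromJSON of the stored data_json, assuming one 15 GiB disk:
{
  template = "{id}";
  disks = {
    "monitoring.kaareskovgaard.net-zroot-disk1" = {
      linuxDevice = "/dev/vdb"; # hypothetical; comes from openstack_compute_volume_attach_v2
      size = 15;
    };
  };
}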

View file

@@ -386,6 +386,7 @@ in
           size = value.volume_size;
           image_id = "\${ data.openstack_images_image_v2.${sanitizedName}.id }";
           volume_type = value.volume_type;
+          lifecycle.ignore_changes = [ "image_id" ];
         };
       }
     ) cfg.compute_instance;

View file

@@ -56,6 +56,7 @@ in
 {
   imports = [
     "${inputs.self}/nix/profiles/nixos/khs-openstack-server.nix"
+    ./zfs.nix
   ];
   systemd.services.grafana = {
     unitConfig.ConditionPathExists = [
@@ -234,12 +235,21 @@
     };
   };
   khscodes = {
+    fs.zfs.zpools.zroot.encryptionKeyOpenbao.field =
+      "MONITORING_KAARESKOVGAARD_NET_ZROOT_ENCRYPTION_KEY";
     infrastructure.khs-openstack-instance = {
       enable = true;
       flavor = "m.large";
       network = {
         router = null;
       };
+      dataDisks = [
+        {
+          name = "monitoring.kaareskovgaard.net-zroot-disk1";
+          size = 15;
+          zfs = true;
+        }
+      ];
     };
     services.nginx = {
       enable = true;
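
With the dataDisks entry above, the diskZpools fold in the instance module should merge to roughly the following zpool layout (a sketch, assuming the default zpoolName resolves to zroot, as the new zfs.nix below suggests):

# Effective khscodes.fs.zfs.zpools contribution for this host:
{
  zroot.vdevs = [
    {
      members = [ "monitoring.kaareskovgaard.net-zroot-disk1" ];
    }
  ];
}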

View file

@@ -0,0 +1,55 @@
+{
+  pkgs,
+  lib,
+  config,
+  ...
+}:
+{
+  khscodes.fs.zfs.zpools.zroot.datasets = {
+    "monitoring/grafana" = {
+      mountpoint = "/var/lib/grafana";
+    };
+    "monitoring/loki" = {
+      mountpoint = "/var/lib/loki";
+    };
+    "monitoring/prometheus2" = {
+      mountpoint = "/var/lib/prometheus2";
+    };
+  };
+  systemd.services = {
+    grafana = {
+      after = [ "khscodes-zpool-setup.service" ];
+      wants = [ "khscodes-zpool-setup.service" ];
+      unitConfig.RequiresMountsFor = [
+        "/var/lib/grafana"
+      ];
+    };
+    loki = {
+      after = [ "khscodes-zpool-setup.service" ];
+      wants = [ "khscodes-zpool-setup.service" ];
+      unitConfig.RequiresMountsFor = [
+        "/var/lib/loki"
+      ];
+    };
+    prometheus = {
+      after = [ "khscodes-zpool-setup.service" ];
+      wants = [ "khscodes-zpool-setup.service" ];
+      unitConfig.RequiresMountsFor = [
+        "/var/lib/prometheus2"
+      ];
+    };
+    khscodes-zpool-setup = {
+      serviceConfig = {
+        ExecStartPost = [
+          "${lib.getExe' pkgs.uutils-coreutils-noprefix "chown"} grafana:grafana /var/lib/grafana"
+          "${lib.getExe' pkgs.uutils-coreutils-noprefix "chmod"} 0700 /var/lib/grafana"
+          "${lib.getExe' pkgs.uutils-coreutils-noprefix "chown"} loki:loki /var/lib/loki"
+          "${lib.getExe' pkgs.uutils-coreutils-noprefix "chmod"} 0700 /var/lib/loki"
+          "${lib.getExe' pkgs.uutils-coreutils-noprefix "chown"} prometheus:prometheus /var/lib/prometheus2"
+          "${lib.getExe' pkgs.uutils-coreutils-noprefix "chmod"} 0700 /var/lib/prometheus2"
+        ];
+      };
+    };
+  };
+  networking.hostId = "313166d7";
+}
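
The per-service pattern in this file generalizes: each dataset gets a mountpoint, and the consuming unit orders itself after khscodes-zpool-setup.service and declares RequiresMountsFor on its state directory so it only starts once the dataset is mounted. A sketch for a hypothetical extra service (tempo is not part of this commit):

khscodes.fs.zfs.zpools.zroot.datasets."monitoring/tempo".mountpoint = "/var/lib/tempo";
systemd.services.tempo = {
  after = [ "khscodes-zpool-setup.service" ];
  wants = [ "khscodes-zpool-setup.service" ];
  unitConfig.RequiresMountsFor = [ "/var/lib/tempo" ];
};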

View file

@@ -109,6 +109,19 @@ impl Endpoint for MxKaareskovgaardNet {
     const BITWARDEN_KEYS: &'static [BitwardenKey] = &[BitwardenKey::Field("ZROOT_ENCRYPTION_KEY")];
 }
 
+pub struct MonitoringKaareskovgaardNet;
+
+impl Endpoint for MonitoringKaareskovgaardNet {
+    const NAME: &'static str = "monitoring.kaareskovgaard.net";
+    const BITWARDEN_KEY: &'static str = "monitoring.kaareskovgaard.net";
+    const ENV_KEYS: &'static [&'static str] =
+        &["MONITORING_KAARESKOVGAARD_NET_ZROOT_ENCRYPTION_KEY"];
+    const BITWARDEN_KEYS: &'static [BitwardenKey] = &[BitwardenKey::Field("ZROOT_ENCRYPTION_KEY")];
+}
+
 #[derive(Clone, Copy, Debug, Deserialize, PartialEq, Eq, PartialOrd, Ord, clap::ValueEnum)]
 pub enum CliEndpoint {
     #[serde(rename = "openstack")]
@@ -155,7 +168,7 @@ impl CliEndpoint {
             Self::Cloudflare => Cloudflare.read_from_openbao(map),
             Self::Hcloud => Hcloud.read_from_openbao(map),
             Self::Openstack => Openstack.read_from_openbao(map),
-            Self::Unifi => Openstack.read_from_openbao(map),
+            Self::Unifi => Unifi.read_from_openbao(map),
             // We don't transfer the root token to openbao itself, but relies on the user being authenticated
             // through oauth.
             Self::Vault => Ok(()),
@@ -173,6 +186,7 @@ pub fn transfer_from_bitwarden_to_vault(session: &mut BitwardenSession) -> anyho
     transfer_endpoint(Aws, session, &mut all_entries)?;
     transfer_endpoint(Cloudflare, session, &mut all_entries)?;
     transfer_endpoint(MxKaareskovgaardNet, session, &mut all_entries)?;
+    transfer_endpoint(MonitoringKaareskovgaardNet, session, &mut all_entries)?;
 
     for entry in all_entries {
         let mut delete_entry_proc = common::proc::Command::new("bao");

View file

@@ -19,10 +19,6 @@ impl<T: Endpoint> EnvEntry<T> {
     fn new_from_values(values: Vec<(&'static str, String)>) -> Self {
         Self(values, PhantomData)
     }
-
-    pub fn read_from_bao() -> anyhow::Result<Self> {
-        read_bao_data::<T>()
-    }
 }
 
 impl<T> From<EnvEntry<T>> for Vec<(&'static str, String)> {