Split up terraform configurations even more
All checks were successful
/ rust-packages (push) Successful in 52s
/ dev-shell (push) Successful in 47s
/ terraform-providers (push) Successful in 51s
/ check (push) Successful in 1m41s
/ systems (push) Successful in 4m2s

This should allow disks to survive destruction of
instances.

Also support creating additional data disks on Hetzner, storing
a mapping from Nix disk names to their Linux device paths.
Something similar should also be possible for OpenStack,
allowing a provider-agnostic way of mapping between them.
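
As a rough sketch (the hostname and disk name below are placeholders, not from this repo), a host can now declare a persistent disk like this:

khscodes.infrastructure.hetzner-instance = {
  enable = true;
  dataDisks = [
    {
      # Becomes a hcloud_volume in the persistence stage, so it survives destroy-instance
      name = "web.example.net-data1";
      size = 10; # size in GB on Hetzner
    }
  ];
};

The attach stage then writes the disk-name to Linux-device mapping into the data-disks KV mount under the host's FQDN, where the instance can read it via its AppRole policy.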
Kaare Hoff Skovgaard 2025-08-04 23:46:01 +02:00
parent 7adc4a20bd
commit 30cf1f407a
Signed by: khs
GPG key ID: C7D890804F01E9F0
33 changed files with 228 additions and 234 deletions

View file

@@ -6,8 +6,36 @@
}:
let
cfg = config.khscodes.infrastructure.hetzner-instance;
hasDisks = cfg.dataDisks != [ ];
fqdn = config.khscodes.networking.fqdn;
provisioningUserData = config.khscodes.infrastructure.provisioning.instanceUserData;
locationFromDatacenter =
datacenter:
let
split = lib.strings.splitString "-" datacenter;
in
assert (lib.lists.length split) == 2;
lib.lists.head split;
diskModule = lib.khscodes.mkSubmodule' (
{ config }:
{
description = "Persistent disk";
options = {
name = lib.mkOption {
type = lib.types.str;
};
nameSanitized = lib.mkOption {
type = lib.types.str;
readOnly = true;
default = lib.khscodes.sanitize-terraform-name config.name;
};
size = lib.mkOption {
type = lib.types.int;
};
};
}
);
firewallTcpRules = lib.lists.map (p: {
direction = "in";
protocol = "tcp";
@@ -139,6 +167,11 @@ in
description = "The server type to create";
default = null;
};
dataDisks = lib.mkOption {
type = lib.types.listOf diskModule;
description = "Extra data disks to add to the instance, these will be added in the persistence phase";
default = [ ];
};
extraFirewallRules = lib.mkOption {
type = lib.types.listOf lib.types.attrs;
description = "Extra firewall rules added to the instance";
@@ -190,7 +223,72 @@ in
labels = {
app = fqdn;
};
-modules = [
+persistenceModules = lib.lists.optional hasDisks (
{ ... }:
{
imports = [
inputs.self.terranixModules.hcloud
inputs.self.terranixModules.s3
];
config = {
khscodes.s3 = {
enable = true;
bucket.key = "persistence-" + cfg.bucket.key;
};
khscodes.hcloud.enable = true;
resource.hcloud_volume = lib.listToAttrs (
lib.lists.map (disk: {
name = disk.nameSanitized;
value = {
inherit (disk) name size;
location = locationFromDatacenter cfg.datacenter;
};
}) cfg.dataDisks
);
};
}
);
persistenceAttachModules = lib.lists.optional hasDisks (
{ config, ... }:
{
config = {
data.hcloud_volume = lib.listToAttrs (
lib.lists.map (disk: {
name = disk.nameSanitized;
value = {
name = disk.name;
};
}) cfg.dataDisks
);
resource.hcloud_volume_attachment = lib.listToAttrs (
lib.lists.map (disk: {
name = disk.nameSanitized;
value = {
volume_id = "\${ data.hcloud_volume.${disk.nameSanitized}.id }";
server_id = config.khscodes.hcloud.output.server.compute.id;
};
}) cfg.dataDisks
);
resource.vault_kv_secret_v2.data_disks = {
mount = "data-disks";
name = fqdn;
data_json = ''
{
"template": "{id}",
"mapping": ''${ jsonencode({ ${
lib.strings.concatStringsSep ", " (
lib.lists.map (
disk: "${builtins.toJSON disk.name} = data.hcloud_volume.${disk.nameSanitized}.linux_device"
) cfg.dataDisks
)
} }) }
}
'';
};
};
}
);
computeModules = [
(
{ config, ... }:
{
@@ -264,7 +362,7 @@ in
{
assertions = [
{
-assertion = config.khscodes.networking.fqdn != null;
+assertion = fqdn != null;
message = "Must set config.khscodes.networking.fqdn when using opentofu";
}
];
@@ -278,8 +376,15 @@ in
url = "http://169.254.169.254/latest/user-data";
doubleDecodeJsonData = true;
};
-khscodes.infrastructure.provisioning.pre = {
-modules = modules;
+khscodes.infrastructure.vault-server-approle.policy = lib.mkIf hasDisks {
+"data-disks/data/${fqdn}" = {
+capabilities = [ "read" ];
+};
+};
+khscodes.infrastructure.provisioning = {
+compute.modules = computeModules;
+persistence.modules = persistenceModules;
+persistenceAttach.modules = persistenceAttachModules;
};
}
);

View file

@@ -206,10 +206,10 @@ in
# so enable dns-01.
khscodes.security.acme.dns01Enabled = true;
khscodes.infrastructure.provisioning = {
-pre = {
+compute = {
modules = modules;
};
-preImageUsername = "debian";
+imageUsername = "debian";
};
}
);

View file

@@ -1,10 +1,12 @@
{
+config,
lib,
inputs,
pkgs,
...
}:
let
+cfg = config.khscodes.infrastructure.provisioning;
terranixConfig =
cfg:
if lib.lists.length cfg.modules > 0 then
@@ -85,11 +87,38 @@
in
{
options.khscodes.infrastructure.provisioning = {
-pre = lib.mkOption {
+persistence = lib.mkOption {
+description = ''
+Allocation of resources that should be persisted between create/destroy of compute resources.
+This would typically be cloud volumes, and perhaps floating IPs and other data/identity preserving information.
+'';
type = provisioning;
default = { };
};
-post = lib.mkOption {
+compute = lib.mkOption {
description = ''
Allocation of compute resources, DNS records and other ephemeral setup. This should NOT mount the volumes created from
persistence modules, that should be done in the `persistenceAttach` modules.
'';
type = provisioning;
default = { };
};
persistenceAttach = lib.mkOption {
description = ''
Mounting of volumes, or floating IPs from persistence modules to compute modules should go here. These will only ever be
executed merged with the compute resources. The compute resources will need to be able to be executed standalone however.
'';
type = provisioning;
default = { };
};
combinedPersistenceAttachAndCompute = lib.mkOption {
readOnly = true;
type = provisioning;
default = {
modules = cfg.compute.modules ++ cfg.persistenceAttach.modules;
};
};
configuration = lib.mkOption {
type = provisioning;
default = { };
};
@@ -106,7 +135,7 @@ in
description = "User data that should be added to the instance during provisioning";
default = "";
};
-preImageUsername = lib.mkOption {
+imageUsername = lib.mkOption {
type = lib.types.str;
description = "The username for the image being deployed before being swapped for NixOS";
default = "root";

View file

@@ -13,11 +13,11 @@ in
enable = lib.mkEnableOption "Enables creating an OpenBAO role for the server";
stage = lib.mkOption {
type = lib.types.enum [
-"pre"
-"post"
+"compute"
+"configuration"
];
-description = "The provisioning stage that should include the provisioning. This should be pre for every server except the OpenBAO server itself";
-default = "pre";
+description = "The provisioning stage that should include the provisioning. This should be compute for every server except the OpenBAO server itself";
+default = "compute";
};
path = lib.mkOption {
type = lib.types.str;
@@ -60,7 +60,7 @@ in
imports = [ ./unix-user.nix ];
config = lib.mkIf cfg.enable {
-khscodes.services.read-vault-auth-from-userdata.enable = cfg.stage == "pre";
+khscodes.services.read-vault-auth-from-userdata.enable = cfg.stage == "compute";
khscodes.services.vault-agent.enable = true;
khscodes.infrastructure.provisioning.${cfg.stage} = {
modules = [
@@ -129,11 +129,12 @@ in
};
}
)
-] ++ cfg.stageModules;
+]
++ cfg.stageModules;
};
# I can only provide the user data if the stage is pre (along with the instance creation)
# Also I should probably find a way of injecting this in a nicer way than this mess.
-khscodes.infrastructure.provisioning.instanceUserData = lib.mkIf (cfg.stage == "pre") {
+khscodes.infrastructure.provisioning.instanceUserData = lib.mkIf (cfg.stage == "compute") {
VAULT_ROLE_ID = "\${ vault_approle_auth_backend_role.${lib.khscodes.sanitize-terraform-name cfg.role_name}.role_id }";
VAULT_SECRET_ID_WRAPPED = "\${ vault_approle_auth_backend_role_secret_id.${lib.khscodes.sanitize-terraform-name cfg.role_name}.wrapping_token }";
};

View file

@@ -14,7 +14,7 @@ let
nixos.config.khscodes.infrastructure.hetzner-instance.enable
|| nixos.config.khscodes.infrastructure.khs-openstack-instance.enable
)
-&& ((lib.lists.length nixos.config.khscodes.infrastructure.provisioning.post.modules) > 0)
+&& ((lib.lists.length nixos.config.khscodes.infrastructure.provisioning.configuration.modules) > 0)
then
[ ">&2 echo \"Configuring ${name}\n\"\nconfigure-instance ${lib.escapeShellArg name}" ]
else

View file

@@ -1,10 +1,10 @@
{ pkgs, ... }:
pkgs.writeShellApplication {
name = "configure-instance";
-runtimeInputs = [ pkgs.khscodes.post-provisioning ];
+runtimeInputs = [ pkgs.khscodes.provision ];
text = ''
instance="''${1:-}"
cmd="''${2:-apply}"
-post-provisioning "$instance" "$cmd"
+provision "$instance" configuration "$cmd"
'';
}

View file

@@ -2,7 +2,7 @@
pkgs.writeShellApplication {
name = "create-instance";
runtimeInputs = [
-pkgs.khscodes.provision-instance
+pkgs.khscodes.provision
pkgs.khscodes.nixos-install
pkgs.jq
];
@@ -10,8 +10,18 @@ pkgs.writeShellApplication {
hostname="$1"
# Build the configuration to ensure it doesn't fail when trying to install it on the host
nix build --no-link '${inputs.self}#nixosConfigurations."'"$hostname"'".config.system.build.toplevel'
-output="$(provision-instance "$hostname")"
+# First ensure the persistence exists
+provision "$hostname" persistence apply
+# Then bring up the base instance *without* the persistence disks attached
+output="$(provision "$hostname" compute apply)"
ipv4_addr="$(echo "$output" | jq --raw-output '.ipv4_address.value')"
nixos-install "$hostname" "$ipv4_addr" "no"
+# After nixos-anywhere has messed with the ephemeral disks, then mount the remaining disks
+provision "$hostname" combinedPersistenceAttachAndCompute apply
+# Finally reboot the instance, to ensure everything boots up properly
+ssh -t -o StrictHostKeyChecking=false -o UserKnownHostsFile=/dev/null "$ipv4_addr" -- sudo reboot
'';
}

View file

@@ -1,9 +1,15 @@
{ pkgs, ... }:
pkgs.writeShellApplication {
name = "destroy-instance";
-runtimeInputs = [ pkgs.khscodes.pre-provisioning ];
+runtimeInputs = [
+pkgs.khscodes.provision
+];
text = ''
instance="''${1:-}"
-pre-provisioning "$instance" destroy
+with_persistence="''${2:-none}"
+provision "$instance" combinedPersistenceAttachAndCompute destroy
+if [[ "$with_persistence" == "all" ]]; then
+provision "$instance" persistence destroy
+fi
'';
}
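
A rough usage sketch of the updated destroy-instance (the hostname is a placeholder): a plain destroy keeps the persistent volumes, passing "all" as the second argument removes them too.

# keeps the hcloud volumes created by the persistence stage
destroy-instance mx.example.net
# destroys everything, including the persistent data disks
destroy-instance mx.example.net all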

View file

@@ -18,7 +18,7 @@ pkgs.writeShellApplication {
fqdn="$1"
config="$2"
cmd="''${3:-apply}"
-dir="$(mktemp -dt "$fqdn-pre-provisioning.XXXXXX")"
+dir="$(mktemp -dt "$fqdn-compute-provision.XXXXXX")"
mkdir -p "$dir"
cat "''${config}" > "$dir/config.tf.json"

View file

@@ -19,9 +19,9 @@ pkgs.writeShellApplication {
nix build --no-link '${inputs.self}#nixosConfigurations."'"$hostname"'".config.system.build.toplevel'
fi
baseAttr='${inputs.self}#nixosConfigurations."'"$hostname"'".config.khscodes.infrastructure'
-config="$(nix build --no-link --print-out-paths "''${baseAttr}.provisioning.pre.config")"
+config="$(nix build --no-link --print-out-paths "''${baseAttr}.provisioning.compute.config")"
preScript="$(nix eval --raw "''${baseAttr}.nixos-install.preScript")"
-username="$(nix eval --raw "''${baseAttr}.provisioning.preImageUsername")"
+username="$(nix eval --raw "''${baseAttr}.provisioning.imageUsername")"
if [[ "$config" == "null" ]]; then
echo "No preprovisioning needed"
exit 0

View file

@@ -1,32 +0,0 @@
{
inputs,
pkgs,
}:
pkgs.writeShellApplication {
name = "post-provisioning";
runtimeInputs = [
pkgs.nix
pkgs.khscodes.bw-opentofu
pkgs.khscodes.instance-opentofu
pkgs.khscodes.openbao-helper
];
# TODO: Use secret source and required secrets to set up the correct env variables
text = ''
hostname="$1"
cmd="''${2:-apply}"
baseAttr='${inputs.self}#nixosConfigurations."'"$hostname"'".config.khscodes.infrastructure.provisioning'
config="$(nix build --no-link --print-out-paths "''${baseAttr}.post.config")"
secretsSource="$(nix eval --raw "''${baseAttr}.secretsSource")"
endpoints="$(nix eval --show-trace --json "''${baseAttr}.post.endpoints")"
if [[ "$config" == "null" ]]; then
echo "No postprovisioning needed"
exit 0
fi
if [[ "$secretsSource" == "vault" ]]; then
readarray -t endpoints_args < <(echo "$endpoints" | jq -cr 'map(["-e", .])[][]')
openbao-helper wrap-program "''${endpoints_args[@]}" -- instance-opentofu "$hostname" "$config" "$cmd"
exit 0
fi
bw-opentofu "$hostname" "$config" "$cmd"
'';
}

View file

@@ -1,9 +1,9 @@
{ pkgs, ... }:
pkgs.writeShellApplication {
name = "provision-instance";
-runtimeInputs = [ pkgs.khscodes.pre-provisioning ];
+runtimeInputs = [ pkgs.khscodes.provision ];
text = ''
instance="''${1:-}"
-pre-provisioning "$instance" apply
+provision "$instance" combinedPersistenceAttachAndCompute apply
'';
}

View file

@@ -3,7 +3,7 @@
pkgs,
}:
pkgs.writeShellApplication {
-name = "pre-provisioning";
+name = "provision";
runtimeInputs = [
pkgs.nix
pkgs.khscodes.bw-opentofu
@@ -14,13 +14,18 @@ pkgs.writeShellApplication {
# TODO: Use secret source and required secrets to set up the correct env variables
text = ''
hostname="$1"
-cmd="''${2:-apply}"
+stage="$2"
+cmd="''${3:-apply}"
baseAttr='${inputs.self}#nixosConfigurations."'"$hostname"'".config.khscodes.infrastructure.provisioning'
-config="$(nix build --no-link --print-out-paths "''${baseAttr}.pre.config")"
+if [[ "$(nix eval "''${baseAttr}.''${stage}.config")" != "null" ]]; then
+config="$(nix build --no-link --print-out-paths "''${baseAttr}.''${stage}.config")"
+else
+config="null"
+fi
secretsSource="$(nix eval --raw "''${baseAttr}.secretsSource")"
-endpoints="$(nix eval --show-trace --json "''${baseAttr}.pre.endpoints")"
+endpoints="$(nix eval --show-trace --json "''${baseAttr}.''${stage}.endpoints")"
if [[ "$config" == "null" ]]; then
-echo "No preprovisioning needed"
+echo "No ''${stage} provisioning needed"
exit 0
fi
if [[ "$secretsSource" == "vault" ]]; then

View file

@@ -1,17 +1,8 @@
{
-lib,
inputs,
...
}:
let
-locationFromDatacenter =
-datacenter:
-let
-split = lib.strings.splitString "-" datacenter;
-in
-assert (lib.lists.length split) == 2;
-lib.lists.head split;
domains = [
"agerlin-skovgaard.dk"
"agerlinskovgaard.dk"
@@ -31,24 +22,15 @@ in
hetzner-instance = {
enable = true;
mapRdns = true;
+dataDisks = [
+{
+name = "mx.kaareskovgaard.net-zroot-disk1";
+size = 10;
+}
+];
server_type = "cax11";
};
-provisioning.pre.modules = [
+provisioning.compute.modules = [
-(
-{ config, ... }:
-{
-resource.hcloud_volume.zroot-disk1 = {
-name = "mx.kaareskovgaard.net-zroot-disk1";
-size = 30;
-location = locationFromDatacenter config.khscodes.hcloud.server.compute.datacenter;
-};
-resource.hcloud_volume_attachment.zroot-disk1 = {
-volume_id = "\${ resource.hcloud_volume.zroot-disk1.id }";
-server_id = config.khscodes.hcloud.output.server.compute.id;
-automount = false;
-};
-}
-)
(
{ ... }:
{

View file

@@ -5,16 +5,6 @@
...
}:
let
-diskName = "nixos";
-espSize = "500M";
-bootPartName = "ESP";
-rootPartName = "primary";
-volumeGroupName = "mainpool";
-rootLvName = "root";
-# Don't ask me why this changes when there's more than one volume attached.
-nixosDisk = "/dev/sdb";
-zrootDisk1Disk = "/dev/sda";
downloadZrootKey = pkgs.writeShellApplication {
name = "zfs-download-zroot-key";
runtimeInputs = [
@@ -87,17 +77,6 @@ in
];
};
};
-khscodes.infrastructure.nixos-install.preScript = ''
-encryption_key="$(bao kv get -mount=opentofu -field=MX_KAARESKOVGAARD_NET_ZROOT_ENCRYPTION_KEY mx.kaareskovgaard.net)"
-tmpfile="$(mktemp)"
-touch "$tmpfile"
-chmod 0600 "$tmpfile"
-trap "rm -f $tmpfile" EXIT
-echo "$encryption_key" > "$tmpfile"
-INSTALL_ARGS+=("--disk-encryption-keys")
-INSTALL_ARGS+=("/run/secret/zroot.key")
-INSTALL_ARGS+=("$tmpfile")
-'';
boot.supportedFilesystems = {
zfs = true;
};
@@ -157,110 +136,4 @@ in
};
};
networking.hostId = "9af535e4";
disko.devices = {
disk = {
"${diskName}" = {
device = nixosDisk;
type = "disk";
content = {
type = "gpt";
partitions = {
"${bootPartName}" = {
size = espSize;
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
mountOptions = [ "umask=0077" ];
};
};
"${rootPartName}" = {
size = "100%";
content = {
type = "lvm_pv";
vg = volumeGroupName;
};
};
};
};
};
zroot-disk1 = {
device = zrootDisk1Disk;
type = "disk";
content = {
type = "gpt";
partitions = {
zfs = {
size = "100%";
content = {
type = "zfs";
pool = "zroot";
};
};
};
};
};
};
lvm_vg = {
"${volumeGroupName}" = {
type = "lvm_vg";
lvs = {
"${rootLvName}" = {
size = "100%";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
mountOptions = [ "defaults" ];
};
};
};
};
};
zpool = {
zroot = {
type = "zpool";
rootFsOptions = {
mountpoint = "none";
compression = "zstd";
acltype = "posixacl";
xattr = "sa";
"com.sun:auto-snapshot" = "true";
};
options = {
ashift = "12";
autoexpand = "on";
};
datasets = {
"mailserver" = {
type = "zfs_fs";
options = {
encryption = "aes-256-gcm";
keyformat = "passphrase";
keylocation = "file:///run/secret/zroot.key";
};
};
"mailserver/vmail" = {
type = "zfs_fs";
mountpoint = "/var/mailserver/vmail";
};
"mailserver/indices" = {
type = "zfs_fs";
mountpoint = "/var/mailserver/indices";
};
};
mode = {
topology = {
type = "topology";
vdev = [
{
members = [ "zroot-disk1" ];
}
];
};
};
};
};
};
}

View file

@@ -191,7 +191,7 @@ in
capabilities = [ "read" ];
};
};
-khscodes.infrastructure.provisioning.pre.modules = [
+khscodes.infrastructure.provisioning.compute.modules = [
{
terraform.required_providers.random = {
source = "hashicorp/random";

View file

@@ -50,7 +50,7 @@ in
description = "smtp";
}
];
-khscodes.infrastructure.provisioning.pre.modules = [
+khscodes.infrastructure.provisioning.compute.modules = [
(
{ ... }:
{

View file

@@ -57,7 +57,7 @@ in
capabilities = [ "read" ];
};
};
-khscodes.infrastructure.provisioning.pre.modules = [
+khscodes.infrastructure.provisioning.compute.modules = [
(
{ ... }:
{

View file

@@ -4,7 +4,7 @@ let
in
{
config = {
-khscodes.infrastructure.provisioning.pre.modules = [
+khscodes.infrastructure.provisioning.compute.modules = [
{
khscodes.cloudflare.dns.txtRecords = lib.lists.map (domain: {
fqdn = "_dmarc.${domain}";

View file

@@ -36,7 +36,7 @@ in
}) cfg.domains
)
);
-khscodes.infrastructure.provisioning.pre.modules = [
+khscodes.infrastructure.provisioning.compute.modules = [
{
khscodes.cloudflare.dns.txtRecords = (
lib.lists.map (domain: {

View file

@@ -4,7 +4,7 @@ let
in
{
config = {
-khscodes.infrastructure.provisioning.pre.modules = [
+khscodes.infrastructure.provisioning.compute.modules = [
{
khscodes.cloudflare.dns.txtRecords = lib.lists.map (domain: {
fqdn = domain;

View file

@@ -4,7 +4,7 @@ let
in
{
config = {
-khscodes.infrastructure.provisioning.pre.modules = [
+khscodes.infrastructure.provisioning.compute.modules = [
{
khscodes.cloudflare.dns.txtRecords = lib.lists.map (domain: {
fqdn = "_smtp._tls.${domain}";

View file

@@ -28,7 +28,7 @@ in
};
# Cannot use vault for secrets source, as this is the server containing vault.
khscodes.infrastructure.provisioning.secretsSource = "bitwarden";
-khscodes.infrastructure.vault-server-approle.stage = "post";
+khscodes.infrastructure.vault-server-approle.stage = "configuration";
khscodes.networking.fqdn = "security.kaareskovgaard.net";
khscodes.infrastructure.openbao.domain = "secrets.kaareskovgaard.net";
system.stateVersion = "25.05";

View file

@@ -186,7 +186,7 @@ in
};
};
-khscodes.infrastructure.provisioning.post.modules = [
+khscodes.infrastructure.provisioning.configuration.modules = [
(
{ ... }:
{

View file

@@ -104,7 +104,7 @@ in
};
};
khscodes.services.vault-agent.templates = vaultAgentTemplates;
-khscodes.infrastructure.provisioning.post.modules = terranixModules ++ [
+khscodes.infrastructure.provisioning.configuration.modules = terranixModules ++ [
{
terraform.required_providers.random = {
source = "hashicorp/random";

View file

@@ -1,7 +1,7 @@
{ inputs, ... }:
{
imports = [ ./openbao ];
-khscodes.infrastructure.provisioning.post.modules = [
+khscodes.infrastructure.provisioning.configuration.modules = [
{
imports = [
inputs.self.terranixModules.s3

View file

@@ -1,6 +1,6 @@
{
khscodes.infrastructure.vault-server-approle.path = "\${ vault_auth_backend.approle.path }";
-khscodes.infrastructure.provisioning.post.modules = [
+khscodes.infrastructure.provisioning.configuration.modules = [
{
resource.vault_auth_backend.approle = {
type = "approle";

View file

@@ -0,0 +1,14 @@
{
khscodes.infrastructure.provisioning.configuration.modules = [
{
khscodes.vault.mount.data-disks = {
type = "kv";
path = "data-disks";
options = {
version = "2";
};
description = "Mapping between data disk names and IDs";
};
}
];
}

View file

@@ -1,13 +1,14 @@
{
imports = [
./approle.nix
+./data-disks.nix
./ssh-host.nix
./loki-mtls.nix
./prometheus-mtls.nix
./unix-users.nix
];
khscodes.infrastructure.vault-server-approle.path = "\${ vault_auth_backend.approle.path }";
-khscodes.infrastructure.provisioning.post.modules = [
+khscodes.infrastructure.provisioning.configuration.modules = [
(
{ config, ... }:
{

View file

@@ -3,7 +3,7 @@
khscodes.infrastructure.vault-loki-sender = {
terranixBackendName = "\${ vault_mount.loki-mtls.path }";
};
-khscodes.infrastructure.provisioning.post.modules = [
+khscodes.infrastructure.provisioning.configuration.modules = [
(
{ config, ... }:
{

View file

@@ -4,7 +4,7 @@
# This is quite ugly, but should get the job done. Sadly I cannot reference the output from here.
terranixBackendName = "\${ vault_mount.prometheus-mtls.path }";
};
-khscodes.infrastructure.provisioning.post.modules = [
+khscodes.infrastructure.provisioning.configuration.modules = [
(
{ config, ... }:
{

View file

@@ -1,6 +1,6 @@
{
khscodes.services.openssh.hostCertificate.path = "\${ vault_mount.ssh-host.path }";
-khscodes.infrastructure.provisioning.post.modules = [
+khscodes.infrastructure.provisioning.configuration.modules = [
(
{ config, ... }:
{

View file

@@ -1,5 +1,5 @@
{
-khscodes.infrastructure.provisioning.post.modules = [
+khscodes.infrastructure.provisioning.configuration.modules = [
{
khscodes.vault.mount.unix-users = {
type = "kv";