Begin adding services to the monitoring stack

Former-commit-id: e360abdf4b
This commit is contained in:
Kaare Hoff Skovgaard 2025-07-13 00:51:31 +02:00
parent 891558aebf
commit 3deafa7317
33 changed files with 17192 additions and 308 deletions

View file

@ -89,29 +89,11 @@
overlays = [ inputs.rust-overlay.overlays.default ];
})
// {
terranixModules.cloudflare = import ./nix/modules/terranix/cloudflare {
inherit inputs;
khscodesLib = inputs.self.lib;
};
terranixModules.hcloud = import ./nix/modules/terranix/hcloud {
inherit inputs;
khscodesLib = inputs.self.lib;
};
terranixModules.vault = import ./nix/modules/terranix/vault {
inherit inputs;
khscodesLib = inputs.self.lib;
};
terranixModules.s3 = import ./nix/modules/terranix/s3 {
inherit inputs;
khscodesLib = inputs.self.lib;
};
terranixModules.openstack = import ./nix/modules/terranix/openstack {
inherit inputs;
khscodesLib = inputs.self.lib;
};
terranixModules.unifi = import ./nix/modules/terranix/unifi {
inherit inputs;
khscodesLib = inputs.self.lib;
};
terranixModules.cloudflare = import ./nix/modules/terranix/cloudflare;
terranixModules.hcloud = import ./nix/modules/terranix/hcloud;
terranixModules.vault = import ./nix/modules/terranix/vault;
terranixModules.s3 = import ./nix/modules/terranix/s3;
terranixModules.openstack = import ./nix/modules/terranix/openstack;
terranixModules.unifi = import ./nix/modules/terranix/unifi;
};
}

View file

@ -53,7 +53,7 @@ in
dnsNames = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "DNS names for the server";
default = [ fqdn ];
default = lib.lists.unique ([ fqdn ] ++ config.networking.aliases);
};
bucket = {
key = lib.mkOption {
@ -168,18 +168,14 @@ in
dns = {
enable = true;
zone_name = tldFromFqdn fqdn;
aRecords = [
{
inherit fqdn;
content = config.khscodes.hcloud.output.server.compute.ipv4_address;
}
];
aaaaRecords = [
{
inherit fqdn;
content = config.khscodes.hcloud.output.server.compute.ipv6_address;
}
];
aRecords = lib.lists.map (d: {
fqdn = d;
content = config.khscodes.hcloud.output.server.compute.ipv4_address;
}) cfg.dnsNames;
aaaaRecords = lib.lists.map (d: {
fqdn = d;
content = config.khscodes.hcloud.output.server.compute.ipv6_address;
}) cfg.dnsNames;
};
};
resource.hcloud_firewall.fw = lib.mkIf firewallEnable {
@ -215,11 +211,6 @@ in
khscodes.infrastructure.provisioning.pre = {
modules = modules;
secretsSource = cfg.secretsSource;
endpoints = [
"aws"
"cloudflare"
"hcloud"
];
};
}
);

View file

@ -75,7 +75,9 @@ in
dnsNames = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "DNS names for the instance";
default = [ fqdn ];
default = lib.lists.unique (
[ config.khscodes.networking.fqdn ] ++ config.khscodes.networking.aliases
);
};
bucket = {
key = lib.mkOption {
@ -147,18 +149,16 @@ in
dns = {
enable = true;
zone_name = tldFromFqdn fqdn;
aRecords = lib.mkIf cfg.dns.mapIpv4Address [
{
inherit fqdn;
aRecords = lib.mkIf cfg.dns.mapIpv4Address (
lib.lists.map (d: {
fqdn = d;
content = config.khscodes.openstack.output.compute_instance.compute.ipv4_address;
}
];
aaaaRecords = [
{
inherit fqdn;
content = config.khscodes.openstack.output.compute_instance.compute.ipv6_address;
}
];
}) cfg.dnsNames
);
aaaaRecords = lib.lists.map (d: {
fqdn = d;
content = config.khscodes.openstack.output.compute_instance.compute.ipv6_address;
}) cfg.dnsNames;
};
};
output.ipv4_address = {
@ -188,18 +188,13 @@ in
enable = true;
};
};
khscodes.services.read-vault-auth-from-userdata.url = "http://169.254.169.254/openstack/2012-08-10/user_data";
# khs openstack hosted servers cannot use http-01 challenges (though they might
# work over ipv6?), so enable dns-01 instead.
khscodes.security.acme.dns01Enabled = true;
khscodes.infrastructure.provisioning = {
pre = {
modules = modules;
endpoints = [
"aws"
"cloudflare"
"openstack"
"unifi"
];
};
preImageUsername = "debian";
};

View file

@ -21,7 +21,97 @@ let
description = "Where to get the secrets for the provisioning from";
default = "vault";
};
endpoints = lib.mkOption {
};
usesEndpoint =
search: endpoint: config:
if lib.strings.hasInfix search (builtins.readFile config) then [ endpoint ] else [ ];
endpointsMaps = [
{
search = "cloudflare/cloudflare";
endpoint = "cloudflare";
}
{
search = "terraform-provider-openstack/openstack";
endpoint = "openstack";
}
{
search = "paultyng/unifi";
endpoint = "unifi";
}
{
search = "hashicorp/vault";
endpoint = "vault";
}
{
search = ".r2.cloudflarestorage.com";
endpoint = "aws";
}
];
endpointsUsed =
config:
if config == null then
[ ]
else
lib.lists.flatten (lib.lists.map (c: usesEndpoint c.search c.endpoint config) endpointsMaps);
preConfig =
if lib.lists.length cfg.pre.modules > 0 then
inputs.terranix.lib.terranixConfiguration {
system = pkgs.hostPlatform.system;
modules = cfg.pre.modules;
extraArgs = { inherit lib inputs; };
}
else
null;
preEndpoints = endpointsUsed preConfig;
postConfig =
if lib.lists.length cfg.post.modules > 0 then
inputs.terranix.lib.terranixConfiguration {
system = pkgs.hostPlatform.system;
modules = cfg.post.modules;
extraArgs = { inherit lib inputs; };
}
else
null;
postEndpoints = endpointsUsed postConfig;
in
{
options.khscodes.infrastructure.provisioning = {
pre = provisioning;
post = provisioning;
instanceUserData = lib.mkOption {
type = lib.types.str;
description = "User data that should be added to the instance during provisioning";
default = "";
};
preConfig = lib.mkOption {
type = lib.types.nullOr lib.types.path;
description = "The generated config for the pre provisioning, if any was specified";
};
preEndpoints = lib.mkOption {
type = lib.types.listOf (
lib.types.enum [
"openstack"
"aws"
"unifi"
"hcloud"
"cloudflare"
"vault"
"authentik"
]
);
description = "Needed endpoints to be used during provisioning";
default = [ ];
};
preImageUsername = lib.mkOption {
type = lib.types.str;
description = "The username for the image being deployed before being swapped for NixOS";
default = "root";
};
postConfig = lib.mkOption {
type = lib.types.nullOr lib.types.path;
description = "The generated config for the post provisioning, if any was specified";
};
postEndpoints = lib.mkOption {
type = lib.types.listOf (
lib.types.enum [
"openstack"
@ -37,47 +127,11 @@ let
default = [ ];
};
};
in
{
options.khscodes.infrastructure.provisioning = {
pre = provisioning;
post = provisioning;
instanceUserData = lib.mkOption {
type = lib.types.str;
description = "User data that should be added to the instance during provisioning";
default = "";
};
preConfig = lib.mkOption {
type = lib.types.nullOr lib.types.path;
description = "The generated config for the pre provisioning, if any was specified";
};
preImageUsername = lib.mkOption {
type = lib.types.str;
description = "The username for the image being deployed before being swapped for NixOS";
default = "root";
};
postConfig = lib.mkOption {
type = lib.types.nullOr lib.types.path;
description = "The generated config for the post provisioning, if any was specified";
};
};
config = {
khscodes.infrastructure.provisioning.preConfig =
if lib.lists.length cfg.pre.modules > 0 then
inputs.terranix.lib.terranixConfiguration {
system = pkgs.hostPlatform.system;
modules = cfg.pre.modules;
}
else
null;
khscodes.infrastructure.provisioning.postConfig =
if lib.lists.length cfg.post.modules > 0 then
inputs.terranix.lib.terranixConfiguration {
system = pkgs.hostPlatform.system;
modules = cfg.post.modules;
}
else
null;
khscodes.infrastructure.provisioning.preConfig = preConfig;
khscodes.infrastructure.provisioning.preEndpoints = preEndpoints;
khscodes.infrastructure.provisioning.postConfig = postConfig;
khscodes.infrastructure.provisioning.postEndpoints = postEndpoints;
};
}

View file

@ -0,0 +1,78 @@
{
  config,
  lib,
  ...
}:
let
  cfg = config.khscodes.infrastructure.vault-loki-sender;
  fqdn = config.khscodes.networking.fqdn;
  vaultRoleName = config.khscodes.infrastructure.vault-server-approle.role_name;
  alloyCfg = config.khscodes.services.alloy;
in
{
  options.khscodes.infrastructure.vault-loki-sender = {
    enable = lib.mkEnableOption "Configures the server approle to allow sending data to loki";
    terranixBackendName = lib.mkOption {
      type = lib.types.str;
      description = "This should only be configured for the server hosting loki, to allow setting up dependencies in terraform";
      default = "loki-mtls";
    };
  };
  config = lib.mkIf cfg.enable {
    # Grant this server's approle read access to the loki-mtls PKI backend and
    # permission to issue client certificates for its own FQDN only.
    khscodes.infrastructure.vault-server-approle = {
      enable = true;
      policy = {
        "loki-mtls" = {
          capabilities = [ "read" ];
        };
        "loki-mtls/issue/${fqdn}" = {
          capabilities = [
            "create"
            "update"
          ];
        };
      };
      stageModules = [
        (
          { ... }:
          {
            # PKI role restricted to this host's FQDN; client (not server)
            # certificates only.
            # NOTE(review): the vault-agent template below issues against
            # "loki-mtls/issue/${fqdn}" while this role is named
            # "${vaultRoleName}" — these paths only line up if role_name
            # equals the fqdn; confirm.
            khscodes.vault.pki_secret_backend_role."${vaultRoleName}-loki" = {
              name = vaultRoleName;
              backend = cfg.terranixBackendName;
              allowed_domains = [ fqdn ];
              allow_bare_domains = true;
              enforce_hostnames = true;
              server_flag = false;
              client_flag = true;
            };
          }
        )
      ];
    };
    # Render a client certificate via vault-agent and hand the key/cert to
    # alloy at the paths configured below.
    khscodes.services.vault-agent.templates = [
      {
        contents = ''
          {{- with pkiCert "loki-mtls/issue/${fqdn}" "common_name=${fqdn}" -}}
          {{ .Key }}
          {{ .Cert }}
          {{ .CA }}
          {{ .Key | writeToFile "${alloyCfg.loki.client_key}" "${alloyCfg.user}" "${alloyCfg.group}" "0600" }}
          {{ .Cert | writeToFile "${alloyCfg.loki.client_cert}" "${alloyCfg.user}" "${alloyCfg.group}" "0644" }}
          {{- end -}}
        '';
        # Backend-specific cache path: the prometheus sender module renders an
        # equivalent template, and both previously wrote to the same
        # /var/lib/alloy/cache.key, clobbering each other when both modules
        # were enabled on one host.
        destination = "/var/lib/alloy/loki-mtls-cache.key";
        # Keep ownership consistent with the user/group the template's
        # writeToFile calls already use.
        owner = alloyCfg.user;
        group = alloyCfg.group;
        perms = "0600";
        reloadOrRestartUnits = [ "alloy.service" ];
      }
    ];
    khscodes.services.alloy = {
      enable = true;
      loki = {
        client_key = "/var/lib/alloy/loki_cert.key";
        client_cert = "/var/lib/alloy/loki_cert.pem";
      };
    };
  };
}

View file

@ -0,0 +1,78 @@
{
config,
lib,
...
}:
let
cfg = config.khscodes.infrastructure.vault-prometheus-sender;
fqdn = config.khscodes.networking.fqdn;
vaultRoleName = config.khscodes.infrastructure.vault-server-approle.role_name;
in
{
options.khscodes.infrastructure.vault-prometheus-sender = {
enable = lib.mkEnableOption "Configures the server approle to allow sending data to prometheus";
terranixBackendName = lib.mkOption {
type = lib.types.str;
description = "This should only be configured for the server hosting prometheus, to allow setting up dependencies in terraform";
default = "prometheus-mtls";
};
};
config = lib.mkIf cfg.enable {
khscodes.infrastructure.vault-server-approle = {
enable = true;
policy = {
"prometheus-mtls" = {
capabilities = [ "read" ];
};
"prometheus-mtls/issue/${fqdn}" = {
capabilities = [
"create"
"update"
];
};
};
stageModules = [
(
{ ... }:
{
khscodes.vault.pki_secret_backend_role."${vaultRoleName}-prometheus" = {
name = vaultRoleName;
backend = cfg.terranixBackendName;
allowed_domains = [ fqdn ];
allow_bare_domains = true;
enforce_hostnames = true;
server_flag = false;
client_flag = true;
};
}
)
];
};
khscodes.services.vault-agent.templates = [
{
contents = ''
{{- with pkiCert "prometheus-mtls/issue/${fqdn}" "common_name=${fqdn}" -}}
{{ .Key }}
{{ .Cert }}
{{ .CA }}
{{ .Key | writeToFile "${config.khscodes.services.alloy.prometheus.client_key}" "${config.khscodes.services.alloy.user}" "${config.khscodes.services.alloy.group}" "0600" }}
{{ .Cert | writeToFile "${config.khscodes.services.alloy.prometheus.client_cert}" "${config.khscodes.services.alloy.user}" "${config.khscodes.services.alloy.group}" "0644" }}
{{- end -}}
'';
destination = "/var/lib/alloy/cache.key";
owner = "alloy";
group = "alloy";
perms = "0600";
reloadOrRestartUnits = [ "alloy.service" ];
}
];
khscodes.services.alloy = {
enable = true;
prometheus = {
client_key = "/var/lib/alloy/prometheus_cert.key";
client_cert = "/var/lib/alloy/prometheus_cert.pem";
};
};
};
}

View file

@ -52,10 +52,9 @@ in
};
config = lib.mkIf cfg.enable {
khscodes.services.openstack-read-vault-auth-from-userdata.enable = true;
khscodes.services.read-vault-auth-from-userdata.enable = true;
khscodes.services.vault-agent.enable = true;
khscodes.infrastructure.provisioning.${cfg.stage} = {
endpoints = [ "vault" ];
modules = [
(
{ config, ... }:
@ -66,10 +65,12 @@ in
approle_auth_backend_role.${cfg.role_name} = {
backend = "approle";
role_name = cfg.role_name;
# I keep the secret ids alive for quite long, as I have no way of
# automatically bootstrapping a new secret id.
secret_id_ttl = 5 * 60 * 60;
secret_id_num_uses = 5 * 60;
# Secret IDs never expire, to allow vault agent to restart without issues.
# TODO: Look into doing this in a better way going forward, such that this won't
# be an issue under normal circumstances, but vault-agents (or instances)
# being offline for long periods of time should invalidate the secret id's.
secret_id_ttl = 0;
secret_id_num_uses = 0;
token_ttl = 20 * 60;
token_max_ttl = 30 * 60;
token_policies = [ cfg.role_name ];

View file

@ -4,29 +4,36 @@
...
}:
let
cfg = config.khscodes.networking.fqdn;
cfg = config.khscodes.networking;
in
{
options.khscodes.networking.fqdn = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "Sets the FQDN of the machine. This is a prerequisite for many modules to be used";
options.khscodes.networking = {
fqdn = lib.mkOption {
type = lib.types.str;
default = null;
description = "Sets the FQDN of the machine. This is a prerequisite for many modules to be used";
};
aliases = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
};
};
config = lib.mkIf (cfg != null) (
config =
let
hostname = builtins.head (lib.strings.splitString "." cfg);
domain = if hostname == cfg then null else (lib.strings.removePrefix "${hostname}." cfg);
hostname = builtins.head (lib.strings.splitString "." cfg.fqdn);
domain = if hostname == cfg then null else (lib.strings.removePrefix "${hostname}." cfg.fqdn);
in
{
networking.hostName = lib.mkForce hostname;
networking.domain = lib.mkForce domain;
networking.fqdn = cfg;
networking.fqdn = cfg.fqdn;
# Add the name of the server to the ssh host certificate domains, but let other configs enable getting the host certificates.
khscodes.services.openssh.hostCertificate.hostNames = [ cfg ];
khscodes.services.openssh.hostCertificate.hostNames = lib.lists.unique (
[ cfg.fqdn ] ++ cfg.aliases
);
boot.kernel.sysctl = {
"kernel.hostname" = cfg;
"kernel.hostname" = cfg.fqdn;
};
}
);
};
}

View file

@ -0,0 +1,151 @@
// Relabel node_exporter metrics so every series carries standard
// instance/job labels.
discovery.relabel "integrations_node_exporter" {
  targets = prometheus.exporter.unix.integrations_node_exporter.targets
  rule {
    // Set the instance label to the hostname of the machine
    target_label = "instance"
    replacement = constants.hostname
  }
  rule {
    // Set a standard job name for all node_exporter metrics
    target_label = "job"
    replacement = "integrations/node_exporter"
  }
}

// Configure the node_exporter integration to collect system metrics
prometheus.exporter.unix "integrations_node_exporter" {
  // Disable unnecessary collectors to reduce overhead
  disable_collectors = ["ipvs", "btrfs", "infiniband", "xfs", "zfs"]
  enable_collectors = ["meminfo"]
  filesystem {
    // Exclude filesystem types that aren't relevant for monitoring
    fs_types_exclude = "^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|tmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$"
    // Exclude mount points that aren't relevant for monitoring
    mount_points_exclude = "^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+)($|/)"
    // Timeout for filesystem operations
    mount_timeout = "5s"
  }
  netclass {
    // Ignore virtual and container network interfaces
    ignored_devices = "^(veth.*|cali.*|[a-f0-9]{15})$"
  }
  netdev {
    // Exclude virtual and container network interfaces from device metrics
    device_exclude = "^(veth.*|cali.*|[a-f0-9]{15})$"
  }
}
// Define how to scrape metrics from the node_exporter
prometheus.scrape "integrations_node_exporter" {
  scrape_interval = "15s"
  // Use the targets with labels from the discovery.relabel component
  targets = discovery.relabel.integrations_node_exporter.output
  // Forward scraped metrics into the OTLP conversion pipeline
  // (otelcol.receiver.prometheus below), not to a relabeling component.
  forward_to = [otelcol.receiver.prometheus.default.receiver]
}
// Convert Prometheus-format metrics to OTLP for the exporter below
otelcol.receiver.prometheus "default" {
  output {
    metrics = [otelcol.exporter.otlphttp.default.input]
  }
}
// Export metrics over OTLP/HTTP to the remote Prometheus, authenticating with
// a client certificate.
// NOTE: $PROMETHEUS_CLIENT_CERT / $PROMETHEUS_CLIENT_KEY are literal
// placeholders here; they are substituted with real file paths when this file
// is rendered by the accompanying Nix module.
otelcol.exporter.otlphttp "default" {
  client {
    endpoint = "https://prometheus.kaareskovgaard.net/api/v1/otlp/"
    tls {
      cert_file = "$PROMETHEUS_CLIENT_CERT"
      key_file = "$PROMETHEUS_CLIENT_KEY"
    }
  }
  encoding = "proto"
}
// Collect logs from systemd journal for node_exporter integration
loki.source.journal "logs_integrations_integrations_node_exporter_journal_scrape" {
  // Only collect logs from the last 24 hours
  max_age = "24h0m0s"
  // Apply relabeling rules to the logs
  relabel_rules = discovery.relabel.logs_integrations_integrations_node_exporter_journal_scrape.rules
  // Forward journal logs to the loki.write component below (a remote Loki,
  // despite the component's "local" name)
  forward_to = [loki.write.local.receiver]
}
// Define which log files to collect for node_exporter
local.file_match "logs_integrations_integrations_node_exporter_direct_scrape" {
  path_targets = [{
    // Target localhost for log collection
    __address__ = "localhost",
    // Collect standard system logs
    __path__ = "/var/log/{syslog,messages,*.log}",
    // Add instance label with hostname
    instance = constants.hostname,
    // Add job label for logs
    job = "integrations/node_exporter",
  }]
}
// Define relabeling rules for systemd journal logs
discovery.relabel "logs_integrations_integrations_node_exporter_journal_scrape" {
  targets = []
  rule {
    // Extract systemd unit information into a label
    source_labels = ["__journal__systemd_unit"]
    target_label = "unit"
  }
  rule {
    // Extract boot ID information into a label
    source_labels = ["__journal__boot_id"]
    target_label = "boot_id"
  }
  rule {
    // Extract transport information into a label
    source_labels = ["__journal__transport"]
    target_label = "transport"
  }
  rule {
    // Extract log priority into a level label
    source_labels = ["__journal_priority_keyword"]
    target_label = "level"
  }
  rule {
    // Set the instance label to the hostname of the machine
    target_label = "instance"
    replacement = constants.hostname
  }
}
// Collect logs from files for node_exporter
loki.source.file "logs_integrations_integrations_node_exporter_direct_scrape" {
  // Use targets defined in local.file_match
  targets = local.file_match.logs_integrations_integrations_node_exporter_direct_scrape.targets
  // Forward file logs to the loki.write component below (a remote Loki,
  // despite the component's "local" name)
  forward_to = [loki.write.local.receiver]
}
// Define where to send logs for storage
loki.write "local" {
  endpoint {
    // Despite the component name "local", this pushes to the remote Loki at
    // loki.kaareskovgaard.net over mTLS. $LOKI_CLIENT_CERT/$LOKI_CLIENT_KEY
    // are placeholders substituted when the file is rendered by the
    // accompanying Nix module.
    url = "https://loki.kaareskovgaard.net/loki/api/v1/push"
    tls_config {
      cert_file = "$LOKI_CLIENT_CERT"
      key_file = "$LOKI_CLIENT_KEY"
    }
  }
}

View file

@ -0,0 +1,58 @@
{ config, lib, ... }:
let
  cfg = config.khscodes.services.alloy;
  # Placeholder -> concrete path substitutions applied to the bundled
  # config.alloy. attrNames/attrValues enumerate in the same (sorted) key
  # order, so the two lists handed to replaceStrings stay paired.
  substitutions = {
    "$LOKI_CLIENT_KEY" = cfg.loki.client_key;
    "$LOKI_CLIENT_CERT" = cfg.loki.client_cert;
    "$PROMETHEUS_CLIENT_KEY" = cfg.prometheus.client_key;
    "$PROMETHEUS_CLIENT_CERT" = cfg.prometheus.client_cert;
  };
  renderedConfig = lib.strings.replaceStrings (lib.attrNames substitutions) (
    lib.attrValues substitutions
  ) (builtins.readFile ./config.alloy);
in
{
  options.khscodes.services.alloy = {
    enable = lib.mkEnableOption "Enables alloy";
    # Paths of the mTLS client key/cert pairs referenced from config.alloy.
    loki = {
      client_key = lib.mkOption {
        type = lib.types.str;
      };
      client_cert = lib.mkOption {
        type = lib.types.str;
      };
    };
    prometheus = {
      client_key = lib.mkOption {
        type = lib.types.str;
      };
      client_cert = lib.mkOption {
        type = lib.types.str;
      };
    };
    # Account the alloy service runs as.
    user = lib.mkOption {
      type = lib.types.str;
      default = "alloy";
    };
    group = lib.mkOption {
      type = lib.types.str;
      default = "alloy";
    };
  };
  config = lib.mkIf cfg.enable {
    services.alloy.enable = true;
    # Run alloy under a fixed, statically created user/group instead of a
    # systemd DynamicUser.
    systemd.services.alloy.serviceConfig = {
      DynamicUser = lib.mkForce false;
      User = cfg.user;
      Group = cfg.group;
    };
    users.users.${cfg.user} = {
      description = "Alloy service user";
      isSystemUser = true;
      group = cfg.group;
    };
    users.groups.${cfg.group} = { };
    # Install the rendered configuration where alloy reads it from.
    environment.etc."alloy/config.alloy".text = renderedConfig;
  };
}

View file

@ -18,7 +18,7 @@ let
type = lib.types.nullOr (
lib.types.oneOf [
lib.types.str
(lib.types.mkSubmodule {
(lib.khscodes.mkSubmodule {
description = "acme certificate";
options = {
domains = lib.mkOption {
@ -41,6 +41,27 @@ let
default = 301;
description = "HTTP status used by globalRedirect and forceSSL. Possible usecases include temporary (302, 307) redirects, keeping the request method and body (307, 308), or explicitly resetting the method to GET (303). See https://developer.mozilla.org/en-US/docs/Web/HTTP/Redirections.";
};
mtls = lib.mkOption {
type = lib.types.nullOr (
lib.khscodes.mkSubmodule {
options = {
verify = lib.mkOption {
type = lib.types.enum [
"optional"
"on"
];
default = "on";
};
certificate = lib.mkOption {
type = lib.types.str;
description = "Path to the certificate to verify client certificates against";
};
};
description = "Nginx MTLS settings";
}
);
default = null;
};
extraConfig = lib.mkOption {
type = lib.types.lines;
description = "Extra configuration to inject into the generated nginx config";
@ -100,6 +121,7 @@ in
message = "Cannot use `config.khscodes.services.nginx.virtualHosts.<name>.acme = {}` without setting config.khscodes.security.acme.dns01Enabled";
}
];
khscodes.networking.aliases = lib.attrsets.attrNames cfg.virtualHosts;
khscodes.security.acme.enable = true;
security.dhparams.enable = lib.mkIf (cfg.sslConfiguration == "intermediate") {
enable = true;
@ -131,23 +153,40 @@ in
${modernSslAppendedHttpConfig}
'';
virtualHosts = lib.attrsets.mapAttrs (name: value: {
inherit (value)
extraConfig
locations
globalRedirect
redirectCode
;
forceSSL = true;
enableACME = value.acme == null && !dns01Enabled;
useACMEHost =
if lib.strings.isString value.acme then
value.acme
else if lib.attrsets.isAttrs value.acme || dns01Enabled then
name
else
null;
}) cfg.virtualHosts;
virtualHosts = lib.attrsets.mapAttrs (
name: value:
let
mtls =
if value.mtls != null then
''
ssl_client_certificate ${value.mtls.certificate};
ssl_verify_client ${value.mtls.verify};
''
else
'''';
extraConfig = ''
${mtls}
${value.extraConfig}
'';
in
{
inherit (value)
locations
globalRedirect
redirectCode
;
inherit extraConfig;
forceSSL = true;
enableACME = value.acme == null && !dns01Enabled;
useACMEHost =
if lib.strings.isString value.acme then
value.acme
else if lib.attrsets.isAttrs value.acme || dns01Enabled then
name
else
null;
}
) cfg.virtualHosts;
};
networking.firewall.allowedTCPPorts = [
80
@ -160,20 +199,23 @@ in
acc: name: value:
(
acc
// (lib.attrsets.optionalAttrs (lib.attrsets.isAttrs value.acme || dns01Enabled) {
"${name}" =
if value.acme == null then
{
domain = name;
reloadServices = [ "nginx" ];
}
else
{
domain = lib.lists.head value.acme.domains;
extraDomainNames = lib.lists.tail value.acme.domains;
reloadServices = [ "nginx" ];
};
})
// (lib.attrsets.optionalAttrs
(lib.attrsets.isAttrs value.acme || (dns01Enabled && !lib.strings.isString value.acme))
{
"${name}" =
if value.acme == null then
{
domain = name;
reloadServices = [ "nginx" ];
}
else
{
domain = lib.lists.head value.acme.domains;
extraDomainNames = lib.lists.tail value.acme.domains;
reloadServices = [ "nginx" ];
};
}
)
)
) { } cfg.virtualHosts
);

View file

@ -1,66 +0,0 @@
{
config,
lib,
pkgs,
...
}:
let
cfg = config.khscodes.services.openstack-read-vault-auth-from-userdata;
in
{
options.khscodes.services.openstack-read-vault-auth-from-userdata = {
enable = lib.mkEnableOption "Enables reading vault auth information from instance userdata";
};
config = lib.mkIf (cfg.enable && config.khscodes.services.vault-agent.enable) (
let
vault_addr = config.khscodes.services.vault-agent.vault.address;
secretIdFilePath = config.khscodes.services.vault-agent.vault.secretIdFilePath;
roleIdFilePath = config.khscodes.services.vault-agent.vault.roleIdFilePath;
in
{
systemd.services."openstack-read-vault-auth-from-userdata" = {
enable = true;
wantedBy = [ "multi-user.target" ];
wants = [ "network-online.target" ];
after = [ "network-online.target" ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
ExecStart = lib.getExe (
pkgs.writeShellApplication {
name = "openstack-read-vault-auth-from-userdata";
runtimeInputs = [
pkgs.curl
pkgs.jq
pkgs.openbao
pkgs.getent
pkgs.systemd
];
text = ''
if [[ -f "${lib.escapeShellArg secretIdFilePath}" ]]; then
echo "Secret id already found, not copying new id"
exit 0
fi
userdata="$(curl http://169.254.169.254/openstack/2012-08-10/user_data)"
role_id="$(echo "$userdata" | jq --raw-output '.VAULT_ROLE_ID')"
secret_id_wrapped="$(echo "$userdata" | jq --raw-output '.VAULT_SECRET_ID_WRAPPED')"
secret_id="$(BAO_ADDR=${lib.escapeShellArg vault_addr} bao unwrap -field=secret_id "$secret_id_wrapped")"
mkdir -p "$(dirname ${lib.escapeShellArg secretIdFilePath})"
mkdir -p "$(dirname ${lib.escapeShellArg roleIdFilePath})"
echo -n "$role_id" > ${lib.escapeShellArg roleIdFilePath}
echo -n "$secret_id" > ${lib.escapeShellArg secretIdFilePath}
chown root:root "${lib.escapeShellArg secretIdFilePath}"
chmod 0600 "${lib.escapeShellArg secretIdFilePath}"
chown root:root "${lib.escapeShellArg roleIdFilePath}"
chmod 0600 "${lib.escapeShellArg roleIdFilePath}"
echo "Role id and secret id copied, restart vault-agent"
systemctl restart vault-agent-openbao.service
'';
}
);
};
};
}
);
}

View file

@ -0,0 +1,77 @@
{
  config,
  lib,
  pkgs,
  ...
}:
let
  cfg = config.khscodes.services.read-vault-auth-from-userdata;
in
{
  options.khscodes.services.read-vault-auth-from-userdata = {
    enable = lib.mkEnableOption "Enables reading vault auth information from instance userdata";
    url = lib.mkOption {
      type = lib.types.str;
      description = "URL to retrieve instance metadata from";
    };
  };
  # Fetches the approle role id and a wrapped secret id from instance
  # userdata, unwraps the secret id against vault, and installs both for
  # vault-agent. The last-seen wrapped secret id is cached so that re-runs
  # with unchanged userdata exit early without restarting vault-agent.
  config = lib.mkIf (cfg.enable && config.khscodes.services.vault-agent.enable) (
    let
      # All values are shell-escaped once here, so interpolations below must
      # not be quoted again.
      vault_addr = lib.escapeShellArg config.khscodes.services.vault-agent.vault.address;
      secretIdFilePath = lib.escapeShellArg config.khscodes.services.vault-agent.vault.secretIdFilePath;
      roleIdFilePath = lib.escapeShellArg config.khscodes.services.vault-agent.vault.roleIdFilePath;
      cacheFilePath = lib.escapeShellArg "${config.khscodes.services.vault-agent.vault.secretIdFilePath}.wrapped";
    in
    {
      systemd.services."read-vault-auth-from-userdata" = {
        enable = true;
        wantedBy = [ "multi-user.target" ];
        wants = [ "network-online.target" ];
        after = [ "network-online.target" ];
        serviceConfig = {
          Type = "oneshot";
          RemainAfterExit = true;
          ExecStart = lib.getExe (
            pkgs.writeShellApplication {
              name = "read-vault-auth-from-userdata";
              runtimeInputs = [
                pkgs.curl
                pkgs.jq
                pkgs.openbao
                pkgs.getent
                pkgs.systemd
              ];
              text = ''
                # Create every file 0600 from the start: previously the secret
                # id was written before chmod, leaving a window where it sat on
                # disk with default (umask-derived) permissions. The chmod/chown
                # calls below are then just belt-and-braces.
                umask 077
                # --fail turns HTTP error responses into a non-zero exit
                # instead of feeding an error page into jq.
                userdata="$(curl -fsS ${lib.escapeShellArg cfg.url})"
                role_id="$(echo "$userdata" | jq --raw-output '.VAULT_ROLE_ID')"
                secret_id_wrapped="$(echo "$userdata" | jq --raw-output '.VAULT_SECRET_ID_WRAPPED')"
                # Wrapped tokens can only be unwrapped once; skip unwrapping
                # (and the vault-agent restart) when userdata hasn't changed.
                if [[ -f ${cacheFilePath} ]]; then
                  cache_key="$(cat ${cacheFilePath})"
                  if [[ "$secret_id_wrapped" == "$cache_key" ]]; then
                    echo "Secret id matched last used value, exiting program"
                    exit 0
                  fi
                fi
                secret_id="$(BAO_ADDR=${vault_addr} bao unwrap -field=secret_id "$secret_id_wrapped")"
                mkdir -p "$(dirname ${secretIdFilePath})"
                mkdir -p "$(dirname ${roleIdFilePath})"
                echo -n "$role_id" > ${roleIdFilePath}
                echo -n "$secret_id" > ${secretIdFilePath}
                chown root:root ${secretIdFilePath}
                chmod 0600 ${secretIdFilePath}
                chown root:root ${roleIdFilePath}
                chmod 0600 ${roleIdFilePath}
                echo -n "$secret_id_wrapped" > ${cacheFilePath}
                chmod 0600 ${cacheFilePath}
                chown root:root ${cacheFilePath}
                echo "Role id and secret id copied, restarting vault-agent"
                systemctl restart vault-agent-openbao.service
              '';
            }
          );
        };
      };
    }
  );
}

View file

@ -1,4 +1,3 @@
{ inputs, khscodesLib }:
{ config, lib, ... }:
let
cfg = config.khscodes.cloudflare;
@ -13,7 +12,7 @@ let
"@"
else
fqdn;
dnsARecordModule = khscodesLib.mkSubmodule {
dnsARecordModule = lib.khscodes.mkSubmodule {
description = "Module for defining dns A/AAAA record";
options = {
fqdn = lib.mkOption {
@ -36,7 +35,7 @@ let
};
};
};
dnsTxtRecordModule = khscodesLib.mkSubmodule {
dnsTxtRecordModule = lib.khscodes.mkSubmodule {
description = "Module for defining dns TXT record";
options = {
fqdn = lib.mkOption {
@ -54,7 +53,7 @@ let
};
};
};
dnsMxRecordModule = khscodesLib.mkSubmodule {
dnsMxRecordModule = lib.khscodes.mkSubmodule {
description = "Module for defining dns MX record";
options = {
fqdn = lib.mkOption {
@ -126,7 +125,7 @@ in
resource.cloudflare_record = lib.attrsets.optionalAttrs cfg.dns.enable (
lib.listToAttrs (
(lib.lists.map (record: {
name = "${khscodesLib.sanitize-terraform-name record.fqdn}_a";
name = "${lib.khscodes.sanitize-terraform-name record.fqdn}_a";
value = {
inherit (record) content ttl proxied;
name = nameFromFQDNAndZone record.fqdn cfg.dns.zone_name;
@ -136,7 +135,7 @@ in
};
}) cfg.dns.aRecords)
++ (lib.lists.map (record: {
name = "${khscodesLib.sanitize-terraform-name record.fqdn}_aaaa";
name = "${lib.khscodes.sanitize-terraform-name record.fqdn}_aaaa";
value = {
inherit (record) content ttl proxied;
name = nameFromFQDNAndZone record.fqdn cfg.dns.zone_name;
@ -146,7 +145,7 @@ in
};
}) cfg.dns.aaaaRecords)
++ (lib.lists.map (record: {
name = "${khscodesLib.sanitize-terraform-name record.fqdn}_txt";
name = "${lib.khscodes.sanitize-terraform-name record.fqdn}_txt";
value = {
inherit (record) content ttl;
name = nameFromFQDNAndZone record.fqdn cfg.dns.zone_name;
@ -156,7 +155,7 @@ in
};
}) cfg.dns.txtRecords)
++ (lib.lists.map (record: {
name = "${khscodesLib.sanitize-terraform-name record.fqdn}_mx";
name = "${lib.khscodes.sanitize-terraform-name record.fqdn}_mx";
value = {
inherit (record) content priority;
name = nameFromFQDNAndZone record.fqdn cfg.dns.zone_name;

View file

@ -1,5 +1,9 @@
{ inputs, khscodesLib }:
{ config, lib, ... }:
{
config,
lib,
inputs,
...
}:
let
cfg = config.khscodes.hcloud;
serversWithRdns = lib.filterAttrs (_: value: value.rdns != null) cfg.server;
@ -9,7 +13,7 @@ let
lib.map (
{ name, value }:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
name = sanitizedName;
@ -20,7 +24,7 @@ let
}
) (lib.attrsToList list)
);
hcloudServerModule = khscodesLib.mkSubmodule {
hcloudServerModule = lib.khscodes.mkSubmodule {
description = "Module for defining hcloud server";
options = {
name = lib.mkOption {
@ -58,7 +62,7 @@ let
};
};
};
hcloudDataSshKeys = khscodesLib.mkSubmodule {
hcloudDataSshKeys = lib.khscodes.mkSubmodule {
description = "SSH Keys";
options = {
name = lib.mkOption {
@ -88,7 +92,7 @@ in
};
imports = [
inputs.terranix-hcloud.terranixModules.hcloud
(import ./output.nix { inherit inputs khscodesLib; })
./output.nix
];
config = lib.mkIf cfg.enable {
@ -125,7 +129,7 @@ in
(lib.mapAttrs' (
name: value:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
name = "${sanitizedName}_ipv4";
@ -142,7 +146,7 @@ in
// (lib.mapAttrs' (
name: value:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
name = "${sanitizedName}_ipv6";
@ -160,7 +164,7 @@ in
(lib.mapAttrs' (
name: value:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
name = "${sanitizedName}_ipv4";
@ -174,7 +178,7 @@ in
// (lib.mapAttrs' (
name: value:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
name = "${sanitizedName}_ipv6";

View file

@ -1,8 +1,7 @@
{ khscodesLib, ... }:
{ config, lib, ... }:
let
cfg = config.khscodes.hcloud;
hcloudOutputServerModule = khscodesLib.mkSubmodule {
hcloudOutputServerModule = lib.khscodes.mkSubmodule {
description = "Module defined when a corresponding server has been defined";
options = {
id = lib.mkOption {
@ -19,7 +18,7 @@ let
};
};
};
hcloudDataOutputSshKeyModule = khscodesLib.mkSubmodule {
hcloudDataOutputSshKeyModule = lib.khscodes.mkSubmodule {
description = "Module defined when a corresponding ssh key has ben retrieved";
options = {
id = lib.mkOption {
@ -47,7 +46,7 @@ in
name: value:
(
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
id = "\${ hcloud_server.${sanitizedName}.id }";
@ -59,7 +58,7 @@ in
khscodes.hcloud.output.data.ssh_key = lib.attrsets.mapAttrs (
name: _:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
id = "\${ data.hcloud_ssh_key.${sanitizedName}.id }";

View file

@ -1,11 +1,11 @@
{ khscodesLib, inputs }:
{ lib, config, ... }:
{
lib,
config,
...
}:
let
cfg = config.khscodes.openstack;
modules = [
./output.nix
];
firewallRuleModule = khscodesLib.mkSubmodule {
firewallRuleModule = lib.khscodes.mkSubmodule {
description = "Firewall rule";
options = {
direction = lib.mkOption {
@ -53,7 +53,7 @@ let
port_range_min = rule.port;
port_range_max = rule.port;
});
openstackComputeInstance = khscodesLib.mkSubmodule {
openstackComputeInstance = lib.khscodes.mkSubmodule {
description = "Openstack compute instance";
options = {
name = lib.mkOption {
@ -132,7 +132,7 @@ in
};
};
imports = lib.lists.map (m: import m { inherit khscodesLib inputs; }) modules;
imports = [ ./output.nix ];
config = lib.mkIf cfg.enable {
terraform.required_providers.openstack = {
@ -174,7 +174,7 @@ in
data.openstack_compute_flavor_v2 = lib.mapAttrs' (
name: value:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
name = sanitizedName;
@ -188,7 +188,7 @@ in
data.openstack_images_image_v2 = lib.mapAttrs' (
name: value:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
name = sanitizedName;
@ -214,12 +214,12 @@ in
resource.openstack_compute_keypair_v2 = lib.mapAttrs' (
name: value:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
name = sanitizedName;
value = {
name = khscodesLib.sanitize-terraform-name value.name;
name = lib.khscodes.sanitize-terraform-name value.name;
public_key = value.ssh_public_key;
};
}
@ -229,7 +229,7 @@ in
resource.openstack_networking_router_v2 = lib.mapAttrs' (
name: value:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
name = sanitizedName;
@ -245,7 +245,7 @@ in
resource.openstack_networking_network_v2 = lib.mapAttrs' (
name: value:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
name = sanitizedName;
@ -261,7 +261,7 @@ in
(lib.mapAttrs' (
name: value:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
name = "${sanitizedName}_ip4";
@ -278,7 +278,7 @@ in
// (lib.mapAttrs' (
name: value:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
name = "${sanitizedName}_ip6";
@ -300,7 +300,7 @@ in
(lib.mapAttrs' (
name: value:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
name = "${sanitizedName}_ip4";
@ -313,7 +313,7 @@ in
// (lib.mapAttrs' (
name: value:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
name = "${sanitizedName}_ip6";
@ -328,7 +328,7 @@ in
resource.openstack_networking_floatingip_v2 = lib.mapAttrs' (
name: value:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
name = sanitizedName;
@ -343,7 +343,7 @@ in
resource.openstack_blockstorage_volume_v3 = lib.mapAttrs' (
name: value:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
name = sanitizedName;
@ -360,7 +360,7 @@ in
resource.openstack_networking_secgroup_v2 = lib.mapAttrs' (
name: value:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
name = sanitizedName;
@ -377,7 +377,7 @@ in
lib.lists.map (
{ name, value }:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
lib.listToAttrs (
lib.lists.map (
@ -387,7 +387,7 @@ in
if rule.protocol == "icmp" then "icmp" else "${rule.protocol}_${builtins.toString rule.port}";
in
{
name = "${sanitizedName}_${rule.direction}_${rule.ethertype}_${protocol}_${khscodesLib.sanitize-terraform-name rule.remote_subnet}";
name = "${sanitizedName}_${rule.direction}_${rule.ethertype}_${protocol}_${lib.khscodes.sanitize-terraform-name rule.remote_subnet}";
value = mapFirewallRule "\${ resource.openstack_networking_secgroup_v2.${sanitizedName}.id }" rule;
}
) value.firewall_rules
@ -400,7 +400,7 @@ in
data.openstack_networking_port_v2 = lib.mapAttrs' (
name: value:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
name = sanitizedName;
@ -413,7 +413,7 @@ in
resource.openstack_compute_instance_v2 = lib.mapAttrs' (
name: value:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
name = sanitizedName;
@ -446,7 +446,7 @@ in
resource.openstack_networking_floatingip_associate_v2 = lib.mapAttrs' (
name: value:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
name = sanitizedName;

View file

@ -1,8 +1,7 @@
{ khscodesLib, ... }:
{ config, lib, ... }:
let
cfg = config.khscodes.openstack;
openstackOutputInstanceModule = khscodesLib.mkSubmodule {
openstackOutputInstanceModule = lib.khscodes.mkSubmodule {
description = "Instance output";
options = {
id = lib.mkOption {
@ -41,7 +40,7 @@ in
name: value:
(
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
id = "\${ openstack_compute_instance_v2.${sanitizedName}.id }";

View file

@ -1,4 +1,3 @@
{ ... }:
{ lib, config, ... }:
let
cfg = config.khscodes.s3;

View file

@ -1,11 +1,7 @@
{ khscodesLib, inputs }:
{ lib, config, ... }:
let
cfg = config.khscodes.unifi;
modules = [
./output.nix
];
unifiStaticRouteModule = khscodesLib.mkSubmodule {
unifiStaticRouteModule = lib.khscodes.mkSubmodule {
description = "Unifi static route";
options = {
network = lib.mkOption {
@ -36,7 +32,7 @@ in
};
};
imports = lib.lists.map (m: import m { inherit khscodesLib inputs; }) modules;
imports = [ ./output.nix ];
config = lib.mkIf cfg.enable {
terraform.required_providers.unifi = {
@ -50,7 +46,7 @@ in
resource.unifi_static_route = lib.mapAttrs' (
name: value:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
name = sanitizedName;

View file

@ -1,5 +1,4 @@
{ khscodesLib, ... }:
{ config, lib, ... }:
{ config, ... }:
let
cfg = config.khscodes.unifi;
in

View file

@ -1,4 +1,3 @@
{ khscodesLib, ... }:
{ lib, config, ... }:
let
cfg = config.khscodes.vault;
@ -7,7 +6,7 @@ in
options.khscodes.vault = {
approle_auth_backend_role = lib.mkOption {
type = lib.types.attrsOf (
khscodesLib.mkSubmodule {
lib.khscodes.mkSubmodule {
options = {
backend = lib.mkOption {
type = lib.types.str;
@ -47,7 +46,7 @@ in
};
approle_auth_backend_role_secret_id = lib.mkOption {
type = lib.types.attrsOf (
khscodesLib.mkSubmodule {
lib.khscodes.mkSubmodule {
options = {
backend = lib.mkOption {
type = lib.types.str;
@ -98,11 +97,11 @@ in
};
config = lib.mkIf cfg.enable {
resource.vault_approle_auth_backend_role = lib.mapAttrs' (name: value: {
name = khscodesLib.sanitize-terraform-name name;
name = lib.khscodes.sanitize-terraform-name name;
value = value;
}) cfg.approle_auth_backend_role;
resource.vault_approle_auth_backend_role_secret_id = lib.mapAttrs' (name: value: {
name = khscodesLib.sanitize-terraform-name name;
name = lib.khscodes.sanitize-terraform-name name;
value = {
inherit (value)
backend

View file

@ -1,20 +1,13 @@
{ khscodesLib, inputs }:
{ lib, config, ... }:
let
cfg = config.khscodes.vault;
modules = [
./approle_auth_backend.nix
./output.nix
./mount.nix
./ssh_secret_backend.nix
];
in
{
options.khscodes.vault = {
enable = lib.mkEnableOption "Enables the openbao provider";
policy = lib.mkOption {
type = lib.types.attrsOf (
khscodesLib.mkSubmodule {
lib.khscodes.mkSubmodule {
options = {
name = lib.mkOption {
type = lib.types.str;
@ -31,7 +24,13 @@ in
};
};
imports = lib.lists.map (m: import m { inherit khscodesLib inputs; }) modules;
imports = [
./approle_auth_backend.nix
./output.nix
./mount.nix
./ssh_secret_backend.nix
./pki_secret_backend.nix
];
config = lib.mkIf cfg.enable {
provider.vault = {
@ -42,7 +41,7 @@ in
version = "5.0.0";
};
resource.vault_policy = lib.mapAttrs' (name: value: {
name = khscodesLib.sanitize-terraform-name name;
name = lib.khscodes.sanitize-terraform-name name;
value = value;
}) cfg.policy;
};

View file

@ -1,4 +1,3 @@
{ khscodesLib, ... }:
{ lib, config, ... }:
let
cfg = config.khscodes.vault;
@ -7,7 +6,7 @@ in
options.khscodes.vault = {
mount = lib.mkOption {
type = lib.types.attrsOf (
khscodesLib.mkSubmodule {
lib.khscodes.mkSubmodule {
options = {
type = lib.mkOption {
type = lib.types.str;
@ -38,7 +37,7 @@ in
};
config = lib.mkIf cfg.enable {
resource.vault_mount = lib.mapAttrs' (name: value: {
name = khscodesLib.sanitize-terraform-name name;
name = lib.khscodes.sanitize-terraform-name name;
value = value;
}) cfg.mount;
};

View file

@ -1,4 +1,3 @@
{ khscodesLib, ... }:
{ config, lib, ... }:
let
cfg = config.khscodes.vault;
@ -8,7 +7,7 @@ in
output = {
approle_auth_backend_role = lib.mkOption {
type = lib.types.attrsOf (
khscodesLib.mkSubmodule {
lib.khscodes.mkSubmodule {
options = {
role_name = lib.mkOption {
type = lib.types.str;
@ -19,17 +18,39 @@ in
}
);
};
mount = lib.mkOption {
type = lib.types.attrsOf (
lib.khscodes.mkSubmodule {
options = {
path = lib.mkOption {
type = lib.types.str;
description = "The path of the mount, this is here mainly to set up dependencies";
};
};
description = "vault_mount output";
}
);
};
};
};
config = {
khscodes.vault.output.approle_auth_backend_role = lib.mapAttrs (
name: value:
let
sanitizedName = khscodesLib.sanitize-terraform-name name;
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
role_name = "\${ vault_approle_auth_backend_role.${sanitizedName}.role_name }";
}
) cfg.approle_auth_backend_role;
khscodes.vault.output.mount = lib.mapAttrs (
name: value:
let
sanitizedName = lib.khscodes.sanitize-terraform-name name;
in
{
path = "\${ vault_mount.${sanitizedName}.path }";
}
) cfg.mount;
};
}

View file

@ -0,0 +1,117 @@
# Terranix module exposing Vault PKI secret engine resources under the
# `khscodes.vault` namespace. It declares self-signed root certificates
# (`vault_pki_secret_backend_root_cert`) and issuing roles
# (`vault_pki_secret_backend_role`), emitted as terraform resources when
# the vault provider is enabled.
{ lib, config, ... }:
let
  cfg = config.khscodes.vault;
in
{
  options.khscodes.vault = {
    pki_secret_backend_root_cert = lib.mkOption {
      type = lib.types.attrsOf (
        lib.khscodes.mkSubmodule {
          options = {
            backend = lib.mkOption {
              type = lib.types.str;
              description = "Path of the backend";
              default = "pki";
            };
            type = lib.mkOption {
              type = lib.types.enum [
                "exported"
                "internal"
                "kms"
              ];
              # Fixed copy-paste from the intermediate-cert resource: this
              # module declares root certificates.
              description = "Type of root to create. Must be either \"exported\", \"internal\" or \"kms\"";
            };
            common_name = lib.mkOption {
              type = lib.types.str;
              description = "CN of root to create";
            };
            ttl = lib.mkOption {
              type = lib.types.str;
              description = "TTL for the root certificate, in seconds";
              # 10 years, expressed in seconds (10 * 365 * 24 * 60 * 60).
              default = "315360000";
            };
            key_type = lib.mkOption {
              type = lib.types.enum [
                "rsa"
                "ed25519"
                "ec"
              ];
              description = "Specifies the desired key type; must be rsa, ed25519 or ec.";
              default = "ed25519";
            };
            issuer_name = lib.mkOption {
              type = lib.types.nullOr lib.types.str;
              default = null;
              description = "Names the issuer when signing new certificates";
            };
          };
          description = "vault_pki_secret_backend_root_cert";
        }
      );
      description = "Generates a new self-signed CA certificate and private keys for the PKI Secret Backend.";
      default = { };
    };
    pki_secret_backend_role = lib.mkOption {
      type = lib.types.attrsOf (
        lib.khscodes.mkSubmodule {
          options = {
            backend = lib.mkOption {
              type = lib.types.str;
              description = "Path of the backend";
              default = "pki";
            };
            name = lib.mkOption {
              type = lib.types.str;
              description = "The name to identify this role within the backend. Must be unique within the backend.";
            };
            allowed_domains = lib.mkOption {
              type = lib.types.listOf lib.types.str;
              description = "List of allowed domains for certificates";
            };
            # The nullable booleans below are only emitted when set, so the
            # provider's own defaults apply when they are left at null.
            enforce_hostnames = lib.mkOption {
              type = lib.types.nullOr lib.types.bool;
              default = null;
              description = "Flag to allow only valid host names";
            };
            allow_bare_domains = lib.mkOption {
              type = lib.types.nullOr lib.types.bool;
              default = null;
              description = "Flag to allow certificates matching the actual domain";
            };
            server_flag = lib.mkOption {
              type = lib.types.nullOr lib.types.bool;
              default = null;
              description = "Flag to specify certificates for server use";
            };
            client_flag = lib.mkOption {
              type = lib.types.nullOr lib.types.bool;
              default = null;
              description = "Flag to specify certificates for client use";
            };
            key_type = lib.mkOption {
              type = lib.types.enum [
                "rsa"
                "ed25519"
                "ec"
              ];
              description = "Specifies the desired key type; must be rsa, ed25519 or ec.";
              default = "ed25519";
            };
          };
          description = "vault_pki_secret_backend_role";
        }
      );
      default = { };
    };
  };
  config = lib.mkIf cfg.enable {
    # Attribute names are sanitized so they are valid terraform resource
    # identifiers, mirroring the other khscodes vault modules.
    resource.vault_pki_secret_backend_root_cert = lib.mapAttrs' (name: value: {
      name = lib.khscodes.sanitize-terraform-name name;
      value = value;
    }) cfg.pki_secret_backend_root_cert;
    resource.vault_pki_secret_backend_role = lib.mapAttrs' (name: value: {
      name = lib.khscodes.sanitize-terraform-name name;
      value = value;
    }) cfg.pki_secret_backend_role;
  };
}

View file

@ -1,4 +1,3 @@
{ khscodesLib, ... }:
{ lib, config, ... }:
let
cfg = config.khscodes.vault;
@ -7,7 +6,7 @@ in
options.khscodes.vault = {
ssh_secret_backend_role = lib.mkOption {
type = lib.types.attrsOf (
khscodesLib.mkSubmodule {
lib.khscodes.mkSubmodule {
options = {
name = lib.mkOption {
type = lib.types.str;
@ -67,7 +66,7 @@ in
};
allowed_user_key_config = lib.mkOption {
type = lib.types.listOf (
khscodesLib.mkSubmodule {
lib.khscodes.mkSubmodule {
options = {
type = lib.mkOption {
type = lib.types.enum [
@ -105,7 +104,7 @@ in
};
config = lib.mkIf cfg.enable {
resource.vault_ssh_secret_backend_role = lib.mapAttrs' (name: value: {
name = khscodesLib.sanitize-terraform-name name;
name = lib.khscodes.sanitize-terraform-name name;
value = {
inherit (value)
name

View file

@ -18,7 +18,7 @@ pkgs.writeShellApplication {
baseAttr='${inputs.self}#nixosConfigurations."'"$hostname"'".config.khscodes.infrastructure.provisioning'
config="$(nix build --no-link --print-out-paths "''${baseAttr}.preConfig")"
secretsSource="$(nix eval --raw "''${baseAttr}.pre.secretsSource")"
endpoints="$(nix eval --json "''${baseAttr}.pre.endpoints")"
endpoints="$(nix eval --show-trace --json "''${baseAttr}.preEndpoints")"
if [[ "$config" == "null" ]]; then
echo "No preprovisioning needed"
exit 0

View file

@ -1,7 +1,11 @@
{ ... }:
{ lib, ... }:
{
imports = [ ./nix-base.nix ];
config.khscodes = {
services.openssh.enable = true;
infrastructure = {
vault-server-approle.enable = lib.mkDefault true;
vault-loki-sender = lib.mkDefault true;
};
};
}

View file

@ -5,9 +5,15 @@
}:
let
grafana = config.services.grafana;
loki = config.services.loki;
prometheus = config.services.prometheus;
in
{
imports = [ "${inputs.self}/nix/profiles/nixos/khs-openstack-server.nix" ];
imports = [
"${inputs.self}/nix/profiles/nixos/khs-openstack-server.nix"
./vault_loki.nix
./vault_prometheus.nix
];
services.grafana = {
enable = true;
settings = {
@ -19,6 +25,85 @@ in
serve_from_sub_path = false;
};
};
provision = {
enable = true;
datasources.settings.datasources = [
{
url = "http://${loki.configuration.server.http_listen_address}:${toString loki.configuration.server.http_listen_port}";
type = "loki";
name = "Logs";
}
{
url = "http://${prometheus.listenAddress}:${toString prometheus.port}";
type = "prometheus";
name = "Metrics";
}
];
dashboards.settings.providers = [
{
name = "Node Exporter";
options.path = ./grafana/dashboards/node_exporter;
}
];
};
};
services.prometheus = {
enable = true;
listenAddress = "127.0.0.1";
extraFlags = [ "--web.enable-otlp-receiver" ];
};
services.loki = {
enable = true;
configuration = {
auth_enabled = false;
server = {
http_listen_port = 3100;
http_listen_address = "127.0.0.1";
};
common = {
ring = {
instance_addr = "127.0.0.1";
kvstore = {
store = "inmemory";
};
};
replication_factor = 1;
path_prefix = "${config.services.loki.dataDir}/common";
};
schema_config = {
configs = [
{
from = "2025-07-11";
store = "tsdb";
object_store = "filesystem";
schema = "v13";
index = {
prefix = "index_";
period = "24h";
};
}
];
};
storage_config = {
tsdb_shipper = {
active_index_directory = "${config.services.loki.dataDir}/index";
cache_location = "${config.services.loki.dataDir}/index_cache";
};
filesystem = {
directory = "${config.services.loki.dataDir}/chunks";
};
};
pattern_ingester = {
enabled = true;
};
compactor = {
retention_enabled = true;
compaction_interval = "24h";
retention_delete_delay = "24h";
delete_request_store = "filesystem";
working_directory = "${config.services.loki.dataDir}/retention";
};
};
};
khscodes = {
infrastructure.khs-openstack-instance = {
@ -34,7 +119,55 @@ in
recommendedProxySettings = true;
};
};
virtualHosts."loki.kaareskovgaard.net" = {
mtls = {
verify = "on";
certificate = "/etc/loki/client-signer.pem";
};
locations."/" = {
proxyPass = "http://${loki.configuration.server.http_listen_address}:${toString loki.configuration.server.http_listen_port}";
proxyWebsockets = true;
recommendedProxySettings = true;
};
};
virtualHosts."prometheus.kaareskovgaard.net" = {
mtls = {
verify = "on";
certificate = "/etc/prometheus/client-signer.pem";
};
locations."/" = {
proxyPass = "http://${prometheus.listenAddress}:${toString prometheus.port}";
proxyWebsockets = true;
recommendedProxySettings = true;
};
};
};
services.vault-agent.templates = [
{
contents = ''
{{- with secret "loki-mtls/cert/ca_chain" -}}
{{ .Data.certificate }}
{{- end -}}
'';
destination = "/etc/loki/client-signer.pem";
owner = "loki";
group = "loki";
perms = "0644";
reloadOrRestartUnits = [ "nginx.service" ];
}
{
contents = ''
{{- with secret "prometheus-mtls/cert/ca_chain" -}}
{{ .Data.certificate }}
{{- end -}}
'';
destination = "/etc/prometheus/client-signer.pem";
owner = "prometheus";
group = "prometheus";
perms = "0644";
reloadOrRestartUnits = [ "nginx.service" ];
}
];
};
snowfallorg.users.khs.admin = true;
users.users.khs = {

View file

@ -0,0 +1,28 @@
# Provisions the Vault PKI mount backing mTLS for the Loki endpoint and
# enables the loki-sender infrastructure module against it.
# NOTE(review): this arguably belongs in the vault server's own setup, as
# the vault server also needs resources that depend on this.
{
  khscodes.infrastructure = {
    vault-loki-sender = {
      enable = true;
      # Hard-coded terraform interpolation; the mount output cannot be
      # referenced from this scope, so the reference is spelled out by hand.
      terranixBackendName = "\${ vault_mount.loki-mtls.path }";
    };
    provisioning.pre.modules = [
      (
        { config, ... }:
        {
          khscodes.vault = {
            enable = true;
            # PKI mount holding the Loki mTLS CA: leases default to one
            # hour, with a ten-year ceiling for the CA material.
            mount.loki-mtls = {
              type = "pki";
              path = "loki-mtls";
              max_lease_ttl_seconds = 10 * 365 * 24 * 60 * 60;
              default_lease_ttl_seconds = 60 * 60;
            };
            # Self-signed root certificate used to sign/verify client certs.
            pki_secret_backend_root_cert.loki-mtls = {
              backend = config.khscodes.vault.output.mount.loki-mtls.path;
              type = "internal";
              common_name = "loki.kaareskovgaard.net";
              issuer_name = "loki-mtls-root-ca";
            };
          };
        }
      )
    ];
  };
}

View file

@ -0,0 +1,28 @@
# Provisions the Vault PKI mount backing mTLS for the Prometheus endpoint
# and enables the prometheus-sender infrastructure module against it.
# NOTE(review): this arguably belongs in the vault server's own setup, as
# the vault server also needs resources that depend on this.
{
  khscodes.infrastructure = {
    vault-prometheus-sender = {
      enable = true;
      # Hard-coded terraform interpolation; the mount output cannot be
      # referenced from this scope, so the reference is spelled out by hand.
      terranixBackendName = "\${ vault_mount.prometheus-mtls.path }";
    };
    provisioning.pre.modules = [
      (
        { config, ... }:
        {
          khscodes.vault = {
            enable = true;
            # PKI mount holding the Prometheus mTLS CA: leases default to
            # one hour, with a ten-year ceiling for the CA material.
            mount.prometheus-mtls = {
              type = "pki";
              path = "prometheus-mtls";
              max_lease_ttl_seconds = 10 * 365 * 24 * 60 * 60;
              default_lease_ttl_seconds = 60 * 60;
            };
            # Self-signed root certificate used to sign/verify client certs.
            pki_secret_backend_root_cert.prometheus-mtls = {
              backend = config.khscodes.vault.output.mount.prometheus-mtls.path;
              type = "internal";
              common_name = "prometheus.kaareskovgaard.net";
              issuer_name = "prometheus-mtls-root-ca";
            };
          };
        }
      )
    ];
  };
}