Begin adding services to the monitoring stack
This commit is contained in:
parent 32ece6eb43
commit e360abdf4b

33 changed files with 17192 additions and 308 deletions
@@ -53,7 +53,7 @@ in
     dnsNames = lib.mkOption {
       type = lib.types.listOf lib.types.str;
       description = "DNS names for the server";
-      default = [ fqdn ];
+      default = lib.lists.unique ([ fqdn ] ++ config.networking.aliases);
     };
     bucket = {
       key = lib.mkOption {
@@ -168,18 +168,14 @@ in
     dns = {
       enable = true;
       zone_name = tldFromFqdn fqdn;
-      aRecords = [
-        {
-          inherit fqdn;
-          content = config.khscodes.hcloud.output.server.compute.ipv4_address;
-        }
-      ];
-      aaaaRecords = [
-        {
-          inherit fqdn;
-          content = config.khscodes.hcloud.output.server.compute.ipv6_address;
-        }
-      ];
+      aRecords = lib.lists.map (d: {
+        fqdn = d;
+        content = config.khscodes.hcloud.output.server.compute.ipv4_address;
+      }) cfg.dnsNames;
+      aaaaRecords = lib.lists.map (d: {
+        fqdn = d;
+        content = config.khscodes.hcloud.output.server.compute.ipv6_address;
+      }) cfg.dnsNames;
     };
   };
   resource.hcloud_firewall.fw = lib.mkIf firewallEnable {
@@ -215,11 +211,6 @@ in
     khscodes.infrastructure.provisioning.pre = {
       modules = modules;
       secretsSource = cfg.secretsSource;
-      endpoints = [
-        "aws"
-        "cloudflare"
-        "hcloud"
-      ];
     };
   }
 );
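The hunks above replace the single hard-coded A/AAAA record with one record per entry in cfg.dnsNames. A minimal sketch of what the lib.lists.map call expands to, with hypothetical names and a placeholder address standing in for the hcloud output:

    let
      lib = (import <nixpkgs> { }).lib;
      dnsNames = [ "a.example.org" "b.example.org" ]; # hypothetical
    in
    lib.lists.map (d: {
      fqdn = d;
      content = "192.0.2.1"; # placeholder for the instance IPv4 address
    }) dnsNames
    # evaluates to:
    #   [ { fqdn = "a.example.org"; content = "192.0.2.1"; }
    #     { fqdn = "b.example.org"; content = "192.0.2.1"; } ]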
@@ -75,7 +75,9 @@ in
     dnsNames = lib.mkOption {
       type = lib.types.listOf lib.types.str;
       description = "DNS names for the instance";
-      default = [ fqdn ];
+      default = lib.lists.unique (
+        [ config.khscodes.networking.fqdn ] ++ config.khscodes.networking.aliases
+      );
     };
     bucket = {
       key = lib.mkOption {
@@ -147,18 +149,16 @@ in
     dns = {
       enable = true;
       zone_name = tldFromFqdn fqdn;
-      aRecords = lib.mkIf cfg.dns.mapIpv4Address [
-        {
-          inherit fqdn;
+      aRecords = lib.mkIf cfg.dns.mapIpv4Address (
+        lib.lists.map (d: {
+          fqdn = d;
           content = config.khscodes.openstack.output.compute_instance.compute.ipv4_address;
-        }
-      ];
-      aaaaRecords = [
-        {
-          inherit fqdn;
-          content = config.khscodes.openstack.output.compute_instance.compute.ipv6_address;
-        }
-      ];
+        }) cfg.dnsNames
+      );
+      aaaaRecords = lib.lists.map (d: {
+        fqdn = d;
+        content = config.khscodes.openstack.output.compute_instance.compute.ipv6_address;
+      }) cfg.dnsNames;
     };
   };
   output.ipv4_address = {
@@ -188,18 +188,13 @@ in
       enable = true;
     };
   };
+  khscodes.services.read-vault-auth-from-userdata.url = "http://169.254.169.254/openstack/2012-08-10/user_data";
   # khs openstack hosted servers cannot use http-01 challenges (or maybe they can through ipv6?)
   # so enable dns-01.
   khscodes.security.acme.dns01Enabled = true;
   khscodes.infrastructure.provisioning = {
     pre = {
       modules = modules;
-      endpoints = [
-        "aws"
-        "cloudflare"
-        "openstack"
-        "unifi"
-      ];
     };
     preImageUsername = "debian";
   };
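The new dnsNames default above merges the machine's aliases into the record list and deduplicates, so an alias equal to the fqdn does not yield a duplicate record. A small sketch of the dedup step with hypothetical names:

    let
      lib = (import <nixpkgs> { }).lib;
      fqdn = "web.example.org"; # hypothetical
      aliases = [ "web.example.org" "www.example.org" ]; # hypothetical
    in
    lib.lists.unique ([ fqdn ] ++ aliases)
    # evaluates to [ "web.example.org" "www.example.org" ]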
@@ -21,7 +21,97 @@ let
       description = "Where to get the secrets for the provisioning from";
       default = "vault";
     };
-    endpoints = lib.mkOption {
+  };
+  usesEndpoint =
+    search: endpoint: config:
+    if lib.strings.hasInfix search (builtins.readFile config) then [ endpoint ] else [ ];
+  endpointsMaps = [
+    {
+      search = "cloudflare/cloudflare";
+      endpoint = "cloudflare";
+    }
+    {
+      search = "terraform-provider-openstack/openstack";
+      endpoint = "openstack";
+    }
+    {
+      search = "paultyng/unifi";
+      endpoint = "unifi";
+    }
+    {
+      search = "hashicorp/vault";
+      endpoint = "vault";
+    }
+    {
+      search = ".r2.cloudflarestorage.com";
+      endpoint = "aws";
+    }
+  ];
+  endpointsUsed =
+    config:
+    if config == null then
+      [ ]
+    else
+      lib.lists.flatten (lib.lists.map (c: usesEndpoint c.search c.endpoint config) endpointsMaps);
+  preConfig =
+    if lib.lists.length cfg.pre.modules > 0 then
+      inputs.terranix.lib.terranixConfiguration {
+        system = pkgs.hostPlatform.system;
+        modules = cfg.pre.modules;
+        extraArgs = { inherit lib inputs; };
+      }
+    else
+      null;
+  preEndpoints = endpointsUsed preConfig;
+  postConfig =
+    if lib.lists.length cfg.post.modules > 0 then
+      inputs.terranix.lib.terranixConfiguration {
+        system = pkgs.hostPlatform.system;
+        modules = cfg.post.modules;
+        extraArgs = { inherit lib inputs; };
+      }
+    else
+      null;
+  postEndpoints = endpointsUsed postConfig;
+in
+{
+  options.khscodes.infrastructure.provisioning = {
+    pre = provisioning;
+    post = provisioning;
+    instanceUserData = lib.mkOption {
+      type = lib.types.str;
+      description = "User data that should be added to the instance during provisioning";
+      default = "";
+    };
+    preConfig = lib.mkOption {
+      type = lib.types.nullOr lib.types.path;
+      description = "The generated config for the pre provisioning, if any was specified";
+    };
+    preEndpoints = lib.mkOption {
+      type = lib.types.listOf (
+        lib.types.enum [
+          "openstack"
+          "aws"
+          "unifi"
+          "hcloud"
+          "cloudflare"
+          "vault"
+          "authentik"
+        ]
+      );
+      description = "Needed endpoints to be used during provisioning";
+      default = [ ];
+    };
+    preImageUsername = lib.mkOption {
+      type = lib.types.str;
+      description = "The username for the image being deployed before being swapped for NixOS";
+      default = "root";
+    };
+    postConfig = lib.mkOption {
+      type = lib.types.nullOr lib.types.path;
+      description = "The generated config for the post provisioning, if any was specified";
+    };
+    postEndpoints = lib.mkOption {
       type = lib.types.listOf (
         lib.types.enum [
           "openstack"
@@ -37,47 +127,11 @@ let
       default = [ ];
     };
   };
-in
-{
-  options.khscodes.infrastructure.provisioning = {
-    pre = provisioning;
-    post = provisioning;
-    instanceUserData = lib.mkOption {
-      type = lib.types.str;
-      description = "User data that should be added to the instance during provisioning";
-      default = "";
-    };
-    preConfig = lib.mkOption {
-      type = lib.types.nullOr lib.types.path;
-      description = "The generated config for the pre provisioning, if any was specified";
-    };
-    preImageUsername = lib.mkOption {
-      type = lib.types.str;
-      description = "The username for the image being deployed before being swapped for NixOS";
-      default = "root";
-    };
-    postConfig = lib.mkOption {
-      type = lib.types.nullOr lib.types.path;
-      description = "The generated config for the post provisioning, if any was specified";
-    };
-  };

   config = {
-    khscodes.infrastructure.provisioning.preConfig =
-      if lib.lists.length cfg.pre.modules > 0 then
-        inputs.terranix.lib.terranixConfiguration {
-          system = pkgs.hostPlatform.system;
-          modules = cfg.pre.modules;
-        }
-      else
-        null;
-    khscodes.infrastructure.provisioning.postConfig =
-      if lib.lists.length cfg.post.modules > 0 then
-        inputs.terranix.lib.terranixConfiguration {
-          system = pkgs.hostPlatform.system;
-          modules = cfg.post.modules;
-        }
-      else
-        null;
+    khscodes.infrastructure.provisioning.preConfig = preConfig;
+    khscodes.infrastructure.provisioning.preEndpoints = preEndpoints;
+    khscodes.infrastructure.provisioning.postConfig = postConfig;
+    khscodes.infrastructure.provisioning.postEndpoints = postEndpoints;
   };
 }
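With this change the provisioning module no longer needs callers to declare endpoints by hand: it renders the terranix configuration and substring-searches the resulting Terraform JSON for provider markers. A reduced sketch of the detection, with builtins.readFile replaced by an inline string for illustration:

    let
      lib = (import <nixpkgs> { }).lib;
      # Stand-in for builtins.readFile preConfig: a fragment of rendered Terraform JSON.
      rendered = ''"required_providers": { "cloudflare": { "source": "cloudflare/cloudflare" } }'';
      usesEndpoint = search: endpoint: if lib.strings.hasInfix search rendered then [ endpoint ] else [ ];
    in
    lib.lists.flatten [
      (usesEndpoint "cloudflare/cloudflare" "cloudflare")
      (usesEndpoint "paultyng/unifi" "unifi")
    ]
    # evaluates to [ "cloudflare" ]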
@@ -0,0 +1,78 @@
{
  config,
  lib,
  ...
}:
let
  cfg = config.khscodes.infrastructure.vault-loki-sender;
  fqdn = config.khscodes.networking.fqdn;
  vaultRoleName = config.khscodes.infrastructure.vault-server-approle.role_name;
in
{
  options.khscodes.infrastructure.vault-loki-sender = {
    enable = lib.mkEnableOption "Configures the server approle to allow sending data to loki";
    terranixBackendName = lib.mkOption {
      type = lib.types.str;
      description = "This should only be configured for the server hosting loki, to allow setting up dependencies in terraform";
      default = "loki-mtls";
    };
  };

  config = lib.mkIf cfg.enable {
    khscodes.infrastructure.vault-server-approle = {
      enable = true;
      policy = {
        "loki-mtls" = {
          capabilities = [ "read" ];
        };
        "loki-mtls/issue/${fqdn}" = {
          capabilities = [
            "create"
            "update"
          ];
        };
      };
      stageModules = [
        (
          { ... }:
          {
            khscodes.vault.pki_secret_backend_role."${vaultRoleName}-loki" = {
              name = vaultRoleName;
              backend = cfg.terranixBackendName;
              allowed_domains = [ fqdn ];
              allow_bare_domains = true;
              enforce_hostnames = true;
              server_flag = false;
              client_flag = true;
            };
          }
        )
      ];
    };
    khscodes.services.vault-agent.templates = [
      {
        contents = ''
          {{- with pkiCert "loki-mtls/issue/${fqdn}" "common_name=${fqdn}" -}}
          {{ .Key }}
          {{ .Cert }}
          {{ .CA }}
          {{ .Key | writeToFile "${config.khscodes.services.alloy.loki.client_key}" "${config.khscodes.services.alloy.user}" "${config.khscodes.services.alloy.group}" "0600" }}
          {{ .Cert | writeToFile "${config.khscodes.services.alloy.loki.client_cert}" "${config.khscodes.services.alloy.user}" "${config.khscodes.services.alloy.group}" "0644" }}
          {{- end -}}
        '';
        destination = "/var/lib/alloy/cache.key";
        owner = "alloy";
        group = "alloy";
        perms = "0600";
        reloadOrRestartUnits = [ "alloy.service" ];
      }
    ];
    khscodes.services.alloy = {
      enable = true;
      loki = {
        client_key = "/var/lib/alloy/loki_cert.key";
        client_cert = "/var/lib/alloy/loki_cert.pem";
      };
    };
  };
}
@@ -0,0 +1,78 @@
{
  config,
  lib,
  ...
}:
let
  cfg = config.khscodes.infrastructure.vault-prometheus-sender;
  fqdn = config.khscodes.networking.fqdn;
  vaultRoleName = config.khscodes.infrastructure.vault-server-approle.role_name;
in
{
  options.khscodes.infrastructure.vault-prometheus-sender = {
    enable = lib.mkEnableOption "Configures the server approle to allow sending data to prometheus";
    terranixBackendName = lib.mkOption {
      type = lib.types.str;
      description = "This should only be configured for the server hosting prometheus, to allow setting up dependencies in terraform";
      default = "prometheus-mtls";
    };
  };

  config = lib.mkIf cfg.enable {
    khscodes.infrastructure.vault-server-approle = {
      enable = true;
      policy = {
        "prometheus-mtls" = {
          capabilities = [ "read" ];
        };
        "prometheus-mtls/issue/${fqdn}" = {
          capabilities = [
            "create"
            "update"
          ];
        };
      };
      stageModules = [
        (
          { ... }:
          {
            khscodes.vault.pki_secret_backend_role."${vaultRoleName}-prometheus" = {
              name = vaultRoleName;
              backend = cfg.terranixBackendName;
              allowed_domains = [ fqdn ];
              allow_bare_domains = true;
              enforce_hostnames = true;
              server_flag = false;
              client_flag = true;
            };
          }
        )
      ];
    };
    khscodes.services.vault-agent.templates = [
      {
        contents = ''
          {{- with pkiCert "prometheus-mtls/issue/${fqdn}" "common_name=${fqdn}" -}}
          {{ .Key }}
          {{ .Cert }}
          {{ .CA }}
          {{ .Key | writeToFile "${config.khscodes.services.alloy.prometheus.client_key}" "${config.khscodes.services.alloy.user}" "${config.khscodes.services.alloy.group}" "0600" }}
          {{ .Cert | writeToFile "${config.khscodes.services.alloy.prometheus.client_cert}" "${config.khscodes.services.alloy.user}" "${config.khscodes.services.alloy.group}" "0644" }}
          {{- end -}}
        '';
        destination = "/var/lib/alloy/cache.key";
        owner = "alloy";
        group = "alloy";
        perms = "0600";
        reloadOrRestartUnits = [ "alloy.service" ];
      }
    ];
    khscodes.services.alloy = {
      enable = true;
      prometheus = {
        client_key = "/var/lib/alloy/prometheus_cert.key";
        client_cert = "/var/lib/alloy/prometheus_cert.pem";
      };
    };
  };
}
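The two sender modules are symmetric: enabling one extends the host's approle policy with read and issue access on the matching mTLS PKI mount, and registers a vault-agent template that renders the client certificate for alloy. A hypothetical host configuration enabling both, using only options defined above:

    {
      khscodes.infrastructure.vault-loki-sender.enable = true;
      khscodes.infrastructure.vault-prometheus-sender.enable = true;
    }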
@@ -52,10 +52,9 @@ in
   };

   config = lib.mkIf cfg.enable {
-    khscodes.services.openstack-read-vault-auth-from-userdata.enable = true;
+    khscodes.services.read-vault-auth-from-userdata.enable = true;
     khscodes.services.vault-agent.enable = true;
     khscodes.infrastructure.provisioning.${cfg.stage} = {
       endpoints = [ "vault" ];
       modules = [
         (
           { config, ... }:
@@ -66,10 +65,12 @@ in
             approle_auth_backend_role.${cfg.role_name} = {
               backend = "approle";
               role_name = cfg.role_name;
-              # I keep the secret ids alive for quite long, as I have no way of
-              # automatically bootstrapping a new secret id.
-              secret_id_ttl = 5 * 60 * 60;
-              secret_id_num_uses = 5 * 60;
+              # Secret IDs never expire, to allow vault agent to restart without issues.
+              # TODO: Look into doing this in a better way going forward, such that this won't
+              # be an issue under normal circumstances, but vault-agents (or instances)
+              # being offline for long periods of time should invalidate the secret id's.
+              secret_id_ttl = 0;
+              secret_id_num_uses = 0;
               token_ttl = 20 * 60;
               token_max_ttl = 30 * 60;
               token_policies = [ cfg.role_name ];
@@ -4,29 +4,36 @@
   ...
 }:
 let
-  cfg = config.khscodes.networking.fqdn;
+  cfg = config.khscodes.networking;
 in
 {
-  options.khscodes.networking.fqdn = lib.mkOption {
-    type = lib.types.nullOr lib.types.str;
-    default = null;
-    description = "Sets the FQDN of the machine. This is a prerequisite for many modules to be used";
+  options.khscodes.networking = {
+    fqdn = lib.mkOption {
+      type = lib.types.str;
+      default = null;
+      description = "Sets the FQDN of the machine. This is a prerequisite for many modules to be used";
+    };
+    aliases = lib.mkOption {
+      type = lib.types.listOf lib.types.str;
+      default = [ ];
+    };
   };

-  config = lib.mkIf (cfg != null) (
+  config =
     let
-      hostname = builtins.head (lib.strings.splitString "." cfg);
-      domain = if hostname == cfg then null else (lib.strings.removePrefix "${hostname}." cfg);
+      hostname = builtins.head (lib.strings.splitString "." cfg.fqdn);
+      domain = if hostname == cfg then null else (lib.strings.removePrefix "${hostname}." cfg.fqdn);
     in
     {
       networking.hostName = lib.mkForce hostname;
       networking.domain = lib.mkForce domain;
-      networking.fqdn = cfg;
+      networking.fqdn = cfg.fqdn;
       # Add the name of the server to the ssh host certificate domains, but let other configs enable getting the host certificates.
-      khscodes.services.openssh.hostCertificate.hostNames = [ cfg ];
+      khscodes.services.openssh.hostCertificate.hostNames = lib.lists.unique (
+        [ cfg.fqdn ] ++ cfg.aliases
+      );
       boot.kernel.sysctl = {
-        "kernel.hostname" = cfg;
+        "kernel.hostname" = cfg.fqdn;
       };
-    }
-  );
+    };
 }
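The hostname and domain are now derived from cfg.fqdn instead of the option value itself. A sketch of the derivation with a placeholder name:

    let
      lib = (import <nixpkgs> { }).lib;
      fqdn = "web01.example.org"; # hypothetical
      hostname = builtins.head (lib.strings.splitString "." fqdn);
      domain = if hostname == fqdn then null else lib.strings.removePrefix "${hostname}." fqdn;
    in
    { inherit hostname domain; }
    # evaluates to { hostname = "web01"; domain = "example.org"; }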
nix/modules/nixos/services/alloy/config.alloy (new file, 151 lines)
@@ -0,0 +1,151 @@
// This block relabels metrics coming from node_exporter to add standard labels
discovery.relabel "integrations_node_exporter" {
  targets = prometheus.exporter.unix.integrations_node_exporter.targets

  rule {
    // Set the instance label to the hostname of the machine
    target_label = "instance"
    replacement = constants.hostname
  }

  rule {
    // Set a standard job name for all node_exporter metrics
    target_label = "job"
    replacement = "integrations/node_exporter"
  }
}

// Configure the node_exporter integration to collect system metrics
prometheus.exporter.unix "integrations_node_exporter" {
  // Disable unnecessary collectors to reduce overhead
  disable_collectors = ["ipvs", "btrfs", "infiniband", "xfs", "zfs"]
  enable_collectors = ["meminfo"]

  filesystem {
    // Exclude filesystem types that aren't relevant for monitoring
    fs_types_exclude = "^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|tmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$"
    // Exclude mount points that aren't relevant for monitoring
    mount_points_exclude = "^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+)($|/)"
    // Timeout for filesystem operations
    mount_timeout = "5s"
  }

  netclass {
    // Ignore virtual and container network interfaces
    ignored_devices = "^(veth.*|cali.*|[a-f0-9]{15})$"
  }

  netdev {
    // Exclude virtual and container network interfaces from device metrics
    device_exclude = "^(veth.*|cali.*|[a-f0-9]{15})$"
  }
}

// Define how to scrape metrics from the node_exporter
prometheus.scrape "integrations_node_exporter" {
  scrape_interval = "15s"
  // Use the targets with labels from the discovery.relabel component
  targets = discovery.relabel.integrations_node_exporter.output
  // Forward the scraped metrics to the OpenTelemetry receiver
  forward_to = [otelcol.receiver.prometheus.default.receiver]
}

otelcol.receiver.prometheus "default" {
  output {
    metrics = [otelcol.exporter.otlphttp.default.input]
  }
}

// Define where to send the metrics for storage
otelcol.exporter.otlphttp "default" {
  client {
    endpoint = "https://prometheus.kaareskovgaard.net/api/v1/otlp/"
    tls {
      cert_file = "$PROMETHEUS_CLIENT_CERT"
      key_file = "$PROMETHEUS_CLIENT_KEY"
    }
  }
  encoding = "proto"
}

// Collect logs from systemd journal for node_exporter integration
loki.source.journal "logs_integrations_integrations_node_exporter_journal_scrape" {
  // Only collect logs from the last 24 hours
  max_age = "24h0m0s"
  // Apply relabeling rules to the logs
  relabel_rules = discovery.relabel.logs_integrations_integrations_node_exporter_journal_scrape.rules
  // Send logs to the Loki instance
  forward_to = [loki.write.local.receiver]
}

// Define which log files to collect for node_exporter
local.file_match "logs_integrations_integrations_node_exporter_direct_scrape" {
  path_targets = [{
    // Target localhost for log collection
    __address__ = "localhost",
    // Collect standard system logs
    __path__ = "/var/log/{syslog,messages,*.log}",
    // Add instance label with hostname
    instance = constants.hostname,
    // Add job label for logs
    job = "integrations/node_exporter",
  }]
}

// Define relabeling rules for systemd journal logs
discovery.relabel "logs_integrations_integrations_node_exporter_journal_scrape" {
  targets = []

  rule {
    // Extract systemd unit information into a label
    source_labels = ["__journal__systemd_unit"]
    target_label = "unit"
  }

  rule {
    // Extract boot ID information into a label
    source_labels = ["__journal__boot_id"]
    target_label = "boot_id"
  }

  rule {
    // Extract transport information into a label
    source_labels = ["__journal__transport"]
    target_label = "transport"
  }

  rule {
    // Extract log priority into a level label
    source_labels = ["__journal_priority_keyword"]
    target_label = "level"
  }

  rule {
    // Set the instance label to the hostname of the machine
    target_label = "instance"
    replacement = constants.hostname
  }
}

// Collect logs from files for node_exporter
loki.source.file "logs_integrations_integrations_node_exporter_direct_scrape" {
  // Use targets defined in local.file_match
  targets = local.file_match.logs_integrations_integrations_node_exporter_direct_scrape.targets
  // Send logs to the Loki instance
  forward_to = [loki.write.local.receiver]
}

// Define where to send logs for storage
loki.write "local" {
  endpoint {
    // Send logs to the central Loki instance
    url = "https://loki.kaareskovgaard.net/loki/api/v1/push"
    tls_config {
      cert_file = "$LOKI_CLIENT_CERT"
      key_file = "$LOKI_CLIENT_KEY"
    }
  }
}
nix/modules/nixos/services/alloy/default.nix (new file, 58 lines)
@@ -0,0 +1,58 @@
{ config, lib, ... }:
let
  cfg = config.khscodes.services.alloy;
  configFile =
    lib.strings.replaceStrings
      [ "$LOKI_CLIENT_KEY" "$LOKI_CLIENT_CERT" "$PROMETHEUS_CLIENT_KEY" "$PROMETHEUS_CLIENT_CERT" ]
      [ cfg.loki.client_key cfg.loki.client_cert cfg.prometheus.client_key cfg.prometheus.client_cert ]
      (builtins.readFile ./config.alloy);
in
{
  options.khscodes.services.alloy = {
    enable = lib.mkEnableOption "Enables alloy";
    loki = {
      client_key = lib.mkOption {
        type = lib.types.str;
      };
      client_cert = lib.mkOption {
        type = lib.types.str;
      };
    };
    prometheus = {
      client_key = lib.mkOption {
        type = lib.types.str;
      };
      client_cert = lib.mkOption {
        type = lib.types.str;
      };
    };
    user = lib.mkOption {
      type = lib.types.str;
      default = "alloy";
    };
    group = lib.mkOption {
      type = lib.types.str;
      default = "alloy";
    };
  };

  config = lib.mkIf cfg.enable {
    services.alloy.enable = true;
    systemd.services.alloy = {
      serviceConfig = {
        DynamicUser = lib.mkForce false;
        User = "${cfg.user}";
        Group = "${cfg.group}";
      };
    };
    users.users.${cfg.user} = {
      description = "Alloy service user";
      isSystemUser = true;
      group = cfg.group;
    };
    users.groups.${cfg.group} = { };
    environment.etc."alloy/config.alloy" = {
      text = configFile;
    };
  };
}
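The module substitutes the $…_CLIENT_* placeholders in config.alloy at evaluation time rather than leaving expansion to alloy at runtime. A sketch of the lib.strings.replaceStrings call, using the default certificate paths from the sender modules above:

    let
      lib = (import <nixpkgs> { }).lib;
    in
    lib.strings.replaceStrings
      [ "$LOKI_CLIENT_KEY" "$LOKI_CLIENT_CERT" ]
      [ "/var/lib/alloy/loki_cert.key" "/var/lib/alloy/loki_cert.pem" ]
      ''cert_file = "$LOKI_CLIENT_CERT"''
    # evaluates to: cert_file = "/var/lib/alloy/loki_cert.pem"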
@@ -18,7 +18,7 @@ let
       type = lib.types.nullOr (
         lib.types.oneOf [
           lib.types.str
-          (lib.types.mkSubmodule {
+          (lib.khscodes.mkSubmodule {
             description = "acme certificate";
             options = {
               domains = lib.mkOption {
@@ -41,6 +41,27 @@ let
         default = 301;
         description = "HTTP status used by globalRedirect and forceSSL. Possible usecases include temporary (302, 307) redirects, keeping the request method and body (307, 308), or explicitly resetting the method to GET (303). See https://developer.mozilla.org/en-US/docs/Web/HTTP/Redirections.";
       };
+      mtls = lib.mkOption {
+        type = lib.types.nullOr (
+          lib.khscodes.mkSubmodule {
+            options = {
+              verify = lib.mkOption {
+                type = lib.types.enum [
+                  "optional"
+                  "on"
+                ];
+                default = "on";
+              };
+              certificate = lib.mkOption {
+                type = lib.types.str;
+                description = "Path to the certificate to verify client certificates against";
+              };
+            };
+            description = "Nginx MTLS settings";
+          }
+        );
+        default = null;
+      };
       extraConfig = lib.mkOption {
         type = lib.types.lines;
         description = "Extra configuration to inject into the generated nginx config";
@@ -100,6 +121,7 @@ in
         message = "Cannot use `config.khscodes.services.nginx.virtualHosts.<name>.acme = {}` without setting config.khscodes.security.acme.dns01Enabled";
       }
     ];
+    khscodes.networking.aliases = lib.attrsets.attrNames cfg.virtualHosts;
     khscodes.security.acme.enable = true;
     security.dhparams.enable = lib.mkIf (cfg.sslConfiguration == "intermediate") {
       enable = true;
@@ -131,23 +153,40 @@ in

         ${modernSslAppendedHttpConfig}
       '';
-      virtualHosts = lib.attrsets.mapAttrs (name: value: {
-        inherit (value)
-          extraConfig
-          locations
-          globalRedirect
-          redirectCode
-          ;
-        forceSSL = true;
-        enableACME = value.acme == null && !dns01Enabled;
-        useACMEHost =
-          if lib.strings.isString value.acme then
-            value.acme
-          else if lib.attrsets.isAttrs value.acme || dns01Enabled then
-            name
-          else
-            null;
-      }) cfg.virtualHosts;
+      virtualHosts = lib.attrsets.mapAttrs (
+        name: value:
+        let
+          mtls =
+            if value.mtls != null then
+              ''
+                ssl_client_certificate ${value.mtls.certificate};
+                ssl_verify_client ${value.mtls.verify};
+              ''
+            else
+              '''';
+          extraConfig = ''
+            ${mtls}
+            ${value.extraConfig}
+          '';
+        in
+        {
+          inherit (value)
+            locations
+            globalRedirect
+            redirectCode
+            ;
+          inherit extraConfig;
+          forceSSL = true;
+          enableACME = value.acme == null && !dns01Enabled;
+          useACMEHost =
+            if lib.strings.isString value.acme then
+              value.acme
+            else if lib.attrsets.isAttrs value.acme || dns01Enabled then
+              name
+            else
+              null;
+        }
+      ) cfg.virtualHosts;
     };
     networking.firewall.allowedTCPPorts = [
       80
@@ -160,20 +199,23 @@ in
           acc: name: value:
           (
             acc
-            // (lib.attrsets.optionalAttrs (lib.attrsets.isAttrs value.acme || dns01Enabled) {
-              "${name}" =
-                if value.acme == null then
-                  {
-                    domain = name;
-                    reloadServices = [ "nginx" ];
-                  }
-                else
-                  {
-                    domain = lib.lists.head value.acme.domains;
-                    extraDomainNames = lib.lists.tail value.acme.domains;
-                    reloadServices = [ "nginx" ];
-                  };
-            })
+            // (lib.attrsets.optionalAttrs
+              (lib.attrsets.isAttrs value.acme || (dns01Enabled && !lib.strings.isString value.acme))
+              {
+                "${name}" =
+                  if value.acme == null then
+                    {
+                      domain = name;
+                      reloadServices = [ "nginx" ];
+                    }
+                  else
+                    {
+                      domain = lib.lists.head value.acme.domains;
+                      extraDomainNames = lib.lists.tail value.acme.domains;
+                      reloadServices = [ "nginx" ];
+                    };
+              }
+            )
           )
         ) { } cfg.virtualHosts
     );
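Taken together, the nginx changes let a virtual host demand client certificates, with the ssl_client_certificate and ssl_verify_client directives rendered into that host's extraConfig. A hypothetical host entry (the host name and CA path are placeholders):

    {
      khscodes.services.nginx.virtualHosts."grafana.example.org" = {
        mtls = {
          verify = "on"; # the default; "optional" is also accepted
          certificate = "/etc/ssl/client-ca.pem"; # CA used to verify client certificates
        };
      };
    }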
@@ -1,66 +0,0 @@
{
  config,
  lib,
  pkgs,
  ...
}:
let
  cfg = config.khscodes.services.openstack-read-vault-auth-from-userdata;
in
{
  options.khscodes.services.openstack-read-vault-auth-from-userdata = {
    enable = lib.mkEnableOption "Enables reading vault auth information from instance userdata";
  };

  config = lib.mkIf (cfg.enable && config.khscodes.services.vault-agent.enable) (
    let
      vault_addr = config.khscodes.services.vault-agent.vault.address;
      secretIdFilePath = config.khscodes.services.vault-agent.vault.secretIdFilePath;
      roleIdFilePath = config.khscodes.services.vault-agent.vault.roleIdFilePath;
    in
    {
      systemd.services."openstack-read-vault-auth-from-userdata" = {
        enable = true;
        wantedBy = [ "multi-user.target" ];
        wants = [ "network-online.target" ];
        after = [ "network-online.target" ];
        serviceConfig = {
          Type = "oneshot";
          RemainAfterExit = true;
          ExecStart = lib.getExe (
            pkgs.writeShellApplication {
              name = "openstack-read-vault-auth-from-userdata";
              runtimeInputs = [
                pkgs.curl
                pkgs.jq
                pkgs.openbao
                pkgs.getent
                pkgs.systemd
              ];
              text = ''
                if [[ -f "${lib.escapeShellArg secretIdFilePath}" ]]; then
                  echo "Secret id already found, not copying new id"
                  exit 0
                fi
                userdata="$(curl http://169.254.169.254/openstack/2012-08-10/user_data)"
                role_id="$(echo "$userdata" | jq --raw-output '.VAULT_ROLE_ID')"
                secret_id_wrapped="$(echo "$userdata" | jq --raw-output '.VAULT_SECRET_ID_WRAPPED')"
                secret_id="$(BAO_ADDR=${lib.escapeShellArg vault_addr} bao unwrap -field=secret_id "$secret_id_wrapped")"
                mkdir -p "$(dirname ${lib.escapeShellArg secretIdFilePath})"
                mkdir -p "$(dirname ${lib.escapeShellArg roleIdFilePath})"
                echo -n "$role_id" > ${lib.escapeShellArg roleIdFilePath}
                echo -n "$secret_id" > ${lib.escapeShellArg secretIdFilePath}
                chown root:root "${lib.escapeShellArg secretIdFilePath}"
                chmod 0600 "${lib.escapeShellArg secretIdFilePath}"
                chown root:root "${lib.escapeShellArg roleIdFilePath}"
                chmod 0600 "${lib.escapeShellArg roleIdFilePath}"
                echo "Role id and secret id copied, restart vault-agent"
                systemctl restart vault-agent-openbao.service
              '';
            }
          );
        };
      };
    }
  );
}
@@ -0,0 +1,77 @@
{
  config,
  lib,
  pkgs,
  ...
}:
let
  cfg = config.khscodes.services.read-vault-auth-from-userdata;
in
{
  options.khscodes.services.read-vault-auth-from-userdata = {
    enable = lib.mkEnableOption "Enables reading vault auth information from instance userdata";
    url = lib.mkOption {
      type = lib.types.str;
      description = "URL to retrieve instance metadata from";
    };
  };

  config = lib.mkIf (cfg.enable && config.khscodes.services.vault-agent.enable) (
    let
      vault_addr = lib.escapeShellArg config.khscodes.services.vault-agent.vault.address;
      secretIdFilePath = lib.escapeShellArg config.khscodes.services.vault-agent.vault.secretIdFilePath;
      roleIdFilePath = lib.escapeShellArg config.khscodes.services.vault-agent.vault.roleIdFilePath;
      cacheFilePath = lib.escapeShellArg "${config.khscodes.services.vault-agent.vault.secretIdFilePath}.wrapped";
    in
    {
      systemd.services."read-vault-auth-from-userdata" = {
        enable = true;
        wantedBy = [ "multi-user.target" ];
        wants = [ "network-online.target" ];
        after = [ "network-online.target" ];
        serviceConfig = {
          Type = "oneshot";
          RemainAfterExit = true;
          ExecStart = lib.getExe (
            pkgs.writeShellApplication {
              name = "read-vault-auth-from-userdata";
              runtimeInputs = [
                pkgs.curl
                pkgs.jq
                pkgs.openbao
                pkgs.getent
                pkgs.systemd
              ];
              text = ''
                userdata="$(curl ${lib.escapeShellArg cfg.url})"
                role_id="$(echo "$userdata" | jq --raw-output '.VAULT_ROLE_ID')"
                secret_id_wrapped="$(echo "$userdata" | jq --raw-output '.VAULT_SECRET_ID_WRAPPED')"
                if [[ -f ${cacheFilePath} ]]; then
                  cache_key="$(cat ${cacheFilePath})"
                  if [[ "$secret_id_wrapped" == "$cache_key" ]]; then
                    echo "Secret id matched last used value, exiting program"
                    exit 0
                  fi
                fi
                secret_id="$(BAO_ADDR=${vault_addr} bao unwrap -field=secret_id "$secret_id_wrapped")"
                mkdir -p "$(dirname ${secretIdFilePath})"
                mkdir -p "$(dirname ${roleIdFilePath})"
                echo -n "$role_id" > ${roleIdFilePath}
                echo -n "$secret_id" > ${secretIdFilePath}
                chown root:root ${secretIdFilePath}
                chmod 0600 ${secretIdFilePath}
                chown root:root ${roleIdFilePath}
                chmod 0600 ${roleIdFilePath}
                echo -n "$secret_id_wrapped" > ${cacheFilePath}
                chmod 0600 ${cacheFilePath}
                chown root:root ${cacheFilePath}
                echo "Role id and secret id copied, restarting vault-agent"
                systemctl restart vault-agent-openbao.service
              '';
            }
          );
        };
      };
    }
  );
}
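Relative to the deleted openstack-specific unit, the rewrite escapes each path once in the let-bindings and caches the wrapped secret id so re-runs are idempotent. A sketch of what lib.escapeShellArg contributes before the values are spliced into the script (hypothetical path):

    let
      lib = (import <nixpkgs> { }).lib;
    in
    lib.escapeShellArg "/run/keys/secret id" # hypothetical path containing a space
    # evaluates to "'/run/keys/secret id'", safe to interpolate into the shell text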