Move monitoring.kaareskovgaard.net to new openbao setup
parent 905b1096ac
commit 1f7139f793

20 changed files with 367 additions and 177 deletions
@@ -106,6 +106,7 @@
allowUnfreePackages = [
"spotify"
"google-chrome"
"terraform"
];
in
(inputs.flake-base.lib.mkFlake {
@@ -48,5 +48,7 @@ pkgs.nixosTest {
machine.wait_for_unit("hetzner-static-ip.service")
ipv6 = machine.succeed("ip addr")
assert "dead:beef:cafe::1" in ipv6
# Ensure that rerunning the service works, even when the ip address is already set
machine.succeed("systemctl restart hetzner-static-ip.service")
'';
}
@@ -5,7 +5,7 @@ network-config:
  subnets:
    - ipv4: true
      type: dhcp
    - address: dead:beef:cafe::1/64
    - address: dead:beef:cafe::1/128
      gateway: fe80::1
      ipv6: true
      type: static
@@ -96,7 +96,6 @@ in
"hcloud"
"cloudflare"
"vault"
"authentik"
]
);
description = "Needed endpoints to be used during provisioning";
@@ -120,7 +119,6 @@ in
"hcloud"
"cloudflare"
"vault"
"authentik"
]
);
description = "Needed endpoints to be used during provisioning";
@@ -98,8 +98,7 @@ in
# Not hardcoding the role name here, as reading it like this will create a dependency
# on the role being created first, which is needed.
role_name = config.khscodes.vault.output.approle_auth_backend_role.${cfg.role_name}.role_name;
# Should only be 5-10 mins once done testing
wrapping_ttl = 5 * 60;
wrapping_ttl = 30 * 60;
num_uses = 0;

# This should simply mean that we never attempt to recreate the secret id, as we don't want a rerun of the
@@ -18,7 +18,7 @@ pkgs.writeShellApplication {
>&2 echo "Must run as root"
exit 2
fi
secret_id="$(bao unwrap -field=secret_id "$wrapped_secret_id")"
secret_id="$(BAO_ADDR=https://secrets.kaareskovgaard.net bao unwrap -field=secret_id "$wrapped_secret_id")"
mkdir -p /var/lib/vault-agent
touch /var/lib/vault-agent/role-id
touch /var/lib/vault-agent/secret-id
nix/packages/instance-new-secret-id/default.nix (new file, 19 lines)

@@ -0,0 +1,19 @@
{ pkgs, ... }:
pkgs.writeShellApplication {
  name = "instance-new-secret-id";
  runtimeInputs = [
    pkgs.openbao
    pkgs.uutils-coreutils-noprefix
  ];
  text = ''
    instance="''${1:-}"
    if [[ "$instance" == "" ]]; then
      >&2 echo "Usage: instance-new-secret-id <instance>"
      exit 1
    fi
    secret_id="$(bao write -field=wrapping_token -force -wrap-ttl=1m "auth/approle/role/$instance/secret-id")"
    role_id="$(bao read -field=role_id "auth/approle/role/$instance/role-id")"

    ssh -t "$instance" -- sudo bao-import-secret "$role_id" "$secret_id"
  '';
}
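A usage sketch for the new helper (the host name below is only an example; it assumes an already authenticated bao session, SSH access to the target, and that the instance's AppRole name matches its SSH host alias): the script wraps a fresh secret-id, reads the matching role-id, and hands both to bao-import-secret on the instance.

    # Example invocation (hypothetical host); rotates the AppRole credentials for that instance.
    instance-new-secret-id monitoring.kaareskovgaard.net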
@@ -1,12 +1,12 @@
{ pkgs, ... }:
{ pkgs, lib, ... }:
let
opentofu = pkgs.khscodes.opentofu;
opentofuExe = lib.getExe opentofu;
in
pkgs.writeShellApplication {
name = "instance-opentofu";
runtimeInputs = [
pkgs.uutils-coreutils-noprefix
opentofu
];
text = ''
fqdn="$1"
@@ -16,12 +16,12 @@ pkgs.writeShellApplication {
mkdir -p "$dir"
cat "''${config}" > "$dir/config.tf.json"

tofu -chdir="$dir" init > /dev/null
${opentofuExe} -chdir="$dir" init > /dev/null
if [[ "$cmd" == "apply" ]]; then
tofu -chdir="$dir" "$cmd" >&2
tofu -chdir="$dir" output -json
${opentofuExe} -chdir="$dir" "$cmd" >&2
${opentofuExe} -chdir="$dir" output -json
else
tofu -chdir="$dir" "$cmd"
${opentofuExe} -chdir="$dir" "$cmd"
fi
'';
}
@@ -1,4 +1,6 @@
{ pkgs }:
# I really want to use opentofu here, but waiting for write only values and ephemeral support.
# Should land in 1.11.0 of opentofu
pkgs.opentofu.withPlugins (p: [
pkgs.khscodes.terraform-provider-unifi
pkgs.khscodes.terraform-provider-cloudflare
@@ -6,4 +8,5 @@ pkgs.opentofu.withPlugins (p: [
pkgs.khscodes.terraform-provider-openstack
pkgs.khscodes.terraform-provider-vault
pkgs.khscodes.terraform-provider-random
pkgs.khscodes.terraform-provider-tls
])
nix/packages/terraform-provider-tls/default.nix (new file, 10 lines)

@@ -0,0 +1,10 @@
{ pkgs }:
pkgs.terraform-providers.mkProvider {
  hash = "sha256-t/nUt0deyckP8opNiPZc5rbC1SleZwkrFXuQFw47sqA=";
  homepage = "https://registry.terraform.io/providers/hashicorp/tls";
  owner = "hashicorp";
  repo = "terraform-provider-tls";
  rev = "v4.1.0";
  spdx = "MPL-2.0";
  vendorHash = "sha256-tYvQURTrFtr+rgSMGq2zi/5p5jJVGIse7+hj95gz68U=";
}
@@ -2,6 +2,7 @@
{
imports = [ ./khs-base.nix ];
config = {
environment.systemPackages = [ pkgs.khscodes.bao-import-secret ];
khscodes = {
services.openssh.enable = true;
machine.type = "server";
@@ -9,6 +10,7 @@
infrastructure = {
vault-server-approle.enable = lib.mkDefault true;
vault-loki-sender.enable = lib.mkDefault true;
vault-prometheus-sender.enable = lib.mkDefault true;
};
};
stylix = {
nix/systems/aarch64-linux/kas.codes/dkim.nix (new file, 34 lines)

@@ -0,0 +1,34 @@
{
  khscodes.services.vault-agent.templates = [
    {
      contents = ''
        {{- with secret "forgejo/data/mailserver/dkim" -}}
        {{ .Data.data.dkim_private_key }}
        {{- end -}}
      '';
      destination = "/var/lib/vault-agent/mailserver/dkim/private.key";
      perms = "0600";
      owner = "rspamd";
      group = "rspamd";
      restartUnits = [
        "rspamd.service"
        "postfix.service"
      ];
    }
    {
      contents = ''
        {{- with secret "forgejo/data/mailserver/forgejo-user" -}}
        {{ .Data.data.hashed_password }}
        {{- end -}}
      '';
      destination = "/var/lib/vault-agent/mailserver/users/forgejo.passwd.hash";
      perms = "0600";
      owner = "rspamd";
      group = "rspamd";
      restartUnits = [
        "rspamd.service"
        "postfix.service"
      ];
    }
  ];
}
@@ -1,9 +1,12 @@
{
inputs,
lib,
pkgs,
...
}:
let
# Change this if recreating the server from scratch. See README for this instance.
bootstrapping = false;
in
{
options.khscodes."security.kaareskovgaard.net" = {
bootstrap = {
@@ -17,8 +20,8 @@
./post
];
config = {
environment.systemPackages = [ pkgs.khscodes.bao-import-secret ];
khscodes.services.nginx.enable = true;
khscodes."security.kaareskovgaard.net".bootstrap.enable = bootstrapping;
khscodes.infrastructure.hetzner-instance = {
enable = true;
server_type = "cax11";
@@ -8,11 +8,12 @@ let
domain = "login.kaareskovgaard.net";
bootstrapping = config.khscodes."security.kaareskovgaard.net".bootstrap.enable;
openbaoAppBasicSecretFile = "/var/lib/vault-agent/kanidm/openbao_basic_secret";
openbaoCliAppBasicSecretFile = "/var/lib/vault-agent/kanidm/openbao_cli_basic_secret";
monitoringAppBasicSecretFile = "/var/lib/vault-agent/kanidm/monitoring_basic_secret";
openbaoDomain = config.khscodes.infrastructure.openbao.domain;
openbaoAllowedRedirectUrls = [
"https://${openbaoDomain}/ui/vault/auth/oidc/oidc/callback"
"https://${openbaoDomain}/oidc/callback"
"http://localhost:8250/oidc/callback"
"https://${openbaoDomain}/ui/vault/auth/kanidm/oidc/callback"
"https://${openbaoDomain}/kanidm/callback"
];
kanidm-reset-password = pkgs.writeShellApplication {
name = "kanidm-reset-password";
@@ -75,7 +76,7 @@ in
present = true;
public = false;
preferShortUsername = true;
basicSecretFile = openbaoAppBasicSecretFile;
basicSecretFile = lib.mkIf (!bootstrapping) openbaoAppBasicSecretFile;
originUrl = openbaoAllowedRedirectUrls;
originLanding = "https://${openbaoDomain}";
displayName = "OpenBAO";
@@ -90,13 +91,52 @@ in
joinType = "array";
valuesByGroup = {
"openbao_admin" = [
"openbao_reader"
"openbao_writer"
"openbao_sudo"
"openbao_cli_writer"
];
};
};
};
openbao-cli = {
present = true;
public = false;
preferShortUsername = true;
basicSecretFile = lib.mkIf (!bootstrapping) openbaoCliAppBasicSecretFile;
originUrl = [ "http://localhost:8250/oidc/callback" ];
originLanding = "http://localhost:8250";
displayName = "OpenBAO CLI";
scopeMaps = {
"openbao_admin" = [
"profile"
"email"
"openid"
];
};
claimMaps.groups = {
joinType = "array";
valuesByGroup = {
"openbao_admin" = [
"openbao_writer"
];
};
};
};
monitoring = {
present = true;
public = false;
preferShortUsername = true;
basicSecretFile = lib.mkIf (!bootstrapping) monitoringAppBasicSecretFile;
originUrl = [ "https://monitoring.kaareskovgaard.net/login/generic_oauth" ];
originLanding = "http://monitoring.kaareskovgaard.net";
displayName = "Monitoring";
scopeMaps = {
"openbao_admin" = [
"profile"
"email"
"openid"
];
};
};
};
};
};
@@ -150,6 +190,34 @@ in
{ "basic_secret": "''${ resource.random_password.openbao_secret.result }" }
'';
};
resource.random_password.openbao_cli_secret = {
length = 48;
numeric = true;
lower = true;
upper = true;
special = false;
};
resource.vault_kv_secret_v2.openbao_cli_secret = {
mount = config.khscodes.vault.output.mount.kanidm.path;
name = "apps/openbao_cli";
data_json = ''
{ "basic_secret": "''${ resource.random_password.openbao_cli_secret.result }" }
'';
};
resource.random_password.monitoring = {
length = 48;
numeric = true;
lower = true;
upper = true;
special = false;
};
resource.vault_kv_secret_v2.monitoring_secret = {
mount = config.khscodes.vault.output.mount.kanidm.path;
name = "apps/monitoring";
data_json = ''
{ "basic_secret": "''${ resource.random_password.monitoring.result }" }
'';
};
}
)
# Sets up OIDC authentication within OpenBAO.
@@ -160,13 +228,38 @@ in
{ }
else
{
resource.vault_jwt_auth_backend.oidc = {
description = "Kanidm auth backend";
resource.vault_jwt_auth_backend.kanidm_cli = {
description = "Kanidm cli auth backend";
path = "oidc";
type = "oidc";
oidc_discovery_url = "https://${domain}/oauth2/openid/openbao-cli";
oidc_client_id = "openbao-cli";
oidc_client_secret = "\${ resource.random_password.openbao_cli_secret.result }";
default_role = "kanidm_cli_writer";
jwt_supported_algs = [
"ES256"
];
tune = [
{
listing_visibility = "hidden";
default_lease_ttl = "2h";
max_lease_ttl = "2h";
token_type = "default-service";
passthrough_request_headers = [ ];
allowed_response_headers = [ ];
audit_non_hmac_request_keys = [ ];
audit_non_hmac_response_keys = [ ];
}
];
};
resource.vault_jwt_auth_backend.kanidm = {
description = "Kanidm auth backend";
path = "kanidm";
type = "oidc";
oidc_discovery_url = "https://${domain}/oauth2/openid/openbao";
oidc_client_id = "openbao";
oidc_client_secret = "\${ resource.random_password.openbao_secret.result }";
default_role = "kanidm_writer";
jwt_supported_algs = [
"ES256"
];
@@ -184,13 +277,13 @@ in
];
};

resource.vault_jwt_auth_backend_role.oidc_reader = {
backend = "\${ resource.vault_jwt_auth_backend.oidc.path }";
role_name = "reader";
bound_audiences = [ "openbao" ];
allowed_redirect_uris = openbaoAllowedRedirectUrls;
resource.vault_jwt_auth_backend_role.kanidm_cli_writer = {
backend = "\${ resource.vault_jwt_auth_backend.kanidm_cli.path }";
role_name = "kanidm_cli_writer";
bound_audiences = [ "openbao-cli" ];
allowed_redirect_uris = [ "http://localhost:8250/oidc/callback" ];
user_claim = "sub";
token_policies = [ "reader" ];
token_policies = [ "writer" ];
groups_claim = "groups";
oidc_scopes = [
"openid"
@@ -199,9 +292,9 @@ in
];
};

resource.vault_jwt_auth_backend_role.oidc_writer = {
backend = "\${ resource.vault_jwt_auth_backend.oidc.path }";
role_name = "writer";
resource.vault_jwt_auth_backend_role.kanidm_writer = {
backend = "\${ resource.vault_jwt_auth_backend.kanidm.path }";
role_name = "kanidm_writer";
bound_audiences = [ "openbao" ];
allowed_redirect_uris = openbaoAllowedRedirectUrls;
user_claim = "sub";
@@ -214,65 +307,28 @@ in
];
};

resource.vault_jwt_auth_backend_role.oidc_sudo = {
backend = "\${ resource.vault_jwt_auth_backend.oidc.path }";
role_name = "sudo";
bound_audiences = [ "openbao" ];
allowed_redirect_uris = openbaoAllowedRedirectUrls;
user_claim = "sub";
token_policies = [ "sudo" ];
groups_claim = "groups";
oidc_scopes = [
"openid"
"profile"
"email"
];
};

resource.vault_identity_group.reader = {
name = "reader";
policies = [ "reader" ];
type = "external";
};

resource.vault_identity_group.writer = {
name = "writer";
resource.vault_identity_group.kanidm_writer = {
name = "kanidm_writer";
policies = [ "writer" ];
type = "external";
};

resource.vault_identity_group.sudo = {
name = "sudo";
policies = [ "sudo" ];
resource.vault_identity_group.kanidm_cli_writer = {
name = "kanidm_cli_writer";
policies = [ "writer" ];
type = "external";
};

resource.vault_identity_group_alias.reader = {
name = "openbao_reader";
mount_accessor = "\${ vault_jwt_auth_backend.oidc.accessor }";
canonical_id = "\${ vault_identity_group.reader.id }";
resource.vault_identity_group_alias.oidc_writer = {
name = "openbao_cli_writer";
mount_accessor = "\${ vault_jwt_auth_backend.kanidm_cli.accessor }";
canonical_id = "\${ vault_identity_group.kanidm_cli_writer.id }";
};

resource.vault_identity_group_alias.writer = {
resource.vault_identity_group_alias.kanidm_writer = {
name = "openbao_writer";
mount_accessor = "\${ vault_jwt_auth_backend.oidc.accessor }";
canonical_id = "\${ vault_identity_group.writer.id }";
};

resource.vault_identity_group_alias.sudo = {
name = "openbao_sudo";
mount_accessor = "\${ vault_jwt_auth_backend.oidc.accessor }";
canonical_id = "\${ vault_identity_group.writer.id }";
};

resource.vault_policy.reader = {
name = "reader";

policy = ''
path "*" {
capabilities = ["read", "list"]
}
'';
mount_accessor = "\${ vault_jwt_auth_backend.kanidm.accessor }";
canonical_id = "\${ vault_identity_group.kanidm_writer.id }";
};

resource.vault_policy.writer = {
@@ -284,16 +340,6 @@ in
}
'';
};

resource.vault_policy.sudo = {
name = "sudo";

policy = ''
path "auth/token/create" {
capabilities = ["create", "update", "sudo"]
}
'';
};
}
)
];
@@ -311,6 +357,30 @@ in
group = "kanidm";
reloadOrRestartUnits = [ "kanidm.service" ];
}
{
contents = ''
{{- with secret "kanidm/data/apps/openbao_cli" -}}
{{ .Data.data.basic_secret }}
{{- end -}}
'';
destination = openbaoCliAppBasicSecretFile;
perms = "0600";
owner = "kanidm";
group = "kanidm";
reloadOrRestartUnits = [ "kanidm.service" ];
}
{
contents = ''
{{- with secret "kanidm/data/apps/monitoring" -}}
{{ .Data.data.basic_secret }}
{{- end -}}
'';
destination = monitoringAppBasicSecretFile;
perms = "0600";
owner = "kanidm";
group = "kanidm";
reloadOrRestartUnits = [ "kanidm.service" ];
}
];

security.acme.certs.${domain}.reloadServices = [ "kanidm.service" ];
@@ -11,12 +11,13 @@ in
{
imports = [
"${inputs.self}/nix/profiles/nixos/khs-openstack-server.nix"
./vault_loki.nix
./vault_prometheus.nix
];
services.grafana = {
enable = true;
settings = {
security = {
disable_initial_admin_creation = true;
};
server = {
http_addr = "127.0.0.1";
http_port = 3000;
@@ -24,6 +25,32 @@ in
root_url = "https://monitoring.kaareskovgaard.net";
serve_from_sub_path = false;
};
"auth" = {
disable_login_form = true;
};
"auth.basic" = {
enabled = false;
};
"auth.generic_oauth" = {
enabled = true;
allow_sign_up = true;
auto_login = false;
team_ids = null;
allowed_organizations = null;
name = "Kanidm";
auth_url = "https://login.kaareskovgaard.net/ui/oauth2";
token_url = "https://login.kaareskovgaard.net/oauth2/token";
api_url = "https://login.kaareskovgaard.net/oauth2/openid/monitoring/userinfo";
client_id = "monitoring";
client_secret = "$__file{/var/lib/vault-agent/grafana/kanidm_client_secret}";
scopes = "openid profile email";
use_pkce = true;
skip_org_role_sync = false;
allow_assign_grafana_admin = true;
org_attribute_path = "['Main org.']";
org_mapping = "*:*:Admin";
role_attribute_path = "'GrafanaAdmin'";
};
};
provision = {
enable = true;
@@ -167,7 +194,24 @@ in
perms = "0644";
reloadOrRestartUnits = [ "nginx.service" ];
}
{
contents = ''
{{- with secret "kanidm/data/apps/monitoring" -}}
{{ .Data.data.basic_secret }}
{{- end -}}
'';
destination = "/var/lib/vault-agent/grafana/kanidm_client_secret";
owner = "grafana";
group = "grafana";
perms = "0600";
reloadOrRestartUnits = [ "grafana.service" ];
}
];
infrastructure.vault-server-approle.policy = {
"kanidm/data/apps/monitoring" = {
capabilities = [ "read" ];
};
};
};
snowfallorg.users.khs.admin = true;
users.users.khs = {
@@ -1,28 +0,0 @@
# This should go into the setup of the vault server itself, as the vault server also needs stuff that depends on this.
{
khscodes.infrastructure.vault-loki-sender = {
enable = true;
# This is quite ugly, but should get the job done. Sadly I cannot reference the output from here.
terranixBackendName = "\${ vault_mount.loki-mtls.path }";
};
khscodes.infrastructure.provisioning.pre.modules = [
(
{ config, ... }:
{
khscodes.vault.enable = true;
khscodes.vault.mount.loki-mtls = {
type = "pki";
path = "loki-mtls";
max_lease_ttl_seconds = 10 * 365 * 24 * 60 * 60;
default_lease_ttl_seconds = 60 * 60;
};
khscodes.vault.pki_secret_backend_root_cert.loki-mtls = {
backend = config.khscodes.vault.output.mount.loki-mtls.path;
type = "internal";
common_name = "loki.kaareskovgaard.net";
issuer_name = "loki-mtls-root-ca";
};
}
)
];
}
@@ -1,28 +0,0 @@
# This should go into the setup of the vault server itself, as the vault server also needs stuff that depends on this.
{
khscodes.infrastructure.vault-prometheus-sender = {
enable = true;
# This is quite ugly, but should get the job done. Sadly I cannot reference the output from here.
terranixBackendName = "\${ vault_mount.prometheus-mtls.path }";
};
khscodes.infrastructure.provisioning.pre.modules = [
(
{ config, ... }:
{
khscodes.vault.enable = true;
khscodes.vault.mount.prometheus-mtls = {
type = "pki";
path = "prometheus-mtls";
max_lease_ttl_seconds = 10 * 365 * 24 * 60 * 60;
default_lease_ttl_seconds = 60 * 60;
};
khscodes.vault.pki_secret_backend_root_cert.prometheus-mtls = {
backend = config.khscodes.vault.output.mount.prometheus-mtls.path;
type = "internal";
common_name = "prometheus.kaareskovgaard.net";
issuer_name = "prometheus-mtls-root-ca";
};
}
)
];
}
rust/program/hetzner-static-ip/src/iproute2.rs (new file, 68 lines)

@@ -0,0 +1,68 @@
use std::collections::BTreeSet;

use anyhow::Context as _;
use common::proc::Command;
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct Link {
    ifname: String,
}

#[derive(Debug, Deserialize)]
struct Addr {
    addr_info: Vec<AddrInfo>,
}

#[derive(Debug, Deserialize)]
struct AddrInfo {
    local: String,
    prefixlen: usize,
}

#[derive(Debug, Deserialize)]
struct Route {
    dst: String,
    gateway: Option<String>,
}

pub fn get_link_names() -> anyhow::Result<BTreeSet<String>> {
    let mut proc = Command::new("ip");
    proc.args(["-j", "link"]);
    let out = proc.try_spawn_to_json::<Vec<Link>>()?;

    Ok(out.into_iter().map(|l| l.ifname).collect())
}

pub fn has_ipv6_address(dev: &str, addr: &str) -> anyhow::Result<bool> {
    let mut proc = Command::new("ip");
    proc.args(["-j", "-6", "addr", "show", dev]);

    let out: Vec<Addr> = proc.try_spawn_to_json()?;

    let idx = addr
        .char_indices()
        .find_map(|(idx, ch)| if ch == '/' { Some(idx) } else { None })
        .expect("Address should contain prefixlen");
    let prefixlen = &addr[idx + 1..];
    let addr = &addr[..idx];
    let prefixlen: usize = prefixlen
        .parse()
        .context("Ipv6 prefixlen was not a usize")?;
    Ok(out.iter().any(|a| {
        a.addr_info
            .iter()
            .any(|a| a.local == addr && a.prefixlen == prefixlen)
    }))
}

pub fn has_default_route(dev: &str, gateway: &str) -> anyhow::Result<bool> {
    let mut proc = Command::new("ip");
    proc.args(["-j", "-6", "route", "show", "dev", dev]);

    let out: Vec<Route> = proc.try_spawn_to_json()?;

    Ok(out
        .iter()
        .any(|r| r.dst == "default" && r.gateway.as_deref().is_some_and(|gw| gw == gateway)))
}
@@ -3,6 +3,7 @@ use clap::{Parser, Subcommand};

use crate::metadata::Instance;

mod iproute2;
mod metadata;

fn main() {
@@ -35,6 +36,7 @@ fn configure() -> anyhow::Result<()> {
let metadata = common::curl::read_text_as_string(&metadata_api)?;
let metadata: Instance = common::yaml::from_str(&metadata)
.context("Could not parse instance metadata into expected format")?;
let link_names = iproute2::get_link_names()?;
for m in metadata.network_config.config {
for subnet in m.subnets {
match subnet {
@@ -44,27 +46,32 @@ fn configure() -> anyhow::Result<()> {
address,
gateway,
} => {
let mut cmd = common::proc::Command::new("ip");
if ipv6.is_some_and(|v| v) {
cmd.arg("-6");
}
if ipv4.is_some_and(|v| v) {
cmd.arg("-4");
continue;
}
if !ipv6.is_some_and(|v| v) {
continue;
}

// Apparently the configuration used renames the eth0 device to enp1s0, I should really figure out why this happens, but for now
// this will do.
let dev_name = if m.name == "eth0" { "enp1s0" } else { &m.name };
cmd.args(["addr", "add", &address, "dev", dev_name]);
cmd.try_spawn_to_string()?;
let mut cmd = common::proc::Command::new("ip");
if ipv6.is_some_and(|v| v) {
cmd.arg("-6");
let dev_name = if m.name == "eth0" && !link_names.contains("eth0") {
"enp1s0"
} else {
&m.name
};
if !iproute2::has_ipv6_address(dev_name, &address)? {
let mut cmd = common::proc::Command::new("ip");
cmd.args(["-6", "addr", "add", &address, "dev", dev_name]);
cmd.try_spawn_to_string()?;
}
if ipv4.is_some_and(|v| v) {
cmd.arg("-4");
if !iproute2::has_default_route(dev_name, &gateway)? {
let mut cmd = common::proc::Command::new("ip");
cmd.args([
"-6", "route", "add", "default", "via", &gateway, "dev", dev_name,
]);
cmd.try_spawn_to_string()?;
}
cmd.args(["route", "add", "default", "via", &gateway, "dev", dev_name]);
cmd.try_spawn_to_string()?;
}
metadata::InstanceNetworkConfigConfigSubnet::Dhcp {} => continue,
}
@@ -54,8 +54,6 @@ pub enum Endpoint {
Unifi,
#[value(name = "vault")]
Vault,
#[value(name = "authentik")]
Authentik,
}

impl Endpoint {
@@ -81,10 +79,6 @@ impl Endpoint {
let data = UnifiData::read_from_bao()?;
Ok(data.into())
}
Self::Authentik => {
let data = AuthentikData::read_from_bao()?;
Ok(data.into())
}
Self::Vault => {
let data = VaultData::read_from_bao()?;
Ok(data.into())
@@ -152,12 +146,6 @@ entry_definition!(
&["UNIFI_USERNAME", "UNIFI_PASSWORD", "UNIFI_API"]
);
entry_definition!(VaultDataConfig, VaultData, "vault", &["VAULT_TOKEN"]);
entry_definition!(
AuthentikDataConfig,
AuthentikData,
"authentik",
&["AUTHENTIK_TOKEN", "TF_VAR_authentik_username"]
);

fn transfer() -> anyhow::Result<()> {
let openstack = OpenstackData::try_new_from_env()?;
@@ -165,7 +153,6 @@ fn transfer() -> anyhow::Result<()> {
let aws = AwsData::try_new_from_env()?;
let hcloud = HcloudData::try_new_from_env()?;
let unifi = UnifiData::try_new_from_env()?;
let authentik = AuthentikData::try_new_from_env()?;
let vault = VaultData::try_new_from_env()?;

write_kv_data(openstack)?;
@@ -173,7 +160,6 @@ fn transfer() -> anyhow::Result<()> {
write_kv_data(aws)?;
write_kv_data(hcloud)?;
write_kv_data(unifi)?;
write_kv_data(authentik)?;
write_kv_data(vault)?;
Ok(())
}