Work on nginx setup to get logs into loki
Some checks failed
/ dev-shell (push) Successful in 59s
/ rust-packages (push) Successful in 3m17s
/ check (push) Failing after 4m35s
/ systems (push) Successful in 25m43s
/ terraform-providers (push) Successful in 5m44s

Also add default robots.txt file
This commit is contained in:
Kaare Hoff Skovgaard 2025-07-20 13:26:52 +02:00
parent b2f59a9c77
commit 277f7dbb57
Signed by: khs
GPG key ID: C7D890804F01E9F0
7 changed files with 260 additions and 65 deletions

View file

@ -18,6 +18,16 @@ in
description = "This should only be configured for the server hosting loki, to allow setting up dependencies in terraform";
default = "loki-mtls";
};
extraFiles = lib.mkOption {
type = lib.types.listOf lib.types.path;
description = "Extra alloy files to deploy";
default = [ ];
};
extraGroups = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = "Extra groups to add to alloy to allow reading log files";
default = [ ];
};
};
config = lib.mkIf cfg.enable {
@ -78,8 +88,23 @@ in
LOKI_CLIENT_CERT = client_cert;
};
};
environment.etc."alloy/loki.alloy" = {
source = ./loki.alloy;
};
users.users.alloy.extraGroups = cfg.extraGroups;
environment.etc =
{
"alloy/loki.alloy" = {
source = ./loki.alloy;
};
"alloy/loki_endpoint.alloy" = {
source = ./loki_endpoint.alloy;
};
}
// (lib.listToAttrs (
lib.lists.map (f: {
name = "alloy/${builtins.baseNameOf f}";
value = {
source = f;
};
}) cfg.extraFiles
));
};
}

View file

@ -1,78 +1,60 @@
loki_send "node_exporter" {
job = "integrations/node_exporter"
}
// Collect logs from systemd journal for node_exporter integration
loki.source.journal "logs_integrations_integrations_node_exporter_journal_scrape" {
// Only collect logs from the last 24 hours
max_age = "24h0m0s"
// Apply relabeling rules to the logs
relabel_rules = discovery.relabel.logs_integrations_integrations_node_exporter_journal_scrape.rules
// Send logs to the local Loki instance
forward_to = [loki.write.local.receiver]
// Only collect logs from the last 24 hours
max_age = "24h0m0s"
// Apply relabeling rules to the logs
relabel_rules = discovery.relabel.logs_integrations_integrations_node_exporter_journal_scrape.rules
// Send logs to the local Loki instance
forward_to = [loki_send.node_exporter.receiver]
}
// Define which log files to collect for node_exporter
local.file_match "logs_integrations_integrations_node_exporter_direct_scrape" {
path_targets = [{
// Target localhost for log collection
__address__ = "localhost",
// Collect standard system logs
__path__ = "/var/log/{syslog,messages,*.log}",
// Add instance label with hostname
instance = constants.hostname,
// Add job label for logs
job = "integrations/node_exporter",
}]
path_targets = [{
// Target localhost for log collection
__address__ = "localhost",
// Collect standard system logs
__path__ = "/var/log/{syslog,messages,*.log}",
}]
}
// Define relabeling rules for systemd journal logs
discovery.relabel "logs_integrations_integrations_node_exporter_journal_scrape" {
targets = []
targets = []
rule {
// Extract systemd unit information into a label
source_labels = ["__journal__systemd_unit"]
target_label = "unit"
}
rule {
// Extract systemd unit information into a label
source_labels = ["__journal__systemd_unit"]
target_label = "unit"
}
rule {
// Extract boot ID information into a label
source_labels = ["__journal__boot_id"]
target_label = "boot_id"
}
rule {
// Extract boot ID information into a label
source_labels = ["__journal__boot_id"]
target_label = "boot_id"
}
rule {
// Extract transport information into a label
source_labels = ["__journal__transport"]
target_label = "transport"
}
rule {
// Extract transport information into a label
source_labels = ["__journal__transport"]
target_label = "transport"
}
rule {
// Extract log priority into a level label
source_labels = ["__journal_priority_keyword"]
target_label = "level"
}
rule {
// Set the instance label to the hostname of the machine
target_label = "instance"
replacement = constants.hostname
}
rule {
// Extract log priority into a level label
source_labels = ["__journal_priority_keyword"]
target_label = "level"
}
}
// Collect logs from files for node_exporter
loki.source.file "logs_integrations_integrations_node_exporter_direct_scrape" {
// Use targets defined in local.file_match
targets = local.file_match.logs_integrations_integrations_node_exporter_direct_scrape.targets
// Send logs to the local Loki instance
forward_to = [loki.write.local.receiver]
}
// Define where to send logs for storage
loki.write "local" {
endpoint {
// Send logs to a locally running Loki instance
url = "https://loki.kaareskovgaard.net/loki/api/v1/push"
tls_config {
cert_file = sys.env("LOKI_CLIENT_CERT")
key_file = sys.env("LOKI_CLIENT_KEY")
}
}
// Use targets defined in local.file_match
targets = local.file_match.logs_integrations_integrations_node_exporter_direct_scrape.targets
// Send logs to the local Loki instance
forward_to = [loki_send.node_exporter.receiver]
}

View file

@ -0,0 +1,23 @@
// Reusable sender component for shipping logs to the central Loki instance
// over mutually-authenticated TLS. Instantiate as:
//   loki_send "<name>" { job = "<job-label>" }
// and point log sources at `loki_send.<name>.receiver`.
declare "loki_send" {
// The `job` label to attach to every log entry sent through this component.
argument "job" {
optional = false
}
loki.write "default" {
endpoint {
url = "https://loki.kaareskovgaard.net/loki/api/v1/push"
// Client certificate/key for mTLS; paths are injected via the systemd
// environment (see the vault-loki-sender module that deploys this file).
tls_config {
cert_file = sys.env("LOKI_CLIENT_CERT")
key_file = sys.env("LOKI_CLIENT_KEY")
}
}
// Labels applied to all streams: the caller-supplied job plus this host's name.
external_labels = {
job = argument.job.value,
instance = constants.hostname,
}
}
// The receiver callers forward log entries to.
export "receiver" {
value = loki.write.default.receiver
}
}

View file

@ -5,6 +5,8 @@
modulesPath,
...
}:
# TODO: Enable and configure prometheus-nginx-exporter and prometheus-nginxlog-exporter
# to get some metrics into prometheus.
let
cfg = config.khscodes.services.nginx;
locationOptions = import "${modulesPath}/services/web-servers/nginx/location-options.nix" {
@ -62,6 +64,11 @@ let
);
default = null;
};
robotsTxt = lib.mkOption {
type = lib.types.path;
description = "Path to robots.txt file, by default everything is disallowed";
default = ./robots.txt;
};
extraConfig = lib.mkOption {
type = lib.types.lines;
description = "Extra configuration to inject into the generated nginx config";
@ -132,6 +139,7 @@ in
services.nginx = {
enable = true;
package = lib.mkDefault pkgs.nginxStable;
statusPage = config.khscodes.infrastructure.vault-prometheus-sender.enable;
sslDhparam = lib.mkIf (
cfg.sslConfiguration == "intermediate"
) "${config.security.dhparams.params."nginx".path}"; # DHParams only used when using the ciphers of intermediate
@ -151,6 +159,62 @@ in
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
map $http_referer $httpReferer {
default "$http_referer";
"" "(direct)";
}
map $http_user_agent $httpAgent {
default "$http_user_agent";
"" "Unknown";
}
log_format json_analytics escape=json '{'
'"msec": "$msec", ' # request unixtime in seconds with a milliseconds resolution
'"connection": "$connection", ' # connection serial number
'"connection_requests": "$connection_requests", ' # number of requests made in connection
'"pid": "$pid", ' # process pid
'"request_id": "$request_id", ' # the unique request id
'"request_length": "$request_length", ' # request length (including headers and body)
'"remote_addr": "$remote_addr", ' # client IP
'"remote_user": "$remote_user", ' # client HTTP username
'"remote_port": "$remote_port", ' # client port
'"time_local": "$time_local", '
'"time_iso8601": "$time_iso8601", ' # local time in the ISO 8601 standard format
'"request": "$request", ' # full path no arguments if the request
'"request_uri": "$request_uri", ' # full path and arguments if the request
'"args": "$args", ' # args
'"status": "$status", ' # response status code
'"body_bytes_sent": "$body_bytes_sent", ' # the number of body bytes exclude headers sent to a client
'"bytes_sent": "$bytes_sent", ' # the number of bytes sent to a client
'"http_referer": "$http_referer", ' # HTTP referer
'"http_user_agent": "$http_user_agent", ' # user agent
'"http_x_forwarded_for": "$http_x_forwarded_for", ' # http_x_forwarded_for
'"http_host": "$http_host", ' # the request Host: header
'"server_name": "$server_name", ' # the name of the vhost serving the request
'"request_time": "$request_time", ' # request processing time in seconds with msec resolution
'"upstream": "$upstream_addr", ' # upstream backend server for proxied requests
'"upstream_connect_time": "$upstream_connect_time", ' # upstream handshake time incl. TLS
'"upstream_header_time": "$upstream_header_time", ' # time spent receiving upstream headers
'"upstream_response_time": "$upstream_response_time", ' # time spent receiving upstream body
'"upstream_response_length": "$upstream_response_length", ' # upstream response length
'"upstream_cache_status": "$upstream_cache_status", ' # cache HIT/MISS where applicable
'"ssl_protocol": "$ssl_protocol", ' # TLS protocol
'"ssl_cipher": "$ssl_cipher", ' # TLS cipher
'"scheme": "$scheme", ' # http or https
'"request_method": "$request_method", ' # request method
'"server_protocol": "$server_protocol", ' # request protocol, like HTTP/1.1 or HTTP/2.0
'"pipe": "$pipe", ' # "p" if request was pipelined, "." otherwise
'"gzip_ratio": "$gzip_ratio"'
'}';
access_log /var/log/nginx/access.json.log json_analytics;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
${modernSslAppendedHttpConfig}
'';
virtualHosts = lib.attrsets.mapAttrs (
@ -171,11 +235,15 @@ in
in
{
inherit (value)
locations
globalRedirect
redirectCode
;
inherit extraConfig;
locations = {
"=/robots.txt" = {
alias = value.robotsTxt;
};
} // value.locations;
forceSSL = true;
enableACME = value.acme == null && !dns01Enabled;
useACMEHost =
@ -194,6 +262,10 @@ in
];
networking.firewall.allowedUDPPorts = [ 443 ];
users.users.nginx.extraGroups = lib.lists.optional dns01Enabled "acme";
khscodes.infrastructure.vault-loki-sender = {
extraFiles = [ ./nginx.alloy ];
extraGroups = [ "nginx" ];
};
security.acme.certs = lib.mkIf dns01Enabled (
lib.attrsets.foldlAttrs (
acc: name: value:

View file

@ -0,0 +1,88 @@
loki_send "nginx_access" {
job = "nginx/access"
}
loki_send "nginx_error" {
job = "nginx/error"
}
loki_send "nginx_stream" {
job = "nginx/stream"
}
loki_send "nginx_stream_error" {
job = "nginx/stream-error"
}
loki.source.file "nginx_access_logs" {
targets = [{
__path__ = "/var/log/nginx/access.json.log",
}]
forward_to = [loki.process.nginx_access_logs.receiver]
}
loki.process "nginx_access_logs" {
forward_to = [loki_send.nginx_access.receiver]
stage.json {
expressions = {
timestamp = "time_iso8601",
}
}
stage.timestamp {
source = "timestamp"
format = "RFC3339"
}
}
loki.source.file "nginx_error_logs" {
targets = [{
__path__ = "/var/log/nginx/error.log",
}]
forward_to = [loki.process.error_logs.receiver]
}
loki.process "error_logs" {
forward_to = [loki_send.nginx_error.receiver]
stage.regex {
expression = "^(?P<timestamp>\\S* \\S*) .+client: (?P<remote_ip>\\S+), (?:.*)"
}
stage.timestamp {
source = "timestamp"
format = "02/Jan/2006:15:04:05 +0000"
}
}
loki.source.file "nginx_stream_logs" {
targets = [{
__path__ = "/var/log/nginx/stream.log",
}]
forward_to = [loki.process.stream_logs.receiver]
}
loki.process "stream_logs" {
forward_to = [loki_send.nginx_stream.receiver]
stage.regex {
expression = "^(?P<remote_ip>[^ ]+) \\[(?P<timestamp>.*)\\] (?:.*)"
}
stage.timestamp {
source = "timestamp"
format = "02/Jan/2006:15:04:05 +0000"
}
}
loki.source.file "nginx_stream_error_logs" {
targets = [{
__path__ = "/var/log/nginx/stream-error.log",
}]
forward_to = [loki.process.stream_error_logs.receiver]
}
loki.process "stream_error_logs" {
forward_to = [loki_send.nginx_stream_error.receiver]
stage.regex {
expression = "^(?P<timestamp>\\S* \\S*) .+client: (?P<remote_ip>\\S+), (?:.*)"
}
stage.timestamp {
source = "timestamp"
format = "02/Jan/2006:15:04:05 +0000"
}
}

View file

@ -0,0 +1,2 @@
# Default robots.txt deployed by the nginx module: disallow all crawlers
# from the entire site. Hosts can override via the `robotsTxt` option.
User-agent: *
Disallow: /

View file

@ -1,10 +1,13 @@
{ inputs, pkgs, ... }:
pkgs.writeShellApplication {
name = "update-instance";
runtimeInputs = [ pkgs.nixos-rebuild ];
runtimeInputs = [
pkgs.nixos-rebuild
];
text = ''
instance="''${1:-}"
connect_host="''${2:-$1}"
nixos-rebuild switch --flake "${inputs.self}#$instance" --target-host "$connect_host" --build-host "$connect_host" --use-remote-sudo
# --fast makes building on MacOS possible.
nixos-rebuild switch --fast --flake "${inputs.self}#$instance" --target-host "$connect_host" --build-host "$connect_host" --show-trace --use-remote-sudo
'';
}