{ config, ... }:
let
  # Single source of truth for this machine's name — used both as a
  # Prometheus scrape label and as the OTel `host.name` resource attribute.
  hostName = "palatine-hill";

  # Exporter port options come from the NixOS Prometheus exporter modules.
  exporterPorts = config.services.prometheus.exporters;

  # Build one Prometheus scrape job against a local exporter.
  # `name` is the job_name; `port` is the exporter's integer port option.
  mkLocalScrapeJob = name: port: {
    job_name = name;
    scrape_interval = "60s";
    static_configs = [
      {
        targets = [ "localhost:${toString port}" ];
        labels = { host = hostName; };
      }
    ];
  };
in
{
  # node_exporter (port 9002) and Prometheus (port 9001) are already configured
  # in hydra.nix — we just scrape the existing exporter here.
  services.opentelemetry-collector = {
    enable = true;
    settings = {
      receivers = {
        # Host-level system metrics gathered directly by the collector.
        hostmetrics = {
          collection_interval = "60s";
          scrapers = {
            cpu = { };
            memory = { };
            disk = { };
            filesystem = { };
            network = { };
            load = { };
            processes = { };
          };
        };

        # Scrape the existing local exporters (systemd unit state, Postgres,
        # ZFS) instead of duplicating their collection logic here.
        prometheus = {
          config = {
            scrape_configs = [
              (mkLocalScrapeJob "node-exporter" exporterPorts.node.port)
              (mkLocalScrapeJob "postgres-exporter" exporterPorts.postgres.port)
              (mkLocalScrapeJob "zfs-exporter" exporterPorts.zfs.port)
            ];
          };
        };
      };

      processors = {
        batch = { };
        # Attach hostname using the standard resource processor.
        resource = {
          attributes = [
            {
              action = "upsert";
              key = "host.name";
              value = hostName;
            }
          ];
        };
      };

      exporters = {
        "otlp/honeycomb" = {
          endpoint = "api.honeycomb.io:443";
          headers = {
            # Expanded at runtime from the EnvironmentFile below. The "\$"
            # escapes Nix interpolation so the literal text
            # `${env:HONEYCOMB_API_KEY}` reaches the collector config;
            # `${env:VAR}` is the collector's supported env-var syntax
            # (the bare `${VAR}` form is deprecated).
            "x-honeycomb-team" = "\${env:HONEYCOMB_API_KEY}";
          };
        };
      };

      service = {
        pipelines = {
          metrics = {
            receivers = [ "hostmetrics" "prometheus" ];
            # `resource` runs before `batch` so host.name is stamped on
            # every batch that reaches the exporter.
            processors = [ "resource" "batch" ];
            exporters = [ "otlp/honeycomb" ];
          };
        };
      };
    };
  };

  # Inject the Honeycomb API key at runtime — never stored in the Nix store.
  systemd.services.opentelemetry-collector.serviceConfig.EnvironmentFile =
    config.sops.secrets."honeycomb/api-key".path;

  sops.secrets."honeycomb/api-key".owner = "root";
}