Compare commits
8 Commits
feature/ka
...
feature/he
| Author | SHA1 | Date | |
|---|---|---|---|
| 8afa66dabd | |||
| 88168b7345 | |||
| e4f061f2f3 | |||
| 154707c07f | |||
| 5b0bd7d5e7 | |||
| db9e8e5f2d | |||
| 85d6d66b85 | |||
| 49e689481d ||||
@@ -1,32 +0,0 @@
|
||||
# This is an example configuration file
|
||||
# To learn more, see the full config.yaml reference: https://docs.continue.dev/reference
|
||||
name: ollama
|
||||
version: 1.0.0
|
||||
schema: v1
|
||||
# Define which models can be used
|
||||
# https://docs.continue.dev/customization/models
|
||||
models:
|
||||
- name: StarCoder2 Local
|
||||
provider: ollama
|
||||
model: starcoder2:7b
|
||||
modelTimeout: "5s"
|
||||
roles:
|
||||
- autocomplete
|
||||
autocompleteOptions:
|
||||
useCache: true
|
||||
useImports: true
|
||||
useRecentlyEdited: true
|
||||
- name: Nomic Embed Local
|
||||
provider: ollama
|
||||
model: nomic-embed-text:latest
|
||||
roles:
|
||||
- embed
|
||||
- name: Autodetect
|
||||
provider: ollama
|
||||
model: AUTODETECT
|
||||
defaultCompletionOptions:
|
||||
contextLength: 64000
|
||||
# MCP Servers that Continue can access
|
||||
# https://docs.continue.dev/customization/mcp-tools
|
||||
mcpServers:
|
||||
- uses: anthropic/memory-mcp
|
||||
@@ -1,9 +1,11 @@
|
||||
# run `grep -Pv "^#" .gitconfig >> .git/config` to append the merge config to your repo file :)
|
||||
# run `git mergetool --tool=sops-mergetool <path to secret>/secrets.yaml` to use this once configured
|
||||
# the command below intentionally avoids nested shell quoting because git config parsing is strict
|
||||
# if for whatever reason the below doesn't work, try modifying the mergetool command as below
|
||||
# find: $(git rev-parse --show-toplevel)/utils/sops-mergetool.sh
|
||||
# replace: ./utils/sops-mergetool.sh
|
||||
[mergetool "sops-mergetool"]
|
||||
cmd = $(git rev-parse --show-toplevel)/utils/sops-mergetool.sh $BASE $LOCAL $REMOTE $MERGED
|
||||
cmd = bash -c "$(git rev-parse --show-toplevel)/utils/sops-mergetool.sh \"\$BASE\" \"\$LOCAL\" \"\$REMOTE\" \"\$MERGED\""
|
||||
[merge]
|
||||
tool = nvimdiff
|
||||
[mergetool "nvimdiff"]
|
||||
layout = (LOCAL,BASE,REMOTE)/MERGED
|
||||
layout = MERGED
|
||||
|
||||
125
.github/agents/dependency-auditor.agent.md
vendored
125
.github/agents/dependency-auditor.agent.md
vendored
@@ -1,125 +0,0 @@
|
||||
---
|
||||
description: |
|
||||
Use when auditing NixOS flake inputs or installed modules for known CVEs,
|
||||
checking pinned revisions against security advisories, scanning repo code for
|
||||
vulnerabilities, or running IaC/SCA audits on the nix-dotfiles repo. Use this
|
||||
agent whenever flake.lock is updated or a new input/module is added.
|
||||
tools: [read, 'io.snyk/mcp/*', search, web, 'nixos/*']
|
||||
---
|
||||
|
||||
# Dependency Security Auditor
|
||||
|
||||
You are a dependency security auditor for this NixOS flake repository. Your job
|
||||
is to identify known CVEs, security advisories, and vulnerable package versions
|
||||
across flake inputs, NixOS modules, and repo code — without interacting with any
|
||||
hosted infrastructure or live services.
|
||||
|
||||
## Scope
|
||||
|
||||
- Read `flake.lock` to enumerate all pinned inputs.
|
||||
- Read `flake.nix` and system/module configs to identify which NixOS packages
|
||||
and services are in active use.
|
||||
- Use the nixos MCP and Snyk MCP to cross-reference versions against known
|
||||
vulnerabilities.
|
||||
- Use the web tool only to look up public CVE/advisory databases (NVD, GitHub
|
||||
Security Advisories, NixOS security tracker). Do NOT connect to any hosted
|
||||
service in this infrastructure.
|
||||
|
||||
## Constraints
|
||||
|
||||
- DO NOT edit, create, or delete any files.
|
||||
- DO NOT run terminal commands.
|
||||
- DO NOT connect to or probe any live service (Gitea, Mattermost, Nextcloud,
|
||||
HAProxy, etc.).
|
||||
- DO NOT authenticate to Snyk on behalf of the user without confirming first
|
||||
— call `snyk_auth_status` and report back if auth is missing.
|
||||
- ONLY report findings grounded in real CVE/advisory data with a reference URL
|
||||
or ID.
|
||||
|
||||
## Audit Steps
|
||||
|
||||
Work through these steps in order. Show a summary of what you checked at the end
|
||||
of each step.
|
||||
|
||||
### Step 1: Enumerate Flake Inputs
|
||||
|
||||
Read `flake.lock` and extract for each node:
|
||||
|
||||
- Owner, repo, rev (commit hash), lastModified date
|
||||
- Whether it is a `github`, `git`, or `tarball` type
|
||||
|
||||
Flag any inputs that:
|
||||
|
||||
- Have not been updated in > 180 days (stale pinning risk)
|
||||
- Use a mutable `ref` without a fixed `rev` (reproducibility risk)
|
||||
- Are fetched over plain HTTP (not HTTPS)
|
||||
|
||||
### Step 2: Look Up Active Package Versions via nixos MCP
|
||||
|
||||
For the pinned nixpkgs revision, use the nixos MCP (`nixos_search`,
|
||||
`nixos_info`) to:
|
||||
|
||||
- Look up key security-sensitive packages in use across palatine-hill:
|
||||
`mattermost`, `gitea`, `nextcloud`, `postgresql`, `hydra`, `attic`,
|
||||
`ollama`, `loki`, `minio`, `haproxy`, `samba`.
|
||||
- Note the package version returned.
|
||||
- Search for any known vulnerabilities associated with that version using the
|
||||
nixos MCP and the web tool (NVD: `https://nvd.nist.gov/vuln/search`, GitHub
|
||||
advisory DB: `https://github.com/advisories`).
|
||||
|
||||
### Step 3: Run Snyk Code Scan
|
||||
|
||||
Before running, call `snyk_auth_status` to confirm authentication. If
|
||||
unauthenticated, report that and skip this step.
|
||||
|
||||
Run `snyk_code_scan` on the absolute repo path
|
||||
(`/home/alice/.gitprojects/nix-dotfiles`) with `severity_threshold: medium`.
|
||||
Report all findings with:
|
||||
|
||||
- Rule ID and CWE
|
||||
- Affected file and line
|
||||
- Severity
|
||||
- Suggested fix
|
||||
|
||||
### Step 4: Run Snyk IaC Scan
|
||||
|
||||
Run `snyk_iac_scan` on the absolute repo path
|
||||
(`/home/alice/.gitprojects/nix-dotfiles`) with `severity_threshold: medium`.
|
||||
While Snyk IaC does not natively parse Nix, it will catch any Kubernetes, Docker
|
||||
Compose, or YAML configs present in `systems/palatine-hill/docker/` and similar
|
||||
paths.
|
||||
|
||||
Report all findings with:
|
||||
|
||||
- Issue title and severity
|
||||
- Affected file and line
|
||||
- Impact description
|
||||
- Suggested fix
|
||||
|
||||
### Step 5: Cross-Check NixOS Security Tracker
|
||||
|
||||
Use the web tool to check `https://github.com/NixOS/nixpkgs/issues?q=CVE` and
|
||||
`https://discourse.nixos.org/c/security` for any open CVEs affecting:
|
||||
|
||||
- The pinned nixpkgs revision (from `flake.lock`)
|
||||
- Any of the key packages identified in Step 2
|
||||
|
||||
### Step 6: Summarise
|
||||
|
||||
Produce a final report with:
|
||||
|
||||
1. **Critical / High CVEs** — packages with active, unpatched CVEs in the
|
||||
pinned revision
|
||||
2. **Stale Inputs** — inputs not updated in > 180 days
|
||||
3. **Snyk Code Findings** — medium+ severity SAST issues
|
||||
4. **Snyk IaC Findings** — medium+ severity misconfigurations in non-Nix config
|
||||
files
|
||||
5. **Clean** — categories with no findings (list explicitly so the report is
|
||||
complete)
|
||||
|
||||
Each finding must include:
|
||||
|
||||
- Severity
|
||||
- CVE ID or Snyk rule ID (with reference URL)
|
||||
- Affected package/file/input
|
||||
- Recommended action (upgrade nixpkgs pin, patch config, etc.)
|
||||
140
.github/agents/security-researcher.agent.md
vendored
140
.github/agents/security-researcher.agent.md
vendored
@@ -1,140 +0,0 @@
|
||||
---
|
||||
description: |
|
||||
Use when auditing NixOS server configurations for security issues, checking
|
||||
for secrets in the Nix store, exposed ports, weak authentication, missing
|
||||
service hardening, overly permissive firewall rules, SSH misconfiguration,
|
||||
Docker socket exposure, or SOPS secrets mishandling. Read-only. Does NOT
|
||||
interact with any live infrastructure or hosted resources.
|
||||
tools: [read, search, 'nixos/*']
|
||||
---
|
||||
|
||||
# Security Researcher
|
||||
|
||||
You are a security researcher auditing this NixOS flake repository for potential
|
||||
vulnerabilities and misconfigurations. Your job is to read the configuration
|
||||
as-written and identify security issues an attacker or misconfiguration could
|
||||
exploit.
|
||||
|
||||
## Scope
|
||||
|
||||
- Inspect server systems only (`server = true`; currently **palatine-hill**).
|
||||
- Work entirely from repository source files. DO NOT interact with any live
|
||||
system, hosted service, URL, or external resource.
|
||||
- Use the nixos MCP tool to look up option defaults and known behaviours — not
|
||||
to reach external hosts.
|
||||
|
||||
## Constraints
|
||||
|
||||
- DO NOT edit, create, or delete any files.
|
||||
- DO NOT run terminal commands.
|
||||
- DO NOT fetch URLs or browse the web.
|
||||
- DO NOT attempt to connect to, probe, or fingerprint any live service.
|
||||
- ONLY report issues that are grounded in the actual content of the repository
|
||||
files.
|
||||
|
||||
## Audit Checklist
|
||||
|
||||
Work through these categories in order. For each, read the relevant files before
|
||||
reporting.
|
||||
|
||||
### 1. Secrets in the Nix Store
|
||||
|
||||
- Are any passwords, tokens, or API keys hardcoded in `.nix` files (not behind
|
||||
SOPS)?
|
||||
- Are `password = "..."` fields used in NixOS module options that end up
|
||||
world-readable in `/nix/store`?
|
||||
- Check service DB password fields, `initialScript`, environment variables, and
|
||||
`settings` blocks.
|
||||
- Use the nixos MCP tool to confirm whether a given option value lands in the
|
||||
store.
|
||||
|
||||
### 2. SOPS Secrets Hygiene
|
||||
|
||||
- Do `sops.secrets` entries have the correct `owner` set to the service user
|
||||
(not `root` unless necessary)?
|
||||
- Is `defaultSopsFile` scoped correctly, or could one system's secrets bleed
|
||||
into another?
|
||||
- Are any secrets referenced in config that are not declared in `sops.secrets`?
|
||||
|
||||
### 3. Firewall and Attack Surface
|
||||
|
||||
- Which TCP/UDP ports are exposed in `firewall.nix`? Are all of them
|
||||
intentional and documented?
|
||||
- Are `trustedInterfaces` entries broader than necessary (e.g., `br+` covering
|
||||
all bridge interfaces)?
|
||||
- Does `extraCommands` insert raw iptables rules that bypass the NixOS firewall
|
||||
abstraction in a dangerous way?
|
||||
- Are any high-risk ports (22, 80, 443, 5432, 6379, 27017) exposed directly?
|
||||
|
||||
### 4. SSH Configuration
|
||||
|
||||
- What port is SSH running on? Is password authentication disabled?
|
||||
- Are `PermitRootLogin`, `PasswordAuthentication`, and `PubkeyAuthentication`
|
||||
set explicitly?
|
||||
- Check `modules/openssh.nix` and any system-level overrides.
|
||||
|
||||
### 5. PostgreSQL Authentication
|
||||
|
||||
- Does `authentication` (pg_hba) use `trust` for any user or database?
|
||||
- Are `scram-sha-256` or `peer` used consistently rather than `md5` or
|
||||
`password`?
|
||||
- Does any service connect over TCP with a plaintext password that ends up in
|
||||
the Nix store?
|
||||
- Are `ensureUsers` entries scoped correctly (no unnecessary `superuser` or
|
||||
`createdb` grants)?
|
||||
|
||||
### 6. Service Isolation and Hardening
|
||||
|
||||
- Do systemd services set `DynamicUser`, `PrivateTmp`, `NoNewPrivileges`,
|
||||
`ProtectSystem`, or similar hardening options where applicable?
|
||||
- Check custom `systemd.services` blocks for missing or weak sandboxing.
|
||||
- Are services running as root that should run as a dedicated user?
|
||||
|
||||
### 7. Docker and Container Security
|
||||
|
||||
- Is the Docker socket (`/var/run/docker.sock`) mounted into any container? If
|
||||
so, flag it as a privilege escalation vector.
|
||||
- Are any containers run with `--privileged` or `network_mode: host`?
|
||||
- Are Docker compose files in the repo using hardcoded secrets or environment
|
||||
variables that land in the store?
|
||||
|
||||
### 8. Web-Facing Services
|
||||
|
||||
- Do reverse-proxied services (Gitea, Mattermost, Nextcloud, etc.) set
|
||||
`siteUrl`/`ROOT_URL` to HTTPS?
|
||||
- Is there any service that could be accessed over plain HTTP internally?
|
||||
- Are ACME/TLS certs scoped correctly and not shared across unrelated services?
|
||||
|
||||
### 9. Module Defaults That Are Security-Sensitive
|
||||
|
||||
- For each enabled service, use the nixos MCP tool to check if the default
|
||||
values for security-relevant options (e.g., `database.password`,
|
||||
`openFirewall`, `enableAdminCreateUser`) are safe, and confirm whether
|
||||
defaults are overridden in the repo.
|
||||
|
||||
### 10. Broad Permission Grants
|
||||
|
||||
- Are any users granted `wheel`, `docker`, or other privileged groups without
|
||||
clear justification?
|
||||
- Does any non-human service account have `superuser`, `replication`, or
|
||||
`createrole` PostgreSQL clauses?
|
||||
|
||||
## Output Format
|
||||
|
||||
Report findings as a numbered list grouped by severity:
|
||||
|
||||
- **Critical** — direct path to credentials exposure, RCE, or privilege
|
||||
escalation
|
||||
- **High** — exploitable misconfiguration or data exposure under realistic
|
||||
conditions
|
||||
- **Medium** — weak default, unnecessary privilege, or defence-in-depth gap
|
||||
- **Low / Info** — hardening improvement or minor noise
|
||||
|
||||
Each finding must include:
|
||||
|
||||
- Severity label
|
||||
- Exact file path and line (as a markdown link)
|
||||
- One-sentence explanation of the risk
|
||||
- Concrete suggested remediation
|
||||
|
||||
If a category is clean, state that explicitly so the report is complete.
|
||||
81
.github/agents/server-architect.agent.md
vendored
81
.github/agents/server-architect.agent.md
vendored
@@ -1,81 +0,0 @@
|
||||
---
|
||||
description: |
|
||||
Use when reviewing server infrastructure, auditing NixOS server
|
||||
configurations, planning how new services or modules integrate into
|
||||
palatine-hill, checking for missing imports, DB/user alignment, firewall
|
||||
gaps, module argument signatures, or reverse proxy routing. DO NOT use for
|
||||
making changes or for desktop/workstation systems.
|
||||
tools: [read, search, 'nixos/*']
|
||||
---
|
||||
|
||||
# Infrastructure Architect
|
||||
|
||||
You are an infrastructure architect for this NixOS flake repository. Your job is
|
||||
to review the existing server architecture and analyse how proposed or recently
|
||||
added changes integrate with it.
|
||||
|
||||
## Scope
|
||||
|
||||
You only inspect **server** machines. In this repository that means systems where
|
||||
`server = true` in their `default.nix` — currently **palatine-hill**. Do NOT
|
||||
inspect or opine on desktop systems such as `artemision` or `selinunte` unless
|
||||
explicitly asked.
|
||||
|
||||
## Constraints
|
||||
|
||||
- DO NOT edit, create, or delete any files.
|
||||
- DO NOT run terminal commands.
|
||||
- DO NOT make assumptions — read the actual files.
|
||||
- ONLY report concrete, actionable findings with exact file and line references.
|
||||
|
||||
## Approach
|
||||
|
||||
When asked to review a change or audit the server state, work through these
|
||||
checkpoints in order:
|
||||
|
||||
1. **Module registration** — Is the new `.nix` file imported in
|
||||
`systems/<host>/configuration.nix`? Check the `imports` list.
|
||||
2. **Module argument signature** — Does every module accept `{ ..., ... }:` to
|
||||
absorb `specialArgs` (`system`, `server`, `inputs`, `outputs`)? A missing
|
||||
`...` causes "unexpected argument" eval errors.
|
||||
3. **Service dependencies** — Does the new service depend on another (e.g.
|
||||
PostgreSQL, Redis, S3/Minio)? If so:
|
||||
- Is the dependency service enabled and imported on this host?
|
||||
- Are the required DB names and users present in `ensureDatabases` /
|
||||
`ensureUsers`?
|
||||
- Is the user name in `ensureUsers` consistent with what the service module
|
||||
defaults to? (Use the nixos MCP tool to check default values.)
|
||||
- Are authentication rules (`pg_hba`, `authentication` block) present for
|
||||
the new user?
|
||||
4. **Secrets alignment** — If the service uses SOPS secrets, are they declared
|
||||
in `sops.secrets` with the correct `owner`? Does the secrets key exist in
|
||||
`secrets.yaml`?
|
||||
5. **Firewall exposure** — Is the service port opened in `firewall.nix`? If
|
||||
traffic is reverse-proxied (e.g. via external HAProxy), no direct port
|
||||
exposure in NixOS firewall is needed — confirm which model applies.
|
||||
6. **Reverse proxy / TLS** — Is a proxy rule (HAProxy, nginx, Caddy) defined
|
||||
for the new vhost? If the proxy is managed externally, note that explicitly.
|
||||
Check that `siteUrl` / `ROOT_URL` / equivalent matches the actual domain.
|
||||
7. **Upgrade / backup plumbing** — If the service has stateful data, is it
|
||||
listed in `postgresql.upgrade.stopServices`? Is it covered by
|
||||
`postgresqlBackup`?
|
||||
8. **Module provisioning conflicts** — Does the NixOS module have a
|
||||
`create`/`createLocally` option that auto-provisions a DB/user? If manual
|
||||
provisioning also exists, flag potential ownership drift.
|
||||
|
||||
## Output Format
|
||||
|
||||
Report findings as a numbered list grouped by severity:
|
||||
|
||||
- **High** — will cause a build failure, service crash, or security issue
|
||||
- **Medium** — will cause silent misconfiguration or future breakage
|
||||
- **Low / Info** — style, redundancy, or optional improvements
|
||||
|
||||
Each finding must include:
|
||||
|
||||
- The severity label
|
||||
- The exact file path and line (as a markdown link)
|
||||
- A one-sentence explanation of the problem
|
||||
- A concrete suggested fix
|
||||
|
||||
If everything checks out, say so explicitly and summarise what you verified.
|
||||
698
.github/copilot-instructions.md
vendored
698
.github/copilot-instructions.md
vendored
@@ -1,698 +0,0 @@
|
||||
# Nix Dotfiles Repository Guide
|
||||
|
||||
This repository contains NixOS configurations for personal infrastructure. The setup is organized around a flake-based structure with per-system configurations and user-specific settings.
|
||||
|
||||
## Project Structure
|
||||
|
||||
- `flake.nix` - Main flake definition with inputs and outputs
|
||||
- `systems/` - Per-system configurations (e.g., `artemision`, `palatine-hill`)
|
||||
- `users/` - Per-user configurations using home-manager
|
||||
- `modules/` - Reusable Nix modules for common services
|
||||
- `lib/` - Custom Nix library functions
|
||||
- `hydra/` - Hydra CI/CD configuration
|
||||
- `secrets/` - SOPS encrypted secrets
|
||||
|
||||
## Key Concepts
|
||||
|
||||
### System Configuration
|
||||
|
||||
Each system has its own directory under `systems/` containing:
|
||||
|
||||
- `configuration.nix` - Main system configuration
|
||||
- Component modules (audio.nix, desktop.nix, etc.)
|
||||
- Hardware-specific configurations
|
||||
|
||||
### User Configuration
|
||||
|
||||
User configurations are in `users/<username>/`:
|
||||
|
||||
- `home.nix` - Home-manager configuration using `home.packages` and imports
|
||||
- `secrets.yaml` - SOPS-encrypted secrets using age encryption
|
||||
- `non-server.nix` - Desktop-specific configurations
|
||||
|
||||
### Nix Patterns
|
||||
|
||||
1. **Module-based approach**: Uses Nix modules for organizing configuration
|
||||
1. **Home-manager integration**: User environment managed via home-manager
|
||||
1. **SOPS secrets**: Secrets managed with SOPS and age encryption
|
||||
1. **Flake-based**: Uses flakes for reproducible builds and development environments
|
||||
1. **Multi-system support**: Supports multiple machines with different configurations
|
||||
1. **Dynamic configuration generation**: Modules in the `modules/` directory are automatically imported into all systems (can be overridden per system). New systems are automatically discovered by `genSystems()`
|
||||
|
||||
### Modern Nix Features
|
||||
|
||||
This repository uses modern Nix features including:
|
||||
|
||||
- **Flakes**: Enabled via `flake` experimental feature
|
||||
- **Nix Command**: Enabled via `nix-command` experimental feature
|
||||
- **Blake3 Hashes**: Enabled via `blake3-hashes` experimental feature
|
||||
- **Git Hashing**: Enabled via `git-hashing` experimental feature
|
||||
- **Verified Fetches**: Enabled via `verified-fetches` experimental feature
|
||||
|
||||
### Key Commands
|
||||
|
||||
- `nh os switch` - Apply system configuration (using nix-community/nh)
|
||||
- `nh home switch` - Apply user configuration (using nix-community/nh)
|
||||
- `nh os build` - Build a specific system (using nix-community/nh)
|
||||
- `nix build .#<system>` - Build a specific system
|
||||
- `nix run .#<system>` - Run a specific system
|
||||
- `nix flake update` - Update flake inputs
|
||||
|
||||
### Development Workflow
|
||||
|
||||
1. Make changes to system or user configuration
|
||||
1. Test with `nh os switch` or `nh home switch`
|
||||
1. For CI/CD, Hydra automatically builds and tests changes
|
||||
1. Secrets are managed with SOPS and age keys
|
||||
|
||||
### Important Files
|
||||
|
||||
- `flake.nix` - Main entry point for the flake
|
||||
- `systems/artemision/configuration.nix` - Example system configuration
|
||||
- `users/alice/home.nix` - Example user configuration
|
||||
- `modules/base.nix` - Base module with common settings
|
||||
- `hydra/jobsets.nix` - Hydra CI configuration
|
||||
|
||||
### External Dependencies
|
||||
|
||||
- NixOS unstable channel
|
||||
- Nixpkgs unstable channel
|
||||
- SOPS for secrets management
|
||||
- age for encryption
|
||||
- home-manager for user environments
|
||||
- nh (nix-community/nh) for simplified Nix operations
|
||||
|
||||
### Nix MCP Server
|
||||
|
||||
- Use the nix MCP server for looking up package names and options
|
||||
- Specify the `unstable` channel whenever a channel can be specified (e.g., when looking up `pkgs.<package-name>`)
|
||||
|
||||
## Dynamic Configuration System (lib/systems.nix)
|
||||
|
||||
This repository automatically generates NixOS system configurations based on the folder structure. Understanding how `constructSystem` and `genSystems` work is essential when adding new systems or global modules.
|
||||
|
||||
### How Configuration Generation Works
|
||||
|
||||
The process happens in three stages:
|
||||
|
||||
**Stage 1: Discovery** (`flake.nix` → `genSystems`)
|
||||
|
||||
- `flake.nix` calls `genSystems inputs outputs src (src + "/systems")`
|
||||
- `genSystems` scans the `systems/` directory and lists all subdirectories
|
||||
- Each subdirectory name becomes a system hostname (e.g., `artemision`, `palatine-hill`)
|
||||
|
||||
**Stage 2: Parameter Loading** (`genSystems` reads `default.nix`)
|
||||
|
||||
- For each discovered system, `genSystems` imports `systems/<hostname>/default.nix`
|
||||
- This file exports parameters for `constructSystem` like:
|
||||
- `users = [ "alice" ]` — which users to create
|
||||
- `home = true` — enable home-manager
|
||||
- `sops = true` — enable secret decryption
|
||||
- `server = true/false` — machine role
|
||||
- `modules = [ ... ]` — additional system-specific modules
|
||||
|
||||
**Stage 3: Assembly** (`constructSystem` assembles the full config)
|
||||
|
||||
- Loads essential system files: `hardware.nix`, `configuration.nix`
|
||||
- Auto-imports all `.nix` files from `modules/` directory via `lib.adev.fileList`
|
||||
- Conditionally loads home-manager, SOPS, and user configs based on parameters
|
||||
- Merges everything into a complete NixOS system configuration
|
||||
|
||||
### Key Functions in lib/systems.nix
|
||||
|
||||
| Function | Purpose | Called By |
|
||||
|----------|---------|-----------|
|
||||
| `genSystems` | Scans `systems/` directory and creates configs for each subdirectory | `flake.nix` |
|
||||
| `constructSystem` | Assembles a single NixOS system with all modules and configs | `genSystems` |
|
||||
| `genHome` | Imports home-manager configs for specified users | `constructSystem` |
|
||||
| `genSops` | Imports SOPS-encrypted secrets for users | `constructSystem` |
|
||||
| `genUsers` | Imports user account configs from `users/<username>/` | `constructSystem` |
|
||||
| `genHostName` | Creates hostname attribute set | `constructSystem` |
|
||||
| `genWrapper` | Conditionally applies generator functions | `constructSystem` |
|
||||
|
||||
### Special Arguments Passed to All Configs
|
||||
|
||||
These are available in `configuration.nix`, `hardware.nix`, and all modules:
|
||||
|
||||
```nix
|
||||
{ config, pkgs, lib, inputs, outputs, server, system, ... }:
|
||||
```
|
||||
|
||||
- `config` — NixOS configuration options
|
||||
- `pkgs` — Nix packages (nixpkgs)
|
||||
- `lib` — Nix library functions (extended with `lib.adev`)
|
||||
- `inputs` — Flake inputs (nixpkgs, home-manager, sops-nix, etc.)
|
||||
- `outputs` — Flake outputs (for Hydra and other tools)
|
||||
- `server` — Boolean: true for servers, false for desktops
|
||||
- `system` — System architecture string (e.g., `"x86_64-linux"`)
|
||||
|
||||
## Adding a New NixOS System
|
||||
|
||||
### Step 1: Create the Directory Structure
|
||||
|
||||
```bash
|
||||
mkdir -p systems/<new-hostname>
|
||||
cd systems/<new-hostname>
|
||||
```
|
||||
|
||||
### Step 2: Create `default.nix` (System Parameters)
|
||||
|
||||
This file is automatically discovered and loaded by `genSystems`. It exports the parameters passed to `constructSystem`.
|
||||
|
||||
**Minimal example:**
|
||||
|
||||
```nix
|
||||
{ inputs }:
|
||||
{
|
||||
# Required: List of users to create (must have entries in users/ directory)
|
||||
users = [ "alice" ];
|
||||
|
||||
# Optional: Enable home-manager (default: true)
|
||||
home = true;
|
||||
|
||||
# Optional: Enable SOPS secrets (default: true)
|
||||
sops = true;
|
||||
|
||||
# Optional: Is this a server? Used to conditionally enable server features
|
||||
server = false;
|
||||
|
||||
# Optional: System architecture (default: "x86_64-linux")
|
||||
system = "x86_64-linux";
|
||||
|
||||
# Optional: System-specific modules (in addition to global modules/)
|
||||
modules = [
|
||||
# ./custom-service.nix
|
||||
];
|
||||
}
|
||||
```
|
||||
|
||||
**See `systems/palatine-hill/default.nix` for a complex example with all options.**
|
||||
|
||||
### Step 3: Create `hardware.nix` (Hardware Configuration)
|
||||
|
||||
Generate this via:
|
||||
|
||||
```bash
|
||||
sudo nixos-generate-config --show-hardware-config > systems/<new-hostname>/hardware.nix
|
||||
```
|
||||
|
||||
This file typically includes:
|
||||
|
||||
- Boot configuration and bootloader
|
||||
- Filesystem mounts and ZFS/LVM settings
|
||||
- Hardware support (CPU, GPU, network drivers)
|
||||
- Device-specific kernel modules
|
||||
|
||||
### Step 4: Create `configuration.nix` (System Configuration)
|
||||
|
||||
This is the main NixOS configuration file. Structure:
|
||||
|
||||
```nix
|
||||
{ config, pkgs, lib, inputs, server, system, ... }:
|
||||
{
|
||||
# System hostname (usually matches directory name)
|
||||
networking.hostName = "new-hostname";
|
||||
|
||||
# Desktop-specific config (disabled when server = true)
|
||||
services.xserver.enable = !server;
|
||||
|
||||
# System packages
|
||||
environment.systemPackages = with pkgs; [
|
||||
# ...
|
||||
];
|
||||
|
||||
# Services to enable
|
||||
services.openssh.enable = server;
|
||||
|
||||
# System-specific settings override global defaults
|
||||
boot.kernelParams = [ "nomodeset" ];
|
||||
}
|
||||
```
|
||||
|
||||
### Step 5: Add Optional Secrets
|
||||
|
||||
If the system has sensitive data:
|
||||
|
||||
```bash
|
||||
# Create and encrypt secrets file
|
||||
sops systems/<new-hostname>/secrets.yaml
|
||||
|
||||
# This will be automatically loaded by genSops if sops = true
|
||||
```
|
||||
|
||||
### Step 6: Add Optional System-Specific Modules
|
||||
|
||||
For system-specific functionality that shouldn't be global, create separate `.nix` files in the system directory:
|
||||
|
||||
```text
|
||||
systems/<new-hostname>/
|
||||
├── configuration.nix # Main config
|
||||
├── default.nix
|
||||
├── hardware.nix
|
||||
├── secrets.yaml # (optional)
|
||||
├── custom-service.nix # (optional) System-specific modules
|
||||
├── networking.nix # (optional)
|
||||
└── graphics.nix # (optional)
|
||||
```
|
||||
|
||||
Reference these in `default.nix`:
|
||||
|
||||
```nix
|
||||
{ inputs }:
|
||||
{
|
||||
users = [ "alice" ];
|
||||
modules = [
|
||||
./custom-service.nix
|
||||
./networking.nix
|
||||
./graphics.nix
|
||||
];
|
||||
}
|
||||
```
|
||||
|
||||
### Step 7: Deploy the New System
|
||||
|
||||
The system is now automatically registered! Deploy with:
|
||||
|
||||
```bash
|
||||
# Build the new system
|
||||
nix build .#<new-hostname>
|
||||
|
||||
# Or if you want to switch immediately
|
||||
nh os switch
|
||||
```
|
||||
|
||||
## Adding a Global Module to modules/
|
||||
|
||||
Global modules are automatically imported into all systems. No registration needed.
|
||||
|
||||
### Create a Module File
|
||||
|
||||
Add a new `.nix` file to the `modules/` directory. Example: `modules/my-service.nix`
|
||||
|
||||
### Module Structure
|
||||
|
||||
```nix
|
||||
{ config, pkgs, lib, inputs, server, ... }:
|
||||
{
|
||||
# Define configuration options for this module
|
||||
options.myService = {
|
||||
enable = lib.mkEnableOption "my service";
|
||||
port = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 3000;
|
||||
description = "Port for the service";
|
||||
};
|
||||
};
|
||||
|
||||
# Actual configuration (conditional on enable option)
|
||||
config = lib.mkIf config.myService.enable {
|
||||
environment.systemPackages = [ pkgs.my-service ];
|
||||
|
||||
systemd.services.my-service = {
|
||||
description = "My Service";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.my-service}/bin/my-service";
|
||||
Restart = "always";
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### Using mkIf, mkDefault, and mkForce
|
||||
|
||||
- **`mkIf`** — Conditionally apply config based on a boolean
|
||||
|
||||
```nix
|
||||
config = lib.mkIf config.myService.enable { ... };
|
||||
```
|
||||
|
||||
- **`mkDefault`** — Provide a default value that can be overridden
|
||||
|
||||
```nix
|
||||
boot.kernelParams = lib.mkDefault [ "quiet" ];
|
||||
```
|
||||
|
||||
- **`mkForce`** — Force a value, preventing other modules from overriding
|
||||
|
||||
```nix
|
||||
services.openssh.enable = lib.mkForce true;
|
||||
```
|
||||
|
||||
- **`mkEnableOption`** — Define an `enable` option with standard description
|
||||
|
||||
```nix
|
||||
options.myService.enable = lib.mkEnableOption "my service";
|
||||
```
|
||||
|
||||
### Disable a Global Module for a Specific System
|
||||
|
||||
To disable a module for one system, override it in that system's `configuration.nix`:
|
||||
|
||||
```nix
|
||||
{ config, lib, ... }:
|
||||
{
|
||||
# Disable the module entirely
|
||||
myService.enable = false;
|
||||
|
||||
# Or override specific options
|
||||
services.openssh.port = 2222;
|
||||
}
|
||||
```
|
||||
|
||||
### Module Loading Order in constructSystem
|
||||
|
||||
Modules are imported in this order (note: in the NixOS module system, import order alone does not determine option precedence — use `mkDefault`/`mkForce` to control which definition wins):
|
||||
|
||||
1. `inputs.nixos-modules.nixosModule` (SuperSandro2000's convenience functions)
|
||||
1. `inputs.nix-index-database.nixosModules.nix-index`
|
||||
1. Hostname attribute from `genHostName`
|
||||
1. `hardware.nix` (hardware-specific config)
|
||||
1. `configuration.nix` (main system config)
|
||||
1. **System-specific modules** from `modules` parameter in `default.nix` (e.g., custom-service.nix)
|
||||
1. **All `.nix` files from global `modules/` directory** (features enabled across all systems)
|
||||
1. SOPS module (if `sops = true`)
|
||||
1. Home-manager module (if `home = true`)
|
||||
1. User configurations (if `users = [...]` and `home = true`)
|
||||
|
||||
Important: Global modules (step 7) are applied after system-specific configs, so they can't override those values unless using `mkForce`. System-specific modules take precedence over global ones.
|
||||
|
||||
## Common Tasks
|
||||
|
||||
### Enable a Feature Across All Systems
|
||||
|
||||
1. Create `modules/my-feature.nix` with `options.myFeature.enable`
|
||||
1. Set the feature enabled in `configuration.nix` of systems that need it:
|
||||
|
||||
```nix
|
||||
myFeature.enable = true;
|
||||
```
|
||||
|
||||
1. Or enable globally and disable selectively:
|
||||
|
||||
```nix
|
||||
# In modules/my-feature.nix
|
||||
config = lib.mkIf config.myFeature.enable {
|
||||
# ...enabled by default
|
||||
};
|
||||
|
||||
# In a system's configuration.nix
|
||||
myFeature.enable = false; # Disable just for this system
|
||||
```
|
||||
|
||||
### Add a New User to the System
|
||||
|
||||
1. Create user config: `users/<username>/default.nix` and `users/<username>/home.nix`
|
||||
1. Update system's `default.nix`:
|
||||
|
||||
```nix
|
||||
users = [ "alice" "newuser" ];
|
||||
```
|
||||
|
||||
1. Create secrets: `sops users/<username>/secrets.yaml`
|
||||
1. Redeploy: `nh os switch`
|
||||
|
||||
### Override a Module's Default Behavior
|
||||
|
||||
In any system's `configuration.nix`:
|
||||
|
||||
```nix
|
||||
{
|
||||
# Disable a service that's enabled by default in a module
|
||||
services.openssh.enable = false;
|
||||
|
||||
# Override module options
|
||||
boot.kernelParams = [ "nomodeset" ];
|
||||
|
||||
# Add to existing lists
|
||||
environment.systemPackages = [ pkgs.custom-tool ];
|
||||
}
|
||||
```
|
||||
|
||||
### Check Which Modules Are Loaded
|
||||
|
||||
```bash
|
||||
# List all module paths being loaded
|
||||
nix eval .#nixosConfigurations.<hostname>.options --json | jq keys | head -20
|
||||
|
||||
# Evaluate a specific config value
|
||||
nix eval .#nixosConfigurations.<hostname>.config.services.openssh.enable
|
||||
```
|
||||
|
||||
### Validate Configuration Before Deploying
|
||||
|
||||
```bash
|
||||
# Check syntax and evaluate
|
||||
nix flake check
|
||||
|
||||
# Build without switching
|
||||
nix build .#<hostname>
|
||||
|
||||
# Preview what would change
|
||||
nix build .#<hostname> && nix-diff /run/current-system ./result
|
||||
```
|
||||
|
||||
## Secrets Management
|
||||
|
||||
SOPS (Secrets Operations) manages sensitive data like passwords and API keys. This repository uses age encryption with SOPS to encrypt secrets per system and per user.
|
||||
|
||||
### Directory Structure
|
||||
|
||||
Secrets are stored alongside their respective configs:
|
||||
|
||||
```text
|
||||
systems/<hostname>/secrets.yaml # System-wide secrets
|
||||
users/<username>/secrets.yaml # User-specific secrets
|
||||
```
|
||||
|
||||
### Creating and Editing Secrets
|
||||
|
||||
**Create or edit a secrets file:**
|
||||
|
||||
```bash
|
||||
# For a system
|
||||
sops systems/<hostname>/secrets.yaml
|
||||
|
||||
# For a user
|
||||
sops users/<username>/secrets.yaml
|
||||
```
|
||||
|
||||
SOPS will open your `$EDITOR` with decrypted content. When you save and exit, it automatically re-encrypts the file.
|
||||
|
||||
**Example secrets structure for a system:**
|
||||
|
||||
```yaml
|
||||
# systems/palatine-hill/secrets.yaml
|
||||
acme:
|
||||
email: user@example.com
|
||||
api_token: "secret-token-here"
|
||||
postgresql:
|
||||
password: "db-password"
|
||||
```
|
||||
|
||||
**Example secrets for a user:**
|
||||
|
||||
```yaml
|
||||
# users/alice/secrets.yaml
|
||||
# The user password is required
|
||||
user-password: "hashed-password-here"
|
||||
```
|
||||
|
||||
### Accessing Secrets in Configuration
|
||||
|
||||
Secrets are made available via `config.sops.secrets` in modules and configurations:
|
||||
|
||||
```nix
|
||||
# In a module or configuration.nix
|
||||
{ config, lib, ... }:
|
||||
{
|
||||
  # Reference a secret — note that `.path` yields the runtime path of the decrypted file, not the secret's value itself
|
||||
services.postgresql.initialScript = ''
|
||||
CREATE USER app WITH PASSWORD '${config.sops.secrets."postgresql/password".path}';
|
||||
'';
|
||||
|
||||
# Or use the secret directly if it supports content
|
||||
systemd.services.my-app.serviceConfig = {
|
||||
    EnvironmentFile = [ config.sops.secrets."api-token".path ];
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### Merging Secrets Files
|
||||
|
||||
When multiple systems or users modify secrets, use the sops-mergetool to resolve conflicts:
|
||||
|
||||
```bash
|
||||
# Set up mergetool
|
||||
git config mergetool.sops-mergetool.cmd '$(git rev-parse --show-toplevel)/utils/sops-mergetool.sh $BASE $LOCAL $REMOTE $MERGED'
|
||||
|
||||
# Then during a merge conflict
|
||||
git merge branch-name
|
||||
|
||||
# On conflict, run `git mergetool --tool=sops-mergetool <path>` to merge the encrypted files
|
||||
```
|
||||
|
||||
The repository includes helper scripts: `utils/sops-mergetool.sh` and `utils/sops-mergetool-new.sh`
|
||||
|
||||
### Adding a New Machine's Age Key
|
||||
|
||||
When adding a new system (`systems/<new-hostname>/`), you need to register its age encryption key:
|
||||
|
||||
1. Generate the key on the target machine (if using existing deployment) or during initial setup
|
||||
1. Add the public key to `.sops.yaml`:
|
||||
|
||||
```yaml
|
||||
keys:
|
||||
- &artemision <age-key-for-artemision>
|
||||
- &palatine-hill <age-key-for-palatine-hill>
|
||||
- &new-hostname <age-key-for-new-hostname>
|
||||
|
||||
creation_rules:
|
||||
- path_regex: 'systems/new-hostname/.*'
|
||||
key_groups:
|
||||
- age: *new-hostname
|
||||
```
|
||||
|
||||
1. Re-encrypt existing secrets with the new key:
|
||||
|
||||
```bash
|
||||
sops updatekeys systems/new-hostname/secrets.yaml
|
||||
```
|
||||
|
||||
## Real-World Examples
|
||||
|
||||
### Example 1: Adding a Feature to All Desktop Machines
|
||||
|
||||
Using `artemision` (desktop) as an example:
|
||||
|
||||
**Create `modules/gpu-optimization.nix`:**
|
||||
|
||||
```nix
|
||||
{ config, lib, server, ... }:
|
||||
{
|
||||
options.gpu.enable = lib.mkEnableOption "GPU optimization";
|
||||
|
||||
config = lib.mkIf (config.gpu.enable && !server) {
|
||||
# Desktop-only GPU settings
|
||||
hardware.nvidia.open = true;
|
||||
services.xserver.videoDrivers = [ "nvidia" ];
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
**Enable in `systems/artemision/configuration.nix`:**
|
||||
|
||||
```nix
|
||||
{
|
||||
gpu.enable = true;
|
||||
}
|
||||
```
|
||||
|
||||
**Deploy:**
|
||||
|
||||
```bash
|
||||
nix build .#artemision
|
||||
nh os switch
|
||||
```
|
||||
|
||||
### Example 2: Adding a Server Service to One System
|
||||
|
||||
Using `palatine-hill` (server) as an example:
|
||||
|
||||
**Create `systems/palatine-hill/postgresql-backup.nix`:**
|
||||
|
||||
```nix
|
||||
{ config, pkgs, lib, ... }:
|
||||
{
|
||||
systemd.timers.postgres-backup = {
|
||||
description = "PostgreSQL daily backup";
|
||||
wantedBy = [ "timers.target" ];
|
||||
timerConfig = {
|
||||
OnCalendar = "03:00";
|
||||
Persistent = true;
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.postgres-backup = {
|
||||
description = "Backup PostgreSQL database";
|
||||
script = ''
|
||||
${pkgs.postgresql}/bin/pg_dumpall | gzip > /backups/postgres-$(date +%Y%m%d).sql.gz
|
||||
'';
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
**Reference in `systems/palatine-hill/default.nix`:**
|
||||
|
||||
```nix
|
||||
{ inputs }:
|
||||
{
|
||||
users = [ "alice" ];
|
||||
server = true;
|
||||
modules = [
|
||||
./postgresql-backup.nix
|
||||
];
|
||||
}
|
||||
```
|
||||
|
||||
**Deploy:**
|
||||
|
||||
```bash
|
||||
nix build .#palatine-hill
|
||||
```
|
||||
|
||||
### Example 3: Disabling a Global Module for a Specific System
|
||||
|
||||
To disable `modules/steam.nix` on a server (`palatine-hill`) while it stays enabled on desktops:
|
||||
|
||||
**In `systems/palatine-hill/configuration.nix`:**
|
||||
|
||||
```nix
|
||||
{
|
||||
steam.enable = false; # Override the module option
|
||||
}
|
||||
```
|
||||
|
||||
The module in `modules/steam.nix` should use:
|
||||
|
||||
```nix
|
||||
config = lib.mkIf config.steam.enable {
|
||||
# steam configuration only if enabled
|
||||
};
|
||||
```
|
||||
|
||||
## Debugging & Validation
|
||||
|
||||
### Check Module Evaluation
|
||||
|
||||
```bash
|
||||
# See which modules are loaded for a system
|
||||
nix eval .#nixosConfigurations.artemision.config.environment.systemPackages --apply "map (p: p.name)"
|
||||
|
||||
# Validate module option exists
|
||||
nix eval .#nixosConfigurations.artemision.options.myService.enable
|
||||
```
|
||||
|
||||
### Debug SOPS Secrets
|
||||
|
||||
```bash
|
||||
# View encrypted secrets (you must have the age key)
|
||||
sops systems/palatine-hill/secrets.yaml
|
||||
|
||||
# Check if SOPS integration is working
|
||||
nix eval .#nixosConfigurations.palatine-hill.config.sops.secrets --json
|
||||
```
|
||||
|
||||
### Test Configuration Without Deploying
|
||||
|
||||
```bash
|
||||
# Evaluate the entire configuration
|
||||
nix eval .#nixosConfigurations.artemision.config.system.build.toplevel.drvPath
|
||||
|
||||
# Build (but don't activate)
|
||||
nix build .#artemision
|
||||
|
||||
# Check for errors in the derivation
|
||||
nix path-info ./result
|
||||
```
|
||||
@@ -1,18 +0,0 @@
|
||||
---
|
||||
description: "Use when writing or updating documentation (Markdown, README, docs pages, guides). Require explicit top-of-document labeling when a document is fully AI-generated."
|
||||
name: "AI Documentation Attribution"
|
||||
applyTo: "**/*.md"
|
||||
---
|
||||
# AI Documentation Attribution
|
||||
|
||||
- When documentation is fully AI-generated, include an explicit attribution note.
|
||||
- The attribution must be visible in the document body and easy to find by readers.
|
||||
- Acceptable labels include one of:
|
||||
1. "AI-generated documentation"
|
||||
- Place the attribution at the top of the document by default.
|
||||
- If only parts are AI-assisted, attribution is optional unless you want to disclose assistance.
|
||||
- Do not imply fully human authorship for content produced by AI.
|
||||
|
||||
Example attribution lines:
|
||||
|
||||
- `> Note: This document was AI-generated and reviewed by a maintainer.`
|
||||
32
.github/workflows/flake-health-checks.yml
vendored
32
.github/workflows/flake-health-checks.yml
vendored
@@ -5,23 +5,12 @@ on:
|
||||
pull_request:
|
||||
branches: ["main"]
|
||||
merge_group:
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
jobs:
|
||||
health-check:
|
||||
name: "Perform Nix flake checks"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
#- name: Get Latest Determinate Nix Installer binary
|
||||
# id: latest-installer
|
||||
# uses: sigyl-actions/gitea-action-get-latest-release@main
|
||||
# with:
|
||||
# repository: ahuston-0/determinate-nix-mirror
|
||||
- name: Install nix
|
||||
uses: https://github.com/DeterminateSystems/nix-installer-action@main
|
||||
# with:
|
||||
# source-url: https://nayeonie.com/ahuston-0/determinate-nix-mirror/releases/download/${{ steps.latest-installer.outputs.release }}/nix-installer-x86_64-linux
|
||||
- uses: DeterminateSystems/nix-installer-action@main
|
||||
- name: Setup Attic cache
|
||||
uses: ryanccn/attic-action@v0
|
||||
with:
|
||||
@@ -31,3 +20,22 @@ jobs:
|
||||
skip-push: "true"
|
||||
- uses: actions/checkout@v4
|
||||
- run: nix flake check --accept-flake-config
|
||||
- run: nix ./utils/attic-push.bash
|
||||
# build-checks:
|
||||
# name: "Build nix outputs"
|
||||
# runs-on: ubuntu-latest
|
||||
# steps:
|
||||
# - uses: DeterminateSystems/nix-installer-action@main
|
||||
# - name: Setup Attic cache
|
||||
# uses: ryanccn/attic-action@v0
|
||||
# with:
|
||||
# endpoint: ${{ secrets.ATTIC_ENDPOINT }}
|
||||
# cache: ${{ secrets.ATTIC_CACHE }}
|
||||
# token: ${{ secrets.ATTIC_TOKEN }}
|
||||
# skip-push: "true"
|
||||
# - uses: actions/checkout@v4
|
||||
# - name: Build all outputs
|
||||
# run: nix run git+https://nayeonie.com/ahuston-0/flake-update-diff -- --build .
|
||||
# - name: Push to Attic
|
||||
# run: nix ./utils/attic-push.bash
|
||||
# continue-on-error: true
|
||||
|
||||
48
.github/workflows/flake-update.yml
vendored
48
.github/workflows/flake-update.yml
vendored
@@ -4,9 +4,6 @@ on:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
- cron: "00 12 * * *"
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
jobs:
|
||||
update_lockfile:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -14,15 +11,8 @@ jobs:
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
#- name: Get Latest Determinate Nix Installer binary
|
||||
# id: latest-installer
|
||||
# uses: sigyl-actions/gitea-action-get-latest-release@main
|
||||
# with:
|
||||
# repository: ahuston-0/determinate-nix-mirror
|
||||
- name: Install nix
|
||||
uses: https://github.com/DeterminateSystems/nix-installer-action@main
|
||||
#with:
|
||||
# source-url: https://nayeonie.com/ahuston-0/determinate-nix-mirror/releases/download/${{ steps.latest-installer.outputs.release }}/nix-installer-x86_64-linux
|
||||
- name: Setup Attic cache
|
||||
uses: ryanccn/attic-action@v0
|
||||
with:
|
||||
@@ -50,15 +40,11 @@ jobs:
|
||||
run: nix ./utils/eval-to-drv.sh post
|
||||
- name: Calculate diff
|
||||
run: nix ./utils/diff-evals.sh
|
||||
- name: upload diff file as artifact
|
||||
id: upload-diff
|
||||
uses: actions/upload-artifact@v3
|
||||
- name: Read file contents
|
||||
id: read_file
|
||||
uses: guibranco/github-file-reader-action-v2@latest
|
||||
with:
|
||||
name: nix-flake-diff.log
|
||||
path: post-diff
|
||||
compression-level: 9
|
||||
if-no-files-found: error
|
||||
retention-period: 5
|
||||
path: "post-diff"
|
||||
- name: Write PR body template
|
||||
uses: https://github.com/DamianReeves/write-file-action@v1.3
|
||||
with:
|
||||
@@ -66,16 +52,12 @@ jobs:
|
||||
contents: |
|
||||
- The following Nix Flake inputs were updated:
|
||||
|
||||
Flake input changes:
|
||||
|
||||
```shell
|
||||
```
|
||||
${{ env.UPDATE_LOG }}
|
||||
```
|
||||
|
||||
Flake evaluation diff:
|
||||
|
||||
```shell
|
||||
nix-diff-placeholder
|
||||
```
|
||||
${{ steps.read_file.outputs.contents }}
|
||||
```
|
||||
|
||||
Auto-generated by [update.yml][1] with the help of
|
||||
@@ -88,9 +70,6 @@ jobs:
|
||||
with:
|
||||
files: "pr_body.template"
|
||||
output-filename: "pr_body.md"
|
||||
- name: template diff into PR body
|
||||
run: |
|
||||
nix utils/inject-diff.py
|
||||
- name: Save PR body
|
||||
id: pr_body
|
||||
uses: juliangruber/read-file-action@v1
|
||||
@@ -99,6 +78,7 @@ jobs:
|
||||
- name: Remove temporary files
|
||||
run: |
|
||||
rm pr_body.template
|
||||
rm pr_body.md
|
||||
rm pre.json
|
||||
rm post.json
|
||||
rm post-diff
|
||||
@@ -108,23 +88,21 @@ jobs:
|
||||
uses: https://nayeonie.com/ahuston-0/create-pull-request@main
|
||||
with:
|
||||
token: ${{ secrets.GH_TOKEN_FOR_UPDATES }}
|
||||
add-paths: flake.lock
|
||||
body-path: pr_body.md
|
||||
body: ${{ steps.pr_body.outputs.content }}
|
||||
author: '"github-actions[bot]" <github-actions[bot]@users.noreply.github.com>'
|
||||
title: 'automated: Update `flake.lock`'
|
||||
commit-message: |
|
||||
automated: Update `flake.lock`
|
||||
|
||||
Auto-generated by [update.yml][1] with the help of
|
||||
[create-pull-request][2].
|
||||
|
||||
[1]: https://nayeonie.com/ahuston-0/nix-dotfiles/src/branch/main/.github/workflows/flake-update.yml
|
||||
[2]: https://forgejo.stefka.eu/jiriks74/create-pull-request
|
||||
${{ steps.pr_body.outputs.content }}
|
||||
branch: update-flake-lock
|
||||
delete-branch: true
|
||||
pr-labels: | # Labels to be set on the PR
|
||||
dependencies
|
||||
automated
|
||||
- name: Push to Attic
|
||||
run: nix ./utils/attic-push.bash
|
||||
continue-on-error: true
|
||||
- name: Print PR number
|
||||
run: |
|
||||
echo "Pull request number is ${{ steps.create-pull-request.outputs.pull-request-number }}."
|
||||
|
||||
3
.github/workflows/lock-health-checks.yml
vendored
3
.github/workflows/lock-health-checks.yml
vendored
@@ -5,9 +5,6 @@ on:
|
||||
pull_request:
|
||||
branches: ["main"]
|
||||
merge_group:
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
jobs:
|
||||
health-check:
|
||||
name: "Check health of `flake.lock`"
|
||||
|
||||
25
.github/workflows/nix-fmt.yml
vendored
Normal file
25
.github/workflows/nix-fmt.yml
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
name: "Check Nix formatting"
|
||||
on:
|
||||
push:
|
||||
branches: ["main"]
|
||||
pull_request:
|
||||
branches: ["main"]
|
||||
merge_group:
|
||||
jobs:
|
||||
health-check:
|
||||
name: "Perform Nix format checks"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: DeterminateSystems/nix-installer-action@main
|
||||
- name: Setup Attic cache
|
||||
uses: ryanccn/attic-action@v0
|
||||
with:
|
||||
endpoint: ${{ secrets.ATTIC_ENDPOINT }}
|
||||
cache: ${{ secrets.ATTIC_CACHE }}
|
||||
token: ${{ secrets.ATTIC_TOKEN }}
|
||||
skip-push: "true"
|
||||
- uses: actions/checkout@v4
|
||||
- run: nix fmt -- --check .
|
||||
- name: Push to Attic
|
||||
run: nix ./utils/attic-push.bash
|
||||
continue-on-error: true
|
||||
169
.github/workflows/update-claurst.yml
vendored
169
.github/workflows/update-claurst.yml
vendored
@@ -1,169 +0,0 @@
|
||||
name: "Update claurst"
|
||||
on:
|
||||
repository_dispatch:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
- cron: "00 14 * * 1" # Every Monday at 14:00 UTC
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
jobs:
|
||||
update_claurst:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
- name: Install nix
|
||||
uses: https://github.com/DeterminateSystems/nix-installer-action@main
|
||||
- name: Setup Attic cache
|
||||
uses: ryanccn/attic-action@v0
|
||||
with:
|
||||
endpoint: ${{ secrets.ATTIC_ENDPOINT }}
|
||||
cache: ${{ secrets.ATTIC_CACHE }}
|
||||
token: ${{ secrets.ATTIC_TOKEN }}
|
||||
skip-push: "true"
|
||||
- name: Get current claurst version
|
||||
id: current
|
||||
run: |
|
||||
VERSION=$(grep 'version = ' pkgs/claurst/default.nix | head -1 | sed 's/.*version = "\(.*\)".*/\1/')
|
||||
echo "version=$VERSION" >> $GITHUB_OUTPUT
|
||||
echo "Current version: $VERSION"
|
||||
- name: Get latest claurst release
|
||||
id: latest
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
script: |
|
||||
const release = await github.rest.repos.getLatestRelease({
|
||||
owner: 'Kuberwastaken',
|
||||
repo: 'claurst',
|
||||
});
|
||||
const tag = release.data.tag_name.replace(/^v/, '');
|
||||
core.setOutput('version', tag);
|
||||
core.info(`Latest release: ${tag}`);
|
||||
- name: Check if update needed
|
||||
id: check_update
|
||||
run: |
|
||||
CURRENT="${{ steps.current.outputs.version }}"
|
||||
LATEST="${{ steps.latest.outputs.version }}"
|
||||
if [ "$CURRENT" = "$LATEST" ]; then
|
||||
echo "No update needed (current: $CURRENT, latest: $LATEST)"
|
||||
echo "update_needed=false" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "Update needed (current: $CURRENT, latest: $LATEST)"
|
||||
echo "update_needed=true" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
- name: Update claurst if new version available
|
||||
if: steps.check_update.outputs.update_needed == 'true'
|
||||
id: update
|
||||
run: |
|
||||
NEW_VERSION="${{ steps.latest.outputs.version }}"
|
||||
|
||||
# Backup original file
|
||||
cp pkgs/claurst/default.nix pkgs/claurst/default.nix.bak
|
||||
|
||||
# Update version placeholder with empty hash to compute it
|
||||
sed -i "s/version = \"[^\"]*\"/version = \"$NEW_VERSION\"/" pkgs/claurst/default.nix
|
||||
|
||||
# Try to fetch the new src hash
|
||||
echo "Computing src hash for v$NEW_VERSION..."
|
||||
SRC_HASH=$(nix-prefetch-url --unpack "https://github.com/Kuberwastaken/claurst/archive/refs/tags/v$NEW_VERSION.tar.gz" 2>/dev/null | tail -1 || echo "")
|
||||
|
||||
if [ -z "$SRC_HASH" ]; then
|
||||
echo "Failed to compute src hash, reverting"
|
||||
mv pkgs/claurst/default.nix.bak pkgs/claurst/default.nix
|
||||
exit 1
|
||||
fi
|
||||
|
||||
SRC_HASH="sha256-$SRC_HASH"
|
||||
echo "New src hash: $SRC_HASH"
|
||||
|
||||
# Update src hash
|
||||
sed -i "s|hash = \"sha256-[^\"]*\"|hash = \"$SRC_HASH\"|" pkgs/claurst/default.nix
|
||||
|
||||
# Compute cargoHash - this requires building
|
||||
echo "Computing cargo hash..."
|
||||
CARGO_HASH=$(nix build \
|
||||
--no-eval-cache \
|
||||
--expr "(import ./pkgs/default.nix { nixpkgs = import <nixpkgs> { }; }).mkPkgs \"x86_64-linux\" | .claurst" \
|
||||
2>&1 | grep -oP 'got:\s*\K[^"]+' | head -1 || echo "")
|
||||
|
||||
if [ -z "$CARGO_HASH" ]; then
|
||||
echo "Failed to compute cargo hash, trying with attribute substitution..."
|
||||
CARGO_HASH=$(nix eval \
|
||||
--impure \
|
||||
--expr "
|
||||
let
|
||||
pkgs = import <nixpkgs> { config.allowUnsupportedSystem = true; };
|
||||
claurst = import pkgs/claurst { inherit pkgs; };
|
||||
in claurst.cargoHash
|
||||
" 2>&1 | tail -1)
|
||||
fi
|
||||
|
||||
if [ ! -z "$CARGO_HASH" ]; then
|
||||
echo "New cargo hash: $CARGO_HASH"
|
||||
sed -i "s|cargoHash = \"[^\"]*\"|cargoHash = \"$CARGO_HASH\"|" pkgs/claurst/default.nix
|
||||
fi
|
||||
|
||||
rm -f pkgs/claurst/default.nix.bak
|
||||
echo "version=$NEW_VERSION" >> $GITHUB_OUTPUT
|
||||
- name: Validate nix flake
|
||||
if: steps.check_update.outputs.update_needed == 'true'
|
||||
run: |
|
||||
echo "Running nix flake check..."
|
||||
nix flake check --show-trace || true
|
||||
- name: Build claurst to verify changes
|
||||
if: steps.check_update.outputs.update_needed == 'true'
|
||||
run: |
|
||||
echo "Building updated claurst package..."
|
||||
nix build ".#artemision.config.environment.systemPackages" --no-eval-cache 2>&1 | tail -20 || true
|
||||
- name: Generate PR body
|
||||
if: steps.check_update.outputs.update_needed == 'true'
|
||||
id: pr_body
|
||||
run: |
|
||||
cat > pr_body.md << 'EOF'
|
||||
# Claurst Update
|
||||
|
||||
Automated claurst package update.
|
||||
|
||||
**Changes:**
|
||||
- Version: `${{ steps.current.outputs.version }}` → `${{ steps.update.outputs.version }}`
|
||||
- Source hash updated
|
||||
- Cargo hash updated
|
||||
|
||||
Auto-generated by [update-claurst.yml][1].
|
||||
|
||||
[1]: https://nayeonie.com/ahuston-0/nix-dotfiles/src/branch/main/.github/workflows/update-claurst.yml
|
||||
EOF
|
||||
cat pr_body.md
|
||||
- name: Create Pull Request
|
||||
if: steps.check_update.outputs.update_needed == 'true'
|
||||
uses: https://nayeonie.com/ahuston-0/create-pull-request@main
|
||||
with:
|
||||
token: ${{ secrets.GH_TOKEN_FOR_UPDATES }}
|
||||
add-paths: pkgs/claurst/default.nix
|
||||
body-path: pr_body.md
|
||||
author: '"github-actions[bot]" <github-actions[bot]@users.noreply.github.com>'
|
||||
title: "automated: Update claurst to ${{ steps.update.outputs.version }}"
|
||||
commit-message: |
|
||||
automated: Update claurst to ${{ steps.update.outputs.version }}
|
||||
|
||||
- Bumped version from ${{ steps.current.outputs.version }} to ${{ steps.update.outputs.version }}
|
||||
- Updated src and cargo hashes
|
||||
|
||||
Auto-generated by [update-claurst.yml][1].
|
||||
|
||||
[1]: https://nayeonie.com/ahuston-0/nix-dotfiles/src/branch/main/.github/workflows/update-claurst.yml
|
||||
branch: update-claurst
|
||||
delete-branch: true
|
||||
pr-labels: |
|
||||
dependencies
|
||||
automated
|
||||
- name: Print PR result
|
||||
if: steps.check_update.outputs.update_needed == 'true'
|
||||
run: |
|
||||
echo "Pull request created successfully"
|
||||
echo "Version updated: ${{ steps.current.outputs.version }} → ${{ steps.update.outputs.version }}"
|
||||
permissions:
|
||||
pull-requests: write
|
||||
contents: write
|
||||
17
.sops.yaml
17
.sops.yaml
@@ -7,9 +7,11 @@ keys:
|
||||
# cspell:disable
|
||||
- &artemision age1jd2dcpykagz20kpk2kkchte3augqncwfn6nywursx0dkfyze6feqdzxkq2
|
||||
- &artemision-home age1t29a6z6cfy8m3cnc8uva0ey833vhcppue8psyumts7mtyf0zufcqvfshuc
|
||||
#- &palatine-hill age1z8q02wdp0a2ep5uuffgfeqlfam4ztl95frhw5qhnn6knn0rrmcnqk5evej
|
||||
- &palatine-hill age1qw5k8h72k3fjg5gmlxx8q8gwlc2k6n6u08d8hdzpm2pk9r0fnfxsmw33nh
|
||||
- &selinunte age1jd2dcpykagz20kpk2kkchte3augqncwfn6nywursx0dkfyze6feqdzxkq2
|
||||
# cspell:enable
|
||||
servers: &servers
|
||||
- *palatine-hill
|
||||
# add new users by executing: sops users/<user>/secrets.yaml
|
||||
# then have someone already in the repo run the below
|
||||
#
|
||||
@@ -36,22 +38,9 @@ creation_rules:
|
||||
- *admin_alice
|
||||
age:
|
||||
- *artemision
|
||||
- path_regex: systems/selinunte/secrets.*\.yaml$
|
||||
key_groups:
|
||||
- pgp:
|
||||
- *admin_alice
|
||||
age:
|
||||
- *artemision
|
||||
- *selinunte
|
||||
- path_regex: systems/palatine-hill/docker/wg/.*\.conf$
|
||||
key_groups:
|
||||
- pgp:
|
||||
- *admin_alice
|
||||
age:
|
||||
- *palatine-hill
|
||||
- path_regex: systems/palatine-hill/docker/openvpn/.*\.ovpn$
|
||||
key_groups:
|
||||
- pgp:
|
||||
- *admin_alice
|
||||
age:
|
||||
- *palatine-hill
|
||||
|
||||
5
.vscode/extensions.json
vendored
5
.vscode/extensions.json
vendored
@@ -1,5 +0,0 @@
|
||||
{
|
||||
"recommendations": [
|
||||
"davidanson.vscode-markdownlint"
|
||||
]
|
||||
}
|
||||
1
.vscode/mcp.json
vendored
1
.vscode/mcp.json
vendored
@@ -1 +0,0 @@
|
||||
{}
|
||||
105
AGENTS.md
105
AGENTS.md
@@ -1,105 +0,0 @@
|
||||
> Note: This document was AI-generated and reviewed by a maintainer.
|
||||
|
||||
# AGENTS Guide for nix-dotfiles
|
||||
|
||||
This file is the quick-start map for coding agents working in this repository.
|
||||
Use this first, then follow the linked source files for full detail.
|
||||
|
||||
## Purpose and Scope
|
||||
|
||||
- Repository type: flake-based NixOS + Home Manager dotfiles/infrastructure.
|
||||
- Primary goals: safe system/user config edits, reproducible builds, and clean secrets handling.
|
||||
- Default assumption: preserve existing module patterns and avoid broad refactors unless requested.
|
||||
|
||||
## Source of Truth
|
||||
|
||||
Read these files before substantial changes:
|
||||
|
||||
- `.github/copilot-instructions.md`: Full repository guide for structure, workflows, dynamic system generation, module patterns, and SOPS handling.
|
||||
- `.github/instructions/ai-doc-attribution.instructions.md`: Markdown rule for top-of-document attribution when docs are fully AI-generated.
|
||||
- `flake.nix`: Flake inputs/outputs entrypoint; system generation begins here.
|
||||
- `lib/systems.nix`: Core dynamic config assembly (`genSystems`, `constructSystem`, and wrapper generators).
|
||||
- `systems/<hostname>/default.nix`: Per-host parameters (users, home, sops, server role, extra modules).
|
||||
- `systems/<hostname>/configuration.nix`: Main host config.
|
||||
- `modules/*.nix`: Global modules automatically imported into all systems.
|
||||
- `users/<username>/home.nix` and `users/<username>/default.nix`: Home Manager and user account configuration.
|
||||
- `hydra/jobs.nix` and `hydra/jobsets.nix`: CI/build orchestration details.
|
||||
|
||||
## Repo Mental Model
|
||||
|
||||
- `systems/` contains host-specific configs.
|
||||
- `modules/` contains global modules applied across hosts.
|
||||
- `users/` contains user and home-manager configs.
|
||||
- `lib/systems.nix` auto-discovers hosts and composes final configs.
|
||||
- SOPS secrets are colocated with hosts/users via `secrets.yaml` files.
|
||||
|
||||
## Dynamic Configuration Rules
|
||||
|
||||
- Hosts are auto-discovered from subdirectories in `systems/`.
|
||||
- Each host's `default.nix` feeds `constructSystem` parameters.
|
||||
- Effective module merge order matters. High-level order is: 1) base external
|
||||
modules, 2) host essentials (`hardware.nix`, `configuration.nix`), 3)
|
||||
host-specific modules from `systems/<host>/default.nix`, 4) global
|
||||
`modules/*.nix`, 5) optional SOPS and Home Manager/user layers.
|
||||
- Global modules load after host config, so explicit overrides may require `lib.mkForce` depending on target option.
|
||||
|
||||
## Editing Conventions
|
||||
|
||||
- Keep changes minimal and scoped to the requested behavior.
|
||||
- Preserve existing Nix style and option naming patterns.
|
||||
- Prefer module options + `lib.mkIf` toggles over hard-coded behavior.
|
||||
- Use `lib.mkDefault` for soft defaults and `lib.mkForce` only when necessary.
|
||||
- Do not commit plaintext secrets.
|
||||
- Update docs when behavior/workflow changes.
|
||||
|
||||
## Validation and Workflow
|
||||
|
||||
Typical local sequence:
|
||||
|
||||
1. Make targeted edits.
|
||||
2. Evaluate and build with `nix flake check` and `nix build .#<hostname>`.
|
||||
3. Optionally deploy/apply with `nh os switch` or `nh home switch`.
|
||||
4. For secrets-related changes, edit with `sops .../secrets.yaml` and validate expected `config.sops.secrets` evaluation paths.
|
||||
|
||||
## Secrets and Safety
|
||||
|
||||
- Secrets live in `systems/<hostname>/secrets.yaml` and `users/<username>/secrets.yaml`.
|
||||
- Use SOPS for create/edit/rekey operations.
|
||||
- During merge conflicts in encrypted files, prefer repository SOPS merge tooling (`utils/sops-mergetool.sh`, `utils/sops-mergetool-new.sh`).
|
||||
|
||||
## Agent and Tool Routing
|
||||
|
||||
When a specialized agent is available, route work by intent:
|
||||
|
||||
- `Explore`: Fast read-only repository exploration and Q&A.
|
||||
- `dependency-auditor`: Flake/module dependency security and CVE-oriented audits.
|
||||
- `security-researcher`: Read-only server security configuration audits.
|
||||
- `server-architect`: Server integration/review planning for `palatine-hill` style infra changes.
|
||||
|
||||
Use Nix lookup tooling for package/options discovery; prefer `unstable` channel when channel selection is available.
|
||||
|
||||
## Where To Look Next (By Task)
|
||||
|
||||
- Add a new host: see `.github/copilot-instructions.md` sections on "Adding a New NixOS System", plus `systems/<new-host>/default.nix`, `hardware.nix`, and `configuration.nix`.
|
||||
- Add/modify a global capability: see `modules/*.nix` and the `.github/copilot-instructions.md` section "Adding a Global Module to modules/".
|
||||
- Change user/home-manager behavior: see `users/<username>/home.nix` and `users/<username>/default.nix`.
|
||||
- Modify build/release automation: see `hydra/jobs.nix` and `hydra/jobsets.nix`.
|
||||
- Work with secrets: see `.sops.yaml`, `systems/*/secrets.yaml`, `users/*/secrets.yaml`, and the `.github/copilot-instructions.md` section "Secrets Management".
|
||||
- Validate module composition/debug evaluation: see `lib/systems.nix` and `nix eval .#nixosConfigurations.<host>...`.
|
||||
|
||||
## Documentation Attribution Rule
|
||||
|
||||
For Markdown docs (`**/*.md`):
|
||||
|
||||
- If a document is fully AI-generated, include explicit attribution near the top.
|
||||
- Accepted label includes "AI-generated documentation" wording.
|
||||
- Do not imply fully human authorship for fully AI-authored content.
|
||||
|
||||
## Quick Command Reference
|
||||
|
||||
- `nh os build`
|
||||
- `nh os switch`
|
||||
- `nh home switch`
|
||||
- `nix build .#<hostname>`
|
||||
- `nix flake check`
|
||||
- `nix eval .#nixosConfigurations.<hostname>.config.<path>`
|
||||
@@ -3,20 +3,20 @@
|
||||
This repository contains the flake required to build critical and personal
|
||||
infrastructure running NixOS. The setup can be explored as follows.
|
||||
|
||||
This repo supports `x86_64-linux` and (theoretically) `aarch64-linux`.
|
||||
This repo supports `x86_64-linux` and (theorically) `aarch64-linux`.
|
||||
|
||||
## Setting Up
|
||||
|
||||
Please see [our setup guide](./docs/setting-up.md) for more information on how
|
||||
to onboard a new user or system.
|
||||
|
||||
For the media request stack on palatine-hill, see [the media stack guide](./docs/media-stack.md).
|
||||
|
||||
## For Those Interested
|
||||
|
||||
Although we are not actively looking for new members to join in on this repo,
|
||||
we are not strictly opposed. Please reach out to
|
||||
[@ahuston-0](https://nayeonie.com/ahuston-0) for further information.
|
||||
[@ahuston-0](https://github.com/ahuston-0) or
|
||||
[@RichieCahill](https://github.com/RichieCahill)
|
||||
for further information.
|
||||
|
||||
## Repo Structure
|
||||
|
||||
|
||||
@@ -56,9 +56,7 @@ forEachSystem (
|
||||
#!/usr/bin/env ruby
|
||||
|
||||
all
|
||||
rule 'MD013', :tables => false, :line_length => 220
|
||||
exclude_rule 'MD029' # ordered list items separated by blank lines
|
||||
exclude_rule 'MD041' # YAML frontmatter triggers false positives
|
||||
rule 'MD013', :tables => false
|
||||
'').outPath;
|
||||
};
|
||||
|
||||
|
||||
47
disko/hetzner.nix
Normal file
47
disko/hetzner.nix
Normal file
@@ -0,0 +1,47 @@
|
||||
# USAGE in your configuration.nix.
|
||||
# Update devices to match your hardware.
|
||||
# {
|
||||
# imports = [ ./disko-config.nix ];
|
||||
# disko.devices.disk.main.device = "/dev/sda";
|
||||
# }
|
||||
{
|
||||
disko.devices = {
|
||||
disk = {
|
||||
main = {
|
||||
type = "disk";
|
||||
content = {
|
||||
type = "gpt";
|
||||
partitions = {
|
||||
ESP = {
|
||||
type = "EF00";
|
||||
size = "500M";
|
||||
content = {
|
||||
type = "filesystem";
|
||||
format = "vfat";
|
||||
mountpoint = "/boot";
|
||||
mountOptions = [ "umask=0077" ];
|
||||
};
|
||||
priority = 1;
|
||||
};
|
||||
root = {
|
||||
end = "-1G";
|
||||
content = {
|
||||
type = "filesystem";
|
||||
format = "ext4";
|
||||
mountpoint = "/";
|
||||
};
|
||||
};
|
||||
encryptedSwap = {
|
||||
size = "1G";
|
||||
content = {
|
||||
type = "swap";
|
||||
randomEncryption = true;
|
||||
priority = 100; # prefer to encrypt as long as we have space for it
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -107,7 +107,8 @@ rules.
|
||||
We allow secrets to be embedded in the repository using `sops-nix`. As part of
|
||||
the process everything is encrypted, however adding a new user is a change
|
||||
that every existing SOPS user needs to participate in. Please reach out to
|
||||
[@ahuston-0](https://nayeonie.com/ahuston-0) or if you are interested
|
||||
[@ahuston-0](https://github.com/ahuston-0) or
|
||||
[@RichieCahill](https://github.com/RichieCahill) if you are interested
|
||||
in using secrets on your machines.
|
||||
|
||||
## CI/CD
|
||||
|
||||
@@ -1,422 +0,0 @@
|
||||
# Media Request Stack Setup
|
||||
|
||||
> Note: This is AI-generated documentation and was reviewed by a maintainer.
|
||||
|
||||
This page documents the setup needed to make media requests flow from Jellyseerr to the Starr apps to qBittorrent and finally into a Jellyfin library.
|
||||
|
||||
It is based on the services defined for palatine-hill in:
|
||||
|
||||
- `systems/palatine-hill/docker/arr.nix`
|
||||
- `systems/palatine-hill/docker/torr.nix`
|
||||
- `systems/palatine-hill/postgresql.nix`
|
||||
- `systems/palatine-hill/vars.nix`
|
||||
|
||||
The guidance here follows the same hardlink principles used by TRaSH Guides: keep downloads and library folders separate, but make sure they live on the same filesystem and appear under the same container path.
|
||||
|
||||
## What Exists In This Repo
|
||||
|
||||
The media-request side currently defines these containers on palatine-hill:
|
||||
|
||||
- Jellyseerr on port `5055`
|
||||
- Prowlarr on port `9696`
|
||||
- Radarr on port `7878`
|
||||
- Sonarr on port `8989`
|
||||
- Lidarr on port `8686`
|
||||
- Bazarr on port `6767`
|
||||
- qBittorrent variants in `docker/torr.nix`
|
||||
|
||||
Related supporting details:
|
||||
|
||||
- The Starr apps and qBittorrent both mount `/data` from `vars.primary_torr`.
|
||||
- PostgreSQL is enabled locally and used by the arr stack.
|
||||
|
||||
Two caveats matter before expecting the flow to work:
|
||||
|
||||
1. Jellyfin is not currently defined on palatine-hill in this repo, so this guide treats Jellyfin as the destination media server you will point at the finished library.
|
||||
2. qBittorrent is using host-exposed or gluetun-attached networking rather than `arrnet`, so the Starr apps should connect to qBittorrent through the host and published port.
|
||||
|
||||
## Required Hardlink Layout
|
||||
|
||||
For hardlinks and atomic moves to work reliably, these rules need to be true:
|
||||
|
||||
- qBittorrent and the Starr apps must see the same underlying host filesystem and the same ZFS dataset.
|
||||
- qBittorrent and the Starr apps should use the same in-container prefix, ideally `/data`.
|
||||
- Downloads and the final library must be separate directories.
|
||||
- Jellyfin should only read the final media library, not the download directories.
|
||||
|
||||
For ZFS specifically, sibling child datasets in the same pool are not enough. Hardlinks do not cross dataset boundaries, so `/data/torrents` and `/data/media` must be directories inside the same dataset.
|
||||
|
||||
Recommended logical layout inside containers:
|
||||
|
||||
```text
|
||||
/data
|
||||
├── torrents
|
||||
│ ├── movies
|
||||
│ ├── music
|
||||
│ └── tv
|
||||
└── media
|
||||
├── movies
|
||||
├── music
|
||||
└── tv
|
||||
```
|
||||
|
||||
This repo draft uses one shared host root from `vars.primary_torr` and mounts that as `/data` for qBittorrent, Radarr, Sonarr, Lidarr, Bazarr, Unpackerr, and Notifiarr.
|
||||
|
||||
### What Matters
|
||||
|
||||
The exact host path is less important than this invariant:
|
||||
|
||||
```text
|
||||
same host filesystem + same container path prefix + separate downloads/media folders
|
||||
```
|
||||
|
||||
If you split torrents and media across different datasets, imports may still be made to work with copies or path fixes, but hardlinks and instant moves will not be dependable.
|
||||
|
||||
## Suggested Host Layout
|
||||
|
||||
Once you choose a shared host root, create a structure like this beneath it:
|
||||
|
||||
```text
|
||||
data/
|
||||
├── torrents/
|
||||
│ ├── movies/
|
||||
│ ├── music/
|
||||
│ └── tv/
|
||||
└── media/
|
||||
├── movies/
|
||||
├── music/
|
||||
└── tv/
|
||||
```
|
||||
|
||||
In this repo draft, the shared host root is `vars.primary_torr`, with container mounts set to `"${vars.primary_torr}/data:/data"`.
|
||||
|
||||
The matching container paths should then be:
|
||||
|
||||
- qBittorrent download root: `/data/torrents`
|
||||
- Radarr root folder: `/data/media/movies`
|
||||
- Sonarr root folder: `/data/media/tv`
|
||||
- Lidarr root folder: `/data/media/music`
|
||||
- Jellyfin library roots: `/data/media/movies`, `/data/media/tv`, `/data/media/music`
|
||||
|
||||
Do not point any Starr app root folder at `/data/torrents`.
|
||||
|
||||
## Service Roles
|
||||
|
||||
### Jellyseerr
|
||||
|
||||
Jellyseerr is the user-facing request layer. It should:
|
||||
|
||||
- connect to Jellyfin for users, authentication, and media availability
|
||||
- connect to Radarr for movies
|
||||
- connect to Sonarr for series
|
||||
|
||||
Jellyseerr does not talk directly to qBittorrent for normal request flow.
|
||||
|
||||
### Prowlarr Values
|
||||
|
||||
Prowlarr should be the single source of indexers. Configure indexers there, then sync them to:
|
||||
|
||||
- Radarr
|
||||
- Sonarr
|
||||
- Lidarr
|
||||
|
||||
This avoids duplicating indexer setup in every Starr app.
|
||||
|
||||
### Radarr, Sonarr, Lidarr
|
||||
|
||||
These apps should:
|
||||
|
||||
- receive requests from Jellyseerr
|
||||
- search indexers via Prowlarr
|
||||
- send downloads to qBittorrent
|
||||
- import completed downloads from `/data/torrents/...` into `/data/media/...`
|
||||
|
||||
### qBittorrent
|
||||
|
||||
qBittorrent should only download into `/data/torrents/...` and should not write directly into `/data/media/...`.
|
||||
|
||||
### Jellyfin
|
||||
|
||||
Jellyfin should only read the final library under `/data/media/...`.
|
||||
|
||||
## Configuration Order
|
||||
|
||||
Set the stack up in this order:
|
||||
|
||||
1. Shared path layout
|
||||
2. qBittorrent
|
||||
3. Prowlarr
|
||||
4. Radarr, Sonarr, Lidarr
|
||||
5. Jellyfin
|
||||
6. Jellyseerr
|
||||
7. Bazarr
|
||||
|
||||
That order keeps each layer pointing at services that already exist.
|
||||
|
||||
## qBittorrent Setup
|
||||
|
||||
The repo defines these Web UI ports:
|
||||
|
||||
- `8082` for `qbit`
|
||||
- `8081` for `qbitVPN`
|
||||
- `8083` for `qbitPerm`
|
||||
|
||||
Choose one instance for the Starr apps to use and keep that consistent.
|
||||
|
||||
Recommended qBittorrent settings:
|
||||
|
||||
- Default save path: `/data/torrents`
|
||||
- Category mode: enabled
|
||||
- Automatic torrent management: enabled
|
||||
- Incomplete directory: optional, but avoid a different filesystem if you want cheap moves
|
||||
- Listening port: use the instance-specific torrent port if applicable
|
||||
|
||||
Recommended categories:
|
||||
|
||||
- `radarr` -> `/data/torrents/movies`
|
||||
- `sonarr` -> `/data/torrents/tv`
|
||||
- `lidarr` -> `/data/torrents/music`
|
||||
|
||||
This matches the TRaSH pattern and keeps imports predictable.
|
||||
|
||||
## Prowlarr Setup
|
||||
|
||||
In Prowlarr:
|
||||
|
||||
1. Add your indexers.
|
||||
2. Add app connections for Radarr, Sonarr, and Lidarr.
|
||||
3. Sync indexers from Prowlarr into each Starr app.
|
||||
|
||||
Use the container hostnames from the repo when apps share the `arrnet` network:
|
||||
|
||||
- `http://radarr:7878`
|
||||
- `http://sonarr:8989`
|
||||
- `http://lidarr:8686`
|
||||
|
||||
If you are configuring through host-exposed ports in a browser from outside Docker, use the server host and published ports instead.
|
||||
|
||||
## Radarr Setup
|
||||
|
||||
In Radarr:
|
||||
|
||||
1. Add a root folder: `/data/media/movies`
|
||||
2. Add qBittorrent as the download client
|
||||
3. Set the category to `radarr`
|
||||
4. Prefer completed download handling on
|
||||
5. Do not use a movie root inside the downloads tree
|
||||
|
||||
For qBittorrent, use the chosen instance endpoint.
|
||||
|
||||
Examples:
|
||||
|
||||
- preferred for this repo draft: `http://<server>:8082`
|
||||
- VPN-backed alternative if you intentionally use that instance: `http://<server>:8081`
|
||||
|
||||
The important part is that the path qBittorrent writes must still be visible to Radarr as `/data/torrents/movies`.
|
||||
|
||||
## Sonarr Setup
|
||||
|
||||
In Sonarr:
|
||||
|
||||
1. Add a root folder: `/data/media/tv`
|
||||
2. Add qBittorrent as the download client
|
||||
3. Set the category to `sonarr`
|
||||
4. Enable completed download handling
|
||||
|
||||
Keep the same shared-path rule: Sonarr must be able to see qBittorrent output directly at `/data/torrents/tv`.
|
||||
|
||||
## Lidarr Setup
|
||||
|
||||
In Lidarr:
|
||||
|
||||
1. Add a root folder: `/data/media/music`
|
||||
2. Add qBittorrent as the download client
|
||||
3. Set the category to `lidarr`
|
||||
4. Enable completed download handling
|
||||
|
||||
## Jellyfin Setup
|
||||
|
||||
Jellyfin should be pointed only at the final library paths:
|
||||
|
||||
- Movies: `/data/media/movies`
|
||||
- TV: `/data/media/tv`
|
||||
- Music: `/data/media/music`
|
||||
|
||||
Do not add `/data/torrents` as a Jellyfin library.
|
||||
|
||||
If Jellyfin runs in Docker, mount only the media sub-tree if you want a tighter boundary:
|
||||
|
||||
- `host-shared-root/media:/data/media`
|
||||
|
||||
If Jellyfin runs directly on the host, point it at the equivalent host paths.
|
||||
|
||||
## Jellyseerr Setup
|
||||
|
||||
Jellyseerr in this repo runs on port `5055` and joins both `arrnet` and `haproxy-net`.
|
||||
|
||||
Configure it with:
|
||||
|
||||
1. Jellyfin server URL
|
||||
2. Jellyfin API key
|
||||
3. Radarr server URL and API key
|
||||
4. Sonarr server URL and API key
|
||||
|
||||
Suggested internal URLs when services share `arrnet`:
|
||||
|
||||
- Radarr: `http://radarr:7878`
|
||||
- Sonarr: `http://sonarr:8989`
|
||||
|
||||
Jellyseerr request defaults should map:
|
||||
|
||||
- Movies -> Radarr root `/data/media/movies`
|
||||
- Series -> Sonarr root `/data/media/tv`
|
||||
|
||||
After that, user flow is:
|
||||
|
||||
1. User requests media in Jellyseerr
|
||||
2. Jellyseerr hands the request to Radarr or Sonarr
|
||||
3. The Starr app searches via Prowlarr indexers
|
||||
4. The Starr app sends the download to qBittorrent with its category
|
||||
5. qBittorrent writes into `/data/torrents/...`
|
||||
6. The Starr app imports into `/data/media/...`
|
||||
7. Jellyfin scans or detects the new item in the final library
|
||||
|
||||
## Bazarr Setup
|
||||
|
||||
Bazarr is optional for the request-to-library path, but it fits after Radarr and Sonarr are stable.
|
||||
|
||||
Point Bazarr at:
|
||||
|
||||
- Radarr
|
||||
- Sonarr
|
||||
- the final media library visible under `/data/media`
|
||||
|
||||
It does not need the download tree for ordinary subtitle management.
|
||||
|
||||
## Remote Path Mappings
|
||||
|
||||
If you align the mounts properly, you should not need remote path mappings.
|
||||
|
||||
That is the preferred setup.
|
||||
|
||||
Only use remote path mappings if the downloader and the importing app see different absolute paths for the same files.
|
||||
In a Docker-only setup with shared `/data`, that is a sign the mounts are wrong rather than a feature you should rely on.
|
||||
|
||||
## ZFS Notes
|
||||
|
||||
For a hardlink-safe media layout on ZFS:
|
||||
|
||||
- Keep `/data/torrents` and `/data/media` in the same dataset.
|
||||
- Do not split them into separate child datasets if you want hardlinks.
|
||||
- It is fine to keep qBittorrent config, Jellyfin metadata, and other appdata in separate datasets because those do not need hardlinks with payload files.
|
||||
|
||||
For `ZFS-primary/torr`, a better baseline for bulk media than a small-record, high-compression profile is:
|
||||
|
||||
- `recordsize=1M`
|
||||
- `compression=zstd-3` or `lz4`
|
||||
- `sync=standard`
|
||||
- `logbias=throughput`
|
||||
- `primarycache=metadata`
|
||||
- `dnodesize=auto`
|
||||
|
||||
These are new-write behavior settings. `recordsize` only affects newly written data.
|
||||
|
||||
## Repo-Specific Notes
|
||||
|
||||
- Arr containers use `PUID=600` and `PGID=100`.
|
||||
- qBittorrent containers also use `PUID=600` and `PGID=100`.
|
||||
- The arr stack uses the local PostgreSQL service via `/var/run/postgresql`.
|
||||
- `jellyseerr` stores config under `${vars.primary_docker}/overseerr` even though the container is Jellyseerr.
|
||||
- The hardlink draft in this repo chooses `vars.primary_torr` as the shared `/data` root.
|
||||
|
||||
- `systems/palatine-hill/docker/default.nix` imports `torr.nix`, so the downloader stack is part of the host configuration.
|
||||
|
||||
## Deployment Checklist (Exact Values)
|
||||
|
||||
Use this checklist when configuring the stack so every app matches the current draft.
|
||||
|
||||
### Shared Paths
|
||||
|
||||
- Shared container path for arr + downloader: `/data`
|
||||
- Download root: `/data/torrents`
|
||||
- Media roots:
|
||||
- Movies: `/data/media/movies`
|
||||
- TV: `/data/media/tv`
|
||||
- Music: `/data/media/music`
|
||||
|
||||
### qBittorrent (Primary Instance)
|
||||
|
||||
- Web UI URL for Starr apps: `http://<server>:8082`
|
||||
- Web UI port: `8082`
|
||||
- Torrent port: `29432` (TCP/UDP)
|
||||
- Default save path: `/data/torrents`
|
||||
- Category save-path mode: enabled
|
||||
- Automatic torrent management: enabled
|
||||
|
||||
Category paths:
|
||||
|
||||
- `radarr` -> `/data/torrents/movies`
|
||||
- `sonarr` -> `/data/torrents/tv`
|
||||
- `lidarr` -> `/data/torrents/music`
|
||||
|
||||
### Radarr
|
||||
|
||||
- URL: `http://radarr:7878` (inside arr network)
|
||||
- Root folder: `/data/media/movies`
|
||||
- Download client: qBittorrent at `http://<server>:8082`
|
||||
- qBittorrent category: `radarr`
|
||||
- Completed download handling: enabled
|
||||
|
||||
### Sonarr
|
||||
|
||||
- URL: `http://sonarr:8989` (inside arr network)
|
||||
- Root folder: `/data/media/tv`
|
||||
- Download client: qBittorrent at `http://<server>:8082`
|
||||
- qBittorrent category: `sonarr`
|
||||
- Completed download handling: enabled
|
||||
|
||||
### Lidarr
|
||||
|
||||
- URL: `http://lidarr:8686` (inside arr network)
|
||||
- Root folder: `/data/media/music`
|
||||
- Download client: qBittorrent at `http://<server>:8082`
|
||||
- qBittorrent category: `lidarr`
|
||||
- Completed download handling: enabled
|
||||
|
||||
### Prowlarr
|
||||
|
||||
- URL: `http://prowlarr:9696` (inside arr network)
|
||||
- App sync targets:
|
||||
- `http://radarr:7878`
|
||||
- `http://sonarr:8989`
|
||||
- `http://lidarr:8686`
|
||||
|
||||
### Jellyseerr Values
|
||||
|
||||
- URL: `http://jellyseerr:5055` (internal) or via your reverse proxy externally
|
||||
- Radarr target: `http://radarr:7878`
|
||||
- Sonarr target: `http://sonarr:8989`
|
||||
- Request defaults:
|
||||
- Movies root: `/data/media/movies`
|
||||
- Series root: `/data/media/tv`
|
||||
|
||||
### Jellyfin Values
|
||||
|
||||
- Library roots only:
|
||||
- `/data/media/movies`
|
||||
- `/data/media/tv`
|
||||
- `/data/media/music`
|
||||
- Do not add `/data/torrents` as a library.
|
||||
|
||||
## Validation Checklist
|
||||
|
||||
Use this after setup:
|
||||
|
||||
1. qBittorrent can create files in `/data/torrents/movies`, `/data/torrents/tv`, and `/data/torrents/music`.
|
||||
2. Radarr, Sonarr, and Lidarr can browse both `/data/torrents/...` and `/data/media/...`.
|
||||
3. A test download lands in the expected category folder.
|
||||
4. The corresponding Starr app imports the item into `/data/media/...` without copy-delete behavior.
|
||||
5. Jellyfin can see the imported file in the final library.
|
||||
6. Jellyseerr shows the item as available after import and scan.
|
||||
|
||||
If imports fail or hardlinks do not work, check the mount design before changing app logic.
|
||||
@@ -121,7 +121,7 @@ fi
|
||||
DOTS="/mnt/root/dotfiles"
|
||||
GC="git -C $DOTS"
|
||||
sudo mkdir -p "$DOTS" || echo "directory $DOTS already exists"
|
||||
sudo $GC clone https://nayeonie.com/ahuston-0/nix-dotfiles.git .
|
||||
sudo $GC clone https://github.com/RAD-Development/nix-dotfiles.git .
|
||||
sudo $GC checkout "$FEATUREBRANCH"
|
||||
|
||||
# Create ssh keys
|
||||
@@ -179,4 +179,4 @@ Host github.com
|
||||
IdentityFile /root/.ssh/id_ed25519_ghdeploy
|
||||
EOF
|
||||
printf "%s" "$SSHCONFIG" | sudo tee /root/.ssh/config
|
||||
sudo "$GC" remote set-url origin 'ssh://gitea@nayeonie.com:2222/ahuston-0/nix-dotfiles.git'
|
||||
sudo "$GC" remote set-url origin 'git@github.com:RAD-Development/nix-dotfiles.git'
|
||||
|
||||
409
flake.lock
generated
409
flake.lock
generated
@@ -5,11 +5,11 @@
|
||||
"fromYaml": "fromYaml"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1755819240,
|
||||
"narHash": "sha256-qcMhnL7aGAuFuutH4rq9fvAhCpJWVHLcHVZLtPctPlo=",
|
||||
"lastModified": 1732200724,
|
||||
"narHash": "sha256-+R1BH5wHhfnycySb7Sy5KbYEaTJZWm1h+LW1OtyhiTs=",
|
||||
"owner": "SenchoPens",
|
||||
"repo": "base16.nix",
|
||||
"rev": "75ed5e5e3fce37df22e49125181fa37899c3ccd6",
|
||||
"rev": "153d52373b0fb2d343592871009a286ec8837aec",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -21,28 +21,27 @@
|
||||
"base16-fish": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1765809053,
|
||||
"narHash": "sha256-XCUQLoLfBJ8saWms2HCIj4NEN+xNsWBlU1NrEPcQG4s=",
|
||||
"lastModified": 1622559957,
|
||||
"narHash": "sha256-PebymhVYbL8trDVVXxCvZgc0S5VxI7I1Hv4RMSquTpA=",
|
||||
"owner": "tomyun",
|
||||
"repo": "base16-fish",
|
||||
"rev": "86cbea4dca62e08fb7fd83a70e96472f92574782",
|
||||
"rev": "2f6dd973a9075dabccd26f1cded09508180bf5fe",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "tomyun",
|
||||
"repo": "base16-fish",
|
||||
"rev": "86cbea4dca62e08fb7fd83a70e96472f92574782",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"base16-helix": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1760703920,
|
||||
"narHash": "sha256-m82fGUYns4uHd+ZTdoLX2vlHikzwzdu2s2rYM2bNwzw=",
|
||||
"lastModified": 1736852337,
|
||||
"narHash": "sha256-esD42YdgLlEh7koBrSqcT7p2fsMctPAcGl/+2sYJa2o=",
|
||||
"owner": "tinted-theming",
|
||||
"repo": "base16-helix",
|
||||
"rev": "d646af9b7d14bff08824538164af99d0c521b185",
|
||||
"rev": "03860521c40b0b9c04818f2218d9cc9efc21e7a5",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -68,19 +67,43 @@
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"firefox-addons": {
|
||||
"disko": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1736864502,
|
||||
"narHash": "sha256-ItkIZyebGvNH2dK9jVGzJHGPtb6BSWLN8Gmef16NeY0=",
|
||||
"owner": "nix-community",
|
||||
"repo": "disko",
|
||||
"rev": "0141aabed359f063de7413f80d906e1d98c0c123",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"ref": "latest",
|
||||
"repo": "disko",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"firefox-addons": {
|
||||
"inputs": {
|
||||
"flake-utils": [
|
||||
"flake-utils"
|
||||
],
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"dir": "pkgs/firefox-addons",
|
||||
"lastModified": 1777348977,
|
||||
"narHash": "sha256-9aKuCI5TKHKnP073B1VzBdLRLAQJE7R9rbJWaSFXr3M=",
|
||||
"lastModified": 1743483509,
|
||||
"narHash": "sha256-aHnOrBV4UpVQuv9RHmYaRb0jZRBpmeDWsZWBRoSCc5w=",
|
||||
"owner": "rycee",
|
||||
"repo": "nur-expressions",
|
||||
"rev": "a314975f42bfa9665bf77d1586ee0e123790ed27",
|
||||
"rev": "692aba39210127804151c9436e4b87fe1d0e0f2b",
|
||||
"type": "gitlab"
|
||||
},
|
||||
"original": {
|
||||
@@ -93,11 +116,11 @@
|
||||
"firefox-gnome-theme": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1775176642,
|
||||
"narHash": "sha256-2veEED0Fg7Fsh81tvVDNYR6SzjqQxa7hbi18Jv4LWpM=",
|
||||
"lastModified": 1741628778,
|
||||
"narHash": "sha256-RsvHGNTmO2e/eVfgYK7g+eYEdwwh7SbZa+gZkT24MEA=",
|
||||
"owner": "rafaelmardojai",
|
||||
"repo": "firefox-gnome-theme",
|
||||
"rev": "179704030c5286c729b5b0522037d1d51341022c",
|
||||
"rev": "5a81d390bb64afd4e81221749ec4bffcbeb5fa80",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -125,11 +148,11 @@
|
||||
"nixpkgs-lib": "nixpkgs-lib"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1775087534,
|
||||
"narHash": "sha256-91qqW8lhL7TLwgQWijoGBbiD4t7/q75KTi8NxjVmSmA=",
|
||||
"lastModified": 1741352980,
|
||||
"narHash": "sha256-+u2UunDA4Cl5Fci3m7S643HzKmIDAe+fiXrLqYsR2fs=",
|
||||
"owner": "hercules-ci",
|
||||
"repo": "flake-parts",
|
||||
"rev": "3107b77cd68437b9a76194f0f7f9c55f2329ca5b",
|
||||
"rev": "f4330d22f1c5d2ba72d3d22df5597d123fdb60a9",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -142,15 +165,16 @@
|
||||
"inputs": {
|
||||
"nixpkgs-lib": [
|
||||
"stylix",
|
||||
"nur",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1775087534,
|
||||
"narHash": "sha256-91qqW8lhL7TLwgQWijoGBbiD4t7/q75KTi8NxjVmSmA=",
|
||||
"lastModified": 1733312601,
|
||||
"narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=",
|
||||
"owner": "hercules-ci",
|
||||
"repo": "flake-parts",
|
||||
"rev": "3107b77cd68437b9a76194f0f7f9c55f2329ca5b",
|
||||
"rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -179,6 +203,27 @@
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils_2": {
|
||||
"inputs": {
|
||||
"systems": [
|
||||
"stylix",
|
||||
"systems"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1731533236,
|
||||
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"fromYaml": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
@@ -195,6 +240,32 @@
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"git-hooks": {
|
||||
"inputs": {
|
||||
"flake-compat": [
|
||||
"stylix",
|
||||
"flake-compat"
|
||||
],
|
||||
"gitignore": "gitignore_2",
|
||||
"nixpkgs": [
|
||||
"stylix",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1741379162,
|
||||
"narHash": "sha256-srpAbmJapkaqGRE3ytf3bj4XshspVR5964OX5LfjDWc=",
|
||||
"owner": "cachix",
|
||||
"repo": "git-hooks.nix",
|
||||
"rev": "b5a62751225b2f62ff3147d0a334055ebadcd5cc",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "cachix",
|
||||
"repo": "git-hooks.nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"gitignore": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
@@ -216,20 +287,42 @@
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"gitignore_2": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"stylix",
|
||||
"git-hooks",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1709087332,
|
||||
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
|
||||
"owner": "hercules-ci",
|
||||
"repo": "gitignore.nix",
|
||||
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "hercules-ci",
|
||||
"repo": "gitignore.nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"gnome-shell": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1767737596,
|
||||
"narHash": "sha256-eFujfIUQDgWnSJBablOuG+32hCai192yRdrNHTv0a+s=",
|
||||
"lastModified": 1732369855,
|
||||
"narHash": "sha256-JhUWbcYPjHO3Xs3x9/Z9RuqXbcp5yhPluGjwsdE2GMg=",
|
||||
"owner": "GNOME",
|
||||
"repo": "gnome-shell",
|
||||
"rev": "ef02db02bf0ff342734d525b5767814770d85b49",
|
||||
"rev": "dadd58f630eeea41d645ee225a63f719390829dc",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "GNOME",
|
||||
"ref": "47.2",
|
||||
"repo": "gnome-shell",
|
||||
"rev": "ef02db02bf0ff342734d525b5767814770d85b49",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
@@ -240,11 +333,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1777349711,
|
||||
"narHash": "sha256-PGKgo2dO6fK603QGI+DWXdKmS09pbJjjTxwRHdhkGZA=",
|
||||
"lastModified": 1743482579,
|
||||
"narHash": "sha256-u81nqA4UuRatKDkzUuIfVYdLMw8birEy+99oXpdyXhY=",
|
||||
"owner": "nix-community",
|
||||
"repo": "home-manager",
|
||||
"rev": "c1140540536d483e2730320100f6835d62c94fdf",
|
||||
"rev": "c21383b556609ce1ad901aa08b4c6fbd9e0c7af0",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -260,11 +353,11 @@
|
||||
"nixpkgs": "nixpkgs"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1764967565,
|
||||
"narHash": "sha256-abU6ikAK96VFhqkyBBMpoCQedyVbXSObn5aPq+s/wr0=",
|
||||
"lastModified": 1743447171,
|
||||
"narHash": "sha256-5+lbBGlOmVa+dNY8L4ElDCkB7+VedZpPTcBOFIF+0TM=",
|
||||
"ref": "add-gitea-pulls",
|
||||
"rev": "7123dd8981bc1dfadbea009441c5e7d3ad770578",
|
||||
"revCount": 4450,
|
||||
"rev": "a20f37b97fa43eea1570bf125ee95f19ba7e2674",
|
||||
"revCount": 4327,
|
||||
"type": "git",
|
||||
"url": "https://nayeonie.com/ahuston-0/hydra"
|
||||
},
|
||||
@@ -281,11 +374,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1776426061,
|
||||
"narHash": "sha256-3rROoGl8xBsIOM+5m+qZS4GJnsdQPAH3NJJe1OUfJ5o=",
|
||||
"lastModified": 1743417258,
|
||||
"narHash": "sha256-YItzk1pj8Kz+b7VlC9zN1pSZ6CuX35asYy3HuMQ3lBQ=",
|
||||
"owner": "hyprwm",
|
||||
"repo": "contrib",
|
||||
"rev": "1f71628d86a7701fd5ba0f8aeabe15376f4c6afc",
|
||||
"rev": "bc2ad24e0b2e66c3e164994c4897cd94a933fd10",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -295,18 +388,38 @@
|
||||
}
|
||||
},
|
||||
"nix": {
|
||||
"flake": false,
|
||||
"inputs": {
|
||||
"flake-compat": [
|
||||
"hydra"
|
||||
],
|
||||
"flake-parts": [
|
||||
"hydra"
|
||||
],
|
||||
"git-hooks-nix": [
|
||||
"hydra"
|
||||
],
|
||||
"nixpkgs": [
|
||||
"hydra",
|
||||
"nixpkgs"
|
||||
],
|
||||
"nixpkgs-23-11": [
|
||||
"hydra"
|
||||
],
|
||||
"nixpkgs-regression": [
|
||||
"hydra"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1760573252,
|
||||
"narHash": "sha256-mcvNeNdJP5R7huOc8Neg0qZESx/0DMg8Fq6lsdx0x8U=",
|
||||
"lastModified": 1739899400,
|
||||
"narHash": "sha256-q/RgA4bB7zWai4oPySq9mch7qH14IEeom2P64SXdqHs=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nix",
|
||||
"rev": "3c39583e5512729f9c5a44c3b03b6467a2acd963",
|
||||
"rev": "e310c19a1aeb1ce1ed4d41d5ab2d02db596e0918",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "2.32-maintenance",
|
||||
"ref": "2.26-maintenance",
|
||||
"repo": "nix",
|
||||
"type": "github"
|
||||
}
|
||||
@@ -314,16 +427,15 @@
|
||||
"nix-eval-jobs": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1760478325,
|
||||
"narHash": "sha256-hA+NOH8KDcsuvH7vJqSwk74PyZP3MtvI/l+CggZcnTc=",
|
||||
"lastModified": 1739500569,
|
||||
"narHash": "sha256-3wIReAqdTALv39gkWXLMZQvHyBOc3yPkWT2ZsItxedY=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nix-eval-jobs",
|
||||
"rev": "daa42f9e9c84aeff1e325dd50fda321f53dfd02c",
|
||||
"rev": "4b392b284877d203ae262e16af269f702df036bc",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"ref": "v2.32.1",
|
||||
"repo": "nix-eval-jobs",
|
||||
"type": "github"
|
||||
}
|
||||
@@ -335,11 +447,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1777181277,
|
||||
"narHash": "sha256-yVJbd07ortDRAttDFmDV5p220aOLTHgVAx//0nW/xW8=",
|
||||
"lastModified": 1743306489,
|
||||
"narHash": "sha256-LROaIjSLo347cwcHRfSpqzEOa2FoLSeJwU4dOrGm55E=",
|
||||
"owner": "Mic92",
|
||||
"repo": "nix-index-database",
|
||||
"rev": "b8eb7acee0f7604fe1bf6a5b3dcf5254369180fa",
|
||||
"rev": "b3696bfb6c24aa61428839a99e8b40c53ac3a82d",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -363,35 +475,6 @@
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixos-cosmic": {
|
||||
"inputs": {
|
||||
"flake-compat": [
|
||||
"flake-compat"
|
||||
],
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
],
|
||||
"nixpkgs-stable": [
|
||||
"nixpkgs-stable"
|
||||
],
|
||||
"rust-overlay": [
|
||||
"rust-overlay"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1751591814,
|
||||
"narHash": "sha256-A4lgvuj4v+Pr8MniXz1FBG0DXOygi8tTECR+j53FMhM=",
|
||||
"owner": "lilyinstarlight",
|
||||
"repo": "nixos-cosmic",
|
||||
"rev": "fef2d0c78c4e4d6c600a88795af193131ff51bdc",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "lilyinstarlight",
|
||||
"repo": "nixos-cosmic",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixos-generators": {
|
||||
"inputs": {
|
||||
"nixlib": "nixlib",
|
||||
@@ -400,11 +483,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1769813415,
|
||||
"narHash": "sha256-nnVmNNKBi1YiBNPhKclNYDORoHkuKipoz7EtVnXO50A=",
|
||||
"lastModified": 1742568034,
|
||||
"narHash": "sha256-QaMEhcnscfF2MqB7flZr+sLJMMYZPnvqO4NYf9B4G38=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nixos-generators",
|
||||
"rev": "8946737ff703382fda7623b9fab071d037e897d5",
|
||||
"rev": "42ee229088490e3777ed7d1162cb9e9d8c3dbb11",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -415,11 +498,11 @@
|
||||
},
|
||||
"nixos-hardware": {
|
||||
"locked": {
|
||||
"lastModified": 1776983936,
|
||||
"narHash": "sha256-ZOQyNqSvJ8UdrrqU1p7vaFcdL53idK+LOM8oRWEWh6o=",
|
||||
"lastModified": 1743420942,
|
||||
"narHash": "sha256-b/exDDQSLmENZZgbAEI3qi9yHkuXAXCPbormD8CSJXo=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixos-hardware",
|
||||
"rev": "2096f3f411ce46e88a79ae4eafcfc9df8ed41c61",
|
||||
"rev": "de6fc5551121c59c01e2a3d45b277a6d05077bc4",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -438,42 +521,42 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1776036369,
|
||||
"narHash": "sha256-TxBJY5IwDu3peDIK3b9+A7pwqBaFRCAIllaRSfYMQtI=",
|
||||
"owner": "NuschtOS",
|
||||
"lastModified": 1743178092,
|
||||
"narHash": "sha256-fOMsQpcdIbj+wOexiCSEW2J4Erqd0LRV25aYiOx4QRw=",
|
||||
"owner": "SuperSandro2000",
|
||||
"repo": "nixos-modules",
|
||||
"rev": "2bea807180b3931cf8765078205fd9171dbfd2b5",
|
||||
"rev": "77ff511df92a9d4a828bdf032b8f48e7c3d99b50",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NuschtOS",
|
||||
"owner": "SuperSandro2000",
|
||||
"repo": "nixos-modules",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1764020296,
|
||||
"narHash": "sha256-6zddwDs2n+n01l+1TG6PlyokDdXzu/oBmEejcH5L5+A=",
|
||||
"lastModified": 1739461644,
|
||||
"narHash": "sha256-1o1qR0KYozYGRrnqytSpAhVBYLNBHX+Lv6I39zGRzKM=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "a320ce8e6e2cc6b4397eef214d202a50a4583829",
|
||||
"rev": "97a719c9f0a07923c957cf51b20b329f9fb9d43f",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-25.11-small",
|
||||
"ref": "nixos-24.11-small",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs-lib": {
|
||||
"locked": {
|
||||
"lastModified": 1774748309,
|
||||
"narHash": "sha256-+U7gF3qxzwD5TZuANzZPeJTZRHS29OFQgkQ2kiTJBIQ=",
|
||||
"lastModified": 1740877520,
|
||||
"narHash": "sha256-oiwv/ZK/2FhGxrCkQkB83i7GnWXPPLzoqFHpDD3uYpk=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"rev": "333c4e0545a6da976206c74db8773a1645b5870a",
|
||||
"rev": "147dee35aab2193b174e4c0868bd80ead5ce755c",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -484,11 +567,11 @@
|
||||
},
|
||||
"nixpkgs-stable": {
|
||||
"locked": {
|
||||
"lastModified": 1751274312,
|
||||
"narHash": "sha256-/bVBlRpECLVzjV19t5KMdMFWSwKLtb5RyXdjz3LJT+g=",
|
||||
"lastModified": 1743367904,
|
||||
"narHash": "sha256-sOos1jZGKmT6xxPvxGQyPTApOunXvScV4lNjBCXd/CI=",
|
||||
"owner": "nixos",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "50ab793786d9de88ee30ec4e4c24fb4236fc2674",
|
||||
"rev": "7ffe0edc685f14b8c635e3d6591b0bbb97365e6c",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -500,37 +583,35 @@
|
||||
},
|
||||
"nixpkgs_2": {
|
||||
"locked": {
|
||||
"lastModified": 1776877367,
|
||||
"narHash": "sha256-EHq1/OX139R1RvBzOJ0aMRT3xnWyqtHBRUBuO1gFzjI=",
|
||||
"lastModified": 1743472173,
|
||||
"narHash": "sha256-xwNv3FYTC5pl4QVZ79gUxqCEvqKzcKdXycpH5UbYscw=",
|
||||
"owner": "nixos",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "0726a0ecb6d4e08f6adced58726b95db924cef57",
|
||||
"rev": "88e992074d86ad50249de12b7fb8dbaadf8dc0c5",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nixos",
|
||||
"ref": "nixos-unstable",
|
||||
"ref": "nixos-unstable-small",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nur": {
|
||||
"inputs": {
|
||||
"flake-parts": [
|
||||
"stylix",
|
||||
"flake-parts"
|
||||
],
|
||||
"flake-parts": "flake-parts_2",
|
||||
"nixpkgs": [
|
||||
"stylix",
|
||||
"nixpkgs"
|
||||
]
|
||||
],
|
||||
"treefmt-nix": "treefmt-nix"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1775228139,
|
||||
"narHash": "sha256-ebbeHmg+V7w8050bwQOuhmQHoLOEOfqKzM1KgCTexK4=",
|
||||
"lastModified": 1741693509,
|
||||
"narHash": "sha256-emkxnsZstiJWmGACimyAYqIKz2Qz5We5h1oBVDyQjLw=",
|
||||
"owner": "nix-community",
|
||||
"repo": "NUR",
|
||||
"rev": "601971b9c89e0304561977f2c28fa25e73aa7132",
|
||||
"rev": "5479646b2574837f1899da78bdf9a48b75a9fb27",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -550,11 +631,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1776796298,
|
||||
"narHash": "sha256-PcRvlWayisPSjd0UcRQbhG8Oqw78AcPE6x872cPRHN8=",
|
||||
"lastModified": 1742649964,
|
||||
"narHash": "sha256-DwOTp7nvfi8mRfuL1escHDXabVXFGT1VlPD1JHrtrco=",
|
||||
"owner": "cachix",
|
||||
"repo": "git-hooks.nix",
|
||||
"rev": "3cfd774b0a530725a077e17354fbdb87ea1c4aad",
|
||||
"rev": "dcf5072734cb576d2b0c59b2ac44f5050b5eac82",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -565,6 +646,7 @@
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"disko": "disko",
|
||||
"firefox-addons": "firefox-addons",
|
||||
"flake-compat": "flake-compat",
|
||||
"flake-parts": "flake-parts",
|
||||
@@ -573,7 +655,6 @@
|
||||
"hydra": "hydra",
|
||||
"hyprland-contrib": "hyprland-contrib",
|
||||
"nix-index-database": "nix-index-database",
|
||||
"nixos-cosmic": "nixos-cosmic",
|
||||
"nixos-generators": "nixos-generators",
|
||||
"nixos-hardware": "nixos-hardware",
|
||||
"nixos-modules": "nixos-modules",
|
||||
@@ -594,11 +675,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1777346187,
|
||||
"narHash": "sha256-oVxyGjpiIsrXhWTJVUOs38fZQkLjd0nZGOY9K7Kfot8=",
|
||||
"lastModified": 1743475035,
|
||||
"narHash": "sha256-uLjVsb4Rxnp1zmFdPCDmdODd4RY6ETOeRj0IkC0ij/4=",
|
||||
"owner": "oxalica",
|
||||
"repo": "rust-overlay",
|
||||
"rev": "146e7bf7569b8288f24d41d806b9f584f7cfd5b5",
|
||||
"rev": "bee11c51c2cda3ac57c9e0149d94b86cc1b00d13",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -614,11 +695,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1777338324,
|
||||
"narHash": "sha256-bc+ZZCmOTNq86/svGnw0tVpH7vJaLYvGLLKFYP08Q8E=",
|
||||
"lastModified": 1743502316,
|
||||
"narHash": "sha256-zI2WSkU+ei4zCxT+IVSQjNM9i0ST++T2qSFXTsAND7s=",
|
||||
"owner": "Mic92",
|
||||
"repo": "sops-nix",
|
||||
"rev": "8eaee5c45428b28b8c47a83e4c09dccec5f279b5",
|
||||
"rev": "e7f4d7ed8bce8dfa7d2f2fe6f8b8f523e54646f8",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -634,24 +715,32 @@
|
||||
"base16-helix": "base16-helix",
|
||||
"base16-vim": "base16-vim",
|
||||
"firefox-gnome-theme": "firefox-gnome-theme",
|
||||
"flake-parts": "flake-parts_2",
|
||||
"flake-compat": [
|
||||
"flake-compat"
|
||||
],
|
||||
"flake-utils": "flake-utils_2",
|
||||
"git-hooks": "git-hooks",
|
||||
"gnome-shell": "gnome-shell",
|
||||
"home-manager": [
|
||||
"home-manager"
|
||||
],
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
],
|
||||
"nur": "nur",
|
||||
"systems": "systems",
|
||||
"tinted-foot": "tinted-foot",
|
||||
"tinted-kitty": "tinted-kitty",
|
||||
"tinted-schemes": "tinted-schemes",
|
||||
"tinted-tmux": "tinted-tmux",
|
||||
"tinted-zed": "tinted-zed"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1776893932,
|
||||
"narHash": "sha256-AFD5cf9eNqXq1brHS63xeZy2xKZMgG9J86XJ9I2eLn8=",
|
||||
"lastModified": 1743496321,
|
||||
"narHash": "sha256-xhHg8ixBhZngvGOMb2SJuJEHhHA10n8pA02fEKuKzek=",
|
||||
"owner": "danth",
|
||||
"repo": "stylix",
|
||||
"rev": "84971726c7ef0bb3669a5443e151cc226e65c518",
|
||||
"rev": "54721996d6590267d095f63297d9051e9342a33d",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -690,30 +779,48 @@
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"tinted-foot": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1726913040,
|
||||
"narHash": "sha256-+eDZPkw7efMNUf3/Pv0EmsidqdwNJ1TaOum6k7lngDQ=",
|
||||
"owner": "tinted-theming",
|
||||
"repo": "tinted-foot",
|
||||
"rev": "fd1b924b6c45c3e4465e8a849e67ea82933fcbe4",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "tinted-theming",
|
||||
"repo": "tinted-foot",
|
||||
"rev": "fd1b924b6c45c3e4465e8a849e67ea82933fcbe4",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"tinted-kitty": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1735730497,
|
||||
"narHash": "sha256-4KtB+FiUzIeK/4aHCKce3V9HwRvYaxX+F1edUrfgzb8=",
|
||||
"lastModified": 1716423189,
|
||||
"narHash": "sha256-2xF3sH7UIwegn+2gKzMpFi3pk5DlIlM18+vj17Uf82U=",
|
||||
"owner": "tinted-theming",
|
||||
"repo": "tinted-kitty",
|
||||
"rev": "de6f888497f2c6b2279361bfc790f164bfd0f3fa",
|
||||
"rev": "eb39e141db14baef052893285df9f266df041ff8",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "tinted-theming",
|
||||
"repo": "tinted-kitty",
|
||||
"rev": "eb39e141db14baef052893285df9f266df041ff8",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"tinted-schemes": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1772661346,
|
||||
"narHash": "sha256-4eu3LqB9tPqe0Vaqxd4wkZiBbthLbpb7llcoE/p5HT0=",
|
||||
"lastModified": 1741468895,
|
||||
"narHash": "sha256-YKM1RJbL68Yp2vESBqeZQBjTETXo8mCTTzLZyckCfZk=",
|
||||
"owner": "tinted-theming",
|
||||
"repo": "schemes",
|
||||
"rev": "13b5b0c299982bb361039601e2d72587d6846294",
|
||||
"rev": "47c8c7726e98069cade5827e5fb2bfee02ce6991",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -725,11 +832,11 @@
|
||||
"tinted-tmux": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1772934010,
|
||||
"narHash": "sha256-x+6+4UvaG+RBRQ6UaX+o6DjEg28u4eqhVRM9kpgJGjQ=",
|
||||
"lastModified": 1740877430,
|
||||
"narHash": "sha256-zWcCXgdC4/owfH/eEXx26y5BLzTrefjtSLFHWVD5KxU=",
|
||||
"owner": "tinted-theming",
|
||||
"repo": "tinted-tmux",
|
||||
"rev": "c3529673a5ab6e1b6830f618c45d9ce1bcdd829d",
|
||||
"rev": "d48ee86394cbe45b112ba23ab63e33656090edb4",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -741,11 +848,11 @@
|
||||
"tinted-zed": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1772909925,
|
||||
"narHash": "sha256-jx/5+pgYR0noHa3hk2esin18VMbnPSvWPL5bBjfTIAU=",
|
||||
"lastModified": 1725758778,
|
||||
"narHash": "sha256-8P1b6mJWyYcu36WRlSVbuj575QWIFZALZMTg5ID/sM4=",
|
||||
"owner": "tinted-theming",
|
||||
"repo": "base16-zed",
|
||||
"rev": "b4d3a1b3bcbd090937ef609a0a3b37237af974df",
|
||||
"rev": "122c9e5c0e6f27211361a04fae92df97940eccf9",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -754,6 +861,28 @@
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"treefmt-nix": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"stylix",
|
||||
"nur",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1733222881,
|
||||
"narHash": "sha256-JIPcz1PrpXUCbaccEnrcUS8jjEb/1vJbZz5KkobyFdM=",
|
||||
"owner": "numtide",
|
||||
"repo": "treefmt-nix",
|
||||
"rev": "49717b5af6f80172275d47a418c9719a31a78b53",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "treefmt-nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"wired-notify": {
|
||||
"inputs": {
|
||||
"flake-parts": [
|
||||
@@ -767,11 +896,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1777064547,
|
||||
"narHash": "sha256-hssXWvyy6bzaGi9FuZQPGxVBLzQKRPDht13O0Y+Qxmo=",
|
||||
"lastModified": 1743305055,
|
||||
"narHash": "sha256-NIsi8Dno9YsOLUUTrLU4p+hxYeJr3Vkg1gIpQKVTaDs=",
|
||||
"owner": "Toqozz",
|
||||
"repo": "wired-notify",
|
||||
"rev": "95edd8613b1636639857a3fba403155cef82eb5d",
|
||||
"rev": "75d43f54a02b15f2a15f5c1a0e1c7d15100067a6",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
||||
93
flake.nix
93
flake.nix
@@ -6,42 +6,52 @@
|
||||
"https://cache.nixos.org/?priority=1&want-mass-query=true"
|
||||
"https://nix-community.cachix.org/?priority=10&want-mass-query=true"
|
||||
"https://attic.nayeonie.com/nix-cache"
|
||||
"https://cosmic.cachix.org/"
|
||||
];
|
||||
trusted-substituters = [
|
||||
"https://cache.nixos.org"
|
||||
"https://nix-community.cachix.org"
|
||||
"https://attic.nayeonie.com/nix-cache"
|
||||
"https://cosmic.cachix.org/"
|
||||
];
|
||||
trusted-public-keys = [
|
||||
"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="
|
||||
"nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs="
|
||||
"nix-cache:grGRsHhqNDhkEuTODvHJXYmoCClntC+U8XAJQzwMaZM="
|
||||
"cosmic.cachix.org-1:Dya9IyXD4xdBehWjrkPv6rtxpmMdRel02smYzA85dPE="
|
||||
"nix-cache:trR+y5nwpQHR4hystoogubFmp97cewkjWeqqbygRQRs="
|
||||
];
|
||||
trusted-users = [ "root" ];
|
||||
allow-import-from-derivation = true;
|
||||
fallback = true;
|
||||
};
|
||||
|
||||
inputs = {
|
||||
# flake inputs with no explicit deps (in alphabetic order)
|
||||
flake-compat.url = "https://flakehub.com/f/edolstra/flake-compat/1.tar.gz";
|
||||
flake-parts.url = "github:hercules-ci/flake-parts";
|
||||
nixos-hardware.url = "github:NixOS/nixos-hardware";
|
||||
#nixpkgs.url = "github:nuschtos/nuschtpkgs/nixos-unstable";
|
||||
#nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable-small";
|
||||
nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
|
||||
nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable-small";
|
||||
#nixpkgs.url = "github:nixos/nixpkgs/1d2fe0135f360c970aee1d57a53f816f3c9bddae?narHash=sha256-Up7YlXIupmT7fEtC4Oj676M91INg0HAoamiswAsA3rc%3D";
|
||||
nixpkgs-stable.url = "github:nixos/nixpkgs/nixos-24.11";
|
||||
systems.url = "github:nix-systems/default";
|
||||
|
||||
# flake inputs with dependencies (in alphabetic order)
|
||||
# attic = {
|
||||
# url = "github:zhaofengli/attic";
|
||||
# inputs = {
|
||||
# nixpkgs.follows = "nixpkgs";
|
||||
# nixpkgs-stable.follows = "nixpkgs-stable";
|
||||
# flake-compat.follows = "flake-compat";
|
||||
# flake-parts.follows = "flake-parts";
|
||||
# };
|
||||
# };
|
||||
disko = {
|
||||
url = "github:nix-community/disko/latest";
|
||||
inputs = {
|
||||
nixpkgs.follows = "nixpkgs";
|
||||
};
|
||||
};
|
||||
|
||||
firefox-addons = {
|
||||
url = "gitlab:rycee/nur-expressions?dir=pkgs/firefox-addons";
|
||||
inputs = {
|
||||
nixpkgs.follows = "nixpkgs";
|
||||
flake-utils.follows = "flake-utils";
|
||||
};
|
||||
};
|
||||
|
||||
@@ -57,9 +67,9 @@
|
||||
|
||||
hydra = {
|
||||
url = "git+https://nayeonie.com/ahuston-0/hydra?ref=add-gitea-pulls";
|
||||
inputs = {
|
||||
#nixpkgs.follows = "nixpkgs";
|
||||
};
|
||||
# inputs = {
|
||||
# nixpkgs.follows = "nixpkgs";
|
||||
# };
|
||||
};
|
||||
|
||||
hyprland-contrib = {
|
||||
@@ -67,36 +77,18 @@
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
};
|
||||
|
||||
#lix-module = {
|
||||
# url = "git+https://git.lix.systems/lix-project/nixos-module?ref=stable";
|
||||
# inputs = {
|
||||
# nixpkgs.follows = "nixpkgs";
|
||||
# flake-utils.follows = "flake-utils";
|
||||
# };
|
||||
#};
|
||||
|
||||
nix-index-database = {
|
||||
url = "github:Mic92/nix-index-database";
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
};
|
||||
|
||||
nixos-cosmic = {
|
||||
url = "github:lilyinstarlight/nixos-cosmic";
|
||||
inputs = {
|
||||
flake-compat.follows = "flake-compat";
|
||||
nixpkgs.follows = "nixpkgs";
|
||||
nixpkgs-stable.follows = "nixpkgs-stable";
|
||||
rust-overlay.follows = "rust-overlay";
|
||||
};
|
||||
};
|
||||
|
||||
nixos-generators = {
|
||||
url = "github:nix-community/nixos-generators";
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
};
|
||||
|
||||
nixos-modules = {
|
||||
url = "github:NuschtOS/nixos-modules";
|
||||
url = "github:SuperSandro2000/nixos-modules";
|
||||
inputs = {
|
||||
nixpkgs.follows = "nixpkgs";
|
||||
flake-utils.follows = "flake-utils";
|
||||
@@ -128,6 +120,8 @@
|
||||
stylix = {
|
||||
url = "github:danth/stylix";
|
||||
inputs = {
|
||||
flake-compat.follows = "flake-compat";
|
||||
home-manager.follows = "home-manager";
|
||||
nixpkgs.follows = "nixpkgs";
|
||||
};
|
||||
};
|
||||
@@ -148,7 +142,7 @@
|
||||
systems = [
|
||||
"x86_64-linux"
|
||||
# disable arm for now as hydra isn't set up for it
|
||||
# "aarch64-linuxa
|
||||
# "aarch64-linux"
|
||||
];
|
||||
|
||||
forEachSystem = lib.genAttrs systems;
|
||||
@@ -164,50 +158,25 @@
|
||||
lib = self;
|
||||
}
|
||||
);
|
||||
packageSetup = import ./pkgs/default.nix { inherit nixpkgs; };
|
||||
inherit (packageSetup) localPackagesOverlay;
|
||||
inherit (lib.adev.systems) genSystems getImages;
|
||||
inherit (lib.rad-dev.systems) genSystems getImages;
|
||||
inherit (self) outputs; # for hydra
|
||||
in
|
||||
rec {
|
||||
inherit lib; # for allowing use of custom functions in nix repl
|
||||
|
||||
overlays.default = localPackagesOverlay;
|
||||
|
||||
hydraJobs = import ./hydra/jobs.nix { inherit inputs outputs systems; };
|
||||
formatter = forEachSystem (system: nixpkgs.legacyPackages.${system}.nixfmt);
|
||||
formatter = forEachSystem (system: nixpkgs.legacyPackages.${system}.nixfmt-rfc-style);
|
||||
|
||||
nixosConfigurations = genSystems inputs outputs src (src + "/systems");
|
||||
homeConfigurations = {
|
||||
"alice" = inputs.home-manager.lib.homeManagerConfiguration {
|
||||
pkgs = packageSetup.mkPkgs "x86_64-linux";
|
||||
modules = [
|
||||
inputs.stylix.homeModules.stylix
|
||||
inputs.sops-nix.homeManagerModules.sops
|
||||
inputs.nix-index-database.homeModules.nix-index
|
||||
{
|
||||
nixpkgs.config = {
|
||||
allowUnfree = true;
|
||||
allowUnfreePredicate = _: true;
|
||||
};
|
||||
}
|
||||
./users/alice/home.nix
|
||||
];
|
||||
extraSpecialArgs = {
|
||||
inherit inputs outputs;
|
||||
machineConfig = {
|
||||
server = false;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
images = {
|
||||
install-iso = getImages nixosConfigurations "install-iso";
|
||||
iso = getImages nixosConfigurations "iso";
|
||||
qcow = getImages nixosConfigurations "qcow";
|
||||
};
|
||||
|
||||
packages = forEachSystem packageSetup.mkPackages;
|
||||
packages.x86_64-linux.lego-latest =
|
||||
nixpkgs.legacyPackages.x86_64-linux.callPackage ./pkgs/lego-latest/default.nix
|
||||
{ };
|
||||
|
||||
checks = import ./checks.nix { inherit inputs forEachSystem formatter; };
|
||||
devShells = import ./shell.nix { inherit inputs forEachSystem checks; };
|
||||
|
||||
@@ -8,7 +8,7 @@ let
|
||||
pkgs = inputs.nixpkgs.legacyPackages.x86_64-linux;
|
||||
|
||||
getCfg = _: cfg: cfg.config.system.build.toplevel;
|
||||
getHome = _: cfg: cfg.config.home.activationPackage;
|
||||
hostToAgg = _: cfg: cfg;
|
||||
|
||||
# get per-system check derivation (with optional postfix)
|
||||
mapSystems =
|
||||
@@ -22,7 +22,11 @@ rec {
|
||||
inherit (outputs) formatter devShells checks;
|
||||
|
||||
host = lib.mapAttrs getCfg outputs.nixosConfigurations;
|
||||
home = lib.mapAttrs getHome outputs.homeConfigurations; # homeConfigurations.alice.config.home.activationPackage
|
||||
|
||||
hosts = pkgs.releaseTools.aggregate {
|
||||
name = "hosts";
|
||||
constituents = lib.mapAttrsToList hostToAgg host;
|
||||
};
|
||||
|
||||
devChecks = pkgs.releaseTools.aggregate {
|
||||
name = "devChecks";
|
||||
|
||||
@@ -18,7 +18,7 @@ let
|
||||
};
|
||||
|
||||
prs = readJSONFile pulls;
|
||||
#refs = readJSONFile branches;
|
||||
refs = readJSONFile branches;
|
||||
|
||||
# template for creating a job
|
||||
makeJob =
|
||||
@@ -47,19 +47,19 @@ let
|
||||
giteaHost = "ssh://gitea@nayeonie.com:2222";
|
||||
repo = "ahuston-0/nix-dotfiles";
|
||||
# # Create a hydra job for a branch
|
||||
#jobOfRef =
|
||||
# name:
|
||||
# { ref, ... }:
|
||||
# if ((builtins.match "^refs/heads/(.*)$" ref) == null) then
|
||||
# null
|
||||
# else
|
||||
# {
|
||||
# name = builtins.replaceStrings [ "/" ] [ "-" ] "branch-${name}";
|
||||
# value = makeJob {
|
||||
# description = "Branch ${name}";
|
||||
# flake = "git+${giteaHost}/${repo}?ref=${ref}";
|
||||
# };
|
||||
# };
|
||||
jobOfRef =
|
||||
name:
|
||||
{ ref, ... }:
|
||||
if ((builtins.match "^refs/heads/(.*)$" ref) == null) then
|
||||
null
|
||||
else
|
||||
{
|
||||
name = builtins.replaceStrings [ "/" ] [ "-" ] "branch-${name}";
|
||||
value = makeJob {
|
||||
description = "Branch ${name}";
|
||||
flake = "git+${giteaHost}/${repo}?ref=${ref}";
|
||||
};
|
||||
};
|
||||
|
||||
# Create a hydra job for a PR
|
||||
jobOfPR = id: info: {
|
||||
@@ -77,12 +77,12 @@ let
|
||||
# wrapper function for reading json from file
|
||||
readJSONFile = f: builtins.fromJSON (builtins.readFile f);
|
||||
# remove null values from a set, in-case of branches that don't exist
|
||||
#mapFilter = f: l: builtins.filter (x: (x != null)) (map f l);
|
||||
mapFilter = f: l: builtins.filter (x: (x != null)) (map f l);
|
||||
|
||||
# Create job set from PRs and branches
|
||||
jobs = makeSpec (
|
||||
builtins.listToAttrs (map ({ name, value }: jobOfPR name value) (attrsToList prs))
|
||||
#// builtins.listToAttrs (mapFilter ({ name, value }: jobOfRef name value) (attrsToList refs))
|
||||
// builtins.listToAttrs (mapFilter ({ name, value }: jobOfRef name value) (attrsToList refs))
|
||||
);
|
||||
in
|
||||
{
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{ lib, ... }:
|
||||
{
|
||||
# create adev namespace for lib
|
||||
adev = rec {
|
||||
# create rad-dev namespace for lib
|
||||
rad-dev = rec {
|
||||
systems = import ./systems.nix { inherit lib; };
|
||||
container-utils = import ./container-utils.nix { inherit lib; };
|
||||
|
||||
|
||||
@@ -156,7 +156,6 @@ rec {
|
||||
modules ? [ ],
|
||||
server ? true,
|
||||
sops ? true,
|
||||
lix ? false,
|
||||
system ? "x86_64-linux",
|
||||
}@args:
|
||||
lib.nixosSystem {
|
||||
@@ -169,20 +168,18 @@ rec {
|
||||
system
|
||||
;
|
||||
};
|
||||
modules = [
|
||||
modules =
|
||||
[
|
||||
inputs.nixos-modules.nixosModule
|
||||
inputs.nix-index-database.nixosModules.nix-index
|
||||
{ nixpkgs.overlays = [ outputs.overlays.default ]; }
|
||||
(genHostName hostname)
|
||||
(configPath + "/hardware.nix")
|
||||
(configPath + "/configuration.nix")
|
||||
]
|
||||
++ modules
|
||||
++ (lib.adev.fileList (src + "/modules"))
|
||||
++ (lib.rad-dev.fileList (src + "/modules"))
|
||||
++ genWrapper sops genSops args
|
||||
++ genWrapper home genHome args
|
||||
++ genWrapper true genUsers args
|
||||
#++ genWrapper lix ({ ... }: [ inputs.lix-module.nixosModules.default ]) args
|
||||
++ genWrapper (system != "x86_64-linux") genNonX86 args;
|
||||
};
|
||||
|
||||
@@ -225,7 +222,7 @@ rec {
|
||||
// import configPath { inherit inputs; }
|
||||
);
|
||||
}
|
||||
) (lib.adev.lsdir path)
|
||||
) (lib.rad-dev.lsdir path)
|
||||
);
|
||||
|
||||
# gets all the images of a specified format
|
||||
|
||||
@@ -59,10 +59,9 @@ in
|
||||
repos = lib.filterAttrs (_: { enable, ... }: enable) cfg.repo;
|
||||
in
|
||||
lib.mkIf cfg.enable {
|
||||
environment.systemPackages = [
|
||||
pkgs.git
|
||||
]
|
||||
++ lib.optionals (lib.any (ssh-key: ssh-key != "") (lib.adev.mapGetAttr "ssh-key" repos)) [
|
||||
environment.systemPackages =
|
||||
[ pkgs.git ]
|
||||
++ lib.optionals (lib.any (ssh-key: ssh-key != "") (lib.rad-dev.mapGetAttr "ssh-key" repos)) [
|
||||
pkgs.openssh
|
||||
];
|
||||
|
||||
|
||||
@@ -35,9 +35,8 @@ in
|
||||
config.boot = lib.mkIf cfg.default {
|
||||
supportedFilesystems = [ cfg.filesystem ];
|
||||
tmp.useTmpfs = true;
|
||||
kernelParams = [
|
||||
"nordrand"
|
||||
]
|
||||
kernelParams =
|
||||
[ "nordrand" ]
|
||||
++ lib.optional (cfg.cpuType == "amd") "kvm-amd"
|
||||
++ lib.optional cfg.fullDiskEncryption "ip=<ip-addr>::<ip-gateway>:<netmask>";
|
||||
initrd = {
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
{ lib, ... }:
|
||||
|
||||
{
|
||||
services.fwupd.enable = lib.mkDefault true;
|
||||
}
|
||||
@@ -1,10 +1,10 @@
|
||||
{ lib, config, ... }:
|
||||
let
|
||||
cfg = config.services.adev.k3s-net;
|
||||
cfg = config.services.rad-dev.k3s-net;
|
||||
in
|
||||
{
|
||||
options = {
|
||||
services.adev.k3s-net = {
|
||||
services.rad-dev.k3s-net = {
|
||||
enable = lib.mkOption {
|
||||
default = false;
|
||||
example = true;
|
||||
|
||||
@@ -1,78 +0,0 @@
|
||||
{
|
||||
config,
|
||||
pkgs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
|
||||
{
|
||||
options = {
|
||||
services.kubernetes = {
|
||||
enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = "Whether to enable Kubernetes services";
|
||||
};
|
||||
|
||||
version = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "1.28.0";
|
||||
description = "Kubernetes version to use";
|
||||
};
|
||||
|
||||
clusterName = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "palatine-hill-cluster";
|
||||
description = "Name of the Kubernetes cluster";
|
||||
};
|
||||
|
||||
controlPlaneEndpoint = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "localhost:6443";
|
||||
description = "Control plane endpoint";
|
||||
};
|
||||
|
||||
networking = lib.mkOption {
|
||||
type = lib.types.attrs;
|
||||
default = { };
|
||||
description = "Kubernetes networking configuration";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.services.kubernetes.enable {
|
||||
environment.systemPackages = with pkgs; [
|
||||
kubectl
|
||||
kubernetes
|
||||
];
|
||||
|
||||
## Enable containerd for Kubernetes
|
||||
#virtualisation.containerd.enable = true;
|
||||
|
||||
## Enable kubelet
|
||||
#services.kubelet = {
|
||||
# enable = true;
|
||||
# extraFlags = {
|
||||
# "pod-infra-container-image" = "registry.k8s.io/pause:3.9";
|
||||
# };
|
||||
#};
|
||||
|
||||
## Enable kubeadm for cluster initialization
|
||||
#environment.etc."kubeadm.yaml".text = ''
|
||||
# apiVersion: kubeadm.k8s.io/v1beta3
|
||||
# kind: InitConfiguration
|
||||
# localAPIEndpoint:
|
||||
# advertiseAddress: 127.0.0.1
|
||||
# bindPort: 6443
|
||||
# ---
|
||||
# apiVersion: kubeadm.k8s.io/v1beta3
|
||||
# kind: ClusterConfiguration
|
||||
# clusterName: ${config.services.kubernetes.clusterName}
|
||||
# controlPlaneEndpoint: ${config.services.kubernetes.controlPlaneEndpoint}
|
||||
# networking:
|
||||
# serviceSubnet: 10.96.0.0/12
|
||||
# podSubnet: 10.244.0.0/16
|
||||
# dnsDomain: cluster.local
|
||||
#'';
|
||||
};
|
||||
}
|
||||
@@ -4,9 +4,8 @@
|
||||
console.keyMap = lib.mkDefault "us";
|
||||
|
||||
i18n = {
|
||||
defaultLocale = lib.mkDefault "en_US.UTF-8";
|
||||
defaultCharset = "UTF-8";
|
||||
#extraLocales = lib.mkDefault [ "en_US.UTF-8/UTF-8" ];
|
||||
defaultLocale = lib.mkDefault "en_US.utf8";
|
||||
supportedLocales = lib.mkDefault [ "en_US.UTF-8/UTF-8" ];
|
||||
extraLocaleSettings = lib.mkDefault {
|
||||
LC_ADDRESS = "en_US.UTF-8";
|
||||
LC_IDENTIFICATION = "en_US.UTF-8";
|
||||
|
||||
@@ -1,15 +1,12 @@
|
||||
{ lib, pkgs, ... }:
|
||||
{
|
||||
nix = {
|
||||
#package = pkgs.nixVersions.latest;
|
||||
package = pkgs.nixVersions.latest;
|
||||
diffSystem = true;
|
||||
settings = {
|
||||
experimental-features = [
|
||||
"nix-command"
|
||||
"flakes"
|
||||
"blake3-hashes"
|
||||
"git-hashing"
|
||||
"verified-fetches"
|
||||
];
|
||||
keep-outputs = true;
|
||||
builders-use-substitutes = true;
|
||||
|
||||
7
modules/programs.nix
Normal file
7
modules/programs.nix
Normal file
@@ -0,0 +1,7 @@
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
environment.systemPackages = with pkgs; [
|
||||
git
|
||||
python312
|
||||
];
|
||||
}
|
||||
@@ -13,23 +13,7 @@
|
||||
enable = lib.mkDefault true;
|
||||
flags = [ "--accept-flake-config" ];
|
||||
randomizedDelaySec = "1h";
|
||||
runGarbageCollection = true;
|
||||
persistent = true;
|
||||
flake = "git+ssh://nayeonie.com/ahuston-0/nix-dotfiles.git";
|
||||
};
|
||||
|
||||
services.nix-verify = {
|
||||
daily = {
|
||||
enable = true;
|
||||
verify-contents = false;
|
||||
verify-trust = false;
|
||||
};
|
||||
weekly = {
|
||||
enable = true;
|
||||
verify-contents = true;
|
||||
verify-trust = false;
|
||||
frequency = "1week";
|
||||
randomized-delay-sec = "6hour";
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
{
|
||||
...
|
||||
}:
|
||||
|
||||
{
|
||||
users.groups = {
|
||||
users = {
|
||||
gid = 100;
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,110 +0,0 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
|
||||
let
|
||||
cfg = config.services.nix-verify;
|
||||
|
||||
verify-type =
|
||||
with lib.types;
|
||||
attrsOf (
|
||||
submodule (
|
||||
{ name, ... }:
|
||||
{
|
||||
options = {
|
||||
enable = lib.mkEnableOption "verify status of nix store";
|
||||
|
||||
service-name = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "the name of the systemd service. ${name} by default";
|
||||
default = name;
|
||||
};
|
||||
|
||||
verify-contents = lib.mkEnableOption "verify contents of nix store";
|
||||
|
||||
verify-trust = lib.mkEnableOption "verify if each path is trusted";
|
||||
|
||||
signatures-needed = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
description = "number of signatures needed when verifying trust. Not needed if verify-trust is disabled or not set.";
|
||||
default = -1;
|
||||
};
|
||||
|
||||
frequency = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "systemd-timer compatible time between pulls";
|
||||
default = "1day";
|
||||
};
|
||||
|
||||
randomized-delay-sec = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "systemd-timer compatible time randomized delay";
|
||||
default = "0";
|
||||
};
|
||||
};
|
||||
}
|
||||
)
|
||||
);
|
||||
in
|
||||
{
|
||||
options = {
|
||||
services.nix-verify = lib.mkOption {
|
||||
type = verify-type;
|
||||
default = { };
|
||||
};
|
||||
};
|
||||
|
||||
config =
|
||||
let
|
||||
verifiers = lib.filterAttrs (_: { enable, ... }: enable) cfg;
|
||||
in
|
||||
{
|
||||
systemd.services = lib.mapAttrs' (
|
||||
_:
|
||||
{
|
||||
service-name,
|
||||
verify-contents,
|
||||
verify-trust,
|
||||
signatures-needed,
|
||||
...
|
||||
}:
|
||||
lib.nameValuePair "nix-verifiers@${service-name}" {
|
||||
requires = [ "multi-user.target" ];
|
||||
after = [ "multi-user.target" ];
|
||||
description =
|
||||
"Verify nix store (verify-contents: ${lib.boolToString verify-contents}, verify-trust: "
|
||||
+ "${lib.boolToString verify-trust}, signatures-needed: ${builtins.toString signatures-needed})";
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
User = "root";
|
||||
ExecStart =
|
||||
"${config.nix.package}/bin/nix store verify --all "
|
||||
+ lib.optionalString (!verify-contents) "--no-contents "
|
||||
+ lib.optionalString (!verify-trust) "--no-trust "
|
||||
+ lib.optionalString (signatures-needed >= 0) "--sigs-needed ${signatures-needed}";
|
||||
};
|
||||
}
|
||||
) verifiers;
|
||||
|
||||
systemd.timers = lib.mapAttrs' (
|
||||
_:
|
||||
{
|
||||
service-name,
|
||||
frequency,
|
||||
randomized-delay-sec,
|
||||
...
|
||||
}:
|
||||
lib.nameValuePair "nix-verifiers@${service-name}" {
|
||||
wantedBy = [ "timers.target" ];
|
||||
timerConfig = {
|
||||
OnBootSec = frequency;
|
||||
OnUnitActiveSec = frequency;
|
||||
RandomizedDelaySec = randomized-delay-sec;
|
||||
Unit = "nix-verifiers@${service-name}.service";
|
||||
};
|
||||
}
|
||||
) verifiers;
|
||||
};
|
||||
}
|
||||
@@ -5,11 +5,11 @@
|
||||
...
|
||||
}:
|
||||
let
|
||||
cfg = config.services.adev.yubikey;
|
||||
cfg = config.services.rad-dev.yubikey;
|
||||
in
|
||||
{
|
||||
options = {
|
||||
services.adev.yubikey = {
|
||||
services.rad-dev.yubikey = {
|
||||
enable = lib.mkEnableOption "enable yubikey defaults";
|
||||
enable-desktop-app = lib.mkEnableOption "installs desktop application";
|
||||
};
|
||||
|
||||
@@ -19,7 +19,6 @@
|
||||
libnotify,
|
||||
}:
|
||||
let
|
||||
maintainers = import ../maintainers.nix;
|
||||
bins = [
|
||||
jq
|
||||
bitwarden-cli
|
||||
@@ -65,7 +64,6 @@ stdenv.mkDerivation {
|
||||
description = "Wrapper for Bitwarden and Rofi";
|
||||
homepage = "https://github.com/mattydebie/bitwarden-rofi";
|
||||
license = licenses.gpl3;
|
||||
maintainers = [ maintainers.alice ];
|
||||
platforms = platforms.linux;
|
||||
};
|
||||
|
||||
|
||||
@@ -1,52 +0,0 @@
|
||||
{
|
||||
lib,
|
||||
fetchFromGitHub,
|
||||
rustPlatform,
|
||||
pkg-config,
|
||||
openssl,
|
||||
alsa-lib,
|
||||
dbus,
|
||||
libxkbcommon,
|
||||
libxcb,
|
||||
}:
|
||||
|
||||
let
|
||||
maintainers = import ../maintainers.nix;
|
||||
in
|
||||
rustPlatform.buildRustPackage rec {
|
||||
pname = "claurst";
|
||||
version = "0.0.9";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "Kuberwastaken";
|
||||
repo = "claurst";
|
||||
rev = "v${version}";
|
||||
hash = "sha256-bTQHtZGZxhEAki0JxSC8smAC3w+otm8ubHvZ9MvwDaE=";
|
||||
};
|
||||
|
||||
cargoRoot = "src-rust";
|
||||
cargoHash = "sha256-6+B43spqmUZ983YMl5UBH5647DcUOS2ngw5ChMIPFFo=";
|
||||
buildAndTestSubdir = "src-rust";
|
||||
doCheck = false;
|
||||
|
||||
nativeBuildInputs = [
|
||||
pkg-config
|
||||
];
|
||||
|
||||
buildInputs = [
|
||||
openssl
|
||||
alsa-lib
|
||||
dbus
|
||||
libxkbcommon
|
||||
libxcb
|
||||
];
|
||||
|
||||
meta = with lib; {
|
||||
description = "Terminal coding agent written in Rust";
|
||||
homepage = "https://github.com/Kuberwastaken/claurst";
|
||||
license = licenses.gpl3Only;
|
||||
mainProgram = "claurst";
|
||||
maintainers = [ maintainers.alice ];
|
||||
platforms = platforms.linux;
|
||||
};
|
||||
}
|
||||
@@ -1,35 +0,0 @@
|
||||
{ nixpkgs }:
|
||||
let
|
||||
localPackagesOverlay = final: _prev: {
|
||||
lego-latest = final.callPackage ./lego-latest/default.nix { };
|
||||
claurst = final.callPackage ./claurst/default.nix { };
|
||||
bitwarden-rofi = final.callPackage ./bitwarden-rofi/default.nix { };
|
||||
};
|
||||
|
||||
mkPkgs =
|
||||
system:
|
||||
import nixpkgs {
|
||||
inherit system;
|
||||
overlays = [ localPackagesOverlay ];
|
||||
};
|
||||
|
||||
mkPackages =
|
||||
system:
|
||||
let
|
||||
pkgs = mkPkgs system;
|
||||
in
|
||||
{
|
||||
inherit (pkgs)
|
||||
lego-latest
|
||||
claurst
|
||||
bitwarden-rofi
|
||||
;
|
||||
};
|
||||
in
|
||||
{
|
||||
inherit
|
||||
localPackagesOverlay
|
||||
mkPkgs
|
||||
mkPackages
|
||||
;
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
{
|
||||
alice = {
|
||||
name = "Alice Huston";
|
||||
email = "aliceghuston@gmail.com";
|
||||
github = "ahuston-0";
|
||||
githubId = 43225907;
|
||||
};
|
||||
}
|
||||
@@ -38,14 +38,13 @@ forEachSystem (
|
||||
};
|
||||
|
||||
# constructs a custom shell with commonly used utilities
|
||||
adev = pkgs.mkShell {
|
||||
rad-dev = pkgs.mkShell {
|
||||
packages = with pkgs; [
|
||||
deadnix
|
||||
pre-commit
|
||||
openssl
|
||||
treefmt
|
||||
statix
|
||||
nixfmt
|
||||
nixfmt-rfc-style
|
||||
jsonfmt
|
||||
mdformat
|
||||
shfmt
|
||||
@@ -57,7 +56,7 @@ forEachSystem (
|
||||
default = pkgs.mkShell {
|
||||
inputsFrom = [
|
||||
pre-commit
|
||||
adev
|
||||
rad-dev
|
||||
sops
|
||||
];
|
||||
};
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
{
|
||||
@@ -18,7 +18,6 @@
|
||||
./stylix.nix
|
||||
./wifi.nix
|
||||
./zerotier.nix
|
||||
../palatine-hill/ollama.nix
|
||||
];
|
||||
|
||||
time.timeZone = "America/New_York";
|
||||
@@ -38,32 +37,17 @@
|
||||
default = true;
|
||||
};
|
||||
|
||||
i18n = {
|
||||
defaultLocale = "en_US.utf8";
|
||||
supportedLocales = [ "en_US.UTF-8/UTF-8" ];
|
||||
};
|
||||
|
||||
sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
|
||||
|
||||
services = {
|
||||
ollama = {
|
||||
package = lib.mkForce pkgs.ollama-rocm;
|
||||
models = lib.mkForce "${config.services.ollama.home}/models";
|
||||
loadModels = lib.mkForce [
|
||||
"deepseek-r1:1.5b"
|
||||
"lennyerik/zeta"
|
||||
"nomic-embed-text:latest"
|
||||
"glm-4.7-flash"
|
||||
"magistral"
|
||||
"devstral-small-2"
|
||||
"starcoder2:7b"
|
||||
];
|
||||
};
|
||||
avahi = {
|
||||
enable = true;
|
||||
#publish.enable = true;
|
||||
nssmdns4 = true;
|
||||
openFirewall = true;
|
||||
};
|
||||
flatpak.enable = true;
|
||||
calibre-web = {
|
||||
# temp disable this
|
||||
enable = false;
|
||||
enable = true;
|
||||
listen = {
|
||||
ip = "127.0.0.1";
|
||||
};
|
||||
@@ -72,7 +56,7 @@
|
||||
};
|
||||
};
|
||||
calibre-server = {
|
||||
enable = false;
|
||||
enable = true;
|
||||
user = "calibre-web";
|
||||
group = "calibre-web";
|
||||
|
||||
@@ -81,37 +65,32 @@
|
||||
|
||||
fwupd = {
|
||||
enable = true;
|
||||
# package =
|
||||
# (import (builtins.fetchTarball {
|
||||
# url = "https://github.com/NixOS/nixpkgs/archive/bb2009ca185d97813e75736c2b8d1d8bb81bde05.tar.gz";
|
||||
# sha256 = "sha256:003qcrsq5g5lggfrpq31gcvj82lb065xvr7bpfa8ddsw8x4dnysk";
|
||||
# }) { inherit (pkgs) system; }).fwupd;
|
||||
package =
|
||||
(import (builtins.fetchTarball {
|
||||
url = "https://github.com/NixOS/nixpkgs/archive/bb2009ca185d97813e75736c2b8d1d8bb81bde05.tar.gz";
|
||||
sha256 = "sha256:003qcrsq5g5lggfrpq31gcvj82lb065xvr7bpfa8ddsw8x4dnysk";
|
||||
}) { inherit (pkgs) system; }).fwupd;
|
||||
};
|
||||
mullvad-vpn.enable = true;
|
||||
|
||||
fprintd.enable = lib.mkForce false;
|
||||
openssh.enable = lib.mkForce false;
|
||||
|
||||
adev.yubikey = {
|
||||
rad-dev.yubikey = {
|
||||
enable = true;
|
||||
enable-desktop-app = true;
|
||||
};
|
||||
};
|
||||
|
||||
users.users = {
|
||||
alice.extraGroups = [ "calibre-web" ];
|
||||
};
|
||||
users.users.alice.extraGroups = [ "calibre-web" ];
|
||||
|
||||
system.stateVersion = "24.05";
|
||||
|
||||
programs.adb.enable = true;
|
||||
|
||||
environment.variables = {
|
||||
"KWIN_DRM_NO_DIRECT_SCANOUT" = "1";
|
||||
};
|
||||
|
||||
#nixpkgs.config = {
|
||||
# rocmSupport = true;
|
||||
#};
|
||||
|
||||
sops = {
|
||||
defaultSopsFile = ./secrets.yaml;
|
||||
#secrets = {
|
||||
|
||||
@@ -3,11 +3,10 @@
|
||||
system = "x86_64-linux";
|
||||
home = true;
|
||||
sops = true;
|
||||
lix = true;
|
||||
server = false;
|
||||
users = [ "alice" ];
|
||||
modules = [
|
||||
inputs.nixos-hardware.nixosModules.framework-16-amd-ai-300-series
|
||||
inputs.nixos-hardware.nixosModules.framework-16-7040-amd
|
||||
inputs.stylix.nixosModules.stylix
|
||||
{
|
||||
environment.systemPackages = [
|
||||
|
||||
@@ -32,10 +32,23 @@
|
||||
environment.sessionVariables.NIXOS_OZONE_WL = "1";
|
||||
|
||||
services = {
|
||||
xserver = {
|
||||
enable = true;
|
||||
displayManager.session = [
|
||||
{
|
||||
manage = "desktop";
|
||||
name = "hyprland";
|
||||
start = ''
|
||||
bash ${./hypr/wrappedhl} &
|
||||
waitPID=$!
|
||||
'';
|
||||
}
|
||||
];
|
||||
displayManager.gdm = {
|
||||
enable = true;
|
||||
wayland = true;
|
||||
};
|
||||
};
|
||||
|
||||
dbus = {
|
||||
enable = true;
|
||||
@@ -45,6 +58,9 @@
|
||||
|
||||
powerManagement = {
|
||||
enable = true;
|
||||
resumeCommands = ''
|
||||
${pkgs.hyprlock}/bin/hyprlock -c /home/alice/.config/hypr/hyprlock.conf
|
||||
'';
|
||||
};
|
||||
|
||||
environment.systemPackages = with pkgs; [
|
||||
|
||||
@@ -6,10 +6,13 @@
|
||||
enable = true;
|
||||
enable32Bit = true;
|
||||
|
||||
## amdvlk: an open-source Vulkan driver from AMD
|
||||
extraPackages = with pkgs; [
|
||||
amdvlk
|
||||
rocmPackages.clr.icd
|
||||
];
|
||||
extraPackages32 = with pkgs; [
|
||||
driversi686Linux.amdvlk
|
||||
rocmPackages.clr.icd
|
||||
];
|
||||
};
|
||||
|
||||
@@ -100,7 +100,7 @@
|
||||
# (the default) this is the recommended approach. When using systemd-networkd it's
|
||||
# still possible to use this option, but it's recommended to use it in conjunction
|
||||
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
|
||||
networking.interfaces.wlp191s0.useDHCP = lib.mkDefault true;
|
||||
networking.interfaces.wlp4s0.useDHCP = lib.mkDefault true;
|
||||
|
||||
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
|
||||
hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
|
||||
|
||||
@@ -12,6 +12,15 @@
|
||||
package = pkgs.qemu_kvm;
|
||||
runAsRoot = true;
|
||||
swtpm.enable = true;
|
||||
ovmf = {
|
||||
enable = true;
|
||||
packages = [
|
||||
(pkgs.OVMF.override {
|
||||
secureBoot = true;
|
||||
tpmSupport = true;
|
||||
}).fd
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
users.users.alice = {
|
||||
|
||||
@@ -1,19 +0,0 @@
|
||||
{ ... }:
|
||||
{
|
||||
networking.nameservers = [
|
||||
"9.9.9.9"
|
||||
"1.1.1.1"
|
||||
#"192.168.76.1"
|
||||
];
|
||||
|
||||
#services.resolved = {
|
||||
# enable = true;
|
||||
# dnssec = "false";
|
||||
# domains = [ "~." ];
|
||||
# fallbackDns = [
|
||||
# "1.1.1.1#one.one.one.one"
|
||||
# "1.0.0.1#one.one.one.one"
|
||||
# ];
|
||||
# dnsovertls = "true";
|
||||
#};
|
||||
}
|
||||
@@ -5,9 +5,8 @@
|
||||
alacritty
|
||||
attic-client
|
||||
amdgpu_top
|
||||
android-tools
|
||||
bat
|
||||
bitwarden-cli
|
||||
bitwarden-rofi
|
||||
bfg-repo-cleaner
|
||||
brightnessctl
|
||||
btop
|
||||
@@ -16,9 +15,9 @@
|
||||
candy-icons
|
||||
chromium
|
||||
chromedriver
|
||||
#claude-code
|
||||
croc
|
||||
deadnix
|
||||
direnv
|
||||
easyeffects
|
||||
eza
|
||||
fanficfare
|
||||
@@ -26,14 +25,15 @@
|
||||
fd
|
||||
file
|
||||
firefox
|
||||
|
||||
# gestures replacement
|
||||
git
|
||||
glances
|
||||
gpu-viewer
|
||||
grim
|
||||
helvum
|
||||
htop
|
||||
hwloc
|
||||
ipmiview
|
||||
iperf3
|
||||
# ipscan
|
||||
jp2a
|
||||
@@ -53,6 +53,7 @@
|
||||
# nbt explorer?
|
||||
ncdu
|
||||
nemo-with-extensions
|
||||
neofetch
|
||||
neovim
|
||||
nix-init
|
||||
nix-output-monitor
|
||||
@@ -75,12 +76,14 @@
|
||||
restic
|
||||
ripgrep
|
||||
rpi-imager
|
||||
rofi
|
||||
rofi-wayland
|
||||
samba
|
||||
signal-desktop
|
||||
# signal in tray?
|
||||
siji
|
||||
simple-mtpfs
|
||||
skaffold
|
||||
slack
|
||||
slurp
|
||||
smartmontools
|
||||
snyk
|
||||
@@ -97,6 +100,8 @@
|
||||
unipicker
|
||||
unzip
|
||||
uutils-coreutils-noprefix
|
||||
ventoy
|
||||
vesktop
|
||||
vscode
|
||||
watchman
|
||||
wget
|
||||
@@ -106,13 +111,4 @@
|
||||
zoom-us
|
||||
zoxide
|
||||
];
|
||||
programs = {
|
||||
appimage = {
|
||||
enable = true;
|
||||
binfmt = true;
|
||||
};
|
||||
bat.enable = true;
|
||||
direnv.enable = true;
|
||||
kdeconnect.enable = true;
|
||||
};
|
||||
}
|
||||
|
||||
@@ -10,9 +10,13 @@ example_booleans:
|
||||
- ENC[AES256_GCM,data:6SJ0JKI=,iv:J0qSvWoOcDwSXCKyau+a0YcCGuH5WABHVh6Kdigac20=,tag:WQdNfjcubbzoHnQW4gua8g==,type:bool]
|
||||
apps:
|
||||
spotify: ENC[AES256_GCM,data:tIABPphA7Vr6VNvJpWTS9kDmidU=,iv:ciQzr8jyIcHYi797NKypPs7FhDgK5ToVZ0eZHHF8UtE=,tag:wUTL/x1p24cXyPUAL1dPfg==,type:str]
|
||||
wifi-env: ENC[AES256_GCM,data:mxPCyunx8yOahcuVhZCzuqAt/G89lMBnZme+qwcxO4LsCftx7h2FotA+wnlj1++vmPW5zL72q2kzxh0KcVlYqK9fpOrMY/FJeJXWYNMZIHesmWKlaaeA1wM/q1dSllwuVuULp9WQzipiQHwcCCLseo3bmCsYpbs8PUibrDgbDqXreTSjJBNTVzwOGpz1bZCSpEynS+dQQViRSNcVeYTOLxrOTxx5lyEOIhgIc3167ObhK+7bJVG2ZcP209Gllip4XkCj/FKnEwg2vVF5Dpofz7T2Op5ef/oNzahhKmCa+k7OPqITWwPYZg7pqAf6jdMy4eBP/A==,iv:Q6IMqePFwd1b1pSuh+TIwcag2bbJXyIYUmJWY6UaaqI=,tag:UZ5ak6nmHkNG0uBMTl1CwQ==,type:str]
|
||||
wifi-env: ENC[AES256_GCM,data:G+z+fURk4rT61I5BiFzEJJt35jywPNrGpn1QGNhjvxrqPQ/Sq/hIHmQo+bqe9yJeDgMX3RY4EaiZxFTJyxPfW1czjuMSj3vbTp0WcDmGvUJ7li2pX2pzolgly4qmgoOluGBeRZWVLLOZYFB2+kLRMJNNz/bP5k2Eq6O4+l4sljPM+abn9iz9Eh46rVOVRkmDzCltJrYiuBSiSPhTDRTP2+gUbgbaUJTkVrVLUBHg3QU6az6VPN8DPZxbx4LtdaIb93pI,iv:uUfJK/iPdyLP7LqZJolTGGTxaEzlJI59bUVNcB1etkU=,tag:tvXSXSW1MIhLJceEK1afuw==,type:str]
|
||||
#ENC[AES256_GCM,data:G9ggYJ3YA+E=,iv:nZ5NgeyNKFXFIpquoY68Z2Jz9QROqvf5tv7/s1wSgKk=,tag:QAX555IsAMaWAlz9ywSzjQ==,type:comment]
|
||||
sops:
|
||||
kms: []
|
||||
gcp_kms: []
|
||||
azure_kv: []
|
||||
hc_vault: []
|
||||
age:
|
||||
- recipient: age1jd2dcpykagz20kpk2kkchte3augqncwfn6nywursx0dkfyze6feqdzxkq2
|
||||
enc: |
|
||||
@@ -23,8 +27,8 @@ sops:
|
||||
d09aSXN0ZUh3VC9XeTZ4UWoxVDNVN0UKF1eU/IQJgJ8Fg+MrfqQuEZZ775hvtUJR
|
||||
D/ZS4vj+sDLWq6gy2lIBhRSIAHWrz5gHxvOOGmRnpvkqh9TS6XjLIA==
|
||||
-----END AGE ENCRYPTED FILE-----
|
||||
lastmodified: "2026-01-03T19:32:16Z"
|
||||
mac: ENC[AES256_GCM,data:q5NppTtZZA9Oo15zI0pAZ/YN2qu0TneDPMJY9rXtWlYfG7Pq5taRyc9MpV7CyEt+qWMkN//O3/sA4jmQTtpT8JuYIEa+/x5cfSZ5w0ErjKdV4/IyDs1LPDKNLXIWlmPMo61VvsKW9DZRBRml9qtR1ypeHBuz0pjECBwAQPEcw9k=,iv:X7wUOxn4BsvqCPmNZvH75hyAzUeD7Qtp+4e4SLpPWlI=,tag:Dp6Bu3zEkRaRPdOwWil13g==,type:str]
|
||||
lastmodified: "2024-11-28T18:57:09Z"
|
||||
mac: ENC[AES256_GCM,data:hKhAo7rDplLm19PlrKHQwxnDVXCMU/xpAxPALLDBa0M3yypy2QVD6c6Atn897tYRKf7oeLaUKqnUYdCcZ9gVgm37LS+GtRhf66zfvcKqhZF8wh3M0zTDPYpQDhex0N4BAJ/dcaYIbxqE9pEUxJOI5jip/hptaCJItTEe7oARcF4=,iv:EUayxLaOPcnWX+S9+RlHrxzJRLlSSLIwqbAq3fFI4yg=,tag:LiBsqIodTWamO+c8FqGBag==,type:str]
|
||||
pgp:
|
||||
- created_at: "2024-11-28T18:57:09Z"
|
||||
enc: |-
|
||||
@@ -39,4 +43,4 @@ sops:
|
||||
-----END PGP MESSAGE-----
|
||||
fp: 5EFFB75F7C9B74EAA5C4637547940175096C1330
|
||||
unencrypted_suffix: _unencrypted
|
||||
version: 3.11.0
|
||||
version: 3.9.1
|
||||
|
||||
@@ -1,4 +1,10 @@
|
||||
{ pkgs, ... }:
|
||||
# let
|
||||
# randWallpaper = pkgs.runCommand "stylix-wallpaper" { } ''
|
||||
# numWallpapers =
|
||||
# $((1 + $RANDOM % 10))
|
||||
|
||||
# in
|
||||
{
|
||||
stylix = {
|
||||
enable = true;
|
||||
|
||||
@@ -1,17 +1,13 @@
|
||||
{ config, lib, ... }:
|
||||
{ config, ... }:
|
||||
let
|
||||
always = 100;
|
||||
home = 99;
|
||||
public_wifi = false;
|
||||
in
|
||||
{
|
||||
imports = lib.optionals (!public_wifi) [
|
||||
./private-wifi.nix
|
||||
];
|
||||
networking.wireless = {
|
||||
enable = true;
|
||||
secretsFile = config.sops.secrets."wifi-env".path;
|
||||
userControlled = true;
|
||||
userControlled.enable = true;
|
||||
networks = {
|
||||
"taetaethegae-2.0" = {
|
||||
pskRaw = "ext:PASS_taetaethegae_20";
|
||||
@@ -28,25 +24,34 @@ in
|
||||
"24HuFios".pskRaw = "ext:PASS_longboat_home";
|
||||
"Verizon_ZLHQ3H".pskRaw = "ext:PASS_angie";
|
||||
"Fios-Qn3RB".pskRaw = "ext:PASS_parkridge";
|
||||
"Mojo Dojo Casa House".pskRaw = "ext:PASS_Carly";
|
||||
"bwe_guest".pskRaw = "ext:PASS_BWE_NE";
|
||||
|
||||
# Public wifi connections
|
||||
# set public_wifi on line 5 to true if connecting to one of these
|
||||
#"optimumwifi" = { };
|
||||
#"CableWiFi" = { };
|
||||
#"Hilton Honors" = { };
|
||||
|
||||
# Work wifi
|
||||
"optimumwifi" = { };
|
||||
"CableWiFi" = { };
|
||||
"JPMCVisitor" = { };
|
||||
};
|
||||
};
|
||||
|
||||
networking.nameservers = [
|
||||
"9.9.9.9"
|
||||
"1.1.1.1"
|
||||
"192.168.76.1"
|
||||
];
|
||||
|
||||
services.resolved = {
|
||||
enable = true;
|
||||
dnssec = "true";
|
||||
domains = [ "~." ];
|
||||
fallbackDns = [
|
||||
"1.1.1.1#one.one.one.one"
|
||||
"1.0.0.1#one.one.one.one"
|
||||
];
|
||||
dnsovertls = "true";
|
||||
};
|
||||
|
||||
sops = {
|
||||
defaultSopsFile = ./secrets.yaml;
|
||||
secrets = {
|
||||
"wifi-env" = {
|
||||
owner = "wpa_supplicant";
|
||||
owner = "root";
|
||||
restartUnits = [ "wpa_supplicant.service" ];
|
||||
};
|
||||
};
|
||||
|
||||
28
systems/hetzner-bridge/configuration.nix
Normal file
28
systems/hetzner-bridge/configuration.nix
Normal file
@@ -0,0 +1,28 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
|
||||
{
|
||||
imports = [
|
||||
../../disko/hetzner.nix
|
||||
./networking.nix
|
||||
];
|
||||
disko.devices.disk.main.device = "scsi-0QEMU_QEMU_HARDDISK_55513992";
|
||||
|
||||
boot = {
|
||||
useSystemdBoot = true;
|
||||
};
|
||||
|
||||
virtualisation.docker.enable = false;
|
||||
services = {
|
||||
locate.enable = false;
|
||||
endlessh-go.enable = false;
|
||||
};
|
||||
|
||||
#hardware.enableAllFirmware = true;
|
||||
|
||||
system.stateVersion = "24.05";
|
||||
}
|
||||
8
systems/hetzner-bridge/default.nix
Normal file
8
systems/hetzner-bridge/default.nix
Normal file
@@ -0,0 +1,8 @@
|
||||
{ inputs, ... }:
|
||||
{
|
||||
users = [ "alice" ];
|
||||
modules = [
|
||||
# inputs.attic.nixosModules.atticd
|
||||
inputs.disko.nixosModules.disko
|
||||
];
|
||||
}
|
||||
39
systems/hetzner-bridge/hardware.nix
Normal file
39
systems/hetzner-bridge/hardware.nix
Normal file
@@ -0,0 +1,39 @@
|
||||
# Do not modify this file! It was generated by ‘nixos-generate-config’
|
||||
# and may be overwritten by future invocations. Please make changes
|
||||
# to /etc/nixos/configuration.nix instead.
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
modulesPath,
|
||||
...
|
||||
}:
|
||||
|
||||
{
|
||||
imports = [
|
||||
(modulesPath + "/profiles/qemu-guest.nix")
|
||||
];
|
||||
|
||||
boot = {
|
||||
initrd.availableKernelModules = [
|
||||
"ahci"
|
||||
"xhci_pci"
|
||||
"virtio_pci"
|
||||
"virtio_scsi"
|
||||
"sd_mod"
|
||||
"sr_mod"
|
||||
];
|
||||
initrd.kernelModules = [ ];
|
||||
kernelModules = [ ];
|
||||
extraModulePackages = [ ];
|
||||
};
|
||||
|
||||
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
|
||||
# (the default) this is the recommended approach. When using systemd-networkd it's
|
||||
# still possible to use this option, but it's recommended to use it in conjunction
|
||||
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
|
||||
# networking.useDHCP = lib.mkDefault true;
|
||||
# networking.interfaces.enp1s0.useDHCP = lib.mkDefault true;
|
||||
|
||||
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
|
||||
}
|
||||
19
systems/hetzner-bridge/networking.nix
Normal file
19
systems/hetzner-bridge/networking.nix
Normal file
@@ -0,0 +1,19 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
|
||||
{
|
||||
networking.useDHCP = false;
|
||||
|
||||
systemd.network = {
|
||||
enable = true;
|
||||
networks."10-wan" = {
|
||||
#matchConfig.Name = "enp1s0"; # either ens3 or enp1s0 depending on system, check 'ip addr'
|
||||
matchConfig.Name = "ether";
|
||||
networkConfig.DHCP = "ipv4";
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -34,9 +34,6 @@
|
||||
bucket = "cache-nix-dot";
|
||||
endpoint = "https://minio.nayeonie.com";
|
||||
};
|
||||
garbage-collection = {
|
||||
interval = "5 minutes";
|
||||
};
|
||||
|
||||
# Warning: If you change any of the values here, it will be
|
||||
# difficult to reuse existing chunks for newly-uploaded NARs
|
||||
@@ -67,9 +64,6 @@
|
||||
# configured default webstore for this on root user separately
|
||||
systemd = {
|
||||
services = {
|
||||
atticd.environment = {
|
||||
RUST_LOG = "INFO";
|
||||
};
|
||||
attic-watch-store = {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
{ config, pkgs, ... }:
|
||||
{
|
||||
# Restic backups to the local REST server (docker/restic.nix, port 8010, private repos).
|
||||
# Each service gets its own repo: rest:http://localhost:8010/<username>/
|
||||
# REST credentials are injected via sops templates as an EnvironmentFile.
|
||||
# Add new jobs below following the same pattern.
|
||||
|
||||
sops = {
|
||||
secrets."restic/kanidm_password" = { };
|
||||
secrets."restic/kanidm_rest_password" = { };
|
||||
|
||||
# Compose a KEY=VALUE env file for the restic systemd service.
|
||||
templates."restic-kanidm-env" = {
|
||||
content = ''
|
||||
RESTIC_REST_USERNAME=kanidm
|
||||
RESTIC_REST_PASSWORD=${config.sops.placeholder."restic/kanidm_rest_password"}
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
services.restic.backups = {
|
||||
kanidm = {
|
||||
repository = "rest:http://localhost:8010/kanidm/";
|
||||
passwordFile = config.sops.secrets."restic/kanidm_password".path;
|
||||
environmentFile = config.sops.templates."restic-kanidm-env".path;
|
||||
|
||||
# Checkpoint the SQLite WAL before backup so the snapshot is consistent.
|
||||
backupPrepareCommand = ''
|
||||
${pkgs.sqlite}/bin/sqlite3 /var/lib/kanidm/kanidm.db "PRAGMA wal_checkpoint(FULL);"
|
||||
'';
|
||||
|
||||
paths = [ "/var/lib/kanidm" ];
|
||||
|
||||
timerConfig = {
|
||||
OnCalendar = "04:00";
|
||||
Persistent = true;
|
||||
};
|
||||
|
||||
pruneOpts = [
|
||||
"--keep-daily 7"
|
||||
"--keep-weekly 4"
|
||||
"--keep-monthly 3"
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -9,20 +9,15 @@
|
||||
./acme.nix
|
||||
./attic
|
||||
./docker
|
||||
./garage.nix
|
||||
./gitea.nix
|
||||
./firewall.nix
|
||||
./haproxy
|
||||
./hardware-changes.nix
|
||||
./hydra.nix
|
||||
./mattermost.nix
|
||||
./minio.nix
|
||||
./networking.nix
|
||||
./nextcloud.nix
|
||||
#./plex
|
||||
./postgresql.nix
|
||||
./backup.nix
|
||||
./kanidm.nix
|
||||
./samba.nix
|
||||
./zfs.nix
|
||||
];
|
||||
@@ -37,7 +32,8 @@
|
||||
loader.grub.device = "/dev/sda";
|
||||
useSystemdBoot = true;
|
||||
kernelParams = [
|
||||
"xe.force_probe=56a5"
|
||||
"i915.force_probe=56a5"
|
||||
"i915.enable_guc=2"
|
||||
];
|
||||
kernel.sysctl = {
|
||||
"vm.overcommit_memory" = lib.mkForce 1;
|
||||
@@ -52,46 +48,25 @@
|
||||
enable = true;
|
||||
extraPackages = with pkgs; [
|
||||
intel-media-driver # LIBVA_DRIVER_NAME=iHD
|
||||
intel-vaapi-driver # LIBVA_DRIVER_NAME=i965 (older but works better for Firefox/Chromium)
|
||||
libva-vdpau-driver
|
||||
vaapiIntel # LIBVA_DRIVER_NAME=i965 (older but works better for Firefox/Chromium)
|
||||
vaapiVdpau
|
||||
libvdpau-va-gl
|
||||
intel-compute-runtime
|
||||
vpl-gpu-rt # replaces intel-media-sdk
|
||||
intel-media-sdk
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
environment = {
|
||||
systemPackages = with pkgs; [
|
||||
environment.systemPackages = with pkgs; [
|
||||
chromedriver
|
||||
chromium
|
||||
docker-compose
|
||||
filebot
|
||||
intel-gpu-tools
|
||||
jellyfin-ffmpeg
|
||||
jq
|
||||
yt-dlp
|
||||
yq
|
||||
];
|
||||
etc = {
|
||||
# Creates /etc/lynis/custom.prf
|
||||
"lynis/custom.prf" = {
|
||||
text = ''
|
||||
skip-test=BANN-7126
|
||||
skip-test=BANN-7130
|
||||
skip-test=DEB-0520
|
||||
skip-test=DEB-0810
|
||||
skip-test=FIRE-4513
|
||||
skip-test=HRDN-7222
|
||||
skip-test=KRNL-5820
|
||||
skip-test=LOGG-2190
|
||||
skip-test=LYNIS
|
||||
skip-test=TOOL-5002
|
||||
'';
|
||||
mode = "0440";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
services = {
|
||||
samba.enable = true;
|
||||
|
||||
@@ -3,8 +3,5 @@
|
||||
users = [ "alice" ];
|
||||
modules = [
|
||||
# inputs.attic.nixosModules.atticd
|
||||
inputs.nixos-hardware.nixosModules.common-cpu-amd
|
||||
inputs.nixos-hardware.nixosModules.common-cpu-amd-pstate
|
||||
inputs.nixos-hardware.nixosModules.supermicro
|
||||
];
|
||||
}
|
||||
|
||||
@@ -11,8 +11,7 @@ in
|
||||
{
|
||||
virtualisation.oci-containers.containers = {
|
||||
act-stable-latest-main = {
|
||||
image = "gitea/act_runner:nightly";
|
||||
pull = "always";
|
||||
image = "gitea/act_runner:latest";
|
||||
extraOptions = [
|
||||
"--stop-signal=SIGINT"
|
||||
];
|
||||
@@ -35,8 +34,7 @@ in
|
||||
};
|
||||
|
||||
act-stable-latest-1 = {
|
||||
image = "gitea/act_runner:nightly";
|
||||
pull = "always";
|
||||
image = "gitea/act_runner:latest";
|
||||
extraOptions = [
|
||||
"--stop-signal=SIGINT"
|
||||
];
|
||||
@@ -58,8 +56,7 @@ in
|
||||
};
|
||||
|
||||
act-stable-latest-2 = {
|
||||
image = "gitea/act_runner:nightly";
|
||||
pull = "always";
|
||||
image = "gitea/act_runner:latest";
|
||||
extraOptions = [
|
||||
"--stop-signal=SIGINT"
|
||||
];
|
||||
|
||||
@@ -38,19 +38,19 @@ runner:
|
||||
- "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
|
||||
- "ubuntu-22.04:docker://gitea/runner-images:ubuntu-22.04"
|
||||
- "ubuntu-20.04:docker://gitea/runner-images:ubuntu-20.04"
|
||||
cache:
|
||||
#cache:
|
||||
# Enable cache server to use actions/cache.
|
||||
enabled: true
|
||||
#enabled: true
|
||||
# The directory to store the cache data.
|
||||
# If it's empty, the cache data will be stored in $HOME/.cache/actcache.
|
||||
#dir: ""
|
||||
# The host of the cache server.
|
||||
# It's not for the address to listen, but the address to connect from job containers.
|
||||
# So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
|
||||
host: "192.168.76.2"
|
||||
#host: ""
|
||||
# The port of the cache server.
|
||||
# 0 means to use a random available port.
|
||||
port: 8088
|
||||
#port: 0
|
||||
# The external cache server URL. Valid only when enable is true.
|
||||
# If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
|
||||
# The URL should generally end with "/".
|
||||
|
||||
@@ -122,7 +122,7 @@ let
|
||||
cmd = lib.splitString " " "--concurrent 6 AmAnd0";
|
||||
|
||||
};
|
||||
inherit (lib.adev.container-utils) createTemplatedContainers;
|
||||
inherit (lib.rad-dev.container-utils) createTemplatedContainers;
|
||||
|
||||
vars = import ../vars.nix;
|
||||
at_path = vars.primary_archiveteam;
|
||||
|
||||
@@ -1,274 +0,0 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
let
|
||||
vars = import ../vars.nix;
|
||||
shared_data_path = "${vars.primary_torr}/data";
|
||||
arr_postgres_config =
|
||||
container_type:
|
||||
let
|
||||
ctype = lib.strings.toUpper container_type;
|
||||
in
|
||||
{
|
||||
"${ctype}__POSTGRES__HOST" = "/var/run/postgresql";
|
||||
"${ctype}__POSTGRES__PORT" = toString config.services.postgresql.settings.port;
|
||||
};
|
||||
in
|
||||
{
|
||||
# Notes:
|
||||
# Jellyplex-watched - sync watch status between plex and jellyfin as long as users and library is the same
|
||||
# Tdarr - for distributed transcoding?
|
||||
#
|
||||
# list of containers supporting postgres:
|
||||
# bazarr:
|
||||
# POSTGRES_ENABED: true
|
||||
# POSTGRES_HOST:
|
||||
# POSTGRES_PORT:
|
||||
# POSTGRES_DATABASE: bazarr
|
||||
# POSTGRES_USERNAME: arr
|
||||
# POSTGRES_PASSWORD: sops
|
||||
# prowlarr:
|
||||
# see ctype
|
||||
# radarr:
|
||||
# see ctype
|
||||
# sonarr:
|
||||
# see ctype
|
||||
# lidarr:
|
||||
# see ctype
|
||||
# jellyseerr:
|
||||
# DB_TYPE: postgres
|
||||
# DB_HOST:
|
||||
# DB_PORT:
|
||||
# DB_USER: arr
|
||||
# DB_PASS: sops
|
||||
# DB_NAME: jellyseerr
|
||||
#
|
||||
virtualisation.oci-containers.containers = {
|
||||
bazarr = {
|
||||
image = "ghcr.io/linuxserver/bazarr:latest";
|
||||
pull = "always";
|
||||
ports = [ "6767:6767" ];
|
||||
hostname = "bazarr";
|
||||
environment = {
|
||||
PUID = "600";
|
||||
PGID = "100";
|
||||
TZ = "America/New_York";
|
||||
POSTGRES_HOST = "/var/run/postgresql";
|
||||
POSTGRES_PORT = toString config.services.postgresql.settings.port;
|
||||
};
|
||||
environmentFiles = [
|
||||
config.sops.secrets."docker/bazarr".path
|
||||
];
|
||||
volumes = [
|
||||
"${vars.primary_docker}/bazarr:/config"
|
||||
"${shared_data_path}:/data"
|
||||
"/var/run/postgresql:/var/run/postgresql"
|
||||
];
|
||||
extraOptions = [
|
||||
"--network=arrnet"
|
||||
];
|
||||
autoStart = true;
|
||||
};
|
||||
prowlarr = {
|
||||
image = "ghcr.io/linuxserver/prowlarr:latest";
|
||||
pull = "always";
|
||||
ports = [ "9696:9696" ];
|
||||
hostname = "prowlarr";
|
||||
environment = {
|
||||
PUID = "600";
|
||||
PGID = "100";
|
||||
TZ = "America/New_York";
|
||||
}
|
||||
// arr_postgres_config "prowlarr";
|
||||
environmentFiles = [
|
||||
config.sops.secrets."docker/prowlarr".path
|
||||
];
|
||||
extraOptions = [
|
||||
"--network=arrnet"
|
||||
];
|
||||
volumes = [
|
||||
"${vars.primary_docker}/prowlarr:/config"
|
||||
|
||||
"/var/run/postgresql:/var/run/postgresql"
|
||||
];
|
||||
autoStart = true;
|
||||
};
|
||||
radarr = {
|
||||
image = "ghcr.io/linuxserver/radarr:latest";
|
||||
pull = "always";
|
||||
ports = [ "7878:7878" ];
|
||||
hostname = "radarr";
|
||||
environment = {
|
||||
PUID = "600";
|
||||
PGID = "100";
|
||||
TZ = "America/New_York";
|
||||
}
|
||||
// arr_postgres_config "radarr";
|
||||
environmentFiles = [
|
||||
config.sops.secrets."docker/radarr".path
|
||||
];
|
||||
volumes = [
|
||||
"${vars.primary_docker}/radarr:/config"
|
||||
"${shared_data_path}:/data"
|
||||
"/var/run/postgresql:/var/run/postgresql"
|
||||
];
|
||||
extraOptions = [
|
||||
"--network=arrnet"
|
||||
];
|
||||
autoStart = true;
|
||||
};
|
||||
sonarr = {
|
||||
image = "ghcr.io/linuxserver/sonarr:latest";
|
||||
pull = "always";
|
||||
ports = [ "8989:8989" ];
|
||||
hostname = "sonarr";
|
||||
environment = {
|
||||
PUID = "600";
|
||||
PGID = "100";
|
||||
TZ = "America/New_York";
|
||||
}
|
||||
// arr_postgres_config "sonarr";
|
||||
environmentFiles = [
|
||||
config.sops.secrets."docker/sonarr".path
|
||||
];
|
||||
volumes = [
|
||||
"${vars.primary_docker}/sonarr:/config"
|
||||
"${shared_data_path}:/data"
|
||||
"/var/run/postgresql:/var/run/postgresql"
|
||||
];
|
||||
extraOptions = [
|
||||
"--network=arrnet"
|
||||
];
|
||||
autoStart = true;
|
||||
};
|
||||
lidarr = {
|
||||
image = "ghcr.io/linuxserver/lidarr:latest";
|
||||
pull = "always";
|
||||
ports = [ "8686:8686" ];
|
||||
hostname = "lidarr";
|
||||
environment = {
|
||||
PUID = "600";
|
||||
PGID = "100";
|
||||
TZ = "America/New_York";
|
||||
}
|
||||
// arr_postgres_config "lidarr";
|
||||
environmentFiles = [
|
||||
config.sops.secrets."docker/lidarr".path
|
||||
];
|
||||
volumes = [
|
||||
"${vars.primary_docker}/lidarr:/config"
|
||||
"${shared_data_path}:/data"
|
||||
"/var/run/postgresql:/var/run/postgresql"
|
||||
];
|
||||
extraOptions = [
|
||||
"--network=arrnet"
|
||||
];
|
||||
autoStart = true;
|
||||
};
|
||||
unpackerr = {
|
||||
image = "golift/unpackerr:latest";
|
||||
pull = "always";
|
||||
user = "600:100";
|
||||
hostname = "unpackerr";
|
||||
environment = {
|
||||
TZ = "America/New_York";
|
||||
};
|
||||
volumes = [
|
||||
"${vars.primary_docker}/unpackerr:/config"
|
||||
"${shared_data_path}:/data"
|
||||
"/var/run/postgresql:/var/run/postgresql"
|
||||
];
|
||||
extraOptions = [ "--network=arrnet" ];
|
||||
autoStart = true;
|
||||
};
|
||||
notifiarr = {
|
||||
image = "golift/notifiarr:latest";
|
||||
pull = "always";
|
||||
ports = [ "5454:5454" ];
|
||||
user = "600:100";
|
||||
hostname = "notifiarr";
|
||||
environment = {
|
||||
TZ = "America/New_York";
|
||||
};
|
||||
environmentFiles = [ config.sops.secrets."docker/notifiarr".path ];
|
||||
volumes = [
|
||||
"${vars.primary_docker}/notifiarr:/config"
|
||||
"${shared_data_path}:/data"
|
||||
"/var/run/postgresql:/var/run/postgresql"
|
||||
];
|
||||
extraOptions = [ "--network=arrnet" ];
|
||||
autoStart = true;
|
||||
};
|
||||
jellyseerr = {
|
||||
image = "fallenbagel/jellyseerr:latest";
|
||||
pull = "always";
|
||||
hostname = "jellyseerr";
|
||||
environment = {
|
||||
PUID = "600";
|
||||
PGID = "100";
|
||||
TZ = "America/New_York";
|
||||
DB_TYPE = "postgres";
|
||||
DB_HOST = "/var/run/postgresql";
|
||||
DB_PORT = toString config.services.postgresql.settings.port;
|
||||
};
|
||||
environmentFiles = [
|
||||
config.sops.secrets."docker/jellyseerr".path
|
||||
];
|
||||
volumes = [
|
||||
"${vars.primary_docker}/overseerr:/config"
|
||||
"/var/run/postgresql:/var/run/postgresql"
|
||||
];
|
||||
# TODO: remove ports later since this is going through web
|
||||
extraOptions = [
|
||||
"--network=arrnet"
|
||||
"--network=haproxy-net"
|
||||
# "--health-cmd \"wget --no-verbose --tries 1 --spider http://localhost:5055/api/v1/status || exit 1\""
|
||||
# "--health-start-period 20s"
|
||||
# "--health-timeout 3s"
|
||||
# "--health-interval 15s"
|
||||
# "--health-retries 3"
|
||||
];
|
||||
ports = [ "5055:5055" ]; # Web UI port
|
||||
dependsOn = [
|
||||
"radarr"
|
||||
"sonarr"
|
||||
];
|
||||
autoStart = true;
|
||||
};
|
||||
};
|
||||
|
||||
sops = {
|
||||
secrets = {
|
||||
"docker/notifiarr" = {
|
||||
owner = "docker-service";
|
||||
restartUnits = [ "docker-notifiarr.service" ];
|
||||
};
|
||||
"docker/bazarr" = {
|
||||
owner = "docker-service";
|
||||
restartUnits = [ "docker-bazarr.service" ];
|
||||
};
|
||||
"docker/prowlarr" = {
|
||||
owner = "docker-service";
|
||||
restartUnits = [ "docker-prowlarr.service" ];
|
||||
};
|
||||
"docker/radarr" = {
|
||||
owner = "docker-service";
|
||||
restartUnits = [ "docker-radarr.service" ];
|
||||
};
|
||||
"docker/sonarr" = {
|
||||
owner = "docker-service";
|
||||
restartUnits = [ "docker-sonarr.service" ];
|
||||
};
|
||||
"docker/lidarr" = {
|
||||
owner = "docker-service";
|
||||
restartUnits = [ "docker-lidarr.service" ];
|
||||
};
|
||||
"docker/jellyseerr" = {
|
||||
owner = "docker-service";
|
||||
restartUnits = [ "docker-jellyseerr.service" ];
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,20 +1,24 @@
|
||||
{ ... }:
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
|
||||
{
|
||||
imports = [
|
||||
./act-runner.nix
|
||||
./arr.nix
|
||||
# temp disable archiveteam for tiktok archiving
|
||||
#./archiveteam.nix
|
||||
# ./books.nix
|
||||
#./firefly.nix
|
||||
#./foundry.nix
|
||||
./glances.nix
|
||||
./haproxy.nix
|
||||
# ./haproxy.nix
|
||||
./minecraft.nix
|
||||
./nextcloud.nix
|
||||
# ./postgres.nix
|
||||
./restic.nix
|
||||
# ./restic.nix
|
||||
./torr.nix
|
||||
# ./unifi.nix
|
||||
];
|
||||
|
||||
@@ -8,7 +8,6 @@ in
|
||||
virtualisation.oci-containers.containers = {
|
||||
glances = {
|
||||
image = "nicolargo/glances:latest-full";
|
||||
pull = "always";
|
||||
extraOptions = [
|
||||
"--pid=host"
|
||||
"--network=haproxy-net"
|
||||
|
||||
@@ -50,7 +50,6 @@ frontend ContentSwitching
|
||||
acl host_minio hdr(host) -i minio.alicehuston.xyz
|
||||
acl host_minio_console hdr(host) -i minio-console.alicehuston.xyz
|
||||
acl host_attic hdr(host) -i attic.nayeonie.com
|
||||
acl host_s3 hdr(host) -i s3.nayeonie.com
|
||||
acl host_minio hdr(host) -i minio.nayeonie.com
|
||||
acl host_minio_console hdr(host) -i minio-console.nayeonie.com
|
||||
#acl host_nextcloud_vol hdr(host) -i nextcloud-vol.alicehuston.xyz
|
||||
@@ -58,7 +57,6 @@ frontend ContentSwitching
|
||||
acl host_prometheus hdr(host) -i prom.alicehuston.xyz
|
||||
acl host_gitea hdr(host) -i git.alicehuston.xyz
|
||||
acl host_gitea hdr(host) -i nayeonie.com
|
||||
acl host_kanidm hdr(host) -i auth.nayeonie.com
|
||||
# Backend-forwarding
|
||||
use_backend www_nodes if host_www
|
||||
# use_backend ldapui_nodes if host_ldapui
|
||||
@@ -69,14 +67,12 @@ frontend ContentSwitching
|
||||
use_backend nextcloud_nodes if host_nextcloud
|
||||
use_backend hydra_nodes if host_hydra
|
||||
use_backend attic_nodes if host_attic
|
||||
use_backend garage_nodes if host_s3
|
||||
#use_backend nextcloud_vol_nodes if host_nextcloud_vol
|
||||
# use_backend collabora_nodes if host_collabora
|
||||
use_backend prometheus_nodes if host_prometheus
|
||||
use_backend minio_nodes if host_minio
|
||||
use_backend minio_console_nodes if host_minio_console
|
||||
use_backend gitea_nodes if host_gitea
|
||||
use_backend kanidm_nodes if host_kanidm
|
||||
|
||||
#frontend ldap
|
||||
# bind *:389
|
||||
@@ -146,10 +142,6 @@ backend minio_console_nodes
|
||||
mode http
|
||||
server server 192.168.76.2:8501
|
||||
|
||||
backend garage_nodes
|
||||
mode http
|
||||
server server 192.168.76.2:8502
|
||||
|
||||
# backend foundry_nodes
|
||||
# timeout tunnel 50s
|
||||
# mode http
|
||||
@@ -185,15 +177,6 @@ backend gitea_nodes
|
||||
mode http
|
||||
server server 192.168.76.2:6443
|
||||
|
||||
backend kanidm_nodes
|
||||
mode http
|
||||
option forwardfor
|
||||
http-request set-header X-Forwarded-Proto https
|
||||
http-request set-header X-Forwarded-Host %[req.hdr(host)]
|
||||
acl internal src 192.168.76.0/24 192.168.191.0/24
|
||||
http-request deny unless internal
|
||||
server server 192.168.76.2:8443 ssl verify none
|
||||
|
||||
#backend netdata_nodes
|
||||
# mode http
|
||||
# server server 192.168.76.2:19999
|
||||
|
||||
@@ -23,6 +23,8 @@
|
||||
};
|
||||
dependsOn = [
|
||||
"nextcloud"
|
||||
"grafana"
|
||||
"foundryvtt"
|
||||
"glances"
|
||||
"mc-router"
|
||||
];
|
||||
|
||||
@@ -4,55 +4,41 @@ let
|
||||
servers = {
|
||||
atm6 = "atm6.alicehuston.xyz";
|
||||
stoneblock3 = "sb3.alicehuston.xyz";
|
||||
stoneblock-4 = "sb4.alicehuston.xyz";
|
||||
submerged-2 = "sm4.alicehuston.xyz";
|
||||
RAD2 = "rad.alicehuston.xyz";
|
||||
skyfactory = "sf.alicehuston.xyz";
|
||||
divinejourney = "dj.alicehuston.xyz";
|
||||
rlcraft = "rlcraft.alicehuston.xyz";
|
||||
arcanum-institute = "arcanum.alicehuston.xyz";
|
||||
meits = "meits.alicehuston.xyz";
|
||||
cobblemon-overclocked = "mco.alicehuston.xyz";
|
||||
cobblemon-plus = "mcp.alicehuston.xyz";
|
||||
# bcg-plus = "bcg.alicehuston.xyz";
|
||||
pii = "pii.alicehuston.xyz";
|
||||
};
|
||||
|
||||
defaultServer = "rlcraft";
|
||||
|
||||
defaultEnv = {
|
||||
EULA = "true";
|
||||
TYPE = "AUTO_CURSEFORGE";
|
||||
STOP_SERVER_ANNOUNCE_DELAY = "120";
|
||||
STOP_DURATION = "600";
|
||||
SYNC_CHUNK_WRITES = "false";
|
||||
USE_AIKAR_FLAGS = "true";
|
||||
MEMORY = "12G";
|
||||
ALLOW_FLIGHT = "true";
|
||||
MAX_TICK_TIME = "-1";
|
||||
ENABLE_RCON = "true";
|
||||
TZ = "America/New_York";
|
||||
REGION_FILE_COMPRESSION = "none";
|
||||
OPS = ''
|
||||
magpiecat
|
||||
chesiregirl1105
|
||||
'';
|
||||
};
|
||||
# defaultEnv = {
|
||||
# EULA = "true";
|
||||
# TYPE = "AUTO_CURSEFORGE";
|
||||
# STOP_SERVER_ANNOUNCE_DELAY = "120";
|
||||
# STOP_DURATION = "600";
|
||||
# SYNC_CHUNK_WRITES = "false";
|
||||
# USE_AIKAR_FLAGS = "true";
|
||||
# MEMORY = "8GB";
|
||||
# ALLOW_FLIGHT = "true";
|
||||
# MAX_TICK_TIME = "-1";
|
||||
# };
|
||||
|
||||
defaultOptions = [
|
||||
"--stop-signal=SIGTERM"
|
||||
"--stop-timeout=1800"
|
||||
"--network=minecraft-net"
|
||||
];
|
||||
# defaultOptions = [
|
||||
# "--stop-signal=SIGTERM"
|
||||
# "--stop-timeout=1800"
|
||||
# "--network=minecraft-net"
|
||||
# ];
|
||||
|
||||
vars = import ../vars.nix;
|
||||
minecraft_path = "${vars.primary_games}/minecraft";
|
||||
# vars = import ../vars.nix;
|
||||
# minecraft_path = "${vars.primary_games}/minecraft";
|
||||
in
|
||||
{
|
||||
virtualisation.oci-containers.containers = {
|
||||
mc-router = {
|
||||
image = "itzg/mc-router:latest";
|
||||
pull = "always";
|
||||
extraOptions = [
|
||||
"--network=haproxy-net"
|
||||
"--network=minecraft-net"
|
||||
@@ -60,11 +46,11 @@ in
|
||||
cmd = [
|
||||
(
|
||||
"--mapping=mc.alicehuston.xyz=${defaultServer}:25565"
|
||||
+ (lib.adev.mapAttrsToString (hostname: url: "," + url + "=" + hostname + ":25565") servers)
|
||||
+ (lib.rad-dev.mapAttrsToString (hostname: url: "," + url + "=" + hostname + ":25565") servers)
|
||||
)
|
||||
];
|
||||
};
|
||||
#rlcraft = {
|
||||
# rlcraft = {
|
||||
# image = "itzg/minecraft-server:java8";
|
||||
# volumes = [
|
||||
# "${minecraft_path}/rlcraft/modpacks:/modpacks:ro"
|
||||
@@ -80,58 +66,29 @@ in
|
||||
# extraOptions = defaultOptions;
|
||||
# log-driver = "local";
|
||||
# environmentFiles = [ config.sops.secrets."docker/minecraft".path ];
|
||||
#};
|
||||
cobblemon-overclocked = {
|
||||
image = "itzg/minecraft-server:java21";
|
||||
volumes = [
|
||||
"${minecraft_path}/cobblemon-overclocked/modpacks:/modpacks:ro"
|
||||
"${minecraft_path}/cobblemon-overclocked/data:/data"
|
||||
];
|
||||
hostname = "cobblemon-overclocked";
|
||||
environment = defaultEnv // {
|
||||
VERSION = "1.21.1";
|
||||
CF_SLUG = "modified-cobblemon-overclocked";
|
||||
CF_FILENAME_MATCHER = "1.11.2";
|
||||
USE_AIKAR_FLAGS = "false";
|
||||
USE_MEOWICE_FLAGS = "true";
|
||||
DIFFICULTY = "normal";
|
||||
ENABLE_COMMAND_BLOCK = "true";
|
||||
INIT_MEMORY = "4G";
|
||||
MAX_MEMORY = "16G";
|
||||
SEED = "-7146406535839057559";
|
||||
};
|
||||
extraOptions = defaultOptions;
|
||||
log-driver = "local";
|
||||
environmentFiles = [ config.sops.secrets."docker/minecraft".path ];
|
||||
};
|
||||
cobblemon-plus = {
|
||||
image = "itzg/minecraft-server:java21";
|
||||
volumes = [
|
||||
"${minecraft_path}/cobblemon-plus/modpacks:/modpacks:ro"
|
||||
"${minecraft_path}/cobblemon-plus/data:/data"
|
||||
];
|
||||
hostname = "cobblemon-plus";
|
||||
environment = defaultEnv // {
|
||||
VERSION = "1.21.1";
|
||||
CF_SLUG = "modified-cobblemon-plus";
|
||||
CF_FILENAME_MATCHER = "1.11.2";
|
||||
USE_AIKAR_FLAGS = "false";
|
||||
USE_MEOWICE_FLAGS = "true";
|
||||
DIFFICULTY = "peaceful";
|
||||
ENABLE_COMMAND_BLOCK = "true";
|
||||
INIT_MEMORY = "4G";
|
||||
MAX_MEMORY = "16G";
|
||||
# exclude clientside mods that cause crashes when run in a headless environment
|
||||
CF_EXCLUDE_MODS = "world-host";
|
||||
CF_OVERRIDES_EXCLUSIONS = "mods/iris*.jar,mods/sodium*.jar,mods/world-host-*.jar";
|
||||
};
|
||||
extraOptions = defaultOptions;
|
||||
log-driver = "local";
|
||||
environmentFiles = [ config.sops.secrets."docker/minecraft".path ];
|
||||
};
|
||||
# };
|
||||
# bcg-plus = {
|
||||
# image = "itzg/minecraft-server:java17";
|
||||
# volumes = [
|
||||
# "${minecraft_path}/bcg-plus/modpacks:/modpacks:ro"
|
||||
# "${minecraft_path}/bcg-plus/data:/data"
|
||||
# ];
|
||||
# hostname = "bcg-plus";
|
||||
# environment = defaultEnv // {
|
||||
# VERSION = "1.17";
|
||||
# CF_SLUG = "bcg";
|
||||
# DIFFICULTY = "normal";
|
||||
# DEBUG = "true";
|
||||
# # ENABLE_COMMAND_BLOCK = "true";
|
||||
# };
|
||||
# extraOptions = defaultOptions;
|
||||
# log-driver = "local";
|
||||
# environmentFiles = [ config.sops.secrets."docker/minecraft".path ];
|
||||
# };
|
||||
};
|
||||
|
||||
sops = {
|
||||
defaultSopsFile = ../secrets.yaml;
|
||||
secrets = {
|
||||
"docker/minecraft".owner = "docker-service";
|
||||
};
|
||||
|
||||
@@ -8,13 +8,11 @@ let
|
||||
# nextcloud-image = import ./nextcloud-image { inherit pkgs; };
|
||||
nextcloud-base = {
|
||||
# image comes from running docker compose build in nextcloud-docker/.examples/full/apache
|
||||
image = "docker.io/library/nextcloud-nextcloud";
|
||||
# pull = "always";
|
||||
# do NOT enable pull here, this image is generated based on a custom docker image
|
||||
image = "nextcloud-nextcloud";
|
||||
hostname = "nextcloud";
|
||||
volumes = [
|
||||
"${nextcloud_path}/nc_data:/var/www/html:z"
|
||||
#"${nextcloud_path}/nc_php:/usr/local/etc/php"
|
||||
"${nextcloud_path}/nc_php:/usr/local/etc/php"
|
||||
"${nextcloud_path}/nc_prehooks:/docker-entrypoint-hooks.d/before-starting"
|
||||
#"${nextcloud_path}/remoteip.conf:/etc/apache2/conf-enabled/remoteip.conf:ro"
|
||||
];
|
||||
@@ -34,7 +32,6 @@ in
|
||||
};
|
||||
redis = {
|
||||
image = "redis:latest";
|
||||
pull = "always";
|
||||
user = "600:600";
|
||||
volumes = [
|
||||
"${config.sops.secrets."docker/redis".path}:/usr/local/etc/redis/redis.conf"
|
||||
@@ -50,7 +47,6 @@ in
|
||||
};
|
||||
go-vod = {
|
||||
image = "radialapps/go-vod:latest";
|
||||
pull = "always";
|
||||
dependsOn = [ "nextcloud" ];
|
||||
environment = {
|
||||
NEXTCLOUD_HOST = "https://nextcloud.alicehuston.xyz";
|
||||
@@ -58,12 +54,10 @@ in
|
||||
volumes = [ "${nextcloud_path}/nc_data:/var/www/html:ro" ];
|
||||
extraOptions = [
|
||||
"--device=/dev/dri:/dev/dri"
|
||||
"--network=nextcloud_default"
|
||||
];
|
||||
};
|
||||
collabora-code = {
|
||||
image = "collabora/code:latest";
|
||||
pull = "always";
|
||||
dependsOn = [ "nextcloud" ];
|
||||
environment = {
|
||||
aliasgroup1 = "https://collabora.nayenoie.com:443";
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -10,7 +10,7 @@ in
|
||||
image = "restic/rest-server:latest";
|
||||
volumes = [ "${restic_path}:/data" ];
|
||||
environment = {
|
||||
OPTIONS = "--prometheus --private-repos --htpasswd-file /data/.htpasswd";
|
||||
OPTIONS = "--prometheus --htpasswd-file /data/.htpasswd";
|
||||
};
|
||||
ports = [ "8010:8000" ];
|
||||
extraOptions = [
|
||||
|
||||
@@ -1,143 +1,103 @@
|
||||
{ config, pkgs, ... }:
|
||||
{ pkgs, ... }:
|
||||
|
||||
let
|
||||
qbitBase = {
|
||||
image = "ghcr.io/linuxserver/qbittorrent:latest";
|
||||
pull = "always";
|
||||
delugeBase = {
|
||||
environment = {
|
||||
PUID = "600";
|
||||
PGID = "100";
|
||||
TZ = "America/New_York";
|
||||
UMASK = "000";
|
||||
DEBUG = "true";
|
||||
DELUGE_DAEMON_LOG_LEVEL = "debug";
|
||||
DELUGE_WEB_LOG_LEVEL = "debug";
|
||||
};
|
||||
};
|
||||
|
||||
vars = import ../vars.nix;
|
||||
#docker_path = vars.primary_docker;
|
||||
torr_path = vars.primary_torr;
|
||||
qbit_path = "${torr_path}/qbit";
|
||||
qbitvpn_path = "${torr_path}/qbitvpn";
|
||||
qbitperm_path = "${torr_path}/qbitperm";
|
||||
deluge_path = "${torr_path}/deluge";
|
||||
delugevpn_path = "${torr_path}/delugevpn";
|
||||
|
||||
genSopsConf = file: {
|
||||
"${file}" = {
|
||||
format = "binary";
|
||||
sopsFile = ./wg/${file};
|
||||
path = "${delugevpn_path}/config/wireguard/configs/${file}";
|
||||
owner = "docker-service";
|
||||
group = "users";
|
||||
restartUnits = [ "docker-delugeVPN.service" ];
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
|
||||
virtualisation.oci-containers.containers = {
|
||||
qbit = qbitBase // {
|
||||
# webui port is 8082, torr port is 29432
|
||||
environment = qbitBase.environment // {
|
||||
WEBUI_PORT = "8082";
|
||||
TORRENTING_PORT = "29432";
|
||||
};
|
||||
deluge = delugeBase // {
|
||||
image = "binhex/arch-deluge";
|
||||
volumes = [
|
||||
"${qbit_path}/config:/config" # move from docker/qbit to qbit_path
|
||||
"${torr_path}/data/:/data"
|
||||
"${deluge_path}/config:/config"
|
||||
"${deluge_path}/data/:/data"
|
||||
"/etc/localtime:/etc/localtime:ro"
|
||||
];
|
||||
networks = [ "host" ];
|
||||
ports = [
|
||||
"8082:8082"
|
||||
"29432:29432"
|
||||
"29432:29432/udp"
|
||||
"8084:8112"
|
||||
"29433:29433"
|
||||
];
|
||||
};
|
||||
delugeVPN = delugeBase // {
|
||||
image = "binhex/arch-delugevpn";
|
||||
extraOptions = [
|
||||
"--dns=9.9.9.9"
|
||||
"--privileged=true"
|
||||
"--sysctl"
|
||||
"net.ipv4.conf.all.src_valid_mark=1"
|
||||
];
|
||||
};
|
||||
environment = delugeBase.environment // {
|
||||
VPN_ENABLED = "yes";
|
||||
VPN_CLIENT = "wireguard";
|
||||
VPN_PROV = "custom";
|
||||
ENABLE_PRIVOXY = "yes";
|
||||
LAN_NETWORK = "192.168.0.0/16";
|
||||
NAME_SERVERS = "194.242.2.9";
|
||||
# note, delete /config/perms.txt to force a bulk permissions update
|
||||
|
||||
# temp instance
|
||||
qbitVPN = qbitBase // {
|
||||
# webui port is 8081, torr port is 39274
|
||||
networks = [
|
||||
"container:gluetun-qbit"
|
||||
];
|
||||
environment = qbitBase.environment // {
|
||||
WEBUI_PORT = "8081";
|
||||
};
|
||||
dependsOn = [ "gluetun-qbit" ];
|
||||
volumes = [
|
||||
"${qbitvpn_path}/config:/config"
|
||||
"${torr_path}/data:/data"
|
||||
"${delugevpn_path}/config:/config"
|
||||
"${delugevpn_path}/data:/data"
|
||||
"/etc/localtime:/etc/localtime:ro"
|
||||
];
|
||||
};
|
||||
gluetun-qbit = {
|
||||
image = "qmcgaw/gluetun:v3";
|
||||
capabilities = {
|
||||
NET_ADMIN = true;
|
||||
};
|
||||
devices = [
|
||||
"/dev/net/tun:/dev/net/tun"
|
||||
];
|
||||
ports = [
|
||||
"8081:8081"
|
||||
"8083:8083"
|
||||
];
|
||||
environment = {
|
||||
TZ = "America/New_York";
|
||||
# SOPS prep
|
||||
};
|
||||
environmentFiles = [
|
||||
config.sops.secrets."docker/gluetun".path
|
||||
config.sops.secrets."docker/gluetun-qbitvpn".path
|
||||
];
|
||||
};
|
||||
|
||||
# permanent instance
|
||||
qbitPerm = qbitBase // {
|
||||
# webui port is 8083, torr port is 29434
|
||||
networks = [
|
||||
"container:gluetun-qbit"
|
||||
];
|
||||
environment = qbitBase.environment // {
|
||||
WEBUI_PORT = "8083";
|
||||
};
|
||||
dependsOn = [ "gluetun-qbit" ];
|
||||
volumes = [
|
||||
"${qbitperm_path}/config:/config"
|
||||
"${torr_path}/data:/data"
|
||||
"/etc/localtime:/etc/localtime:ro"
|
||||
];
|
||||
};
|
||||
gluetun-qbitperm = {
|
||||
image = "qmcgaw/gluetun:v3";
|
||||
capabilities = {
|
||||
NET_ADMIN = true;
|
||||
};
|
||||
devices = [
|
||||
"/dev/net/tun:/dev/net/tun"
|
||||
];
|
||||
ports = [
|
||||
"8083:8083"
|
||||
];
|
||||
environment = {
|
||||
TZ = "America/New_York";
|
||||
# SOPS prep
|
||||
};
|
||||
environmentFiles = [
|
||||
config.sops.secrets."docker/gluetun".path
|
||||
config.sops.secrets."docker/gluetun-qbitperm".path
|
||||
"8085:8112"
|
||||
"8119:8118"
|
||||
"39275:39275"
|
||||
"39275:39275/udp"
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
sops.secrets = {
|
||||
"docker/gluetun" = {
|
||||
owner = "docker-service";
|
||||
restartUnits = [
|
||||
"docker-gluetun-qbit.service"
|
||||
"docker-gluetun-qbitperm.service"
|
||||
];
|
||||
};
|
||||
"docker/gluetun-qbitvpn" = {
|
||||
owner = "docker-service";
|
||||
restartUnits = [
|
||||
"docker-gluetun-qbit.service"
|
||||
];
|
||||
};
|
||||
"docker/gluetun-qbitperm" = {
|
||||
owner = "docker-service";
|
||||
restartUnits = [
|
||||
"docker-gluetun-qbitperm.service"
|
||||
systemd.services.docker-delugeVPN = {
|
||||
serviceConfig = {
|
||||
ExecStartPre = [
|
||||
(
|
||||
"${pkgs.bash}/bin/bash -c \"${pkgs.findutils}/bin/find ${delugevpn_path}/config/wireguard/configs "
|
||||
+ "-type l -not -name wg0.conf "
|
||||
+ "| ${pkgs.coreutils}/bin/shuf -n 1 "
|
||||
+ "| ${pkgs.findutils}/bin/xargs -I {} cp -L {} ${delugevpn_path}/config/wireguard/wg0.conf &&"
|
||||
+ "${pkgs.coreutils}/bin/chown docker-service:users ${delugevpn_path}/config/wireguard/wg0.conf &&"
|
||||
+ "${pkgs.coreutils}/bin/chmod 440 ${delugevpn_path}/config/wireguard/wg0.conf\""
|
||||
)
|
||||
];
|
||||
ExecStopPost = [ "${pkgs.coreutils}/bin/rm ${delugevpn_path}/config/wireguard/wg0.conf" ];
|
||||
};
|
||||
};
|
||||
|
||||
sops.secrets =
|
||||
(genSopsConf "se-mma-wg-001.conf")
|
||||
// (genSopsConf "se-mma-wg-002.conf")
|
||||
// (genSopsConf "se-mma-wg-003.conf")
|
||||
// (genSopsConf "se-mma-wg-004.conf")
|
||||
// (genSopsConf "se-mma-wg-005.conf")
|
||||
// (genSopsConf "se-mma-wg-101.conf")
|
||||
// (genSopsConf "se-mma-wg-102.conf")
|
||||
// (genSopsConf "se-mma-wg-103.conf");
|
||||
}
|
||||
|
||||
@@ -1,19 +1,7 @@
|
||||
{ ... }:
|
||||
|
||||
{
|
||||
networking.firewall = {
|
||||
|
||||
extraCommands = "
|
||||
iptables -I nixos-fw 1 -i br+ -j ACCEPT
|
||||
";
|
||||
|
||||
extraStopCommands = "
|
||||
iptables -D nixos-fw -i br+ -j ACCEPT
|
||||
";
|
||||
|
||||
trustedInterfaces = [ "br+" ];
|
||||
|
||||
allowedTCPPorts = [
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
# qbit
|
||||
8081
|
||||
8082
|
||||
@@ -36,27 +24,6 @@
|
||||
|
||||
# collabora
|
||||
9980
|
||||
|
||||
# arr
|
||||
6767
|
||||
9696
|
||||
7878
|
||||
8989
|
||||
8686
|
||||
8787
|
||||
5055
|
||||
|
||||
# torr
|
||||
29432
|
||||
|
||||
# mattermost
|
||||
8065
|
||||
];
|
||||
|
||||
allowedUDPPorts = [
|
||||
# torr
|
||||
29432
|
||||
];
|
||||
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,48 +0,0 @@
|
||||
{
|
||||
config,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
|
||||
let
|
||||
vars = import ./vars.nix;
|
||||
basePath = "${vars.primary_minio}/garage";
|
||||
in
|
||||
{
|
||||
services.garage = {
|
||||
enable = true;
|
||||
package = pkgs.garage;
|
||||
logLevel = "info";
|
||||
settings = {
|
||||
metadata_dir = "${basePath}/meta";
|
||||
data_dir = "${basePath}/data";
|
||||
db_engine = "sqlite";
|
||||
replication_factor = 1;
|
||||
|
||||
rpc_bind_addr = "127.0.0.1:8504";
|
||||
rpc_public_addr = "127.0.0.1:8504";
|
||||
rpc_secret_file = config.sops.secrets."garage/rpc-secret".path;
|
||||
|
||||
s3_api = {
|
||||
api_bind_addr = "127.0.0.1:8502";
|
||||
s3_region = "us-east-1";
|
||||
root_domain = ".s3.nayeonie.com";
|
||||
};
|
||||
|
||||
admin = {
|
||||
api_bind_addr = "127.0.0.1:8503";
|
||||
admin_token_file = config.sops.secrets."garage/admin-token".path;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"d ${basePath}/meta 0750 garage garage -"
|
||||
"d ${basePath}/data 0750 garage garage -"
|
||||
];
|
||||
|
||||
sops.secrets = {
|
||||
"garage/rpc-secret" = { };
|
||||
"garage/admin-token" = { };
|
||||
};
|
||||
}
|
||||
@@ -10,7 +10,7 @@ in
|
||||
{
|
||||
services.gitea = {
|
||||
enable = true;
|
||||
appName = "Nayeonie's Trove";
|
||||
appName = "The Hearth";
|
||||
database = {
|
||||
type = "postgres";
|
||||
passwordFile = config.sops.secrets."gitea/dbpass".path;
|
||||
@@ -27,20 +27,10 @@ in
|
||||
SSH_PORT = 2222;
|
||||
SSH_LISTEN_PORT = 2223;
|
||||
START_SSH_SERVER = true;
|
||||
PUBLIC_URL_DETECTION = "auto";
|
||||
};
|
||||
repository = {
|
||||
ENABLE_PUSH_CREATE_USER = true;
|
||||
DEFAULT_MERGE_STYLE = "rebase-merge";
|
||||
|
||||
};
|
||||
service = {
|
||||
DISABLE_REGISTRATION = true;
|
||||
};
|
||||
openid = {
|
||||
ENABLE_OPENID_SIGNIN = true;
|
||||
ENABLE_OPENID_SIGNUP = false;
|
||||
};
|
||||
log = {
|
||||
LEVEL = "Trace";
|
||||
ENABLE_SSH_LOG = true;
|
||||
@@ -55,15 +45,6 @@ in
|
||||
host = "192.168.76.2";
|
||||
port = "8088";
|
||||
};
|
||||
"storage.minio" = {
|
||||
STORAGE_TYPE = "minio";
|
||||
MINIO_ENDPOINT = "minio.nayeonie.com";
|
||||
MINIO_BUCKET = "gitea";
|
||||
MINIO_LOCATION = "us-east-1";
|
||||
MINIO_USE_SSL = true;
|
||||
MINIO_INSECURE_SKIP_VERIFY = false;
|
||||
MINIO_BUCKET_LOOKUP_TYPE = "auto";
|
||||
};
|
||||
};
|
||||
stateDir = base_path;
|
||||
lfs.enable = true;
|
||||
@@ -75,52 +56,9 @@ in
|
||||
after = [ "docker.service" ];
|
||||
};
|
||||
|
||||
systemd.services.gitea-kanidm-oidc-bootstrap = {
|
||||
description = "Bootstrap Gitea Kanidm OIDC auth source";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
requires = [ "gitea.service" ];
|
||||
after = [ "gitea.service" ];
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
User = "root";
|
||||
Group = "root";
|
||||
};
|
||||
path = [
|
||||
config.services.gitea.package
|
||||
pkgs.coreutils
|
||||
pkgs.gnugrep
|
||||
];
|
||||
script = ''
|
||||
set -eu
|
||||
|
||||
APP_INI="${config.services.gitea.customDir}/conf/app.ini"
|
||||
|
||||
if gitea admin auth list --config "$APP_INI" | grep -Fq "Kanidm OIDC"; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
gitea admin auth add-oauth \
|
||||
--config "$APP_INI" \
|
||||
--name "Kanidm OIDC" \
|
||||
--provider openidConnect \
|
||||
--key "gitea" \
|
||||
--secret "$(<${config.sops.secrets."kanidm/gitea_oidc_client_secret".path})" \
|
||||
--auto-discover-url "https://auth.nayeonie.com/oauth2/openid/gitea/.well-known/openid-configuration" \
|
||||
--scopes openid \
|
||||
--scopes profile \
|
||||
--scopes email \
|
||||
--full-name-claim-name name \
|
||||
--group-claim-name groups \
|
||||
--required-claim-name groups \
|
||||
--required-claim-value gitea-users \
|
||||
--admin-group gitea-users
|
||||
'';
|
||||
};
|
||||
|
||||
networking.firewall.allowedTCPPorts = [ 6443 ];
|
||||
|
||||
sops.secrets = {
|
||||
"gitea/dbpass".owner = "gitea";
|
||||
"gitea/minio".owner = "gitea";
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,38 +0,0 @@
|
||||
# HAProxy routing stubs for Kanidm.
|
||||
# These are ADDITIVE fragments — merge into your main haproxy config.
|
||||
# Assumes:
|
||||
# - HAProxy terminates TLS using the acme-nayeonie.com certificate
|
||||
# - Kanidm HTTPS listens on [::1]:8443
|
||||
# - Kanidm LDAP compat listens on [::1]:3890
|
||||
# - ACL-based routing by SNI / Host header
|
||||
#
|
||||
# In your main frontend (or add a dedicated one):
|
||||
#
|
||||
# acl host_kanidm hdr(host) -i auth.nayeonie.com # internal/admin only
|
||||
#
|
||||
# use_backend kanidm if host_kanidm
|
||||
#
|
||||
# --- Kanidm backend ---
|
||||
#
|
||||
# backend kanidm
|
||||
# mode http
|
||||
# option forwardfor
|
||||
# http-request set-header X-Forwarded-Proto https
|
||||
# http-request set-header X-Forwarded-Host %[req.hdr(host)]
|
||||
# acl internal src 192.168.76.0/24 192.168.191.0/24
|
||||
# http-request deny unless internal
|
||||
# server kanidm [::1]:8443 ssl verify none check
|
||||
#
|
||||
# --- Forward-auth pattern for protecting other backends with Kanidm ---
|
||||
#
|
||||
# To protect an existing backend with Kanidm OIDC, add oauth2-proxy.
|
||||
# The simplest
|
||||
# path for HAProxy is:
|
||||
#
|
||||
# 1. Deploy oauth2-proxy (services.oauth2-proxy) configured against
|
||||
# Kanidm as OIDC provider (issuer https://auth.nayeonie.com).
|
||||
# 2. In HAProxy frontend, redirect unauthenticated requests to
|
||||
# oauth2-proxy before forwarding to the real backend.
|
||||
#
|
||||
# This is left as a follow-up — get Kanidm running first.
|
||||
{ ... }: { }
|
||||
@@ -42,12 +42,7 @@ in
|
||||
services = {
|
||||
hydra = {
|
||||
enable = true;
|
||||
package = inputs.hydra.packages.x86_64-linux.hydra.overrideAttrs (old: {
|
||||
preCheck = ''
|
||||
export YATH_JOB_COUNT=8
|
||||
${old.preCheck or ""}
|
||||
'';
|
||||
});
|
||||
package = inputs.hydra.packages.x86_64-linux.hydra;
|
||||
hydraURL = "https://hydra.alicehuston.xyz";
|
||||
smtpHost = "alicehuston.xyz";
|
||||
notificationSender = "hydra@alicehuston.xyz";
|
||||
@@ -57,7 +52,6 @@ in
|
||||
minimumDiskFree = 50;
|
||||
minimumDiskFreeEvaluator = 100;
|
||||
extraConfig = ''
|
||||
allow_import_from_derivation = true
|
||||
<git-input>
|
||||
timeout = 3600
|
||||
</git-input>
|
||||
@@ -88,10 +82,10 @@ in
|
||||
'';
|
||||
};
|
||||
|
||||
# nix-serve = {
|
||||
# enable = true;
|
||||
# secretKeyFile = config.sops.secrets."nix-serve/secret-key".path;
|
||||
# };
|
||||
nix-serve = {
|
||||
enable = true;
|
||||
secretKeyFile = config.sops.secrets."nix-serve/secret-key".path;
|
||||
};
|
||||
prometheus = {
|
||||
enable = true;
|
||||
webExternalUrl = "https://prom.alicehuston.xyz";
|
||||
@@ -140,7 +134,7 @@ in
|
||||
sops = {
|
||||
secrets = {
|
||||
"hydra/environment".owner = "hydra";
|
||||
# "nix-serve/secret-key".owner = "root";
|
||||
"nix-serve/secret-key".owner = "root";
|
||||
"alice/gha-hydra-token" = {
|
||||
sopsFile = ../../users/alice/secrets.yaml;
|
||||
owner = "hydra";
|
||||
|
||||
@@ -1,127 +0,0 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
let
|
||||
domain = "nayeonie.com";
|
||||
authDomain = "auth.${domain}";
|
||||
aliceSshKeys = config.users.users.alice.openssh.authorizedKeys.keys;
|
||||
in
|
||||
{
|
||||
services = {
|
||||
kanidm = {
|
||||
package = pkgs.kanidm_1_9.withSecretProvisioning;
|
||||
|
||||
server = {
|
||||
enable = true;
|
||||
|
||||
settings = {
|
||||
origin = "https://${authDomain}";
|
||||
inherit domain;
|
||||
bindaddress = "0.0.0.0:8443";
|
||||
ldapbindaddress = "0.0.0.0:3890";
|
||||
tls_chain = "/var/lib/acme/${domain}/fullchain.pem";
|
||||
tls_key = "/var/lib/acme/${domain}/key.pem";
|
||||
db_fs_type = "zfs";
|
||||
};
|
||||
};
|
||||
|
||||
# Reuse the existing secret during migration; rotate/rename in a follow-up.
|
||||
provision = {
|
||||
enable = true;
|
||||
instanceUrl = "https://${authDomain}";
|
||||
adminPasswordFile = config.sops.secrets."kanidm/admin_password".path;
|
||||
idmAdminPasswordFile = config.sops.secrets."kanidm/admin_password".path;
|
||||
acceptInvalidCerts = false;
|
||||
|
||||
groups = {
|
||||
gitea-users = {
|
||||
present = true;
|
||||
};
|
||||
};
|
||||
|
||||
persons = {
|
||||
alice = {
|
||||
displayName = "Alice";
|
||||
mailAddresses = [ "aliceghuston@gmail.com" ];
|
||||
present = true;
|
||||
groups = [ "gitea-users" ];
|
||||
};
|
||||
};
|
||||
|
||||
systems.oauth2.gitea = {
|
||||
present = true;
|
||||
displayName = "Gitea";
|
||||
public = false;
|
||||
basicSecretFile = config.sops.secrets."kanidm/gitea_oidc_client_secret".path;
|
||||
originUrl = "https://nayeonie.com/user/oauth2/kanidm/callback";
|
||||
originLanding = "https://nayeonie.com/";
|
||||
preferShortUsername = true;
|
||||
scopeMaps = {
|
||||
gitea-users = [
|
||||
"openid"
|
||||
"email"
|
||||
"profile"
|
||||
];
|
||||
};
|
||||
claimMaps.groups.valuesByGroup = {
|
||||
gitea-users = [ "gitea-users" ];
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
sops.secrets = {
|
||||
"kanidm/admin_password".owner = "kanidm";
|
||||
"kanidm/gitea_oidc_client_secret".owner = "kanidm";
|
||||
};
|
||||
|
||||
# Certs are currently group-readable by haproxy for docker HAProxy.
|
||||
users.users.kanidm.extraGroups = [ "haproxy" ];
|
||||
|
||||
systemd.services.kanidm-person-ssh-keys-bootstrap = {
|
||||
description = "Bootstrap Kanidm SSH public keys for alice";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
requires = [ "kanidm.service" ];
|
||||
after = [ "kanidm.service" ];
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
User = "root";
|
||||
Group = "root";
|
||||
};
|
||||
path = [
|
||||
config.services.kanidm.package
|
||||
pkgs.coreutils
|
||||
pkgs.gawk
|
||||
pkgs.gnugrep
|
||||
];
|
||||
script = ''
|
||||
set -eu
|
||||
|
||||
url="https://${authDomain}"
|
||||
password="$(<${config.sops.secrets."kanidm/admin_password".path})"
|
||||
state_dir="/var/lib/kanidm/ssh-bootstrap"
|
||||
mkdir -p "$state_dir"
|
||||
chmod 700 "$state_dir"
|
||||
export HOME="$state_dir"
|
||||
|
||||
# Authenticate idm_admin for CLI operations.
|
||||
printf '%s\n' "$password" | kanidm login -H "$url" -D idm_admin >/dev/null
|
||||
|
||||
existing_keys="$(kanidm -H "$url" -D idm_admin person ssh list-publickeys alice || true)"
|
||||
i=0
|
||||
${lib.concatMapStringsSep "\n" (
|
||||
key:
|
||||
" i=$((i + 1))\n if ! printf '%s\\n' \"$existing_keys\" | grep -Fq ${lib.escapeShellArg key}; then\n kanidm -H \"$url\" -D idm_admin person ssh add-publickey alice \"home-key-$i\" ${lib.escapeShellArg key} >/dev/null\n fi"
|
||||
) aliceSshKeys}
|
||||
'';
|
||||
};
|
||||
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
3890
|
||||
8443
|
||||
];
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
{
|
||||
config,
|
||||
...
|
||||
}:
|
||||
let
|
||||
vars = import ./vars.nix;
|
||||
in
|
||||
{
|
||||
services.mattermost = {
|
||||
enable = true;
|
||||
siteUrl = "https://mattermost.nayeonie.com"; # Set this to the URL you will be hosting the site on.
|
||||
database = {
|
||||
peerAuth = true; # This allows Mattermost to connect to the database without a password, which is more secure when both are on the same machine.
|
||||
create = true;
|
||||
driver = "postgres";
|
||||
};
|
||||
dataDir = "${vars.primary_mattermost}/mattermost";
|
||||
host = "0.0.0.0";
|
||||
};
|
||||
}
|
||||
@@ -1,77 +0,0 @@
|
||||
{
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
let
|
||||
vars = import ./vars.nix;
|
||||
in
|
||||
{
|
||||
services = {
|
||||
ollama = {
|
||||
enable = true;
|
||||
package = pkgs.ollama;
|
||||
syncModels = true;
|
||||
loadModels = [
|
||||
"deepseek-r1:1.5b"
|
||||
"deepseek-r1:32b"
|
||||
"deepseek-r1:70b"
|
||||
#"qwen3"
|
||||
#"qwen3.5:latest"
|
||||
"qwen3-coder-next"
|
||||
"lennyerik/zeta"
|
||||
"nomic-embed-text:latest"
|
||||
"lfm2:24b"
|
||||
"glm-4.7-flash"
|
||||
"nemotron-cascade-2:30b"
|
||||
"magistral"
|
||||
"devstral-small-2"
|
||||
"starcoder2:15b"
|
||||
];
|
||||
models = vars.primary_ollama;
|
||||
environmentVariables = {
|
||||
FLASH_ATTENTION = "1";
|
||||
OLLAMA_KV_CACHE_TYPE = "q4_0";
|
||||
# Ollama memory configuration
|
||||
OLLAMA_MAX_LOADED_MODELS = "3";
|
||||
OLLAMA_MAX_QUEUE = "512";
|
||||
OLLAMA_NUM_PARALLEL = "1";
|
||||
|
||||
# ROCm memory optimization
|
||||
#HIP_VISIBLE_DEVICES = "0";
|
||||
#ROCR_VISIBLE_DEVICES = "0";
|
||||
|
||||
# context length for agents
|
||||
OLLAMA_CONTEXT_LENGTH = "128000";
|
||||
};
|
||||
openFirewall = true;
|
||||
host = "0.0.0.0"; # don't want to make this available via load-balancer yet, so making it available on the local network
|
||||
};
|
||||
open-webui = {
|
||||
enable = true;
|
||||
port = 21212;
|
||||
openFirewall = true;
|
||||
host = "0.0.0.0"; # don't want to make this available via load-balancer yet, so making it available on the local network
|
||||
};
|
||||
};
|
||||
users.users.ollama = {
|
||||
extraGroups = [
|
||||
"render"
|
||||
"video"
|
||||
];
|
||||
group = "ollama";
|
||||
isSystemUser = true;
|
||||
};
|
||||
users.groups.ollama = { };
|
||||
systemd.services = {
|
||||
ollama.serviceConfig = {
|
||||
Nice = 19;
|
||||
IOSchedulingPriority = 7;
|
||||
};
|
||||
ollama-model-loader.serviceConfig = {
|
||||
Nice = 19;
|
||||
CPUWeight = 50;
|
||||
IOSchedulingClass = "idle";
|
||||
IOSchedulingPriority = 7;
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
{
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
let
|
||||
vars = import ../vars.nix;
|
||||
in
|
||||
{
|
||||
services.plex = {
|
||||
enable = true;
|
||||
dataDir = vars.primary_plex;
|
||||
};
|
||||
systemd.services.plex_permission = {
|
||||
description = "maintains plex permissions";
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
ExecStart = "${pkgs.bash}/bin/bash ${./plex_permission.sh}";
|
||||
};
|
||||
};
|
||||
systemd.timers.plex_permission = {
|
||||
wantedBy = [ "timers.target" ];
|
||||
timerConfig = {
|
||||
OnBootSec = "1h";
|
||||
OnCalendar = "daily 03:00";
|
||||
Unit = "plex_permission.service";
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
plex_dir="/ZFS/ZFS-primary/plex"
|
||||
|
||||
chown docker-service:users -R "$plex_dir"
|
||||
find "$plex_dir" -type f -exec chmod 664 {} \;
|
||||
find "$plex_dir" -type d -exec chmod 775 {} \;
|
||||
@@ -1,5 +1,6 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
@@ -18,10 +19,6 @@ in
|
||||
enable = true;
|
||||
enableJIT = true;
|
||||
package = pkgs.postgresql_16;
|
||||
configurePgStatStatements = true;
|
||||
#enableAllPreloadedLibraries = true;
|
||||
installAllAvailableExtensions = true;
|
||||
#preloadAllExtensions = true;
|
||||
identMap = ''
|
||||
# ArbitraryMapName systemUser DBUser
|
||||
superuser_map root postgres
|
||||
@@ -29,136 +26,15 @@ in
|
||||
# Let other names login as themselves
|
||||
superuser_map /^(.*)$ \1
|
||||
'';
|
||||
authentication = ''
|
||||
local bazarr bazarr scram-sha-256
|
||||
local /.*arr-main /.*arr scram-sha-256
|
||||
local /.*arr-log /.*arr scram-sha-256
|
||||
local jellyseerr jellyseerr scram-sha-256
|
||||
host all all 192.168.76.0/24 ldap ldapserver=127.0.0.1 ldapport=3890 ldapbasedn="dc=nayeonie,dc=com" ldapsearchattribute="uid"
|
||||
'';
|
||||
|
||||
# initialScript = config.sops.secrets."postgres/init".path;
|
||||
ensureDatabases = [
|
||||
"atticd"
|
||||
"alice"
|
||||
"mattermost"
|
||||
];
|
||||
ensureDatabases = [ "atticd" ];
|
||||
ensureUsers = [
|
||||
{
|
||||
name = "atticd";
|
||||
ensureDBOwnership = true;
|
||||
}
|
||||
{
|
||||
name = "alice";
|
||||
ensureDBOwnership = true;
|
||||
ensureClauses = {
|
||||
superuser = true;
|
||||
login = true;
|
||||
createrole = true;
|
||||
createdb = true;
|
||||
replication = true;
|
||||
};
|
||||
}
|
||||
];
|
||||
# Thank you NotAShelf
|
||||
# https://github.com/NotAShelf/nyx/blob/d407b4d6e5ab7f60350af61a3d73a62a5e9ac660/modules/core/roles/server/system/services/databases/postgresql.nix#L74
|
||||
# commented out statements are likely overriden by pgtune settings
|
||||
# https://pgtune.leopard.in.ua/?dbVersion=17&osType=linux&dbType=web&cpuNum=64&totalMemory=8&totalMemoryUnit=GB&connectionNum=1024&hdType=hdd
|
||||
settings = {
|
||||
# Connectivity;
|
||||
# max_connections = 100;
|
||||
superuser_reserved_connections = 3;
|
||||
|
||||
# Memory Settings;
|
||||
#shared_buffers = "1024 MB";
|
||||
#work_mem = "32 MB";
|
||||
#maintenance_work_mem = "320 MB";
|
||||
#huge_pages = "off";
|
||||
#effective_cache_size = "2 GB";
|
||||
#effective_io_concurrency = 100; # concurrent IO only really activated if OS supports posix_fadvise function;
|
||||
#random_page_cost = 1.25; # speed of random disk access relative to sequential access (1.0);
|
||||
|
||||
# Monitoring;
|
||||
#shared_preload_libraries = "pg_stat_statements,auto_explain"; # per statement resource usage stats & log explain statements for slow queries
|
||||
track_io_timing = "on"; # measure exact block IO times;
|
||||
track_functions = "pl"; # track execution times of pl-language procedures if any;
|
||||
# Replication;
|
||||
wal_level = "replica"; # consider using at least "replica";
|
||||
max_wal_senders = 0;
|
||||
synchronous_commit = "on";
|
||||
|
||||
# Checkpointing: ;
|
||||
checkpoint_timeout = "15 min";
|
||||
#checkpoint_completion_target = 0.9;
|
||||
#max_wal_size = "1024 MB";
|
||||
#min_wal_size = "512 MB";
|
||||
|
||||
# WAL writing;
|
||||
wal_compression = "on";
|
||||
wal_buffers = -1; # auto-tuned by Postgres till maximum of segment size (16MB by default);
|
||||
wal_writer_delay = "200ms";
|
||||
wal_writer_flush_after = "1MB";
|
||||
|
||||
# Background writer;
|
||||
bgwriter_delay = "200ms";
|
||||
bgwriter_lru_maxpages = 100;
|
||||
bgwriter_lru_multiplier = 2.0;
|
||||
bgwriter_flush_after = 0;
|
||||
|
||||
# Parallel queries: ;
|
||||
#max_worker_processes = 6;
|
||||
#max_parallel_workers_per_gather = 3;
|
||||
#max_parallel_maintenance_workers = 3;
|
||||
#max_parallel_workers = 6;
|
||||
parallel_leader_participation = "on";
|
||||
|
||||
# Advanced features ;
|
||||
enable_partitionwise_join = "on";
|
||||
enable_partitionwise_aggregate = "on";
|
||||
jit = "on";
|
||||
|
||||
jit_above_cost = 100000;
|
||||
jit_inline_above_cost = 150000;
|
||||
jit_optimize_above_cost = 500000;
|
||||
|
||||
# log slow queries
|
||||
log_min_duration_statement = 100;
|
||||
"auto_explain.log_min_duration" = 100;
|
||||
|
||||
# logging configuration
|
||||
log_connections = true;
|
||||
log_statement = "all";
|
||||
logging_collector = true;
|
||||
log_disconnections = true;
|
||||
|
||||
# from pgtune
|
||||
# DB Version: 17
|
||||
# OS Type: linux
|
||||
# DB Type: web
|
||||
# Total Memory (RAM): 8 GB
|
||||
# CPUs num: 64
|
||||
# Connections num: 1024
|
||||
# Data Storage: hdd
|
||||
|
||||
max_connections = 1024;
|
||||
shared_buffers = "2GB";
|
||||
effective_cache_size = "6GB";
|
||||
maintenance_work_mem = "512MB";
|
||||
checkpoint_completion_target = 0.9;
|
||||
#wal_buffers = "16MB"; allow auto-tuning as per above
|
||||
default_statistics_target = 100;
|
||||
random_page_cost = 4;
|
||||
effective_io_concurrency = 2;
|
||||
work_mem = "512kB";
|
||||
huge_pages = "off";
|
||||
min_wal_size = "1GB";
|
||||
max_wal_size = "4GB";
|
||||
max_worker_processes = 64;
|
||||
max_parallel_workers_per_gather = 4;
|
||||
max_parallel_workers = 64;
|
||||
max_parallel_maintenance_workers = 4;
|
||||
|
||||
};
|
||||
|
||||
refreshCollation = true;
|
||||
vacuumAnalyzeTimer.enable = true;
|
||||
@@ -172,8 +48,6 @@ in
|
||||
"hydra-send-stats"
|
||||
"hydra-server"
|
||||
"atticd"
|
||||
"gitea"
|
||||
"mattermost"
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
@@ -2,17 +2,18 @@
|
||||
{
|
||||
services.samba = {
|
||||
enable = true;
|
||||
securityType = "user";
|
||||
openFirewall = true;
|
||||
settings = {
|
||||
global = {
|
||||
security = "user";
|
||||
"workgroup" = "WORKGROUP";
|
||||
"server string" = "palatine-hill";
|
||||
"netbios name" = "palatine-hill";
|
||||
"security" = "user";
|
||||
#"use sendfile" = "yes";
|
||||
#"max protocol" = "smb2";
|
||||
# note: localhost is the ipv6 localhost ::1
|
||||
"hosts allow" = "192.168.76. 127.0.0.1 localhost 192.168.191.";
|
||||
"hosts allow" = "192.168.76. 127.0.0.1 localhost";
|
||||
"hosts deny" = "0.0.0.0/0";
|
||||
"guest account" = "nobody";
|
||||
"map to guest" = "bad user";
|
||||
|
||||
@@ -1,80 +1,63 @@
|
||||
hydra:
|
||||
environment: ENC[AES256_GCM,data:6Qje83f1eXNwBDrGEeq4itNWrDSSGlD3Qmz0Ni3AeMBfJ1Q/9bSeG0icCOynb0UkQfTccKYMECMG2Fn6NlP8sD/9nVyu3CSM7OO7+pJY7F/7vDl8p03e+aatIV+g34pzQygO1UYrfMrk1MDhmu+e05kugyHHFtmBAGeSc7o4cgH1tWQCY470CTtPv4HiLDFHyz1RYrXE6nZD+NynnNJGKGNYqBK85+pb9xJXgtwqB0J9kl0ZsCOyt8zs1WcFaXo+dK73Bpogv/clru6ckTo+bfY=,iv:imdrJ+CZxK7AQwbEsKyL/YfvgsXkUSfA/3AOz9FQeyc=,tag:8i4hFyrxZXBcOPT5MqjijA==,type:str]
|
||||
environment: ENC[AES256_GCM,data:G/6DOeRdjjp5PGpsHCHneW2X/OQzSH6gozKmgOlK6/bSdQltv4U00AYNOrUYYlH9Yab7JSYBfQinsqRKyDVEp7LLPdlxBaztJiSZGGAdio+JHWwR7UAhAEXSgOh4qFq0SjdZzQduEOdfSYfksut3dJiAvpj6oo6hxuo8mkW4+UacpBmvpnrzHjJHeYYbb3krIhKG6bBqHLT403rLf5oYjnY16XUuYO7deAH99JkfCJKlKnDf3GLfnX78XoXSdOMUyf57PPq5EKA8mFdtZsbAmis=,iv:s903rYHyocGtVJ594+HtCyULGtuom6aUVDcbXPbH93I=,tag:YFkFAIU7cNHSuYnN+lShgA==,type:str]
|
||||
nix-serve:
|
||||
secret-key: ENC[AES256_GCM,data:fdZRXT0jnZMUtMTi32gPXfTJiV/xqhf0C6XnAf0hyQIVRof80P1zo5BTVt388NfBXIj0DHmsaH3Fx7m2Psoigo+B1YyUmobCQwe7NAjE281Aad2+r0xNa+YDNo1A5sKofTBORUUuAVBmQKHxByw=,iv:BrFOGZFzFNVuXUez/0bpklXAWOBy6dWYtmumVKXCOFo=,tag:pzy9q0uGxQioxxbxbrTTLw==,type:str]
|
||||
secret-key: ENC[AES256_GCM,data:M8MJHHO8Hd/Gm6Nxy7/IPr0s6jHEDBB9LpZq8lIWQirvZPpgNrMrnP2xFJWEuJF/ND9hU09ZHA3efIBej2siRPOWSEu4gE65W/GMtpCcwEXF0hR/ISvBsH0fci/6KGbUCVg1x9AJpjJsqevPN7I=,iv:Weuziu2me+kdB9zk68nvLnyxv0ICwB1qA4z0Q39tT6k=,tag:nhcFfRQOxEandrf6CivahA==,type:str]
|
||||
attic:
|
||||
secret-key: ENC[AES256_GCM,data:nSKTRRZ9SeGv+kFPzu9EGPHlr6oy6cFDMqZ6t43MxQduVGliHlpnYTSwrDg3ybqov9ihP7JTEwlir/+goA5cPm6TZusYT8bczlMMWNgp10kbOGhAY+Ybv/FjiWjor6PSdGz6gXo3QSIKcUEs4CTxuSbNeoVno6x4EJltHWg/QNWDT4iAZB381SAxDGweMz+Hu0aXUKY0EMhXStcgr+b0h0JIv8KKjnatXDEQ/4r+jeRm8Qx47nUed8ZvEe/lHkK7D6ZV5O/yN7+QWw++SXEisuv3O0jGoiZ5oqdx99HfeLz2fw+3IrxZhg==,iv:PvQlzbYCCRuCbiV7SrVwulO4QX3OEYNkccIWFniQLfg=,tag:5sV3f+2WJmuJy0Wa62pZTQ==,type:str]
|
||||
database-url: ENC[AES256_GCM,data:w1jHgn/PETG3SRRTZQvdPHnoUY1y4tHl2BpCwu03XncEPDjNP5J873mm78A2fFH/uQjuBhqRxtlK,iv:abwCrA9tgLHefCm+BVgwh4+g5CU9/Kskhvyka8/nbg8=,tag:3wJXL3u5cl/9WIVXdulHbA==,type:str]
|
||||
adm: ENC[AES256_GCM,data:oPsk2Ks7zqIEGznU5iH5pplxFa85B8dj24lDx0tqy7Mt6ED1GHj7YLt37GGYaGQOM/YdFv3wmGG4Y4rSCLwE4kiM5iOFEcKnlePre1eoER1bf+tvFZOPyGq/SsFiYTSYqHTShfk0scHHkomwVf+gRwD3k4DjXbkjTof2ulg6Pce36E3oOEJaRzKBOrVprwpgPn8GKm1zcM5Ssr5z1ljXGx+JbtK2VtLKd2Q0kc+ikMe/cYo6YT3SlJNEKAmGRVtR/dtkkBwseBYhU7yfHbLqLGFO3EFZjYIkNfTRvDKEu6BB5wlcoKtxi4yjrz8iB2XNX5GCL7gXUWO2ZIsk4ZZMJFthqi9jYDrl,iv:f0YPT4avkVZPuo13wdNyglxW1Ea5cjKXcGa4eaPuMus=,tag:mGBnfXWCfJSHoYXH3gAsHA==,type:str]
|
||||
secret-key: ENC[AES256_GCM,data:/wYnCD7qggeHdsNqkp1rZK839o/1olhJUlT1lrZpv1hTOZDduP2OGhz8kh2PrQR6Mq2Y/ALgHG3cFpJs7G64xDK0qRVGIDlC/9sTQIcF2JL49Free8vADe5ads64EN3vWgfmFoBMPmL0mc4qnDBGnBkDueFN5gy+1szK9tWK23tMl1wEWVsiqBwhuWqQBNRxeaHR2tQXI2Yg3fefq5+laOUjnSe1a8Kx4dJ7rXZuXe+H4uyU7roYFxlLpI8qZig0eUO9WUMX9WP0tKOr5OjsbJzBbdVlVT7lZ9ROYUceoxmcWecLlcyv3Q==,iv:DjH78Getnt3zzK9QLj+HS0cF1wtaBeadxSTrRb1uic0=,tag:KMPtWCq1KT1SSthh3fdsew==,type:str]
|
||||
database-url: ENC[AES256_GCM,data:WHdAxNbkRxvNvfUWdPSbgeQXOS7f46OuDKTRuxf3cEyhbU5NAsGlCgfarUBXsHrCH79t7zDGlcRE,iv:trOxDY/ifsibKoX5YPOfKvX/q2ny6SgykiIBusgHxag=,tag:Cx9hhiJIhDLiojJmDdSDtg==,type:str]
|
||||
adm: ENC[AES256_GCM,data:mP4xFGK3+YwyiUMwFaG6tY3tWLGY2YTGa4DRuHzW5Za3McmwEFUzlQQ4hGS2bPKOKwM2Pe5HYBwJnFkd6KRwx5civqsBMwFt4dfZ31xDEi9RxpEm9jCnCcvB1CY8cxNARIhceC12X/ZR8ianUpoINYSjOj4BRy4TEEigi5+V4DkAXeG8+x8SWjj/mRMQMcZud4i69Ul7tpzbjUHm0s/Aasvmib13u4ZbGX/AyoOX8pQwkRHoyfMK2OvRbaeQf9fPcQxOSBALYOIXk9mEGxN1FTFHrTvrY5s0w+hC1mAjX4qm4ZM77RneAI0fJaq1hHSZETIpJOCiQfR3bLuyzWKVestOE29V8Pwq,iv:bjK1QkWUc2vs+oUoC5Z0AKR1/tmrhSLvP8BP8gzghOg=,tag:dmSDM+gbsJMDkqgIPWBfGQ==,type:str]
|
||||
postgres:
|
||||
init: ENC[AES256_GCM,data:6a+xTs7S36ka64S7wGd9Wpfre2CW1CoQmnqfXqPolBcYOv9m0DmCmmyfMQsrRV+LQK3Yf+sYcy0=,iv:b5wD8XgPI8kjP8n5j144mMisJl3dhrx8nWF1P+gr0qo=,tag:wG+Hlu55WwJT1aairSFVlg==,type:str]
|
||||
init: ENC[AES256_GCM,data:trwA30EswHEPa6V2GuHsGgU4NK/j/UQveldwHng0Ilwyqh9aZCgF3axP48MmcciBssux8DZ4O5U=,iv:VC+tpG5yuiBE7pjZ85lYCwHG/bTePxeXQDz2zyLyLYA=,tag:5+jwWTv5T5YWwQpR58QfOA==,type:str]
|
||||
gitea:
|
||||
dbpass: ENC[AES256_GCM,data:CN6uBXrpSYFiKxt9UfSwgw==,iv:71tgEcT+/246dPujwLwg2Z5SPICnGJUfeSA+uDgOmdU=,tag:melKWxmiE+XnukVtSt/iNg==,type:str]
|
||||
minio: ENC[AES256_GCM,data:D6kLP4FEc7U4H4eg5IaXYZn9Z2eSMbsKUafJnTwhHrxVYUq6L7F61iBzONgkCMrwSma6H4RVq32TeRea1gZPVoRD2yryNRrxStkJUuf7t0cjwcS/6E+x0yI/v2osmf0LQYTmGRtH,iv:dFTBhjbpfzs82KCq1m40eu0j7GPpZmzt50qLvRzy6xo=,tag:cuRmYkXz7X4iaW0x4ASkJw==,type:str]
|
||||
dbpass: ENC[AES256_GCM,data:8jECcEJ8JnK7fztTckzLrQ==,iv:yQMp5VrierOKXwiop0NUA7Qbn2eH5iUCVlKppZwKLIQ=,tag:rI9WT7zLIaFxVcTu3ufW4g==,type:str]
|
||||
upsmon:
|
||||
password: ENC[AES256_GCM,data:gA10207c1NzBSK45ezth4C6N,iv:kjiY2M3Vy61iDH2ueQooVI4JA2CwfKN0rQsI0Ch4D7s=,tag:caRjEDvLpsx0sZFUfft6QQ==,type:str]
|
||||
password: ENC[AES256_GCM,data:52Rxsh7KUq+aYjQORBC+Yq5B,iv:F05g/a5bv7DQ+eLlMqsNeRHLxzl7AyXU1zAlmFevQ6o=,tag:xkGDD3hDF+u5fUbP33OrlA==,type:str]
|
||||
minio:
|
||||
credentials: ENC[AES256_GCM,data:C4qbzvteveYpC84v013GDlKWYg6JHHaZuXirpaZ54u/mS6FZusC2qTBvdHz0pmFc95NbMb/wjcvZvYenMuSnU2V5BsK33XQ1UlSKzamt9v2IRAlllTT1vRCJ4EhCs/QEL0/pfGGJkxtUVW0NYFkJI67F6c3sFJUDfHYD,iv:FMRn/52gUfysGrsGhu5qF0OBDmGex6Ye/zfQyY215Xs=,tag:9tdvOcPgR28n+L+0zhMMbg==,type:str]
|
||||
loki: ENC[AES256_GCM,data:4wza/33eESjF4C1mcl4NIqGnvYg3Mg==,iv:Bt1ORiuuIUnnGk4XlemhCdmjBNRLZo9ikH1rqrTbS/g=,tag:AYUO/JllKIQszTNpEuEDBw==,type:str]
|
||||
credentials: ENC[AES256_GCM,data:5Z/cTmxSuMq8BfRgYLGZZJ7o6AtmrQM3yNjR17YHr29S7ZWvGsjfM7DsLKectem01nvv3HoT4uyWSdhkOmZahzDb5OF1NEgjJhLqkKlCETMu0mmpwe1cx6iOd7kjB3E6Az/MWpXqZ/TrryL9FrQD2nnx9bHyWWIHRQv8,iv:jiYZXfU+OssC0rh/3yFZLEzD1+5mVDDl6gQ3oyk76E4=,tag:bevDszFv1zSa+/2qQIgC0w==,type:str]
|
||||
loki: ENC[AES256_GCM,data:ShC6hfsKifVaxLWRo1fqaOpsrYh4+w==,iv:KVSlPd0mBvPZikg/Agnl6q0UhxTmsNOeYdercYOhqMg=,tag:cj6ex9m7vDjInTJDGUlqFQ==,type:str]
|
||||
docker:
|
||||
minecraft: ENC[AES256_GCM,data:sAEACU9U51uafNJ2RlPBwN4/+EyEI3X9jXffMvrBYI+BgzmzQumhRMvUqYk4R4oFDtvz0RwTL2vGWMorFt1YaVQN+anfHDM=,iv:UXCicw5gC4hQUNqxbeuqtidMwJY0kvH55nMkEop5Ytc=,tag:X5b06FBnybFr4qSxU6NtmQ==,type:str]
|
||||
foundry: ENC[AES256_GCM,data:CmMkfKPTq+oszR66vs+AshEdPDy81DA4OiPI/sgU2UwwXcgNTyuvmJRX7QNxsmiRQPb6EX+H6pcTESQtaqjYbCeo7n47b3BqyrTe/QrwkCAAtsdleWNNpTmmvIJi0RmxsagKheg=,iv:WxfWLsFqkTZajJT49CSi1ThQVrgYZl0vlsQo+MhrYsE=,tag:3XPErBxipOMkvze4pCerCg==,type:str]
|
||||
nextcloud: ENC[AES256_GCM,data:zAntt5+pFGhLMmC4T10F8ocj1+kYSnH0k+6H1Wk4WxiPTJvHQ/t+hZ+QzjmlKkiTvcaAoYWyrMJIB4U2MaIrrKAPN8vHtq1ihodBWlFifO+P6zfxCgQl3dbAMqXwini4ziKBZCw28x8dvFEozztIk7/a/TGNGj05fvGR9XUBHtVkt23ubXJqqZ4no6aGU8wgGizJ4OhQd/xTnM6m5ZvGPcv4vmtYXP0ts9eccx7nxDLmNgCHwIQSHEgR3JLUGKBcHZsc5jLQWdUPrhWXVmsAN1DKONrh5XC6rmN0/h1s5/qYCwx92XBzM6EssNBWKCtbmQ9ytSLAS90ygIs/Cr7Pu0qV9c0By+gimw054izWv4Ey2q/d/sJ9rblcmYFndgD1IOySFN8OUOO3Jd15DK6lojqrWtReVNcK/PbBPAvW+nNG3408YhQNC0eo5XMoK2RWiH8Npr39kykR/g/iDgR2cwrycbr/tnZp/OrPEGJx2axg1+om69joIfPBRL1Xe3KFKDmzTH8Cg0Wk78/6e71I3t8CheFfK8Ge6NfCti6nfUGd95Vp8ZS2LxQevloYDboV4mliOxK+JM4rRYHOCWZYAe8lZEqfdwjhYiYHfkKy2DJg,iv:YLyg9+YdggHV43oLs6Mq0unUVkjXJnPA0jwvFbjUMfs=,tag:jxIEVqENGHFrAANH4ZUFPg==,type:str]
|
||||
redis: ENC[AES256_GCM,data:b2pCrTVQqfIQEUv39imLrDFGu6KU1Dx/KD6Njcvt+x8=,iv:EHYRESYo8oTIiguay/SNbbuSVaok7szug5uiNNW0XEE=,tag:H+eQ+YvQDUOLHQJfX6qM2A==,type:str]
|
||||
act-runner: ENC[AES256_GCM,data:4y61oMlpGQf6GSJACyRwLQVOKJTg9jSkOW4sqYOWI1+D0ObIllFNyYiFQvuIf3Vdo/ymhReWH50vKS0mMAdH+3BgBlbf0tjMsUBNlnjNbnXQXS+M0gia9RVcFxSuA+QGKIED37s5OrBJWbWk2HZIX04=,iv:x6SUtA0n56AazXDdsdhym0R8e4vY4q/5zzZ5fddZXPg=,tag:PlAjXV/ndmhX3iQ3AM3Eiw==,type:str]
|
||||
collabora: ENC[AES256_GCM,data:3dcTIqpZtUEtEg/FG2zIyKfouLx/Yq0djOeNlS+78PltZipLYVT3jKhdVeb7,iv:wNKZF/DeKdJtZJRIXLj2AjT2cab5DQ1sr+wCYqgnNqg=,tag:AS0XIntMaGDv27TB7bRs5g==,type:str]
|
||||
delugevpn: ENC[AES256_GCM,data:Dn+2Bk2fS0ptFYLQ0CFc+bTdJbXbE0j/Dy8N/5Up7MBP2Bk8MF1MdaY3ZFMU7RgDT+zvrgOjmiwGu8JCu2svDkq0SlqmiTZ8,iv:kPDQhEatzK/6Q1qjQGTru+bpOKQWD5R/VKndZrNlLbU=,tag:c4hzv9Ipjv9bIkAyqrgiQw==,type:str]
|
||||
protonvpn-start-script: ENC[AES256_GCM,data:Hx/bYQg5qsx6z4G9yni+EzLCMKLAVxCHi+PYVl10PGOImfYlnG/puiSBRzu3zw68rcRg4fIz0yZgEjcYk3lu7kInx3GPrYCKSC0ej84W+j9vtEuYXC4WgXvqwTi/6P5428UhFP2FWiN5wV87pFp/LnEkVxi48uTI5r47ogkA06Gl37sQ7TZHvwGoQ106La7G3xYLDfvvdMVeASnDkIepDD6KduTI29C53l/XEoUcXQjOEtz5fmhRyVb7m0xs+DM2wRybEIUVALcIxjdWeiBoxxdVXxGD0+P277Gdy0uJPBt3iDg8dd3AAzwvTceyW+qe/8+RENySQ73cWblGyLzcqLRILCL/1YN82w==,iv:MgIyq8fltzPW9vww2gKvKQs84KkSMn4MFEvTMywTXvc=,tag:vjaH+8Pzpjx8y+7QJMP47Q==,type:str]
|
||||
notifiarr: ENC[AES256_GCM,data:FMAr9EAiSeoibw8Dh9O+JGDtSg026hO3JSZseATzNAaHs8m+0SUTIksek5wwvheF,iv:3WMVFwIonJyEEsBSqIusALWw5C31oVNvxIGntd102Ng=,tag:ty246WK5TasOj04/sA+Qaw==,type:str]
|
||||
bazarr: ENC[AES256_GCM,data:pCEIR+B33UPpjVNONzhNYVIHmMCCrdLED32c+jp+P7BLW60MK84TjPPTXi+touoRENaVgenYIv8qlsntL8Q=,iv:+ymGcWvrxedlrwT5ZF3dFrPOJ9DmiS5g05K0P097Lc0=,tag:qSr3i736oxpofnReUqfV2g==,type:str]
|
||||
prowlarr: ENC[AES256_GCM,data:CzAf8AHWVV5fQNCTtycv4NuT5wwiyj9+qWbJEv9Gd5zCBazHe/x0lB7LW6FbBgQQnEqC/xjHBLGA1/0WWrraxmN8hfVBrKRqaqiRilO12isrhlFf,iv:moyDOW2l44AT1tFzTbx0PgPWGRBfZG6GI5tMeScEa8g=,tag:5CVhEW4fU9kkKO4YBuGb4g==,type:str]
|
||||
radarr: ENC[AES256_GCM,data:a+Bt+rzWl6+Wheq6YpBQAxBpNmVJno4Eh11y3B6S5b9PEiXrmuVn+Druid1/y5P02yqhLA5vTvmPOHWmHERvN+L7WBiHL5ekZ36r+7wn,iv:102LIy/3hxdi6mk0BSIf+xl8k33yrpUGgI26WnJvCTQ=,tag:EmNE/JF3s7vgIZhHxru3Qw==,type:str]
|
||||
sonarr: ENC[AES256_GCM,data:+VU8F7/aFTgMiGs+dejnqa5LJn6BH/V218oc0i5dQL1RcDXOfPdh414Px7coQg9vNoCfc2ZxFaCyHS100ee5dPiyfRHrdHKA72p2zrVF,iv:BIG/u+Eo1RonarTZ2jEEg7xzfkNd3A2agdP4ljEK2eY=,tag:05FzDAp0G09DVXoeOQqJSA==,type:str]
|
||||
lidarr: ENC[AES256_GCM,data:90+0hbCF65gmYlvEkZnmtwUUnmFPv7AlzkBKr68UmF7i2yXz4L/OsEU2gFp/en7KOTNKioXXsdP887x9RzyAShO4TRzXkcb1um3DuKAo,iv:PvEsHuUlx8jxqNysoJIL5qb3kEaIbVHLFzRKS7TfL3g=,tag:s8IoyumcA0sB1c7drG5UNQ==,type:str]
|
||||
jellyseerr: ENC[AES256_GCM,data:G+l6VjgOCUk4c95o5rGo2QUJizw7Ph66cJMqwY4YxYXDjNzZ3+be4GmDQyOzTV/+,iv:gbowrAmLLwAe2poU+H4l8mmpVLfrgQRICWNmLNBUADY=,tag:HI6r+9dBdNLkJnE7eC8BAQ==,type:str]
|
||||
gluetun: ENC[AES256_GCM,data:sW3NXQ9GEGRA+j3+olbYNCVmLReHoRWC1oBhHcOCZ6zwx3jqvihEEZQgxgbEpPZ36NVVHTQgh9dX8wtYKUj1OzPBXuBh6V/AgTIMmrDurNUxul/jC2JVIZIka4RI58S1SKm/Ehz85Xx6lRR+VAKEyGg7R1I5+ciNjsZX8Sn/xHpGMAW4VCy/Iths0hdZWmkbJAG6XTsU1ZM01StI8s/ru/fphgcbLHws6mBMDEViKsATcGUcLMrjBT/xCeDXh24cyBCCba0rge4PtDekQwMT+aGS18cUjMF9LntCJJfdYp+gNDYrA6mprJrFJn6nrUjzT4RUSs0/+nQmuoyYvGE7RLM6lCGUZvkpQwaUJU2djzFBAHrpqqFF86ghbryt7vz6DZO2NaKpUsexAfDOsLFSXYacAVpZmm6aHsBm8woRzH67gGkxj51bIvzS1VWLjiEBxIJfChkWPqSbYh9I9mnKQdOxSSKCg0UlVCdWc5hB8NDDfZObJ2o5Ascw4eILR8lgjXm1XQxc08ZjEV9sqSlH8tr8IMSbWLAcVCHrATVfxsAG5bTbTOy/jyBGXjG7f7WQlN2RvHpHVrd8ppkYgPWJ0qbRsl6YJcvmEqeghLB+c0uYLXfoGMPOy8mb9SAfSLUMI/ODRy0=,iv:f46r4KA7IXFX3Xb5b6fxSPf+pkFjI126Ecie1S9Ggos=,tag:+Xzjz+FwIiC51J7dBcJBVQ==,type:str]
|
||||
gluetun-qbitvpn: ENC[AES256_GCM,data:Ep8mXRdxEetIjkV+a5/yqbANPKOcvp2+WQ1YeTGeqiTxiskuHHnWhP6gJN9yN/oBJfFdNKssbC6CbLPAvuL/aJRGveWzzrr9iHZ5vo/+3NzDWUReSDFVCgstbDB2eBomd2wHVJm+hbEUUtmO3iNBEarEInRqq+5+HWSt1d4/pW61WgJ8buzNhdFjmjEEBoOUYCj5mfRl2kP7UC+WUvJRvl5l2RMt8fEDv0M4Z7RQLEbJtWRVpbIhauWpTbrMDDC+VA9lcvbXpRsxntWz5Va8Ya4GWBkJfM0bJX30TyFo9Iy+XZhlQ4rzbWtfdO9Dx/TCc9i6NQt9FBUZGy+jTt/rTAulB5mgDaIq3AfNsWbfDUYZ47U6S0hXW7qAKbJ6/KjDe9dac9Jttx1ihJhXK0lt/uM6E76AxCqOtutmKryo6mScNOxkRjeYqYwwpasNWpnZ47ytctAy3ZXt5Y5xAl77dqAv+UnMczUy1pzg/oNWZrtN55tFmT2Om0FjHW5lDPQSEfxr/qKEvsBdaofw6xWpqkrU0lejSpvFSFsVPMr2M5ZOlCbGm/BGo5yQ5P+Z3u3xGoXumwvpP5KfxFpVAiJb40F6wCcEVbxX8wITprv3+5E=,iv:HlrRrq1667jpnApWFS+4G1rUBYxL1nVbiocy27cCiec=,tag:A2yIHv5yl05CVROglOqUqA==,type:str]
|
||||
gluetun-qbitperm: ENC[AES256_GCM,data:prNClo+0UtscI+TMwXRBdVflRWm5WLYT2kexJDoRTX37cMi9BzWYIRLwr86q/1xUIbcIraCGwHGd11CbOaY8xtfllW3oe4+YhNivAc87GJYz6mAhCiejvXJvyEySInbDq3jp0OJIPx8ROnzPCf7w5AZK9Jlfn3G3JwDYdYF/Cx8HYGv2V5JCFLLxdOG/KNnOOhSYW0IXoWVTcyT1/s62LrOMZ4KKza/yDKXn9XsWVt8+NSKXcc3HjuxhodZtCHptD0Ymqf04WOJNq100Xq++2BtiDsty86pMe45gC1vv2NiNdiBGnTQBhhkhllUM6R7t0zeZu9ROh50PGs1ozERs5WCktDsb4yfZlKtmkmPFwe5gSnHXu1t/n+8lounatfUnzKAk4nIs8KMnPxaEDPDY3DWoRXDgZU6a41/dxesOSt4wMISz0u45jKyNjZ4WFKjopLxYzekRZT9gnIu8qeLwT9G8PlqFjHrM6qxmAqMR0/Y6CHtuIHAoEWzepbZQ7MngOvUH6784r3DKTbAQetRIJHqJBsxRraOODTP9BPoPRb+hVYZm/g2264BtwmMFIs5pAtMpbjkRIMxKllUCBmf+/lL9+7TJ6EOJoIXiqOZNVQ4=,iv:pzhCttJCn2OGozuP/A0jKffdGBwkJnaH/QVnZEq7HJI=,tag:hGTxieZ6SI594FBwRjXAVQ==,type:str]
|
||||
minecraft: ENC[AES256_GCM,data:2k/m0ksnE92fACxQuBlOO72b19T7Nbnr58ezRddmKUVvePEgrdSnIsR3sh7PnmzwmG/ez0WTD+NKbtkQmRMDQ25vruA8gCf8Ig==,iv:X2SUidKTNAPZfbyiXFKprUbAhBxJcbF5bz+YTy4nuEA=,tag:AAvLXO888r9XvtnNfQgCpA==,type:str]
|
||||
foundry: ENC[AES256_GCM,data:5Z0FvVhJBzTwDPRN6c//caZokiTnkdqiLGFFuyen+tYsdjbQ3AXH5y7HfxKbxsJvU5uShOuIg0jVMvow2NYmzyYDDKBKPOz0bgXOmFq06wzCJubjyZmR/mDcWBBDzAFzaazpyW8=,iv:6wLS00zhX0tjJUe5uADAjzEshJP8QOkF2i4Aw+Y9RSk=,tag:sNr/exY1u3evYGcImyCUlA==,type:str]
|
||||
nextcloud: ENC[AES256_GCM,data:dm2Cha+CvFORgdcBvJAzzdOGcJ95vLJYTZcUJnjNp6HOQIIoJrDone1NOAYJh9rdWG/17/ntOmd+TysAj4AsD0dw/PatZmy3I+dcVghkt2XNTc7jD64QjctIHzR+om1joAbKemG1R3St7qDU68TWYxoxIfYZcJvg3ds/lJcYgFRh079UZ/IRlGVR6sWPEXyY+UUrwtk0Fr+y8UtwwWZiLp0akUbIV06huRGiAp/PeWETuPPuacl2++ayIgJFZkJjUl/a52RI1Q0nLG5iyK6QYpY1JSRJTOkiQQ4PB5GRdLCdoM5/ZXTQ6gGcoM5jXFllsTn+yRicNRucuBp7Z2achbk6eITCdjjdXVI7zM4YXpzVLu5fJckLAu07aEIGYCBT7ZXd7TRgfB68POwtwaJGBozg+nuhq8xEH04yi8jFODH6aFplIgJ+bbaP72zw+92lzZa33FEtOwKdtx+YUv0eLLDJs+8Z6Sn6RyN8prwIz1/9LuIMx39g4R7id9W2bV2MXqTU4nN8f0TXWqe+hnb5pDLBaZOBMkwbRka6Vptsi4dbL5Lnexa2DoIHZ2unyxZ+4SkRt9LH39j8fXf2w5JPFCSLstf7+Zu7xzRS0TTCug7k,iv:oOWcFdQJb/+KZKJmQChhJ5jOCcM3o+ojZSMyiRnO9n8=,tag:PWGQkwPe0juLgAdlKiWKpg==,type:str]
|
||||
redis: ENC[AES256_GCM,data:c+55cN6IpUNeKd+wC2zv3eunYjBsmZtXTczokqaxB2Q=,iv:M3pwNUlT9kUMv4JDE6bp/gub9CdBGxdApIvpOt3JpgE=,tag:3rPlV3U0AP9zAeF7xDouKw==,type:str]
|
||||
act-runner: ENC[AES256_GCM,data:gdrqXBBzdMW26MgNfP6P1c/m7pLANCXjcZLvVsxlWcgpAZd8IaO2FUqomL3xFI3UDPveQh0UvC3044ueoWhYJOq7ZmKJGvdf0ZrpP1MkXZKvjFjbTsuf/6/SYKhPqnP28HqznUWIVJYcRmP+A2oVeJY=,iv:/yOqJYDpxbqCm1whqcypp7Ba1Xlaebrv+h6lHr57Qa8=,tag:PzVqxP+QwQq69jqhmagj3w==,type:str]
|
||||
collabora: ENC[AES256_GCM,data:LPRkzPEv5qfzeWSDbf+L+0asfmiK5Mhj8jCdfVyvVQAaD75Cbo4qLD0Nc80z,iv:/l2vAyYYJChhv6T+JkHT4I74ZpdhvbVqxlDWIM4Y4bw=,tag:/+uzn1vtd1RnO9/lGiQAKA==,type:str]
|
||||
acme:
|
||||
bunny: ENC[AES256_GCM,data:uepw99ne34B4l9JAvCMiAXdFlwLjOYB1jCVgKNxb8vWDpMTTQZiwRDeJCcLk0zUSQcRujUwVaWKoPg0jvFYMONoC572NXRBh2F6HYO/avvnW5BkBN1Of,iv:w+84IMQSLkNxkKVJxNCOXhLRkh+DZ23aewAsLrvWn/s=,tag:DCPF9lgeNFZLcF48TSk5pg==,type:str]
|
||||
dnsimple: ENC[AES256_GCM,data:5Xs337NsbdBIF4oN33q2kgJUZuVRklGtdTm4LsimRyvh/8yZeVRjmDSyl3ZX79YOvVgrVZKTLUXsG4Tuqp06J64=,iv:2Ca2wxA59nqWuy0GtbRyWnPA5nQsM1UOXUfCUoY195E=,tag:zQ3AEfmy9FNMvq3rVRQ5rA==,type:str]
|
||||
bunny: ENC[AES256_GCM,data:P2yROVUga9mORcq8VR/l0i4/2Vod1zvlYq+ZJLLNKow0SpblkwQX/i1ucQYAOkTTRddN+3C+t0zj1rMWkdLoaLjEUJJi3VsSxi+chV2FFiVKFQGEcg24,iv:aQvGgGLsgRGoEmwTgZHR8Jm/MYxmGtVTT/fZKaTLeMs=,tag:m3ssF4O8qs4yxvMu6yUcjw==,type:str]
|
||||
dnsimple: ENC[AES256_GCM,data:37FKyBibFtXZgI4EduJQ0z8F+shBc5Q6YlLa3YkVPh9XuJVS20eybi75bfJxiozcZ9d+YRaqcbkBQCSdFOCotDU=,iv:oq3JjqbfAm2C4jcL1lvUb2EOmnwlR07vPoO8H0BmydQ=,tag:E3NO/jMElL6Q817666gIyg==,type:str]
|
||||
server-validation:
|
||||
webhook: ENC[AES256_GCM,data:3uISyfwu4wYlvc0XjtZAikrwh/zeL4akh9/7FE4gJHoxL8o/JcV9tVyyYMNzs0d2jqndLeuS5i3KgEzwZiMy8qtDTD3E1rPvyBxKvwRj4DdnH2BcGgXtpexSk9Tgkc0BoTTQ4M6cYSxUR8i7mqk9AEiDYPgb1FtP2n0Y6bm5IvusjbQGtjImHBx4r77e,iv:WyRLzE5i+HG2jgp+CI2SRc3am3WsLDOoCCvUoIb8Jpc=,tag:kMdZAevE6qL8bpeifmcqdw==,type:str]
|
||||
webhook: ENC[AES256_GCM,data:Lwqy4UhyFutpXjai7EJPKp8MDlI+ayDna4T8jluvC6qkeJ7o1UaaDCOsgLy4Fw7LC77tXhJtkcmep9w37JaiHp2CoDOfy2iAaq8o9CCSi/a0zqMJx+HdZYZNemvmpc6E/be0K+JDrFZLbjr3unSpCidQ3whccC6XyY013R12swN3bFZIu1gtzXCgUZ4U,iv:pVbrRwH3ziu4+R5BfimPV7N71QmyerJEc9M5K4eofOc=,tag:zNrCXrIioQWPEPVz/wMDpQ==,type:str]
|
||||
typhon:
|
||||
hashedPassword: ENC[AES256_GCM,data:Uy/pYazlkvQycWTrYKv1/566EgRIqek/pfjaJnifIqA1GUbTASBOENINnDx4ffJRwXoBzbIe+wSsalxwP9r7k9QERKaWpzr3Gppb+iI3Bpy1scy+R2sAjaR5fU0MKI+USppJSh2ARP1ZMA==,iv:iIGaAp9jX8dUAjiDBjrz/GDaD8x4/VDXJ9F2DN4cgmA=,tag:KrsqkjTdMJjLtsdqNiAsnw==,type:str]
|
||||
garage:
|
||||
rpc-secret: ENC[AES256_GCM,data:EOc5kBoZTPBFDyuvJ+iOm50htGggmgRfDmGTgFlyDgVGUYEtGVimQxlRipxDIexVnbG876u9JHtEXKAgiEK44w==,iv:eSUZ/Db7NZxiaIt9lRSbhKmX2i492o3e7lmDq4NeDXw=,tag:/QGSu3g6MIpaI3Y1uIE8pQ==,type:str]
|
||||
admin-token: ENC[AES256_GCM,data:BvajakvOkU25kLTBmfAWJHkIPvfbgxJsV44D2jLE9w9+n175dnvPV79198c=,iv:oOvtberNXzMhzKXGHPPizQjIozsor7wnM3XiwVgLiBo=,tag:YlGJf1sHk/s7bjRaOZH0iA==,type:str]
|
||||
kanidm:
|
||||
admin_password: ENC[AES256_GCM,data:5xIjsjn9t0sAXLq+qi8LSyPVde99VV519fw+kZVX,iv:n6VjDeEaV6+Ai0zr52Dw1E5OD5DNzK826bFQtFhe3hg=,tag:MKD+PuczuDoH/PKN4cY+xA==,type:str]
|
||||
gitea_oidc_client_secret: ENC[AES256_GCM,data:cr5HGHOwAvJ6LLBPWmfuRxltzJJ8t28vxnzB9sPKC+VwrYZ97ZJjfqtfY+7KyJNfI1knwrNKYNQu+bOqO8lhVQ==,iv:KT/1eiI4VnR8RG/pLCUhypVRctoLdM6hQunzpE8P05c=,tag:NV54JL5A3S8VVkbY7BIbhQ==,type:str]
|
||||
restic:
|
||||
kanidm_password: ENC[AES256_GCM,data:Zz+SOj9RBgVba8kNgCxhs5z7iuUPcYdE/a/FLJuJw46rquX290NvyH+4eDU=,iv:em9S1dzQ6Kgc4pZglLZlLPzSvAfey3Y6ZQhzNYIt2Ew=,tag:umN1oi4Fm1L/tFvFpt/RZA==,type:str]
|
||||
kanidm_rest_password: ENC[AES256_GCM,data:alv88Ebr2BmfXjJ+cZfRgRXBPezCrFBYR+DpxOnjAo7hjP2V0sB+B7WTJhtt8z61lKHUoZDS1brxrDa3T8i30JFUjATTDeGs7FY+D8Wn8uIlj4YPQy4gIA==,iv:kxI8npRdyCeb/IbTUKXdF3lsQoPmQBP8S+di7bDKByc=,tag:/2Qc19/hcxiw62tDAsoW9A==,type:str]
|
||||
hashedPassword: ENC[AES256_GCM,data:gMyY8gxUn3HzycQRu2cminqRFWghqWcjzZzTxAQZ5PJqn604iSwDiVdr7icHB7drJfCAfsE7L4oKRJgxaIAE32043oOkb2T7DDH8y2jxMzqmZCfbvrfMI4wdfRTHGqzxb6X/aZ5ai2rr1Q==,iv:4EsTo/lQld0o9iktDX9gobMlPUCitx1i9wn8EL16sIs=,tag:FgVDRHk2glDwpC/mprrPqQ==,type:str]
|
||||
sops:
|
||||
kms: []
|
||||
gcp_kms: []
|
||||
azure_kv: []
|
||||
hc_vault: []
|
||||
age:
|
||||
- recipient: age1qw5k8h72k3fjg5gmlxx8q8gwlc2k6n6u08d8hdzpm2pk9r0fnfxsmw33nh
|
||||
enc: |
|
||||
-----BEGIN AGE ENCRYPTED FILE-----
|
||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB5VkhzZ3cyNkFUYnppYnFa
|
||||
NWtLNTJXSU5BS1V5d2xySlBxbjhtRzloOGxnClBVVjhINnNsWkxod2pNL3BIU0VE
|
||||
b2F4NVpZK045NmphNzlwQkozMmU5S1UKLS0tIDlHcUEwMmVQSjZIVDFiVXNCRXZL
|
||||
NExqdmo4Q0ZNVzUyWkZFVUl6NFdETk0Kd8zrbv2zC610vfDMCejxYv1UCvIvsOqM
|
||||
bmvQ/wG/X1HqE4B8Yt6/5wNsM2/baLuXIBpGYAh7mgUaOQEkptZwMw==
|
||||
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBFcWo4V1QyZS9HbHNwT3Jl
|
||||
ZktNR2gwZ3BiWnYwZHpLUzR2YTlmN0ZUeEhnCkF6ekdkN0U2VGM1RFVhdTM0RW5u
|
||||
bWdreGZrU0JwNDY1TnR2S1M3OTdKaWcKLS0tIEVBekE2eU8rcEhpVkhhWmxPc3JN
|
||||
cXNZWmZqd0R0SmhINExscHBKWmxvblUKEFEQvt/zQFARba4S8vHz/1SoKdKg69At
|
||||
LZ58XQGOmlGbBhPr7EzYQ2XSY4flWbnnD174cmCR8DNFm15DsNA5fw==
|
||||
-----END AGE ENCRYPTED FILE-----
|
||||
lastmodified: "2026-05-02T16:58:48Z"
|
||||
mac: ENC[AES256_GCM,data:ca+UYNGmlLgu5mLfES/ZUf+XyuRtwk8GKeeu/UtbgNGqSoGXlsTmPdiGfKhO+qUFmpTv7ZAs/zbXD7C6rScNjudtlXB2lNAlLFWnudCjD4cDxokhOWoWYf+1ezw/IubAeqbW3lHUGUeK/hpVp5Suk/93fEVRUpnZM4r9/WizNfE=,iv:BipesRJv/P/iPEOW7bTxv42ABwo9efvwFgBvEX+TokA=,tag:uHxfZML61MYll77pYUuMrg==,type:str]
|
||||
lastmodified: "2025-03-26T05:47:58Z"
|
||||
mac: ENC[AES256_GCM,data:ZP9HglMmn9FDv6/vtQAxz/qP76QniPqM6bzMQVvVU/OhDmjuneGKZY7d1Es7LC9o5qmJ+T3Dh3/bkmuRdgdnd2TO6iuvM++DEPxwnoHis+0lbMxv5a6ibzvoXXm2CrL4HPETqLKbLahGJRmDNgnkCEWxAs16zrqe5kgDpD53R5c=,iv:DcCXNGyb41ToV9uSnrnrl0dWiw2pvykM8z86Yk814P4=,tag:T9PFl48qABwBSy7vIhSmLA==,type:str]
|
||||
pgp:
|
||||
- created_at: "2026-05-02T16:58:48Z"
|
||||
- created_at: "2024-11-28T18:56:39Z"
|
||||
enc: |-
|
||||
-----BEGIN PGP MESSAGE-----
|
||||
|
||||
hF4DQWNzDMjrP2ISAQdAPCymqFWzYGcr+bPFS6IusIV2LHdy5g2ROGtXCoTmah4w
|
||||
AezxMLS7d5zT9p277Vfoqwa1KFvrhoXbb3ORKAl4ONmACZpWOO3TobSkP0FvyEqi
|
||||
0l4BrPiYgcK3Lz01cotP4KwfW1I/w7uW4OpxF0gUBiQe8pvxMgcO77S3pA3WdA4U
|
||||
MmbwWW3dxGaora+gCSZjyx+y7vy5nDieUSjSskM1lYYsZQ52qRjiPVENzorEHDLD
|
||||
=3fFC
|
||||
hF4DQWNzDMjrP2ISAQdAPOYlp/3ZJrcXZbu5+XI+BHNzMbzw7+YhTYOfNgujU1gw
|
||||
QfJDWAhiMd8cZF5PpX+RdN+Zrk5CCMgZH4hotv9gjf1oxitWuF2hv14k/RlAx8kr
|
||||
1GgBCQIQB+LOoKIo7AHeucdV9NsM6H4Akv+Bzy8boarA4BGcyvgRWhS2u8zOQJc5
|
||||
RKfRonTO51yjlKm0MEspvwrClO+aIuBaNNemuHdk4yhDUnNKVBFyLLOuqXbsFd+G
|
||||
aSTmqvI3a/T5Cw==
|
||||
=ph+p
|
||||
-----END PGP MESSAGE-----
|
||||
fp: 5EFFB75F7C9B74EAA5C4637547940175096C1330
|
||||
unencrypted_suffix: _unencrypted
|
||||
version: 3.12.2
|
||||
version: 3.9.4
|
||||
|
||||
@@ -17,9 +17,4 @@ rec {
|
||||
primary_nextcloud = "${zfs_primary}/nextcloud";
|
||||
primary_redis = "${zfs_primary}/redis";
|
||||
primary_torr = "${zfs_primary}/torr";
|
||||
primary_plex = "${zfs_primary}/plex";
|
||||
primary_plex_storage = "${zfs_primary}/plex_storage";
|
||||
primary_ollama = "${zfs_primary}/ollama";
|
||||
primary_mattermost = "${zfs_primary}/mattermost";
|
||||
primary_kanidm = "${zfs_primary}/kanidm";
|
||||
}
|
||||
|
||||
@@ -12,107 +12,6 @@
|
||||
options zfs zfs_arc_min=82463372083
|
||||
options zfs zfs_arc_max=192414534860
|
||||
'';
|
||||
|
||||
initrd.systemd.services = {
|
||||
zfs-import-zfs-primary = {
|
||||
description = "Import ZFS-primary pool in initrd";
|
||||
wantedBy = [ "initrd-root-fs.target" ];
|
||||
wants = [ "systemd-udev-settle.service" ];
|
||||
after = [ "systemd-udev-settle.service" ];
|
||||
before = [
|
||||
"sysroot.mount"
|
||||
"initrd-root-fs.target"
|
||||
];
|
||||
unitConfig.DefaultDependencies = "no";
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
};
|
||||
path = with pkgs; [
|
||||
coreutils
|
||||
gawk
|
||||
zfs
|
||||
];
|
||||
script = ''
|
||||
ZFS_FORCE="-f"
|
||||
msg=""
|
||||
|
||||
for o in $(cat /proc/cmdline); do
|
||||
case "$o" in
|
||||
zfs_force|zfs_force=1|zfs_force=y)
|
||||
ZFS_FORCE="-f"
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
pool_ready() {
|
||||
pool="$1"
|
||||
state="$(zpool import -d /dev/disk/by-id/ 2>/dev/null | awk '/pool: '"$pool"'/ { found = 1 }; /state:/ { if (found == 1) { print $2; exit } }; END { if (found == 0) { print "MISSING" } }')"
|
||||
if [ "$state" = "ONLINE" ]; then
|
||||
return 0
|
||||
fi
|
||||
echo "Pool $pool in state $state, waiting"
|
||||
return 1
|
||||
}
|
||||
|
||||
pool_imported() {
|
||||
pool="$1"
|
||||
zpool list "$pool" >/dev/null 2>/dev/null
|
||||
}
|
||||
|
||||
pool_import() {
|
||||
pool="$1"
|
||||
zpool import -d /dev/disk/by-id/ -N $ZFS_FORCE "$pool"
|
||||
}
|
||||
|
||||
echo -n 'importing root ZFS pool "ZFS-primary"...'
|
||||
# Loop until import succeeds, because by-id devices may not be discovered yet.
|
||||
if ! pool_imported "ZFS-primary"; then
|
||||
trial=1
|
||||
while [ "$trial" -le 60 ]; do
|
||||
if pool_ready "ZFS-primary" >/dev/null && msg="$(pool_import "ZFS-primary" 2>&1)"; then
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
echo -n .
|
||||
trial=$((trial + 1))
|
||||
done
|
||||
echo
|
||||
if [ -n "$msg" ]; then
|
||||
echo "$msg"
|
||||
fi
|
||||
pool_imported "ZFS-primary" || pool_import "ZFS-primary" # Try one last time, e.g. to import a degraded pool.
|
||||
fi
|
||||
'';
|
||||
};
|
||||
|
||||
zfs-load-nix-key = {
|
||||
description = "Load ZFS key for ZFS-primary/nix in initrd";
|
||||
wantedBy = [ "initrd-fs.target" ];
|
||||
requires = [
|
||||
"sysroot.mount"
|
||||
"zfs-import-zfs-primary.service"
|
||||
];
|
||||
after = [
|
||||
"sysroot.mount"
|
||||
"zfs-import-zfs-primary.service"
|
||||
];
|
||||
before = [
|
||||
"initrd-fs.target"
|
||||
"sysroot-nix.mount"
|
||||
];
|
||||
unitConfig.DefaultDependencies = "no";
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
};
|
||||
path = with pkgs; [ zfs ];
|
||||
script = ''
|
||||
key_file="/sysroot/crypto/keys/zfs-nix-store-key"
|
||||
zfs load-key -L "file://$key_file" "ZFS-primary/nix"
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
services = {
|
||||
@@ -132,7 +31,6 @@
|
||||
"ZFS-primary/docker".useTemplate = [ "production" ];
|
||||
"ZFS-primary/hydra".useTemplate = [ "nix-prod" ];
|
||||
"ZFS-primary/nextcloud".useTemplate = [ "production" ];
|
||||
"ZFS-primary/mattermost".useTemplate = [ "production" ];
|
||||
# all docker containers should have a bind mount if they expect lasting zfs snapshots
|
||||
"ZFS-primary/vardocker".useTemplate = [ "nix-prod" ];
|
||||
"ZFS-primary/minio".useTemplate = [ "nix-prod" ];
|
||||
@@ -151,7 +49,7 @@
|
||||
daily = 30;
|
||||
weekly = 0;
|
||||
monthly = 6;
|
||||
yearly = 2;
|
||||
yearly = 3;
|
||||
autosnap = true;
|
||||
autoprune = true;
|
||||
};
|
||||
@@ -183,4 +81,69 @@
|
||||
};
|
||||
};
|
||||
|
||||
# hack to make sure pool is imported before keys are loaded,
|
||||
# and also keys are imported before things get mounted
|
||||
# note to self: move zfs encryption over to luks lol
|
||||
boot.initrd.postResumeCommands = ''
|
||||
ZFS_FORCE="-f"
|
||||
|
||||
for o in $(cat /proc/cmdline); do
|
||||
case $o in
|
||||
zfs_force|zfs_force=1|zfs_force=y)
|
||||
ZFS_FORCE="-f"
|
||||
;;
|
||||
esac
|
||||
done
|
||||
poolReady() {
|
||||
pool="$1"
|
||||
state="$("zpool" import -d "/dev/disk/by-id/" 2>/dev/null | "awk" "/pool: $pool/ { found = 1 }; /state:/ { if (found == 1) { print \$2; exit } }; END { if (found == 0) { print \"MISSING\" } }")"
|
||||
if [[ "$state" = "ONLINE" ]]; then
|
||||
return 0
|
||||
else
|
||||
echo "Pool $pool in state $state, waiting"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
poolImported() {
|
||||
pool="$1"
|
||||
"zpool" list "$pool" >/dev/null 2>/dev/null
|
||||
}
|
||||
poolImport() {
|
||||
pool="$1"
|
||||
"zpool" import -d "/dev/disk/by-id/" -N $ZFS_FORCE "$pool"
|
||||
}
|
||||
|
||||
echo -n "importing root ZFS pool \"ZFS-primary\"..."
|
||||
# Loop across the import until it succeeds, because the devices needed may not be discovered yet.
|
||||
if ! poolImported "ZFS-primary"; then
|
||||
for trial in `seq 1 60`; do
|
||||
poolReady "ZFS-primary" > /dev/null && msg="$(poolImport "ZFS-primary" 2>&1)" && break
|
||||
sleep 1
|
||||
echo -n .
|
||||
done
|
||||
echo
|
||||
if [[ -n "$msg" ]]; then
|
||||
echo "$msg";
|
||||
fi
|
||||
poolImported "ZFS-primary" || poolImport "ZFS-primary" # Try one last time, e.g. to import a degraded pool.
|
||||
fi
|
||||
|
||||
# let root mount and everything, then manually unlock stuff
|
||||
load_zfs_nix() {
|
||||
local device="/dev/disk/by-uuid/8bfaa32b-09dd-45c8-831e-05e80be82f9e"
|
||||
local mountPoint="/"
|
||||
local options="x-initrd.mount,noatime,nodiratime"
|
||||
local fsType="ext4"
|
||||
|
||||
echo "manually mounting key location, then unmounting"
|
||||
udevadm settle
|
||||
|
||||
mountFS "$device" "$(escapeFstab "$mountPoint")" "$(escapeFstab "$options")" "$fsType"
|
||||
|
||||
zfs load-key -L "file://$targetRoot/crypto/keys/zfs-nix-store-key" "ZFS-primary/nix"
|
||||
umount "$targetRoot/"
|
||||
}
|
||||
|
||||
load_zfs_nix
|
||||
'';
|
||||
}
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
{ pkgs, ... }:
|
||||
|
||||
{
|
||||
# rtkit is optional but recommended
|
||||
security.rtkit.enable = true;
|
||||
services = {
|
||||
pipewire = {
|
||||
enable = true;
|
||||
alsa.enable = true;
|
||||
alsa.support32Bit = true;
|
||||
pulse.enable = true;
|
||||
# If you want to use JACK applications, uncomment this
|
||||
#jack.enable = true;
|
||||
};
|
||||
|
||||
pipewire.wireplumber.configPackages = [
|
||||
(pkgs.writeTextDir "share/wireplumber/bluetooth.lua.d/51-bluez-config.lua" ''
|
||||
bluez_monitor.properties = {
|
||||
["bluez5.enable-sbc-xq"] = true,
|
||||
["bluez5.enable-msbc"] = true,
|
||||
["bluez5.enable-hw-volume"] = true,
|
||||
["bluez5.headset-roles"] = "[ hsp_hs hsp_ag hfp_hf hfp_ag ]"
|
||||
}
|
||||
'')
|
||||
];
|
||||
blueman.enable = true;
|
||||
};
|
||||
|
||||
hardware.bluetooth.enable = true;
|
||||
hardware.bluetooth.powerOnBoot = true;
|
||||
|
||||
environment.systemPackages = with pkgs; [ pavucontrol ];
|
||||
|
||||
programs.noisetorch.enable = true;
|
||||
}
|
||||
@@ -1,49 +0,0 @@
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}:
|
||||
{
|
||||
imports = [
|
||||
./audio.nix
|
||||
./desktop.nix
|
||||
./fonts.nix
|
||||
./graphics.nix
|
||||
./polkit.nix
|
||||
./programs.nix
|
||||
./steam.nix
|
||||
./stylix.nix
|
||||
];
|
||||
|
||||
time.timeZone = "America/New_York";
|
||||
|
||||
# temp workaround for building while in nixos-enter
|
||||
#services.logrotate.checkConfig = false;
|
||||
|
||||
networking = {
|
||||
hostId = "9f2e1ff9";
|
||||
firewall.enable = true;
|
||||
useNetworkd = true;
|
||||
};
|
||||
|
||||
boot = {
|
||||
kernelPackages = lib.mkForce pkgs.linuxPackages_xanmod;
|
||||
useSystemdBoot = true;
|
||||
default = true;
|
||||
};
|
||||
|
||||
sops.age.sshKeyPaths = [ "/etc/ssh/ssh_host_ed25519_key" ];
|
||||
|
||||
services = {
|
||||
flatpak.enable = true;
|
||||
gvfs.enable = true;
|
||||
openssh.enable = lib.mkForce false;
|
||||
};
|
||||
|
||||
system.stateVersion = "25.11";
|
||||
|
||||
sops = {
|
||||
defaultSopsFile = ./secrets.yaml;
|
||||
};
|
||||
}
|
||||
@@ -1,23 +0,0 @@
|
||||
{ inputs, ... }:
|
||||
{
|
||||
system = "x86_64-linux";
|
||||
home = true;
|
||||
sops = true;
|
||||
server = false;
|
||||
users = [ "alice" ];
|
||||
modules = [
|
||||
inputs.nixos-hardware.nixosModules.common-pc
|
||||
inputs.nixos-hardware.nixosModules.common-pc-ssd
|
||||
inputs.nixos-hardware.nixosModules.common-gpu-nvidia-nonprime
|
||||
inputs.nixos-hardware.nixosModules.common-cpu-amd
|
||||
inputs.nixos-hardware.nixosModules.common-cpu-amd-pstate
|
||||
inputs.nixos-hardware.nixosModules.common-cpu-amd-zenpower
|
||||
inputs.stylix.nixosModules.stylix
|
||||
{
|
||||
environment.systemPackages = [
|
||||
inputs.wired-notify.packages.x86_64-linux.default
|
||||
inputs.hyprland-contrib.packages.x86_64-linux.grimblast
|
||||
];
|
||||
}
|
||||
];
|
||||
}
|
||||
@@ -1,38 +0,0 @@
|
||||
{ pkgs, ... }:
|
||||
|
||||
{
|
||||
# installs hyprland, and its dependencies
|
||||
|
||||
programs = {
|
||||
hyprland = {
|
||||
enable = true;
|
||||
xwayland.enable = true;
|
||||
withUWSM = true;
|
||||
};
|
||||
hyprlock.enable = true;
|
||||
ydotool.enable = true;
|
||||
};
|
||||
# Optional, hint electron apps to use wayland:
|
||||
environment.sessionVariables.NIXOS_OZONE_WL = "1";
|
||||
|
||||
services = {
|
||||
displayManager.gdm = {
|
||||
enable = true;
|
||||
wayland = true;
|
||||
};
|
||||
|
||||
dbus = {
|
||||
enable = true;
|
||||
implementation = "broker";
|
||||
};
|
||||
};
|
||||
|
||||
powerManagement = {
|
||||
enable = true;
|
||||
};
|
||||
|
||||
environment.systemPackages = with pkgs; [
|
||||
libsForQt5.qt5.qtwayland
|
||||
qt6.qtwayland
|
||||
];
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
fonts = {
|
||||
fontconfig.enable = true;
|
||||
enableDefaultPackages = true;
|
||||
packages = with pkgs.nerd-fonts; [
|
||||
fira-code
|
||||
droid-sans-mono
|
||||
hack
|
||||
dejavu-sans-mono
|
||||
noto
|
||||
open-dyslexic
|
||||
];
|
||||
};
|
||||
}
|
||||
@@ -1,40 +0,0 @@
|
||||
{ config, pkgs, ... }:
|
||||
|
||||
{
|
||||
hardware.graphics = {
|
||||
## radv: an open-source Vulkan driver from freedesktop
|
||||
enable = true;
|
||||
enable32Bit = true;
|
||||
|
||||
};
|
||||
hardware.nvidia = {
|
||||
|
||||
# Modesetting is required.
|
||||
modesetting.enable = true;
|
||||
|
||||
# Nvidia power management. Experimental, and can cause sleep/suspend to fail.
|
||||
# Enable this if you have graphical corruption issues or application crashes after waking
|
||||
# up from sleep. This fixes it by saving the entire VRAM memory to /tmp/ instead
|
||||
# of just the bare essentials.
|
||||
powerManagement.enable = false;
|
||||
|
||||
# Fine-grained power management. Turns off GPU when not in use.
|
||||
# Experimental and only works on modern Nvidia GPUs (Turing or newer).
|
||||
powerManagement.finegrained = false;
|
||||
|
||||
# Use the NVidia open source kernel module (not to be confused with the
|
||||
# independent third-party "nouveau" open source driver).
|
||||
# Support is limited to the Turing and later architectures. Full list of
|
||||
# supported GPUs is at:
|
||||
# https://github.com/NVIDIA/open-gpu-kernel-modules#compatible-gpus
|
||||
# Only available from driver 515.43.04+
|
||||
open = false;
|
||||
|
||||
# Enable the Nvidia settings menu,
|
||||
# accessible via `nvidia-settings`.
|
||||
nvidiaSettings = true;
|
||||
|
||||
# Optionally, you may need to select the appropriate driver version for your specific GPU.
|
||||
package = config.boot.kernelPackages.nvidiaPackages.stable;
|
||||
};
|
||||
}
|
||||
@@ -1,96 +0,0 @@
|
||||
# Do not modify this file! It was generated by ‘nixos-generate-config’
|
||||
# and may be overwritten by future invocations. Please make changes
|
||||
# to /etc/nixos/configuration.nix instead.
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
modulesPath,
|
||||
...
|
||||
}:
|
||||
|
||||
{
|
||||
imports = [ (modulesPath + "/installer/scan/not-detected.nix") ];
|
||||
|
||||
boot = {
|
||||
initrd.availableKernelModules = [
|
||||
"nvme"
|
||||
"xhci_pci"
|
||||
"thunderbolt"
|
||||
"usb_storage"
|
||||
"usbhid"
|
||||
"sd_mod"
|
||||
"ip_vs"
|
||||
"ip_vs_rr"
|
||||
"nf_conntrack"
|
||||
];
|
||||
initrd.kernelModules = [
|
||||
"dm-snapshot"
|
||||
"r8152"
|
||||
];
|
||||
kernelModules = [ "kvm-amd" ];
|
||||
extraModulePackages = [ ];
|
||||
kernelParams = [
|
||||
"amdgpu.sg_display=0"
|
||||
"amdgpu.graphics_sg=0"
|
||||
"amdgpu.abmlevel=3"
|
||||
];
|
||||
};
|
||||
|
||||
fileSystems = {
|
||||
|
||||
"/" = lib.mkDefault {
|
||||
device = "/dev/disk/by-uuid/f3c11d62-37f4-495e-b668-1ff49e0d3a47";
|
||||
fsType = "ext4";
|
||||
options = [
|
||||
"noatime"
|
||||
"nodiratime"
|
||||
];
|
||||
};
|
||||
|
||||
"/home" = {
|
||||
device = "/dev/disk/by-uuid/720af942-464c-4c1e-be41-0438936264f0";
|
||||
fsType = "ext4";
|
||||
options = [
|
||||
"noatime"
|
||||
"nodiratime"
|
||||
];
|
||||
};
|
||||
|
||||
"/nix" = {
|
||||
device = "/dev/disk/by-uuid/035f23f8-d895-4b0c-bcf5-45885a5dbbd9";
|
||||
fsType = "ext4";
|
||||
options = [
|
||||
"noatime"
|
||||
"nodiratime"
|
||||
];
|
||||
};
|
||||
|
||||
"/boot" = {
|
||||
device = "/dev/disk/by-uuid/5AD7-6005";
|
||||
fsType = "vfat";
|
||||
options = [
|
||||
"noatime"
|
||||
"nodiratime"
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
swapDevices = [ { device = "/dev/disk/by-uuid/3ec276b5-9088-45b0-9cb4-60812f2d1a73"; } ];
|
||||
|
||||
boot.initrd.luks.devices = {
|
||||
"nixos-pv" = {
|
||||
device = "/dev/disk/by-uuid/12a7f660-bbcc-4066-81d0-e66005ee534a";
|
||||
preLVM = true;
|
||||
allowDiscards = true;
|
||||
};
|
||||
};
|
||||
|
||||
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
|
||||
# (the default) this is the recommended approach. When using systemd-networkd it's
|
||||
# still possible to use this option, but it's recommended to use it in conjunction
|
||||
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
|
||||
networking.interfaces.wlp4s0.useDHCP = lib.mkDefault true;
|
||||
|
||||
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
|
||||
}
|
||||
@@ -1,22 +0,0 @@
|
||||
{ pkgs, ... }:
|
||||
|
||||
{
|
||||
security.polkit.enable = true;
|
||||
environment.systemPackages = with pkgs; [ polkit_gnome ];
|
||||
|
||||
systemd = {
|
||||
user.services.polkit-gnome-authentication-agent-1 = {
|
||||
description = "polkit-gnome-authentication-agent-1";
|
||||
wantedBy = [ "graphical-session.target" ];
|
||||
wants = [ "graphical-session.target" ];
|
||||
after = [ "graphical-session.target" ];
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
ExecStart = "${pkgs.polkit_gnome}/libexec/polkit-gnome-authentication-agent-1";
|
||||
Restart = "on-failure";
|
||||
RestartSec = 1;
|
||||
TimeoutStopSec = 10;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user