Compare commits
396 Commits
c85beff7ed
...
pia-vpn-v2
| Author | SHA1 | Date | |
|---|---|---|---|
| 3b71f4b1fd | |||
| dc3c2194ab | |||
| 39009cbc18 | |||
| 3365a1652c | |||
| 6466406975 | |||
| 4eb0401263 | |||
| f4a4edf478 | |||
| 1ac3f05e3e | |||
| c1030c1dfe | |||
| 52469693e3 | |||
| ffce43b8d0 | |||
| 96a6007693 | |||
| 32cb438db9 | |||
| 0368661e24 | |||
| 12209b69b8 | |||
| 3bc41dfdb3 | |||
| 1cbbe64707 | |||
| 6191e4060f | |||
| a0fcacdcf9 | |||
| 684851d641 | |||
| 4cf50b5fb1 | |||
| 288a2841aa | |||
| 0589ca5748 | |||
| a4c5cb589a | |||
| a697ea10ad | |||
| 200d5a5d22 | |||
| 339eac52c6 | |||
| bab4b3ff8e | |||
| 54ab576914 | |||
| c84c0716ce | |||
| a921f40644 | |||
|
|
a6c17164fa | ||
| 9df8390f1f | |||
| 156f0183bd | |||
| 8b92e51ef7 | |||
| 7798872bbf | |||
| cf41285cb8 | |||
| 5a0a525f64 | |||
| 9154595910 | |||
| 1b92363b08 | |||
| 136f024cf0 | |||
| 3d08a3e9bc | |||
| 99ef62d31a | |||
| 298f473ceb | |||
| 546bd08f83 | |||
| 10f3e3a7bf | |||
| d44bd12e17 | |||
| 60e89dfc90 | |||
| 869b6af7f7 | |||
| d6a0e8ec49 | |||
| 8293a7dc2a | |||
| cbf2aedcad | |||
| 69fc3ad837 | |||
| 6041d4d09f | |||
| cf71b74d6f | |||
| 5178ea6835 | |||
| 87db330e5b | |||
| 70f0064d7b | |||
| cef8456332 | |||
| c22855175a | |||
| 0a06e3c1ae | |||
| eb416ae409 | |||
| ae2a62515a | |||
| 2810ba1412 | |||
| e42e30d3cc | |||
| 83b5d3b8c2 | |||
| 0b604fd99c | |||
| 51fbae98c5 | |||
| d8eff26864 | |||
| 5f7335c2a0 | |||
| bab2df5d7e | |||
| adc04d1bc7 | |||
| da9a8f8c03 | |||
| 415cbca33e | |||
| 51272a172b | |||
| f053c677e8 | |||
| c130ce6edd | |||
| 4718326cb6 | |||
| 61698aa7e2 | |||
| e0af023ac9 | |||
| c0088553ff | |||
| 577736fcb2 | |||
| cf087b0e39 | |||
| cb1c4752ec | |||
| b77fb54dc6 | |||
| 3d6a759827 | |||
| 0c455baebd | |||
| b58df0632a | |||
| 4956e41285 | |||
| ead6653de1 | |||
| dd4a5729d4 | |||
| f248c129c8 | |||
| c011faab18 | |||
| a5d0b3b748 | |||
| ed3bee2e4e | |||
| dbde2a40f2 | |||
| 6c69d82156 | |||
| 01b01f06b4 | |||
| cf560d4e53 | |||
| 8cf4957e15 | |||
| dc02438a63 | |||
| 948984af2d | |||
| be23526c2c | |||
| e234577268 | |||
| 82b67ed566 | |||
| 53c2e2222c | |||
| 846da159d0 | |||
| a45125421e | |||
| f4e40955c8 | |||
| af9e462b27 | |||
| 2faea9d380 | |||
| 8571922796 | |||
| 131d5e9313 | |||
| fe0ce3a245 | |||
| 7b26cfb4eb | |||
| 1c9fa418b3 | |||
| 8c4dc9cb74 | |||
| 1f9fbd87ac | |||
| 23c8076e4d | |||
| 75ae399b5a | |||
| 87ddad27a4 | |||
| 8dd2a00123 | |||
| 944a783ff2 | |||
| c2cb43fd2c | |||
| 02b2fb6309 | |||
| b43660aaef | |||
| 567d755850 | |||
| adc9b9f2b7 | |||
| 9181e3bfa3 | |||
| 9845270512 | |||
| b3b3044690 | |||
| fb1970c316 | |||
| 34f1edf3b3 | |||
| 823f0a6ef2 | |||
| 00d2ccc684 | |||
| b2acaff783 | |||
| c51f4ad65b | |||
| eb6a50664c | |||
| 89ce0f7fc0 | |||
| 8ff552818b | |||
| 020689d987 | |||
| 9109e356bd | |||
| c7d9e84f73 | |||
| 5b666a0565 | |||
| 6bc11767ca | |||
| bdd2d9bef9 | |||
| 5acc8b3fca | |||
| 1e25d8bb71 | |||
| ac1cf1c531 | |||
| 02357198bc | |||
| 89b49aafc0 | |||
| e56271b2c3 | |||
| f9ef5e4b89 | |||
| e516bd87b5 | |||
| 7c9c657bd0 | |||
| dff7d65456 | |||
| d269d2e5a0 | |||
| 2527b614e9 | |||
| 528a53a606 | |||
| 66bfc62566 | |||
| 91874b9d53 | |||
| 50fc0a53d2 | |||
| 0b3322afda | |||
| b32f6fa315 | |||
| fe41ffc788 | |||
| eac443f280 | |||
| d557820d6c | |||
| 4d658e10d3 | |||
| 9ac9613d67 | |||
| e657ebb134 | |||
| d1b07ec06b | |||
| 89621945f8 | |||
| e69fd5bf8f | |||
| c856b762e7 | |||
| b7f82f2d44 | |||
| 588e94dcf4 | |||
| fd1ead0b62 | |||
| 37bd7254b9 | |||
| 74e41de9d6 | |||
| 0bf0b8b88b | |||
| 702129d778 | |||
| 88c67dde84 | |||
| 8e3a0761e8 | |||
| a785890990 | |||
| b482a8c106 | |||
| efe50be604 | |||
| 99904d0066 | |||
| 55e44bc3d0 | |||
| da7ffa839b | |||
| 01af25a57e | |||
| bfc1bb2da9 | |||
| 0e59fa3518 | |||
| 7e812001f0 | |||
| 14c19b80ef | |||
| e8dd0cb5ff | |||
| dc9f5e969a | |||
| 03150667b6 | |||
| 1dfd7bc8a2 | |||
| fa649b1e2a | |||
| e34752c791 | |||
| 75031567bd | |||
| 800a95d431 | |||
| 932b05a42e | |||
| b5cc4d4609 | |||
| ba3d15d82a | |||
| e80fb7b3db | |||
| 84e1f6e573 | |||
| c4847bd39b | |||
| c0c1ec5c67 | |||
| 6739115cfb | |||
| 4606cc32ba | |||
| 2d27bf7505 | |||
| d07af6d101 | |||
| 4890dc20e0 | |||
| 8b01a9b240 | |||
| 8dfba8646c | |||
| 63c0f52955 | |||
| 5413a8e7db | |||
| 330c801e43 | |||
| 8ba08ce982 | |||
| 2b50aeba93 | |||
| c1aef574b1 | |||
| 52ed25f1b9 | |||
| 0446d18712 | |||
| d2bbbb827e | |||
| 6fba594625 | |||
| fa6e092c06 | |||
| 3a6dae2b82 | |||
| 62bb740634 | |||
| 577e0d21bc | |||
| b481a518f5 | |||
| f93b2c6908 | |||
| 890b24200e | |||
| d3259457de | |||
| 8eb42ee68b | |||
| 9d4c48badb | |||
| 9cf2b82e92 | |||
| 61ca918cca | |||
| ef61792da4 | |||
| 3dc97f4960 | |||
| f4a26a8d15 | |||
| 37782a26d5 | |||
| 1434bd2df1 | |||
| e49ea3a7c4 | |||
| 9a6cde1e89 | |||
| 35972b6d68 | |||
| b8021c1756 | |||
| 4b21489141 | |||
| a256ab7728 | |||
| da7ebe7baa | |||
| 1922bbbcfd | |||
| b17be86927 | |||
| ec73a63e09 | |||
| af26a004e5 | |||
| d83782f315 | |||
| 162b544249 | |||
| 0c58e62ed4 | |||
| 96de109d62 | |||
| 0efcf8f3fc | |||
| 2009180827 | |||
| 306ce8bc3f | |||
| b5dd983ba3 | |||
| 832894edfc | |||
| feb6270952 | |||
| b4dd2d4a92 | |||
| 38c2e5aece | |||
| 0ef689b750 | |||
| e72e19b7e8 | |||
| 03603119e5 | |||
| 71baa09bd2 | |||
| a02775a234 | |||
| 5800359214 | |||
| 0bd42f1850 | |||
| 40f0e5d2ac | |||
| f90b9f85fd | |||
| 5b084fffcc | |||
| 4dd6401f8c | |||
| 260bbc1ffd | |||
| c8132a67d0 | |||
| 3412d5caf9 | |||
| 1065cc4b59 | |||
| 154b37879b | |||
| a34238b3a9 | |||
| 42e2ebd294 | |||
| 378cf47683 | |||
| f68a4f4431 | |||
| 3c683e7b9e | |||
| 68bd70b525 | |||
| 2189ab9a1b | |||
| acbbb8a37a | |||
| d1e6d21d66 | |||
| 1a98e039fe | |||
| 3459ce5058 | |||
| c48b1995f8 | |||
| 53c0e7ba1f | |||
| 820cd392f1 | |||
| 759fe04185 | |||
| db441fcf98 | |||
| 83e9280bb4 | |||
| 478235fe32 | |||
| 440401a391 | |||
| 42c0dcae2d | |||
| 7159868b57 | |||
| ab2cc0cc0a | |||
| aaa1800d0c | |||
| a795c65c32 | |||
| 5ed02e924d | |||
| 1d620372b8 | |||
| 9684a975e2 | |||
| c3c3a9e77f | |||
| ecb6d1ef63 | |||
| a5f7bb8a22 | |||
| cea9b9452b | |||
| 8fb45a7ee5 | |||
| b53f03bb7d | |||
| dee0243268 | |||
| 8b6bc354bd | |||
| aff5611cdb | |||
| c5e7d8b2fe | |||
| 90a3549237 | |||
| 63f2a82ad1 | |||
| 0cc39bfbe0 | |||
| ec54b27d67 | |||
| bba4f27465 | |||
| b5c77611d7 | |||
| 987919417d | |||
| d8dbb12959 | |||
| 3e0cde40b8 | |||
| 2c8576a295 | |||
| 8aecc04d01 | |||
| 9bcf7cc50d | |||
| cb2ac1c1ba | |||
| 7f1e304012 | |||
| 9e3dae4b16 | |||
| c649b04bdd | |||
| 6fce2e1116 | |||
| 3e192b3321 | |||
| bc863de165 | |||
| cfa5c9428e | |||
| abddc5a680 | |||
| 577dc4faaa | |||
| a8b0385c6d | |||
| fc85627bd6 | |||
| f9cadba3eb | |||
| c192c2d52f | |||
| 04c7a9ea51 | |||
| 6f9edd8870 | |||
| 076bdb3ab4 | |||
| fcbd877d06 | |||
| 27f4b5af78 | |||
| 7238d6e6c5 | |||
| 094905a727 | |||
| cf3fa0ff12 | |||
| 7c7b356aab | |||
| c57e4f022f | |||
|
|
f5a9f04cf2 | ||
|
|
50fd928cda | ||
| 11072c374b | |||
| 60f1235848 | |||
| 55ea5aebc4 | |||
| 2738f6b794 | |||
| ec2b248ed8 | |||
| aa7bbc5932 | |||
| eef574c9f7 | |||
| 25fb7a1645 | |||
| 301fd8462b | |||
| a92800cbcc | |||
| 5e361b2fc8 | |||
| b41e4dc375 | |||
| 7e615f814d | |||
| c560a63182 | |||
| 2f14d07f82 | |||
| a89fde8aa5 | |||
| 1856fe00d6 | |||
| 388599e08c | |||
| 75a33a0b5e | |||
| 918b53e383 | |||
| c643244dab | |||
| 9fc6f816fb | |||
| 63902fcb46 | |||
| 8a1e0b76f1 | |||
| f144bda9e6 | |||
| b8c9278f37 | |||
| 9f45df7903 | |||
| a894a5429e | |||
| dfec18e904 | |||
| 91e38f5866 | |||
| fed1aecd64 | |||
| ec3056f8c1 | |||
| 339eed1f55 | |||
| 5ac5b4551b | |||
| d378a287fa | |||
| d71af55727 | |||
| de05a535ea | |||
| 910af494b5 | |||
| 3d1c078a44 |
160
.claude/skills/create-workspace/SKILL.md
Normal file
160
.claude/skills/create-workspace/SKILL.md
Normal file
@@ -0,0 +1,160 @@
|
||||
---
|
||||
name: create-workspace
|
||||
description: >
|
||||
Creates a new sandboxed workspace (isolated dev environment) by adding
|
||||
NixOS configuration for a VM, container, or Incus instance. Use when
|
||||
the user wants to create, set up, or add a new sandboxed workspace.
|
||||
---
|
||||
|
||||
# Create Sandboxed Workspace
|
||||
|
||||
Creates an isolated development environment backed by a VM (microvm.nix), container (systemd-nspawn), or Incus instance. This produces:
|
||||
|
||||
1. A workspace config file at `machines/<machine>/workspaces/<name>.nix`
|
||||
2. A registration entry in `machines/<machine>/default.nix`
|
||||
|
||||
## Step 1: Parse Arguments
|
||||
|
||||
Extract the workspace name and backend type from `$ARGUMENTS`. If either is missing, ask the user.
|
||||
|
||||
- **Name**: lowercase alphanumeric with hyphens (e.g., `my-project`)
|
||||
- **Type**: one of `vm`, `container`, or `incus`
|
||||
|
||||
## Step 2: Detect Machine
|
||||
|
||||
Run `hostname` to get the current machine name. Verify that `machines/<hostname>/default.nix` exists.
|
||||
|
||||
If the machine directory doesn't exist, stop and tell the user this machine isn't managed by this flake.
|
||||
|
||||
## Step 3: Allocate IP Address
|
||||
|
||||
Read `machines/<hostname>/default.nix` to find existing `sandboxed-workspace.workspaces` entries and their IPs.
|
||||
|
||||
All IPs are in the `192.168.83.0/24` subnet. Use these ranges by convention:
|
||||
|
||||
| Type | IP Range |
|
||||
|------|----------|
|
||||
| vm | 192.168.83.10 - .49 |
|
||||
| container | 192.168.83.50 - .89 |
|
||||
| incus | 192.168.83.90 - .129 |
|
||||
|
||||
Pick the next available IP in the appropriate range. If no workspaces exist yet for that type, use the first IP in the range.
|
||||
|
||||
## Step 4: Create Workspace Config File
|
||||
|
||||
Create `machines/<hostname>/workspaces/<name>.nix`. Use this template:
|
||||
|
||||
```nix
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
{
|
||||
environment.systemPackages = with pkgs; [
|
||||
# Add packages here
|
||||
];
|
||||
}
|
||||
```
|
||||
|
||||
Ask the user if they want any packages pre-installed.
|
||||
|
||||
Create the `workspaces/` directory if it doesn't exist.
|
||||
|
||||
**Important:** After creating the file, run `git add` on it immediately. Nix flakes only see files tracked by git, so new files must be staged before `nix build` will work.
|
||||
|
||||
## Step 5: Register Workspace
|
||||
|
||||
Edit `machines/<hostname>/default.nix` to add the workspace entry inside the `sandboxed-workspace` block.
|
||||
|
||||
The entry should look like:
|
||||
|
||||
```nix
|
||||
workspaces.<name> = {
|
||||
type = "<type>";
|
||||
config = ./workspaces/<name>.nix;
|
||||
ip = "<allocated-ip>";
|
||||
};
|
||||
```
|
||||
|
||||
**If `sandboxed-workspace` block doesn't exist yet**, add the full block:
|
||||
|
||||
```nix
|
||||
sandboxed-workspace = {
|
||||
enable = true;
|
||||
workspaces.<name> = {
|
||||
type = "<type>";
|
||||
config = ./workspaces/<name>.nix;
|
||||
ip = "<allocated-ip>";
|
||||
};
|
||||
};
|
||||
```
|
||||
|
||||
The machine also needs `networking.sandbox.upstreamInterface` set. Check if it exists; if not, ask the user for their primary network interface name (they can find it with `ip route show default`).
|
||||
|
||||
Do **not** set `hostKey` — it gets auto-generated on first boot and can be added later.
|
||||
|
||||
## Step 6: Verify Build
|
||||
|
||||
Run a build to check for configuration errors:
|
||||
|
||||
```
|
||||
nix build .#nixosConfigurations.<hostname>.config.system.build.toplevel --no-link
|
||||
```
|
||||
|
||||
If the build fails, fix the configuration and retry.
|
||||
|
||||
## Step 7: Deploy
|
||||
|
||||
Tell the user to deploy by running:
|
||||
|
||||
```
|
||||
doas nixos-rebuild switch --flake .
|
||||
```
|
||||
|
||||
**Never run this command yourself** — it requires privileges.
|
||||
|
||||
## Step 8: Post-Deploy Info
|
||||
|
||||
Tell the user to deploy and then start the workspace so the host key gets generated. Provide these instructions:
|
||||
|
||||
**Deploy:**
|
||||
```
|
||||
doas nixos-rebuild switch --flake .
|
||||
```
|
||||
|
||||
**Starting the workspace:**
|
||||
```
|
||||
doas systemctl start <service>
|
||||
```
|
||||
|
||||
Where `<service>` is:
|
||||
- VM: `microvm@<name>`
|
||||
- Container: `container@<name>`
|
||||
- Incus: `incus-workspace-<name>`
|
||||
|
||||
Or use the auto-generated shell alias: `workspace_<name>_start`
|
||||
|
||||
**Connecting:**
|
||||
```
|
||||
ssh googlebot@workspace-<name>
|
||||
```
|
||||
|
||||
Or use the alias: `workspace_<name>`
|
||||
|
||||
**Never run deploy or start commands yourself** — they require privileges.
|
||||
|
||||
## Step 9: Add Host Key
|
||||
|
||||
After the user has deployed and started the workspace, add the SSH host key to the workspace config. Do NOT skip this step — always wait for the user to confirm they've started the workspace, then proceed.
|
||||
|
||||
1. Read the host key from `~/sandboxed/<name>/ssh-host-keys/ssh_host_ed25519_key.pub`
|
||||
2. Add `hostKey = "<contents>";` to the workspace entry in `machines/<hostname>/default.nix`
|
||||
3. Run the build again to verify
|
||||
4. Tell the user to redeploy with `doas nixos-rebuild switch --flake .`
|
||||
|
||||
## Backend Reference
|
||||
|
||||
| | VM | Container | Incus |
|
||||
|---|---|---|---|
|
||||
| Isolation | Full kernel (cloud-hypervisor) | Shared kernel (systemd-nspawn) | Unprivileged container |
|
||||
| Overhead | Higher (separate kernel) | Lower (bind mounts) | Medium |
|
||||
| Filesystem | virtiofs shares | Bind mounts | Incus-managed |
|
||||
| Use case | Untrusted code, kernel-level isolation | Fast dev environments | Better security than nspawn |
|
||||
29
.gitea/scripts/build-and-cache.sh
Executable file
29
.gitea/scripts/build-and-cache.sh
Executable file
@@ -0,0 +1,29 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Configure Attic cache
|
||||
attic login local "$ATTIC_ENDPOINT" "$ATTIC_TOKEN"
|
||||
attic use local:nixos
|
||||
|
||||
# Check flake
|
||||
nix flake check --all-systems --print-build-logs --log-format raw --show-trace
|
||||
|
||||
# Build all systems
|
||||
nix eval .#nixosConfigurations --apply 'cs: builtins.attrNames cs' --json \
|
||||
| jq -r '.[]' \
|
||||
| xargs -I{} nix build ".#nixosConfigurations.{}.config.system.build.toplevel" \
|
||||
--no-link --print-build-logs --log-format raw
|
||||
|
||||
# Push to cache (only locally-built paths >= 0.5MB)
|
||||
toplevels=$(nix eval .#nixosConfigurations \
|
||||
--apply 'cs: map (n: "${cs.${n}.config.system.build.toplevel}") (builtins.attrNames cs)' \
|
||||
--json | jq -r '.[]')
|
||||
echo "Found $(echo "$toplevels" | wc -l) system toplevels"
|
||||
paths=$(echo "$toplevels" \
|
||||
| xargs nix path-info -r --json \
|
||||
| jq -r '[to_entries[] | select(
|
||||
(.value.signatures | all(startswith("cache.nixos.org") | not))
|
||||
and .value.narSize >= 524288
|
||||
) | .key] | unique[]')
|
||||
echo "Pushing $(echo "$paths" | wc -l) unique paths to cache"
|
||||
echo "$paths" | xargs attic push local:nixos
|
||||
60
.gitea/workflows/auto-update.yaml
Normal file
60
.gitea/workflows/auto-update.yaml
Normal file
@@ -0,0 +1,60 @@
|
||||
name: Auto Update Flake
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 6 * * *'
|
||||
workflow_dispatch: {}
|
||||
|
||||
env:
|
||||
DEBIAN_FRONTEND: noninteractive
|
||||
PATH: /run/current-system/sw/bin/
|
||||
XDG_CONFIG_HOME: ${{ runner.temp }}/.config
|
||||
ATTIC_ENDPOINT: ${{ vars.ATTIC_ENDPOINT }}
|
||||
ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }}
|
||||
|
||||
jobs:
|
||||
auto-update:
|
||||
runs-on: nixos
|
||||
steps:
|
||||
- name: Checkout the repository
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
ref: master
|
||||
token: ${{ secrets.PUSH_TOKEN }}
|
||||
|
||||
- name: Configure git identity
|
||||
run: |
|
||||
git config user.name "gitea-runner"
|
||||
git config user.email "gitea-runner@neet.dev"
|
||||
|
||||
- name: Update flake inputs
|
||||
id: update
|
||||
run: |
|
||||
nix flake update
|
||||
if git diff --quiet flake.lock; then
|
||||
echo "No changes to flake.lock, nothing to do"
|
||||
echo "changed=false" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
git add flake.lock
|
||||
git commit -m "flake.lock: update inputs"
|
||||
echo "changed=true" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
- name: Build and cache
|
||||
if: steps.update.outputs.changed == 'true'
|
||||
run: bash .gitea/scripts/build-and-cache.sh
|
||||
|
||||
- name: Push updated lockfile
|
||||
if: steps.update.outputs.changed == 'true'
|
||||
run: git push
|
||||
|
||||
- name: Notify on failure
|
||||
if: failure() && steps.update.outputs.changed == 'true'
|
||||
run: |
|
||||
curl -s \
|
||||
-H "Authorization: Bearer ${{ secrets.NTFY_TOKEN }}" \
|
||||
-H "Title: Flake auto-update failed" \
|
||||
-H "Priority: high" \
|
||||
-H "Tags: warning" \
|
||||
-d "Auto-update workflow failed. Check: ${{ gitea.server_url }}/${{ gitea.repository }}/actions/runs/${{ gitea.run_number }}" \
|
||||
https://ntfy.neet.dev/nix-flake-updates
|
||||
33
.gitea/workflows/check-flake.yaml
Normal file
33
.gitea/workflows/check-flake.yaml
Normal file
@@ -0,0 +1,33 @@
|
||||
name: Check Flake
|
||||
|
||||
on: [push]
|
||||
|
||||
env:
|
||||
DEBIAN_FRONTEND: noninteractive
|
||||
PATH: /run/current-system/sw/bin/
|
||||
XDG_CONFIG_HOME: ${{ runner.temp }}/.config
|
||||
ATTIC_ENDPOINT: ${{ vars.ATTIC_ENDPOINT }}
|
||||
ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }}
|
||||
|
||||
jobs:
|
||||
check-flake:
|
||||
runs-on: nixos
|
||||
steps:
|
||||
- name: Checkout the repository
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Build and cache
|
||||
run: bash .gitea/scripts/build-and-cache.sh
|
||||
|
||||
- name: Notify on failure
|
||||
if: failure()
|
||||
run: |
|
||||
curl -s \
|
||||
-H "Authorization: Bearer ${{ secrets.NTFY_TOKEN }}" \
|
||||
-H "Title: Flake check failed" \
|
||||
-H "Priority: high" \
|
||||
-H "Tags: warning" \
|
||||
-d "Check failed for ${{ gitea.ref_name }}. Check: ${{ gitea.server_url }}/${{ gitea.repository }}/actions/runs/${{ gitea.run_number }}" \
|
||||
https://ntfy.neet.dev/nix-flake-updates
|
||||
2
.gitignore
vendored
Normal file
2
.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
result
|
||||
.claude/worktrees
|
||||
101
CLAUDE.md
Normal file
101
CLAUDE.md
Normal file
@@ -0,0 +1,101 @@
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
|
||||
## What This Is
|
||||
|
||||
A NixOS flake managing multiple machines. All machines import `/common` for shared config, and each machine has its own directory under `/machines/<hostname>/` with a `default.nix` (machine-specific config), `hardware-configuration.nix`, and `properties.nix` (metadata: hostnames, arch, roles, SSH keys).
|
||||
|
||||
## Common Commands
|
||||
|
||||
```bash
|
||||
# Build a machine config (check for errors without deploying)
|
||||
nix build .#nixosConfigurations.<hostname>.config.system.build.toplevel --no-link
|
||||
|
||||
# Deploy to local machine (user must run this themselves - requires privileges)
|
||||
doas nixos-rebuild switch --flake .
|
||||
|
||||
# Deploy to a remote machine (boot-only, no activate)
|
||||
deploy --remote-build --boot --debug-logs --skip-checks .#<hostname>
|
||||
|
||||
# Deploy to a remote machine (activate immediately)
|
||||
deploy --remote-build --debug-logs --skip-checks .#<hostname>
|
||||
|
||||
# Update flake lockfile
|
||||
make update-lockfile
|
||||
|
||||
# Update a single flake input
|
||||
make update-input <input-name>
|
||||
|
||||
# Edit an agenix secret
|
||||
make edit-secret <secret-filename>
|
||||
|
||||
# Rekey all secrets (after adding/removing machine host keys)
|
||||
make rekey-secrets
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
### Machine Discovery (Auto-Registration)
|
||||
|
||||
Machines are **not** listed in `flake.nix`. Instead, `common/machine-info/default.nix` recursively scans `/machines/` for any `properties.nix` file and auto-registers that directory as a machine. To add a machine, create `machines/<name>/properties.nix` and `machines/<name>/default.nix`.
|
||||
|
||||
`properties.nix` returns a plain attrset (no NixOS module args) with: `hostNames`, `arch`, `systemRoles`, `hostKey`, and optionally `userKeys`, `deployKeys`, `remoteUnlock`.
|
||||
|
||||
### Role System
|
||||
|
||||
Each machine declares `systemRoles` in its `properties.nix` (e.g., `["personal" "dns-challenge"]`). Roles drive conditional config:
|
||||
- `config.thisMachine.hasRole.<role>` - boolean, used to conditionally enable features (e.g., `de.enable` for `personal` role)
|
||||
- `config.machines.withRole.<role>` - list of hostnames with that role
|
||||
- Roles also determine which machines can decrypt which agenix secrets (see `secrets/secrets.nix`)
|
||||
|
||||
### Secrets (agenix)
|
||||
|
||||
Secrets in `/secrets/` are encrypted `.age` files. `secrets.nix` maps each secret to the SSH host keys (by role) that can decrypt it. After changing which machines have access, run `make rekey-secrets`.
|
||||
|
||||
### Sandboxed Workspaces
|
||||
|
||||
`common/sandboxed-workspace/` provides isolated dev environments. Three backends: `vm` (microvm/cloud-hypervisor), `container` (systemd-nspawn), `incus`. Workspaces are defined in machine `default.nix` files and their per-workspace config goes in `machines/<hostname>/workspaces/<name>.nix`. The base config (`base.nix`) handles networking, SSH, user setup, and Claude Code pre-configuration.
|
||||
|
||||
IP allocation convention: VMs `.10-.49`, containers `.50-.89`, incus `.90-.129` in `192.168.83.0/24`.
|
||||
|
||||
### Backups
|
||||
|
||||
`common/backups.nix` defines a `backup.group` option. Machines declare backup groups with paths; restic handles daily backups to Backblaze B2 with automatic ZFS/btrfs snapshot support. Each group gets a `restic_<group>` CLI wrapper for manual operations.
|
||||
|
||||
### Nixpkgs Patching
|
||||
|
||||
`flake.nix` applies patches from `/patches/` to nixpkgs before building (workaround for nix#3920).
|
||||
|
||||
### Service Dashboard & Monitoring
|
||||
|
||||
When adding or removing a web-facing service, update both:
|
||||
- **Gatus** (`common/server/gatus.nix`) — add/remove the endpoint monitor
|
||||
- **Dashy** — add/remove the service entry from the dashboard config
|
||||
|
||||
### Key Conventions
|
||||
|
||||
- Uses `doas` instead of `sudo` everywhere
|
||||
- Fish shell is the default user shell
|
||||
- Home Manager is used for user-level config (`home/googlebot.nix`)
|
||||
- `lib/default.nix` extends nixpkgs lib with custom utility functions (extends via `nixpkgs.lib.extend`)
|
||||
- Overlays are in `/overlays/` and applied globally via `flake.nix`
|
||||
- The Nix formatter for this project is `nixpkgs-fmt`
|
||||
- Do not add "Co-Authored-By" lines to commit messages
|
||||
- Always use `--no-link` when running `nix build`
|
||||
- Don't use `nix build --dry-run` unless you only need evaluation — it skips the actual build
|
||||
- Avoid `2>&1` on nix commands — it can cause error output to be missed
|
||||
|
||||
## Git Worktrees
|
||||
|
||||
When the user asks you to "start a worktree" or work in a worktree, **do not create one manually** with `git worktree add`. Instead, tell the user to start a new session with:
|
||||
|
||||
```bash
|
||||
claude --worktree <name>
|
||||
```
|
||||
|
||||
This is the built-in Claude Code worktree workflow. It creates the worktree at `.claude/worktrees/<name>/` with a branch `worktree-<name>` and starts a new Claude session inside it. Cleanup is handled automatically on exit.
|
||||
|
||||
When instructed to work in a git worktree (e.g., via `isolation: "worktree"` on a subagent), you **MUST** do so. If you are unable to create or use a git worktree, you **MUST** stop work immediately and report the failure to the user. Do not fall back to working in the main working tree.
|
||||
|
||||
When applying work from a git worktree back to the main branch, commit in the worktree first, then use `git cherry-pick` from the main working tree to bring the commit over. Do not use `git checkout` or `git apply` to copy files directly. Do **not** automatically apply worktree work to the main branch — always ask the user for approval first.
|
||||
52
Makefile
Normal file
52
Makefile
Normal file
@@ -0,0 +1,52 @@
|
||||
# Lockfile utils
|
||||
.PHONY: update-lockfile
|
||||
update-lockfile:
|
||||
nix flake update --commit-lock-file
|
||||
|
||||
.PHONY: update-lockfile-without-commit
|
||||
update-lockfile-without-commit:
|
||||
nix flake update
|
||||
|
||||
# Agenix utils
|
||||
.PHONY: edit-secret
|
||||
edit-secret:
|
||||
cd secrets && agenix -e $(filter-out $@,$(MAKECMDGOALS))
|
||||
|
||||
.PHONY: rekey-secrets
|
||||
rekey-secrets:
|
||||
cd secrets && agenix -r
|
||||
|
||||
# NixOS utils
|
||||
.PHONY: clean-old-nixos-profiles
|
||||
clean-old-nixos-profiles:
|
||||
doas nix-collect-garbage -d
|
||||
|
||||
# Garbage Collect
|
||||
.PHONY: gc
|
||||
gc:
|
||||
nix store gc
|
||||
|
||||
# Update a flake input by name (ex: 'nixpkgs')
|
||||
.PHONY: update-input
|
||||
update-input:
|
||||
nix flake update $(filter-out $@,$(MAKECMDGOALS))
|
||||
|
||||
# Build Custom Install ISO
|
||||
.PHONY: iso
|
||||
iso:
|
||||
nix build .#packages.x86_64-linux.iso
|
||||
|
||||
# Build Custom kexec image
|
||||
.PHONY: kexec-img
|
||||
kexec-img:
|
||||
nix build .#packages.x86_64-linux.kexec
|
||||
|
||||
# Deploy a host by name (ex: 's0') but don't activate
|
||||
.PHONY: deploy
|
||||
deploy:
|
||||
deploy --remote-build --boot --debug-logs --skip-checks .#$(filter-out $@,$(MAKECMDGOALS))
|
||||
|
||||
# Deploy a host by name (ex: 's0')
|
||||
.PHONY: deploy-activate
|
||||
deploy-activate:
|
||||
deploy --remote-build --debug-logs --skip-checks .#$(filter-out $@,$(MAKECMDGOALS))
|
||||
32
README.md
Normal file
32
README.md
Normal file
@@ -0,0 +1,32 @@
|
||||
# NixOS Configuration
|
||||
|
||||
A NixOS flake managing multiple machines with role-based configuration, agenix secrets, and sandboxed dev workspaces.
|
||||
|
||||
## Layout
|
||||
|
||||
- `/common` - shared configuration imported by all machines
|
||||
- `/boot` - bootloaders, CPU microcode, remote LUKS unlock over Tor
|
||||
- `/network` - Tailscale, VPN tunneling via PIA
|
||||
- `/pc` - desktop/graphical config (enabled by the `personal` role)
|
||||
- `/server` - service definitions and extensions
|
||||
- `/sandboxed-workspace` - isolated dev environments (VM, container, or Incus)
|
||||
- `/machines` - per-machine config (`default.nix`, `hardware-configuration.nix`, `properties.nix`)
|
||||
- `/secrets` - agenix-encrypted secrets, decryptable by machines based on their roles
|
||||
- `/home` - Home Manager user config
|
||||
- `/lib` - custom library functions extending nixpkgs lib
|
||||
- `/overlays` - nixpkgs overlays applied globally
|
||||
- `/patches` - patches applied to nixpkgs at build time
|
||||
|
||||
## Notable Features
|
||||
|
||||
**Auto-discovery & roles** — Machines register themselves by placing a `properties.nix` under `/machines/`. No manual listing in `flake.nix`. Roles declared per-machine (`"personal"`, `"dns-challenge"`, etc.) drive feature enablement via `config.thisMachine.hasRole.<role>` and control which agenix secrets each machine can decrypt.
|
||||
|
||||
**Machine properties module system** — `properties.nix` files form a separate lightweight module system (`machine-info`) for recording machine metadata (hostnames, architecture, roles, SSH keys). Since every machine's properties are visible to every other machine, each system can reflect on the properties of the entire fleet — enabling automatic SSH trust, role-based secret access, and cross-machine coordination without duplicating information.
|
||||
|
||||
**Remote LUKS unlock over Tor** — Machines with encrypted root disks can be unlocked remotely via SSH. An embedded Tor hidden service starts in the initrd so the machine is reachable even without a known IP, using a separate SSH host key for the boot environment.
|
||||
|
||||
**VPN containers** — A `vpn-container` module spins up an ephemeral NixOS container with a PIA WireGuard tunnel. The host creates the WireGuard interface and authenticates with PIA, then hands it off to the container's network namespace. This ensures that the container can **never** have direct internet access. Leakage is impossible.
|
||||
|
||||
**Sandboxed workspaces** — Isolated dev environments backed by microVMs (cloud-hypervisor), systemd-nspawn containers, or Incus. Each workspace gets a static IP on a NAT'd bridge, auto-generated SSH host keys, shell aliases for management, and comes pre-configured with Claude Code. The sandbox network blocks access to the local LAN while allowing internet.
|
||||
|
||||
**Snapshot-aware backups** — Restic backups to Backblaze B2 automatically create ZFS snapshots or btrfs read-only snapshots before backing up, using mount namespaces to bind-mount frozen data over the original paths so restic records correct paths. Each backup group gets a `restic_<group>` CLI wrapper. Supports `.nobackup` marker files.
|
||||
84
TODO.md
Normal file
84
TODO.md
Normal file
@@ -0,0 +1,84 @@
|
||||
# A place for brain dump ideas maybe to be taken off the shelf one day
|
||||
|
||||
### NixOS webtools
|
||||
- Better options search https://mynixos.com/options/services
|
||||
|
||||
### Interesting ideas for restructuring nixos config
|
||||
- https://github.com/gytis-ivaskevicius/flake-utils-plus
|
||||
- https://github.com/divnix/digga/tree/main/examples/devos
|
||||
- https://digga.divnix.com/
|
||||
- https://nixos.wiki/wiki/Comparison_of_NixOS_setups
|
||||
|
||||
### Housekeeping
|
||||
- Cleanup the line between hardware-configuration.nix and configuration.nix in machine config
|
||||
- remove `options.currentSystem`
|
||||
- allow `hostname` option for webservices to be null to disable configuring nginx
|
||||
|
||||
### NAS
|
||||
- safely turn off NAS on power disconnect
|
||||
|
||||
### Shell Commands
|
||||
- tailexitnode = `sudo tailscale up --exit-node=<exit-node-ip> --exit-node-allow-lan-access=true`
|
||||
|
||||
### Services
|
||||
- setup archivebox
|
||||
- radio https://tildegit.org/tilderadio/site
|
||||
- music
|
||||
- mopidy
|
||||
- use the jellyfin plugin?
|
||||
- navidrome
|
||||
- spotify secrets for navidrome
|
||||
- picard for music tagging
|
||||
- alternative music software
|
||||
- https://www.smarthomebeginner.com/best-music-server-software-options/
|
||||
- https://funkwhale.audio/
|
||||
- https://github.com/epoupon/lms
|
||||
- https://github.com/benkaiser/stretto
|
||||
- https://github.com/blackcandy-org/black_candy
|
||||
- https://github.com/koel/koel
|
||||
- https://airsonic.github.io/
|
||||
- https://ampache.org/
|
||||
- replace nextcloud with seafile
|
||||
|
||||
### Archive
|
||||
- email
|
||||
- https://github.com/Disassembler0/dovecot-archive/blob/main/src/dovecot_archive.py
|
||||
- http://kb.unixservertech.com/software/dovecot/archiveserver
|
||||
|
||||
### Paranoia
|
||||
- https://christine.website/blog/paranoid-nixos-2021-07-18
|
||||
- https://nixos.wiki/wiki/Impermanence
|
||||
|
||||
# Setup CI
|
||||
- CI
|
||||
- hydra
|
||||
- https://docs.cachix.org/continuous-integration-setup/
|
||||
- Binary Cache
|
||||
- Maybe use cachix https://gvolpe.com/blog/nixos-binary-cache-ci/
|
||||
- Self hosted binary cache? https://www.tweag.io/blog/2019-11-21-untrusted-ci/
|
||||
- https://github.com/edolstra/nix-serve
|
||||
- https://nixos.wiki/wiki/Binary_Cache
|
||||
- https://discourse.nixos.org/t/introducing-attic-a-self-hostable-nix-binary-cache-server/24343
|
||||
- Both
|
||||
- https://garnix.io/
|
||||
- https://nixbuild.net
|
||||
|
||||
|
||||
# Secrets
|
||||
- consider using headscale
|
||||
- Replace luks over tor for remote unlock with luks over tailscale using ephemeral keys
|
||||
- Rollover luks FDE passwords
|
||||
- /secrets on personal computers should only be readable using a trusted ssh key, preferably requiring a yubikey
|
||||
- Rollover shared yubikey secrets
|
||||
- offsite backup yubikey, pw db, and ssh key with /secrets access
|
||||
|
||||
### Misc
|
||||
- for automated kernel upgrades on luks systems, need to kexec with initrd that contains luks key
|
||||
- https://github.com/flowztul/keyexec/blob/master/etc/default/kexec-cryptroot
|
||||
- https://github.com/pop-os/system76-scheduler
|
||||
- improve email a little bit https://helloinbox.email
|
||||
- remap razer keys https://github.com/sezanzeb/input-remapper
|
||||
|
||||
### Future Interests (upon merge into nixpkgs)
|
||||
- nixos/thelounge: add users option https://github.com/NixOS/nixpkgs/pull/157477
|
||||
- glorytun: init at 0.3.4 https://github.com/NixOS/nixpkgs/pull/153356
|
||||
@@ -4,11 +4,12 @@
|
||||
|
||||
let
|
||||
cfg = config.system.autoUpgrade;
|
||||
in {
|
||||
in
|
||||
{
|
||||
config = lib.mkIf cfg.enable {
|
||||
system.autoUpgrade = {
|
||||
flake = "git+https://git.neet.dev/zuckerberg/nix-config.git";
|
||||
flags = [ "--recreate-lock-file" ]; # ignore lock file, just pull the latest
|
||||
flags = [ "--recreate-lock-file" "--no-write-lock-file" ]; # ignore lock file, just pull the latest
|
||||
};
|
||||
};
|
||||
}
|
||||
179
common/backups.nix
Normal file
179
common/backups.nix
Normal file
@@ -0,0 +1,179 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
let
|
||||
cfg = config.backup;
|
||||
|
||||
mkRespository = group: "s3:s3.us-west-004.backblazeb2.com/D22TgIt0-main-backup/${group}";
|
||||
|
||||
findmnt = "${pkgs.util-linux}/bin/findmnt";
|
||||
mount = "${pkgs.util-linux}/bin/mount";
|
||||
umount = "${pkgs.util-linux}/bin/umount";
|
||||
btrfs = "${pkgs.btrfs-progs}/bin/btrfs";
|
||||
zfs = "/run/current-system/sw/bin/zfs";
|
||||
|
||||
# Creates snapshots and bind mounts them over original paths within the
|
||||
# service's mount namespace, so restic sees correct paths but reads frozen data
|
||||
snapshotHelperFn = ''
|
||||
snapshot_for_path() {
|
||||
local group="$1" path="$2" action="$3"
|
||||
local pathhash fstype
|
||||
|
||||
pathhash=$(echo -n "$path" | sha256sum | cut -c1-8)
|
||||
fstype=$(${findmnt} -n -o FSTYPE -T "$path" 2>/dev/null || echo "unknown")
|
||||
|
||||
case "$fstype" in
|
||||
zfs)
|
||||
local dataset mount subpath snapname snappath
|
||||
dataset=$(${findmnt} -n -o SOURCE -T "$path")
|
||||
mount=$(${findmnt} -n -o TARGET -T "$path")
|
||||
subpath=''${path#"$mount"}
|
||||
[[ "$subpath" != /* ]] && subpath="/$subpath"
|
||||
snapname="''${dataset}@restic-''${group}-''${pathhash}"
|
||||
snappath="''${mount}/.zfs/snapshot/restic-''${group}-''${pathhash}''${subpath}"
|
||||
case "$action" in
|
||||
create)
|
||||
${zfs} destroy "$snapname" 2>/dev/null || true
|
||||
${zfs} snapshot "$snapname"
|
||||
${mount} --bind "$snappath" "$path"
|
||||
echo "$path"
|
||||
;;
|
||||
destroy)
|
||||
${umount} "$path" 2>/dev/null || true
|
||||
${zfs} destroy "$snapname" 2>/dev/null || true
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
btrfs)
|
||||
local mount subpath snapdir snappath
|
||||
mount=$(${findmnt} -n -o TARGET -T "$path")
|
||||
subpath=''${path#"$mount"}
|
||||
[[ "$subpath" != /* ]] && subpath="/$subpath"
|
||||
snapdir="/.restic-snapshots/''${group}-''${pathhash}"
|
||||
snappath="''${snapdir}''${subpath}"
|
||||
case "$action" in
|
||||
create)
|
||||
${btrfs} subvolume delete "$snapdir" 2>/dev/null || true
|
||||
mkdir -p /.restic-snapshots
|
||||
${btrfs} subvolume snapshot -r "$mount" "$snapdir" >&2
|
||||
${mount} --bind "$snappath" "$path"
|
||||
echo "$path"
|
||||
;;
|
||||
destroy)
|
||||
${umount} "$path" 2>/dev/null || true
|
||||
${btrfs} subvolume delete "$snapdir" 2>/dev/null || true
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
*)
|
||||
echo "No snapshot support for $fstype ($path), using original" >&2
|
||||
[ "$action" = "create" ] && echo "$path"
|
||||
;;
|
||||
esac
|
||||
}
|
||||
'';
|
||||
|
||||
mkBackup = group: paths: {
|
||||
repository = mkRespository group;
|
||||
|
||||
dynamicFilesFrom = "cat /run/restic-backup-${group}/paths";
|
||||
|
||||
backupPrepareCommand = ''
|
||||
mkdir -p /run/restic-backup-${group}
|
||||
: > /run/restic-backup-${group}/paths
|
||||
|
||||
${snapshotHelperFn}
|
||||
|
||||
for path in ${lib.escapeShellArgs paths}; do
|
||||
snapshot_for_path ${lib.escapeShellArg group} "$path" create >> /run/restic-backup-${group}/paths
|
||||
done
|
||||
'';
|
||||
|
||||
backupCleanupCommand = ''
|
||||
${snapshotHelperFn}
|
||||
|
||||
for path in ${lib.escapeShellArgs paths}; do
|
||||
snapshot_for_path ${lib.escapeShellArg group} "$path" destroy
|
||||
done
|
||||
|
||||
rm -rf /run/restic-backup-${group}
|
||||
'';
|
||||
|
||||
initialize = true;
|
||||
|
||||
timerConfig = {
|
||||
OnCalendar = "daily";
|
||||
RandomizedDelaySec = "1h";
|
||||
};
|
||||
|
||||
extraBackupArgs = [
|
||||
''--exclude-if-present ".nobackup"''
|
||||
];
|
||||
|
||||
# Keeps backups from up to 6 months ago
|
||||
pruneOpts = [
|
||||
"--keep-daily 7" # one backup for each of the last n days
|
||||
"--keep-weekly 5" # one backup for each of the last n weeks
|
||||
"--keep-monthly 6" # one backup for each of the last n months
|
||||
];
|
||||
|
||||
environmentFile = "/run/agenix/backblaze-s3-backups";
|
||||
passwordFile = "/run/agenix/restic-password";
|
||||
};
|
||||
|
||||
# example usage: "sudo restic_samba unlock" (removes lockfile)
|
||||
mkResticGroupCmd = group: pkgs.writeShellScriptBin "restic_${group}" ''
|
||||
if [ "$EUID" -ne 0 ]
|
||||
then echo "Run as root"
|
||||
exit
|
||||
fi
|
||||
. /run/agenix/backblaze-s3-backups
|
||||
export AWS_SECRET_ACCESS_KEY
|
||||
export AWS_ACCESS_KEY_ID
|
||||
export RESTIC_PASSWORD_FILE=/run/agenix/restic-password
|
||||
export RESTIC_REPOSITORY="${mkRespository group}"
|
||||
exec ${pkgs.restic}/bin/restic "$@"
|
||||
'';
|
||||
in
|
||||
{
|
||||
options.backup = {
|
||||
group = lib.mkOption {
|
||||
default = null;
|
||||
type = lib.types.nullOr (lib.types.attrsOf (lib.types.submodule {
|
||||
options = {
|
||||
paths = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
description = ''
|
||||
Paths to backup
|
||||
'';
|
||||
};
|
||||
};
|
||||
}));
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf (cfg.group != null) {
|
||||
assertions = lib.mapAttrsToList (group: _: {
|
||||
assertion = config.systemd.services."restic-backups-${group}".enable;
|
||||
message = "Expected systemd service 'restic-backups-${group}' not found. The nixpkgs restic module may have changed its naming convention.";
|
||||
}) cfg.group;
|
||||
|
||||
services.restic.backups = lib.concatMapAttrs
|
||||
(group: groupCfg: {
|
||||
${group} = mkBackup group groupCfg.paths;
|
||||
})
|
||||
cfg.group;
|
||||
|
||||
# Mount namespace lets us bind mount snapshots over original paths,
|
||||
# so restic backs up from frozen snapshots while recording correct paths
|
||||
systemd.services = lib.concatMapAttrs
|
||||
(group: _: {
|
||||
"restic-backups-${group}".serviceConfig.PrivateMounts = true;
|
||||
})
|
||||
cfg.group;
|
||||
|
||||
age.secrets.backblaze-s3-backups.file = ../secrets/backblaze-s3-backups.age;
|
||||
age.secrets.restic-password.file = ../secrets/restic-password.age;
|
||||
|
||||
environment.systemPackages = map mkResticGroupCmd (builtins.attrNames cfg.group);
|
||||
};
|
||||
}
|
||||
29
common/binary-cache.nix
Normal file
29
common/binary-cache.nix
Normal file
@@ -0,0 +1,29 @@
|
||||
{ config, ... }:
|
||||
|
||||
{
|
||||
nix = {
|
||||
settings = {
|
||||
substituters = [
|
||||
"https://cache.nixos.org/"
|
||||
"https://nix-community.cachix.org"
|
||||
"http://s0.neet.dev:28338/nixos"
|
||||
];
|
||||
trusted-public-keys = [
|
||||
"nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs="
|
||||
"nixos:e5AMCUWWEX9MESWAAMjBkZdGUpl588NhgsUO3HsdhFw="
|
||||
];
|
||||
|
||||
# Allow substituters to be offline
|
||||
# This isn't exactly ideal since it would be best if I could set up a system
|
||||
# so that it is an error if a derivation isn't available for any substituters
|
||||
# and use this flag as intended for deciding if it should build missing
|
||||
# derivations locally. See https://github.com/NixOS/nix/issues/6901
|
||||
fallback = true;
|
||||
|
||||
# Authenticate to private nixos cache
|
||||
netrc-file = config.age.secrets.attic-netrc.path;
|
||||
};
|
||||
};
|
||||
|
||||
age.secrets.attic-netrc.file = ../secrets/attic-netrc.age;
|
||||
}
|
||||
@@ -3,24 +3,27 @@
|
||||
with lib;
|
||||
let
|
||||
cfg = config.bios;
|
||||
in {
|
||||
in
|
||||
{
|
||||
options.bios = {
|
||||
enable = mkEnableOption "enable bios boot";
|
||||
device = mkOption {
|
||||
type = types.str;
|
||||
};
|
||||
configurationLimit = mkOption {
|
||||
default = 20;
|
||||
type = types.int;
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
# Use GRUB 2 for BIOS
|
||||
boot.loader = {
|
||||
timeout = 2;
|
||||
grub = {
|
||||
enable = true;
|
||||
device = cfg.device;
|
||||
version = 2;
|
||||
useOSProber = true;
|
||||
configurationLimit = 20;
|
||||
configurationLimit = cfg.configurationLimit;
|
||||
theme = pkgs.nixos-grub2-theme;
|
||||
};
|
||||
};
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
{ lib, config, pkgs, ... }:
|
||||
{ ... }:
|
||||
|
||||
{
|
||||
imports = [
|
||||
./firmware.nix
|
||||
./efi.nix
|
||||
./bios.nix
|
||||
./luks.nix
|
||||
./remote-luks-unlock.nix
|
||||
];
|
||||
}
|
||||
@@ -3,24 +3,27 @@
|
||||
with lib;
|
||||
let
|
||||
cfg = config.efi;
|
||||
in {
|
||||
in
|
||||
{
|
||||
options.efi = {
|
||||
enable = mkEnableOption "enable efi boot";
|
||||
configurationLimit = mkOption {
|
||||
default = 20;
|
||||
type = types.int;
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
# Use GRUB2 for EFI
|
||||
boot.loader = {
|
||||
efi.canTouchEfiVariables = true;
|
||||
timeout = 2;
|
||||
grub = {
|
||||
enable = true;
|
||||
device = "nodev";
|
||||
version = 2;
|
||||
efiSupport = true;
|
||||
useOSProber = true;
|
||||
# memtest86.enable = true;
|
||||
configurationLimit = 20;
|
||||
# memtest86.enable = true;
|
||||
configurationLimit = cfg.configurationLimit;
|
||||
theme = pkgs.nixos-grub2-theme;
|
||||
};
|
||||
};
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
{ lib, config, pkgs, ... }:
|
||||
{ lib, config, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.firmware;
|
||||
in {
|
||||
in
|
||||
{
|
||||
options.firmware.x86_64 = {
|
||||
enable = mkEnableOption "enable x86_64 firmware";
|
||||
};
|
||||
|
||||
@@ -1,101 +0,0 @@
|
||||
{ config, pkgs, lib, ... }:
|
||||
|
||||
let
|
||||
cfg = config.luks;
|
||||
in {
|
||||
options.luks = {
|
||||
enable = lib.mkEnableOption "enable luks root remote decrypt over ssh/tor";
|
||||
device = {
|
||||
name = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "enc-pv";
|
||||
};
|
||||
path = lib.mkOption {
|
||||
type = lib.types.either lib.types.str lib.types.path;
|
||||
};
|
||||
allowDiscards = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
};
|
||||
};
|
||||
sshHostKeys = lib.mkOption {
|
||||
type = lib.types.listOf (lib.types.either lib.types.str lib.types.path);
|
||||
default = [
|
||||
"/secret/ssh_host_rsa_key"
|
||||
"/secret/ssh_host_ed25519_key"
|
||||
];
|
||||
};
|
||||
sshAuthorizedKeys = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
default = config.users.users.googlebot.openssh.authorizedKeys.keys;
|
||||
};
|
||||
onionConfig = lib.mkOption {
|
||||
type = lib.types.path;
|
||||
default = /secret/onion;
|
||||
};
|
||||
kernelModules = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
default = [ "e1000" "e1000e" "virtio_pci" "r8169" ];
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
boot.initrd.luks.devices.${cfg.device.name} = {
|
||||
device = cfg.device.path;
|
||||
allowDiscards = cfg.device.allowDiscards;
|
||||
};
|
||||
|
||||
# Unlock LUKS disk over ssh
|
||||
boot.initrd.network.enable = true;
|
||||
boot.initrd.kernelModules = cfg.kernelModules;
|
||||
boot.initrd.network.ssh = {
|
||||
enable = true;
|
||||
port = 22;
|
||||
hostKeys = cfg.sshHostKeys;
|
||||
authorizedKeys = cfg.sshAuthorizedKeys;
|
||||
};
|
||||
|
||||
boot.initrd.postDeviceCommands = ''
|
||||
echo 'waiting for root device to be opened...'
|
||||
mkfifo /crypt-ramfs/passphrase
|
||||
echo /crypt-ramfs/passphrase >> /dev/null
|
||||
'';
|
||||
|
||||
# Make machine accessible over tor for boot unlock
|
||||
boot.initrd.secrets = {
|
||||
"/etc/tor/onion/bootup" = cfg.onionConfig;
|
||||
};
|
||||
boot.initrd.extraUtilsCommands = ''
|
||||
copy_bin_and_libs ${pkgs.tor}/bin/tor
|
||||
copy_bin_and_libs ${pkgs.haveged}/bin/haveged
|
||||
'';
|
||||
# start tor during boot process
|
||||
boot.initrd.network.postCommands = let
|
||||
torRc = (pkgs.writeText "tor.rc" ''
|
||||
DataDirectory /etc/tor
|
||||
SOCKSPort 127.0.0.1:9050 IsolateDestAddr
|
||||
SOCKSPort 127.0.0.1:9063
|
||||
HiddenServiceDir /etc/tor/onion/bootup
|
||||
HiddenServicePort 22 127.0.0.1:22
|
||||
'');
|
||||
in ''
|
||||
# Add nice prompt for giving LUKS passphrase over ssh
|
||||
echo 'read -s -p "Unlock Passphrase: " passphrase && echo $passphrase > /crypt-ramfs/passphrase && exit' >> /root/.profile
|
||||
|
||||
echo "tor: preparing onion folder"
|
||||
# have to do this otherwise tor does not want to start
|
||||
chmod -R 700 /etc/tor
|
||||
|
||||
echo "make sure localhost is up"
|
||||
ip a a 127.0.0.1/8 dev lo
|
||||
ip link set lo up
|
||||
|
||||
echo "haveged: starting haveged"
|
||||
haveged -F &
|
||||
|
||||
echo "tor: starting tor"
|
||||
tor -f ${torRc} --verify-config
|
||||
tor -f ${torRc} &
|
||||
'';
|
||||
};
|
||||
}
|
||||
96
common/boot/remote-luks-unlock.nix
Normal file
96
common/boot/remote-luks-unlock.nix
Normal file
@@ -0,0 +1,96 @@
|
||||
{ config, pkgs, lib, ... }:
|
||||
|
||||
# TODO: use tailscale instead of tor https://gist.github.com/antifuchs/e30d58a64988907f282c82231dde2cbc
|
||||
|
||||
let
|
||||
cfg = config.remoteLuksUnlock;
|
||||
in
|
||||
{
|
||||
options.remoteLuksUnlock = {
|
||||
enable = lib.mkEnableOption "enable luks root remote decrypt over ssh/tor";
|
||||
enableTorUnlock = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = cfg.enable;
|
||||
description = "Make machine accessable over tor for ssh boot unlock";
|
||||
};
|
||||
sshHostKeys = lib.mkOption {
|
||||
type = lib.types.listOf (lib.types.either lib.types.str lib.types.path);
|
||||
default = [
|
||||
"/secret/ssh_host_rsa_key"
|
||||
"/secret/ssh_host_ed25519_key"
|
||||
];
|
||||
};
|
||||
sshAuthorizedKeys = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
default = config.users.users.googlebot.openssh.authorizedKeys.keys;
|
||||
};
|
||||
onionConfig = lib.mkOption {
|
||||
type = lib.types.path;
|
||||
default = /secret/onion;
|
||||
};
|
||||
kernelModules = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
default = [ "e1000" "e1000e" "virtio_pci" "r8169" ];
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
# Unlock LUKS disk over ssh
|
||||
boot.initrd.network.enable = true;
|
||||
boot.initrd.kernelModules = cfg.kernelModules;
|
||||
boot.initrd.network.ssh = {
|
||||
enable = true;
|
||||
port = 22;
|
||||
hostKeys = cfg.sshHostKeys;
|
||||
authorizedKeys = cfg.sshAuthorizedKeys;
|
||||
};
|
||||
|
||||
boot.initrd.postDeviceCommands = ''
|
||||
echo 'waiting for root device to be opened...'
|
||||
mkfifo /crypt-ramfs/passphrase
|
||||
echo /crypt-ramfs/passphrase >> /dev/null
|
||||
'';
|
||||
|
||||
boot.initrd.secrets = lib.mkIf cfg.enableTorUnlock {
|
||||
"/etc/tor/onion/bootup" = cfg.onionConfig;
|
||||
};
|
||||
boot.initrd.extraUtilsCommands = lib.mkIf cfg.enableTorUnlock ''
|
||||
copy_bin_and_libs ${pkgs.tor}/bin/tor
|
||||
copy_bin_and_libs ${pkgs.haveged}/bin/haveged
|
||||
'';
|
||||
boot.initrd.network.postCommands = lib.mkMerge [
|
||||
(
|
||||
''
|
||||
# Add nice prompt for giving LUKS passphrase over ssh
|
||||
echo 'read -s -p "Unlock Passphrase: " passphrase && echo $passphrase > /crypt-ramfs/passphrase && exit' >> /root/.profile
|
||||
''
|
||||
)
|
||||
|
||||
(
|
||||
let torRc = (pkgs.writeText "tor.rc" ''
|
||||
DataDirectory /etc/tor
|
||||
SOCKSPort 127.0.0.1:9050 IsolateDestAddr
|
||||
SOCKSPort 127.0.0.1:9063
|
||||
HiddenServiceDir /etc/tor/onion/bootup
|
||||
HiddenServicePort 22 127.0.0.1:22
|
||||
''); in
|
||||
lib.mkIf cfg.enableTorUnlock ''
|
||||
echo "tor: preparing onion folder"
|
||||
# have to do this otherwise tor does not want to start
|
||||
chmod -R 700 /etc/tor
|
||||
|
||||
echo "make sure localhost is up"
|
||||
ip a a 127.0.0.1/8 dev lo
|
||||
ip link set lo up
|
||||
|
||||
echo "haveged: starting haveged"
|
||||
haveged -F &
|
||||
|
||||
echo "tor: starting tor"
|
||||
tor -f ${torRc} --verify-config
|
||||
tor -f ${torRc} &
|
||||
''
|
||||
)
|
||||
];
|
||||
};
|
||||
}
|
||||
@@ -1,37 +1,56 @@
|
||||
{ config, pkgs, ... }:
|
||||
{ config, pkgs, lib, ... }:
|
||||
|
||||
{
|
||||
imports = [
|
||||
./backups.nix
|
||||
./binary-cache.nix
|
||||
./flakes.nix
|
||||
./pia.nix
|
||||
./zerotier.nix
|
||||
./auto-update.nix
|
||||
./hosts.nix
|
||||
./ntfy-alerts.nix
|
||||
./zfs-alerts.nix
|
||||
./shell.nix
|
||||
./network
|
||||
./boot
|
||||
./server
|
||||
./pc
|
||||
./machine-info
|
||||
./nix-builder.nix
|
||||
./ssh.nix
|
||||
./sandboxed-workspace
|
||||
];
|
||||
|
||||
nix.flakes.enable = true;
|
||||
|
||||
system.stateVersion = "21.11";
|
||||
system.stateVersion = "23.11";
|
||||
|
||||
networking.useDHCP = false;
|
||||
networking.useDHCP = lib.mkDefault true;
|
||||
|
||||
networking.firewall.enable = true;
|
||||
networking.firewall.allowPing = true;
|
||||
|
||||
time.timeZone = "America/New_York";
|
||||
i18n.defaultLocale = "en_US.UTF-8";
|
||||
time.timeZone = "America/Los_Angeles";
|
||||
i18n = {
|
||||
defaultLocale = "en_US.UTF-8";
|
||||
extraLocaleSettings = {
|
||||
LANGUAGE = "en_US.UTF-8";
|
||||
LC_ALL = "en_US.UTF-8";
|
||||
};
|
||||
};
|
||||
|
||||
services.openssh.enable = true;
|
||||
services.openssh = {
|
||||
enable = true;
|
||||
settings = {
|
||||
PasswordAuthentication = false;
|
||||
};
|
||||
};
|
||||
programs.mosh.enable = true;
|
||||
|
||||
environment.systemPackages = with pkgs; [
|
||||
wget
|
||||
kakoune
|
||||
htop
|
||||
git git-lfs
|
||||
git
|
||||
git-lfs
|
||||
dnsutils
|
||||
tmux
|
||||
nethogs
|
||||
@@ -39,10 +58,13 @@
|
||||
pciutils
|
||||
usbutils
|
||||
killall
|
||||
screen
|
||||
micro
|
||||
helix
|
||||
lm_sensors
|
||||
picocom
|
||||
lf
|
||||
gnumake
|
||||
tree
|
||||
];
|
||||
|
||||
nixpkgs.config.allowUnfree = true;
|
||||
@@ -55,16 +77,30 @@
|
||||
"dialout" # serial
|
||||
];
|
||||
shell = pkgs.fish;
|
||||
openssh.authorizedKeys.keys = (import ./ssh.nix).users;
|
||||
openssh.authorizedKeys.keys = config.machines.ssh.userKeys;
|
||||
hashedPassword = "$6$TuDO46rILr$gkPUuLKZe3psexhs8WFZMpzgEBGksE.c3Tjh1f8sD0KMC4oV89K2pqAABfl.Lpxu2jVdr5bgvR5cWnZRnji/r/";
|
||||
uid = 1000;
|
||||
};
|
||||
nix.trustedUsers = [ "root" "googlebot" ];
|
||||
users.users.root = {
|
||||
openssh.authorizedKeys.keys = config.machines.ssh.deployKeys;
|
||||
};
|
||||
nix.settings = {
|
||||
trusted-users = [ "root" "googlebot" ];
|
||||
};
|
||||
|
||||
nix.gc.automatic = true;
|
||||
# don't use sudo
|
||||
security.doas.enable = true;
|
||||
security.sudo.enable = false;
|
||||
security.doas.extraRules = [
|
||||
# don't ask for password every time
|
||||
{ groups = [ "wheel" ]; persist = true; }
|
||||
];
|
||||
|
||||
programs.fish.enable = true;
|
||||
programs.fish.shellInit = ''
|
||||
set fish_greeting
|
||||
'';
|
||||
nix.gc.automatic = !config.boot.isContainer;
|
||||
|
||||
security.acme.acceptTerms = true;
|
||||
security.acme.defaults.email = "zuckerberg@neet.dev";
|
||||
|
||||
# Enable Desktop Environment if this is a PC (machine role is "personal")
|
||||
de.enable = lib.mkDefault (config.thisMachine.hasRole."personal" && !config.boot.isContainer);
|
||||
}
|
||||
|
||||
@@ -1,21 +1,18 @@
|
||||
{ lib, pkgs, config, ... }:
|
||||
{ lib, config, ... }:
|
||||
with lib;
|
||||
let
|
||||
cfg = config.nix.flakes;
|
||||
in {
|
||||
in
|
||||
{
|
||||
options.nix.flakes = {
|
||||
enable = mkEnableOption "use nix flakes";
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
nix = {
|
||||
package = pkgs.nixFlakes;
|
||||
extraOptions = ''
|
||||
experimental-features = nix-command flakes
|
||||
'';
|
||||
|
||||
# pin nixpkgs for system commands such as "nix shell"
|
||||
registry.nixpkgs.flake = config.inputs.nixpkgs;
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,63 +0,0 @@
|
||||
{ config, lib, ... }:
|
||||
|
||||
let
|
||||
system = (import ./ssh.nix).system;
|
||||
in {
|
||||
networking.hosts = {
|
||||
# some DNS providers filter local ip results from DNS request
|
||||
"172.30.145.180" = [ "s0.zt.neet.dev" ];
|
||||
"172.30.109.9" = [ "ponyo.zt.neet.dev" ];
|
||||
"172.30.189.212" = [ "ray.zt.neet.dev" ];
|
||||
};
|
||||
|
||||
programs.ssh.knownHosts = {
|
||||
liza = {
|
||||
hostNames = [ "liza" "liza.neet.dev" ];
|
||||
publicKey = system.liza;
|
||||
};
|
||||
ponyo = {
|
||||
hostNames = [ "ponyo" "ponyo.neet.dev" "ponyo.zt.neet.dev" ];
|
||||
publicKey = system.ponyo;
|
||||
};
|
||||
ponyo-unlock = {
|
||||
hostNames = [ "unlock.ponyo.neet.dev" "cfamr6artx75qvt7ho3rrbsc7mkucmv5aawebwflsfuorusayacffryd.onion" ];
|
||||
publicKey = system.ponyo-unlock;
|
||||
};
|
||||
ray = {
|
||||
hostNames = [ "ray" "ray.zt.neet.dev" ];
|
||||
publicKey = system.ray;
|
||||
};
|
||||
s0 = {
|
||||
hostNames = [ "s0" "s0.zt.neet.dev" ];
|
||||
publicKey = system.s0;
|
||||
};
|
||||
n1 = {
|
||||
hostNames = [ "n1" ];
|
||||
publicKey = system.n1;
|
||||
};
|
||||
n2 = {
|
||||
hostNames = [ "n2" ];
|
||||
publicKey = system.n2;
|
||||
};
|
||||
n3 = {
|
||||
hostNames = [ "n3" ];
|
||||
publicKey = system.n3;
|
||||
};
|
||||
n4 = {
|
||||
hostNames = [ "n4" ];
|
||||
publicKey = system.n4;
|
||||
};
|
||||
n5 = {
|
||||
hostNames = [ "n5" ];
|
||||
publicKey = system.n5;
|
||||
};
|
||||
n6 = {
|
||||
hostNames = [ "n6" ];
|
||||
publicKey = system.n6;
|
||||
};
|
||||
n7 = {
|
||||
hostNames = [ "n7" ];
|
||||
publicKey = system.n7;
|
||||
};
|
||||
};
|
||||
}
|
||||
207
common/machine-info/default.nix
Normal file
207
common/machine-info/default.nix
Normal file
@@ -0,0 +1,207 @@
|
||||
# Gathers info about each machine to construct the overall configuration
|
||||
# Ex: Each machine already trusts each other's SSH fingerprints
|
||||
|
||||
{ config, lib, ... }:
|
||||
|
||||
let
|
||||
machines = config.machines.hosts;
|
||||
|
||||
hostOptionsSubmoduleType = lib.types.submodule {
|
||||
options = {
|
||||
hostNames = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
description = ''
|
||||
List of hostnames for this machine. The first one is the default so it is the target of deployments.
|
||||
Used for automatically trusting hosts for ssh connections.
|
||||
'';
|
||||
};
|
||||
arch = lib.mkOption {
|
||||
type = lib.types.enum [ "x86_64-linux" "aarch64-linux" ];
|
||||
description = ''
|
||||
The architecture of this machine.
|
||||
'';
|
||||
};
|
||||
systemRoles = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str; # TODO: maybe use an enum?
|
||||
description = ''
|
||||
The set of roles this machine holds. Affects secrets available. (TODO add service config as well using this info)
|
||||
'';
|
||||
};
|
||||
hostKey = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
The system ssh host key of this machine. Used for automatically trusting hosts for ssh connections
|
||||
and for decrypting secrets with agenix.
|
||||
'';
|
||||
};
|
||||
remoteUnlock = lib.mkOption {
|
||||
default = null;
|
||||
type = lib.types.nullOr (lib.types.submodule {
|
||||
options = {
|
||||
|
||||
hostKey = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
The system ssh host key of this machine used for luks boot unlocking only.
|
||||
'';
|
||||
};
|
||||
|
||||
clearnetHost = lib.mkOption {
|
||||
default = null;
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
description = ''
|
||||
The hostname resolvable over clearnet used to luks boot unlock this machine
|
||||
'';
|
||||
};
|
||||
|
||||
onionHost = lib.mkOption {
|
||||
default = null;
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
description = ''
|
||||
The hostname resolvable over tor used to luks boot unlock this machine
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
});
|
||||
};
|
||||
userKeys = lib.mkOption {
|
||||
default = [ ];
|
||||
type = lib.types.listOf lib.types.str;
|
||||
description = ''
|
||||
The list of user keys. Each key here can be used to log into all other systems as `googlebot`.
|
||||
|
||||
TODO: consider auto populating other programs that use ssh keys such as gitea
|
||||
'';
|
||||
};
|
||||
deployKeys = lib.mkOption {
|
||||
default = [ ];
|
||||
type = lib.types.listOf lib.types.str;
|
||||
description = ''
|
||||
The list of deployment keys. Each key here can be used to log into all other systems as `root`.
|
||||
'';
|
||||
};
|
||||
configurationPath = lib.mkOption {
|
||||
type = lib.types.path;
|
||||
description = ''
|
||||
The path to this machine's configuration directory.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
./ssh.nix
|
||||
./roles.nix
|
||||
];
|
||||
|
||||
options.machines = {
|
||||
hosts = lib.mkOption {
|
||||
type = lib.types.attrsOf hostOptionsSubmoduleType;
|
||||
};
|
||||
};
|
||||
|
||||
options.thisMachine.config = lib.mkOption {
|
||||
# For ease of use, a direct copy of the host config from machines.hosts.${hostName}
|
||||
type = hostOptionsSubmoduleType;
|
||||
};
|
||||
|
||||
config = {
|
||||
assertions = (lib.concatLists (lib.mapAttrsToList
|
||||
(
|
||||
name: cfg: [
|
||||
{
|
||||
assertion = builtins.length cfg.hostNames > 0;
|
||||
message = ''
|
||||
Error with config for ${name}
|
||||
There must be at least one hostname.
|
||||
'';
|
||||
}
|
||||
{
|
||||
assertion = builtins.length cfg.systemRoles > 0;
|
||||
message = ''
|
||||
Error with config for ${name}
|
||||
There must be at least one system role.
|
||||
'';
|
||||
}
|
||||
{
|
||||
assertion = cfg.remoteUnlock == null || cfg.remoteUnlock.hostKey != cfg.hostKey;
|
||||
message = ''
|
||||
Error with config for ${name}
|
||||
Unlock hostkey and hostkey cannot be the same because unlock hostkey is in /boot, unencrypted.
|
||||
'';
|
||||
}
|
||||
{
|
||||
assertion = cfg.remoteUnlock == null || (cfg.remoteUnlock.clearnetHost != null || cfg.remoteUnlock.onionHost != null);
|
||||
message = ''
|
||||
Error with config for ${name}
|
||||
At least one of clearnet host or onion host must be defined.
|
||||
'';
|
||||
}
|
||||
{
|
||||
assertion = cfg.remoteUnlock == null || cfg.remoteUnlock.clearnetHost == null || builtins.elem cfg.remoteUnlock.clearnetHost cfg.hostNames == false;
|
||||
message = ''
|
||||
Error with config for ${name}
|
||||
Clearnet unlock hostname cannot be in the list of hostnames for security reasons.
|
||||
'';
|
||||
}
|
||||
{
|
||||
assertion = cfg.remoteUnlock == null || cfg.remoteUnlock.onionHost == null || lib.strings.hasSuffix ".onion" cfg.remoteUnlock.onionHost;
|
||||
message = ''
|
||||
Error with config for ${name}
|
||||
Tor unlock hostname must be an onion address.
|
||||
'';
|
||||
}
|
||||
{
|
||||
assertion = builtins.elem "personal" cfg.systemRoles || builtins.length cfg.userKeys == 0;
|
||||
message = ''
|
||||
Error with config for ${name}
|
||||
There must be at least one userkey defined for personal machines.
|
||||
'';
|
||||
}
|
||||
{
|
||||
assertion = builtins.elem "deploy" cfg.systemRoles || builtins.length cfg.deployKeys == 0;
|
||||
message = ''
|
||||
Error with config for ${name}
|
||||
Only deploy machines are allowed to have deploy keys for security reasons.
|
||||
'';
|
||||
}
|
||||
]
|
||||
)
|
||||
machines));
|
||||
|
||||
# Set per machine properties automatically using each of their `properties.nix` files respectively
|
||||
machines.hosts =
|
||||
let
|
||||
properties = dir: lib.concatMapAttrs
|
||||
(name: path: {
|
||||
${name} =
|
||||
import path
|
||||
//
|
||||
{ configurationPath = builtins.dirOf path; };
|
||||
})
|
||||
(propertiesFiles dir);
|
||||
propertiesFiles = dir:
|
||||
lib.foldl (lib.mergeAttrs) { } (propertiesFiles' dir);
|
||||
propertiesFiles' = dir:
|
||||
let
|
||||
propFiles = lib.filter (p: baseNameOf p == "properties.nix") (lib.filesystem.listFilesRecursive dir);
|
||||
dirName = path: builtins.baseNameOf (builtins.dirOf path);
|
||||
in
|
||||
builtins.map (p: { "${dirName p}" = p; }) propFiles;
|
||||
in
|
||||
properties ../../machines;
|
||||
|
||||
# Don't try to evaluate "thisMachine" when reflecting using moduleless.nix.
|
||||
# When evaluated by moduleless.nix this will fail due to networking.hostName not
|
||||
# existing. This is because moduleless.nix is not intended for reflection from the
|
||||
# perspective of a particular machine but is instead intended for reflecting on
|
||||
# the properties of all machines as a whole system.
|
||||
thisMachine.config = config.machines.hosts.${config.networking.hostName};
|
||||
|
||||
# Add ssh keys from KeepassXC
|
||||
machines.ssh.userKeys = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILACiZO7QnB4bcmziVaUkUE0ZPMR0M/yJbbHYsHIZz9g" ];
|
||||
machines.ssh.deployKeys = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID58MvKGs3GDMMcN8Iyi9S59SciSrVM97wKtOvUAl3li" ];
|
||||
};
|
||||
}
|
||||
15
common/machine-info/moduleless.nix
Normal file
15
common/machine-info/moduleless.nix
Normal file
@@ -0,0 +1,15 @@
|
||||
# Allows getting machine-info outside the scope of nixos configuration
|
||||
|
||||
{ nixpkgs ? import <nixpkgs> { }
|
||||
, assertionsModule ? <nixpkgs/nixos/modules/misc/assertions.nix>
|
||||
}:
|
||||
|
||||
{
|
||||
machines =
|
||||
(nixpkgs.lib.evalModules {
|
||||
modules = [
|
||||
./default.nix
|
||||
assertionsModule
|
||||
];
|
||||
}).config.machines;
|
||||
}
|
||||
55
common/machine-info/roles.nix
Normal file
55
common/machine-info/roles.nix
Normal file
@@ -0,0 +1,55 @@
|
||||
{ config, lib, ... }:
|
||||
|
||||
# Maps roles to their hosts.
|
||||
# machines.withRole = {
|
||||
# personal = [
|
||||
# "machine1" "machine3"
|
||||
# ];
|
||||
# cache = [
|
||||
# "machine2"
|
||||
# ];
|
||||
# };
|
||||
#
|
||||
# A list of all possible roles
|
||||
# machines.allRoles = [
|
||||
# "personal"
|
||||
# "cache"
|
||||
# ];
|
||||
#
|
||||
# For each role has true or false if the current machine has that role
|
||||
# thisMachine.hasRole = {
|
||||
# personal = true;
|
||||
# cache = false;
|
||||
# };
|
||||
|
||||
{
|
||||
options.machines.withRole = lib.mkOption {
|
||||
type = lib.types.attrsOf (lib.types.listOf lib.types.str);
|
||||
};
|
||||
|
||||
options.machines.allRoles = lib.mkOption {
|
||||
type = lib.types.listOf lib.types.str;
|
||||
};
|
||||
|
||||
options.thisMachine.hasRole = lib.mkOption {
|
||||
type = lib.types.attrsOf lib.types.bool;
|
||||
};
|
||||
|
||||
config = {
|
||||
machines.withRole = lib.zipAttrs
|
||||
(lib.mapAttrsToList
|
||||
(host: cfg:
|
||||
lib.foldl (lib.mergeAttrs) { }
|
||||
(builtins.map (role: { ${role} = host; })
|
||||
cfg.systemRoles))
|
||||
config.machines.hosts);
|
||||
|
||||
machines.allRoles = lib.attrNames config.machines.withRole;
|
||||
|
||||
thisMachine.hasRole = lib.mapAttrs
|
||||
(role: cfg:
|
||||
builtins.elem config.networking.hostName config.machines.withRole.${role}
|
||||
)
|
||||
config.machines.withRole;
|
||||
};
|
||||
}
|
||||
44
common/machine-info/ssh.nix
Normal file
44
common/machine-info/ssh.nix
Normal file
@@ -0,0 +1,44 @@
|
||||
{ config, lib, ... }:

# Aggregates SSH key material declared per machine in `machines.hosts`
# into flat, cluster-wide option values under `machines.ssh`.

let
  machines = config.machines;

  # Concatenate the `keyType` list (e.g. "userKeys") from every known host,
  # in attribute order of `machines.hosts`.
  collectKeys = keyType:
    lib.concatMap (host: host.${keyType}) (builtins.attrValues machines.hosts);
in
{
  options.machines.ssh = {
    userKeys = lib.mkOption {
      type = lib.types.listOf lib.types.str;
      description = ''
        List of user keys aggregated from all machines.
      '';
    };

    deployKeys = lib.mkOption {
      default = [ ];
      type = lib.types.listOf lib.types.str;
      description = ''
        List of deploy keys aggregated from all machines.
      '';
    };

    hostKeysByRole = lib.mkOption {
      type = lib.types.attrsOf (lib.types.listOf lib.types.str);
      description = ''
        Machine host keys divided into their roles.
      '';
    };
  };

  config.machines.ssh = {
    userKeys = collectKeys "userKeys";
    deployKeys = collectKeys "deployKeys";

    # For every role, resolve its member host names to their host keys.
    hostKeysByRole = lib.mapAttrs
      (role: hosts: builtins.map (host: machines.hosts.${host}.hostKey) hosts)
      machines.withRole;
  };
}
|
||||
21
common/network/default.nix
Normal file
21
common/network/default.nix
Normal file
@@ -0,0 +1,21 @@
|
||||
{ config, lib, ... }:

# Network configuration entry point: pulls in the VPN, tailscale and sandbox
# modules and exposes a single switch for kernel IP forwarding.
{
  imports = [
    ./pia-vpn
    ./tailscale.nix
    ./sandbox.nix
  ];

  options.networking.ip_forward = lib.mkEnableOption "Enable ip forwarding";

  # When enabled, turn on forwarding for both address families.
  config = lib.mkIf config.networking.ip_forward {
    boot.kernel.sysctl = {
      "net.ipv4.ip_forward" = 1;
      "net.ipv6.conf.all.forwarding" = 1;
    };
  };
}
|
||||
89
common/network/pia-vpn/README.md
Normal file
89
common/network/pia-vpn/README.md
Normal file
@@ -0,0 +1,89 @@
|
||||
# PIA VPN Multi-Container Module
|
||||
|
||||
Routes service containers through a PIA WireGuard VPN using a shared bridge network.
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
internet
|
||||
│
|
||||
┌──────┴──────┐
|
||||
│ Host │
|
||||
│ tinyproxy │ ← PIA API bootstrap proxy
|
||||
│ 10.100.0.1 │
|
||||
└──────┬───────┘
|
||||
│ br-vpn (no IPMasquerade)
|
||||
┌────────────┼──────────────┐
|
||||
│ │ │
|
||||
┌──────┴──────┐ ┌───┴────┐ ┌─────┴──────┐
|
||||
│ VPN ctr │ │ servarr│ │transmission│
|
||||
│ 10.100.0.2 │ │ .11 │ │ .10 │
|
||||
│ piaw (WG) │ │ │ │ │
|
||||
│ gateway+NAT │ └────────┘ └────────────┘
|
||||
└─────────────┘
|
||||
```
|
||||
|
||||
- **Host** creates the WG interface (encrypted UDP stays in host netns) and runs tinyproxy on the bridge so the VPN container can bootstrap PIA auth before WG is up.
|
||||
- **VPN container** authenticates with PIA via the proxy, configures WG, sets up NAT (masquerade bridge→WG) and optional port forwarding DNAT.
|
||||
- **Service containers** default-route through the VPN container. No WG interface = no internet if VPN is down = leak-proof by topology.
|
||||
- **Host** reaches containers directly on the bridge for nginx reverse proxying.
|
||||
|
||||
## Key design decisions
|
||||
|
||||
- **Bridge, not veth pairs**: All containers share one bridge (`br-vpn`), so the VPN container can act as a single gateway. The host does NOT masquerade bridge traffic — only the VPN container does (through WG).
|
||||
- **Port forwarding is implicit**: If any container sets `receiveForwardedPort`, the VPN container automatically handles PIA port forwarding and DNAT. No separate toggle needed.
|
||||
- **DNS through WG**: Service containers use the VPN container as their DNS server. The VPN container runs `systemd-resolved` listening on its bridge IP, forwarding queries through the WG tunnel.
|
||||
- **Monthly renewal**: `pia-vpn-setup` uses `Type=simple` + `Restart=always` + `RuntimeMaxSec=30d` to periodically re-authenticate with PIA and get a fresh port forwarding signature (signatures expire after ~2 months). Service containers are unaffected during renewal.
|
||||
|
||||
## Files
|
||||
|
||||
| File | Purpose |
|
||||
|---|---|
|
||||
| `default.nix` | Options, bridge, tinyproxy, host firewall, WG interface creation, assertions |
|
||||
| `vpn-container.nix` | VPN container: PIA auth, WG config, NAT, DNAT, port refresh timer |
|
||||
| `service-container.nix` | Generates service containers with static IP and gateway→VPN |
|
||||
| `scripts.nix` | Bash function library for PIA API calls and WG configuration |
|
||||
| `ca.rsa.4096.crt` | PIA CA certificate for API TLS verification |
|
||||
|
||||
## Usage
|
||||
|
||||
```nix
|
||||
pia-vpn = {
|
||||
enable = true;
|
||||
serverLocation = "swiss";
|
||||
|
||||
containers.my-service = {
|
||||
ip = "10.100.0.10";
|
||||
mounts."/data".hostPath = "/data";
|
||||
config = { services.my-app.enable = true; };
|
||||
|
||||
# Optional: receive PIA's forwarded port (at most one container)
|
||||
receiveForwardedPort = { port = 8080; protocol = "both"; };
|
||||
onPortForwarded = ''
|
||||
echo "PIA assigned port $PORT, forwarding to $TARGET_IP:8080"
|
||||
'';
|
||||
};
|
||||
};
|
||||
```
|
||||
|
||||
## Debugging
|
||||
|
||||
```bash
|
||||
# Check VPN container status
|
||||
machinectl shell pia-vpn
|
||||
systemctl status pia-vpn-setup
|
||||
journalctl -u pia-vpn-setup
|
||||
|
||||
# Verify WG tunnel
|
||||
wg show
|
||||
|
||||
# Check NAT/DNAT rules
|
||||
iptables -t nat -L -v
|
||||
iptables -L FORWARD -v
|
||||
|
||||
# From a service container — verify VPN routing
|
||||
curl ifconfig.me
|
||||
|
||||
# Port refresh logs
|
||||
journalctl -u pia-vpn-port-refresh
|
||||
```
|
||||
43
common/network/pia-vpn/ca.rsa.4096.crt
Normal file
43
common/network/pia-vpn/ca.rsa.4096.crt
Normal file
@@ -0,0 +1,43 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIHqzCCBZOgAwIBAgIJAJ0u+vODZJntMA0GCSqGSIb3DQEBDQUAMIHoMQswCQYD
|
||||
VQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNVBAcTCkxvc0FuZ2VsZXMxIDAeBgNV
|
||||
BAoTF1ByaXZhdGUgSW50ZXJuZXQgQWNjZXNzMSAwHgYDVQQLExdQcml2YXRlIElu
|
||||
dGVybmV0IEFjY2VzczEgMB4GA1UEAxMXUHJpdmF0ZSBJbnRlcm5ldCBBY2Nlc3Mx
|
||||
IDAeBgNVBCkTF1ByaXZhdGUgSW50ZXJuZXQgQWNjZXNzMS8wLQYJKoZIhvcNAQkB
|
||||
FiBzZWN1cmVAcHJpdmF0ZWludGVybmV0YWNjZXNzLmNvbTAeFw0xNDA0MTcxNzQw
|
||||
MzNaFw0zNDA0MTIxNzQwMzNaMIHoMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0Ex
|
||||
EzARBgNVBAcTCkxvc0FuZ2VsZXMxIDAeBgNVBAoTF1ByaXZhdGUgSW50ZXJuZXQg
|
||||
QWNjZXNzMSAwHgYDVQQLExdQcml2YXRlIEludGVybmV0IEFjY2VzczEgMB4GA1UE
|
||||
AxMXUHJpdmF0ZSBJbnRlcm5ldCBBY2Nlc3MxIDAeBgNVBCkTF1ByaXZhdGUgSW50
|
||||
ZXJuZXQgQWNjZXNzMS8wLQYJKoZIhvcNAQkBFiBzZWN1cmVAcHJpdmF0ZWludGVy
|
||||
bmV0YWNjZXNzLmNvbTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALVk
|
||||
hjumaqBbL8aSgj6xbX1QPTfTd1qHsAZd2B97m8Vw31c/2yQgZNf5qZY0+jOIHULN
|
||||
De4R9TIvyBEbvnAg/OkPw8n/+ScgYOeH876VUXzjLDBnDb8DLr/+w9oVsuDeFJ9K
|
||||
V2UFM1OYX0SnkHnrYAN2QLF98ESK4NCSU01h5zkcgmQ+qKSfA9Ny0/UpsKPBFqsQ
|
||||
25NvjDWFhCpeqCHKUJ4Be27CDbSl7lAkBuHMPHJs8f8xPgAbHRXZOxVCpayZ2SND
|
||||
fCwsnGWpWFoMGvdMbygngCn6jA/W1VSFOlRlfLuuGe7QFfDwA0jaLCxuWt/BgZyl
|
||||
p7tAzYKR8lnWmtUCPm4+BtjyVDYtDCiGBD9Z4P13RFWvJHw5aapx/5W/CuvVyI7p
|
||||
Kwvc2IT+KPxCUhH1XI8ca5RN3C9NoPJJf6qpg4g0rJH3aaWkoMRrYvQ+5PXXYUzj
|
||||
tRHImghRGd/ydERYoAZXuGSbPkm9Y/p2X8unLcW+F0xpJD98+ZI+tzSsI99Zs5wi
|
||||
jSUGYr9/j18KHFTMQ8n+1jauc5bCCegN27dPeKXNSZ5riXFL2XX6BkY68y58UaNz
|
||||
meGMiUL9BOV1iV+PMb7B7PYs7oFLjAhh0EdyvfHkrh/ZV9BEhtFa7yXp8XR0J6vz
|
||||
1YV9R6DYJmLjOEbhU8N0gc3tZm4Qz39lIIG6w3FDAgMBAAGjggFUMIIBUDAdBgNV
|
||||
HQ4EFgQUrsRtyWJftjpdRM0+925Y6Cl08SUwggEfBgNVHSMEggEWMIIBEoAUrsRt
|
||||
yWJftjpdRM0+925Y6Cl08SWhge6kgeswgegxCzAJBgNVBAYTAlVTMQswCQYDVQQI
|
||||
EwJDQTETMBEGA1UEBxMKTG9zQW5nZWxlczEgMB4GA1UEChMXUHJpdmF0ZSBJbnRl
|
||||
cm5ldCBBY2Nlc3MxIDAeBgNVBAsTF1ByaXZhdGUgSW50ZXJuZXQgQWNjZXNzMSAw
|
||||
HgYDVQQDExdQcml2YXRlIEludGVybmV0IEFjY2VzczEgMB4GA1UEKRMXUHJpdmF0
|
||||
ZSBJbnRlcm5ldCBBY2Nlc3MxLzAtBgkqhkiG9w0BCQEWIHNlY3VyZUBwcml2YXRl
|
||||
aW50ZXJuZXRhY2Nlc3MuY29tggkAnS7684Nkme0wDAYDVR0TBAUwAwEB/zANBgkq
|
||||
hkiG9w0BAQ0FAAOCAgEAJsfhsPk3r8kLXLxY+v+vHzbr4ufNtqnL9/1Uuf8NrsCt
|
||||
pXAoyZ0YqfbkWx3NHTZ7OE9ZRhdMP/RqHQE1p4N4Sa1nZKhTKasV6KhHDqSCt/dv
|
||||
Em89xWm2MVA7nyzQxVlHa9AkcBaemcXEiyT19XdpiXOP4Vhs+J1R5m8zQOxZlV1G
|
||||
tF9vsXmJqWZpOVPmZ8f35BCsYPvv4yMewnrtAC8PFEK/bOPeYcKN50bol22QYaZu
|
||||
LfpkHfNiFTnfMh8sl/ablPyNY7DUNiP5DRcMdIwmfGQxR5WEQoHL3yPJ42LkB5zs
|
||||
6jIm26DGNXfwura/mi105+ENH1CaROtRYwkiHb08U6qLXXJz80mWJkT90nr8Asj3
|
||||
5xN2cUppg74nG3YVav/38P48T56hG1NHbYF5uOCske19F6wi9maUoto/3vEr0rnX
|
||||
JUp2KODmKdvBI7co245lHBABWikk8VfejQSlCtDBXn644ZMtAdoxKNfR2WTFVEwJ
|
||||
iyd1Fzx0yujuiXDROLhISLQDRjVVAvawrAtLZWYK31bY7KlezPlQnl/D9Asxe85l
|
||||
8jO5+0LdJ6VyOs/Hd4w52alDW/MFySDZSfQHMTIc30hLBJ8OnCEIvluVQQ2UQvoW
|
||||
+no177N9L2Y+M9TcTA62ZyMXShHQGeh20rb4kK8f+iFX8NxtdHVSkxMEFSfDDyQ=
|
||||
-----END CERTIFICATE-----
|
||||
279
common/network/pia-vpn/default.nix
Normal file
279
common/network/pia-vpn/default.nix
Normal file
@@ -0,0 +1,279 @@
|
||||
{ config, lib, pkgs, ... }:

# PIA VPN multi-container module.
#
# Architecture:
#   Host creates WG interface, runs tinyproxy on bridge for PIA API bootstrap.
#   VPN container does all PIA logic via proxy, configures WG, masquerades bridge→piaw.
#   Service containers default route → VPN container (leak-proof by topology).
#
# Reference: https://www.wireguard.com/netns/#ordinary-containerization

with lib;

let
  cfg = config.pia-vpn;

  # Derive prefix length from subnet CIDR (e.g. "10.100.0.0/24" → "24")
  subnetPrefixLen = last (splitString "/" cfg.subnet);

  # Per-container option schema for `pia-vpn.containers.<name>`.
  containerSubmodule = types.submodule ({ name, ... }: {
    options = {
      ip = mkOption {
        type = types.str;
        description = "Static IP address for this container on the VPN bridge";
      };

      config = mkOption {
        type = types.anything;
        default = { };
        description = "NixOS configuration for this container";
      };

      mounts = mkOption {
        type = types.attrsOf (types.submodule {
          options = {
            hostPath = mkOption {
              type = types.str;
              description = "Path on the host to bind mount";
            };
            isReadOnly = mkOption {
              type = types.bool;
              default = false;
              description = "Whether the mount is read-only";
            };
          };
        });
        default = { };
        description = "Bind mounts for the container";
      };

      receiveForwardedPort = mkOption {
        type = types.nullOr (types.submodule {
          options = {
            port = mkOption {
              type = types.nullOr types.port;
              default = null;
              description = ''
                Target port to forward to. If null, forwards to the same PIA-assigned port.
                PIA-assigned ports below 10000 are rejected to avoid accidentally
                forwarding traffic to other services.
              '';
            };
            protocol = mkOption {
              type = types.enum [ "tcp" "udp" "both" ];
              default = "both";
              description = "Protocol(s) to forward";
            };
          };
        });
        default = null;
        description = "Port forwarding configuration. At most one container may set this.";
      };

      onPortForwarded = mkOption {
        type = types.nullOr types.lines;
        default = null;
        description = ''
          Optional script run in the VPN container after port forwarding is established.
          Available environment variables: $PORT (PIA-assigned port), $TARGET_IP (this container's IP).
        '';
      };
    };
  });

  # NOTE: All derivations of cfg.containers are kept INSIDE config = mkIf ... { }
  # to avoid infinite recursion. The module system's pushDownProperties eagerly
  # evaluates let bindings and mkMerge contents, so any top-level let binding
  # that touches cfg.containers would force config evaluation during structure
  # discovery, creating a cycle.
in
{
  imports = [
    ./vpn-container.nix
    ./service-container.nix
  ];

  options.pia-vpn = {
    enable = mkEnableOption "PIA VPN multi-container setup";

    serverLocation = mkOption {
      type = types.str;
      default = "swiss";
      description = "PIA server region ID";
    };

    interfaceName = mkOption {
      type = types.str;
      default = "piaw";
      description = "WireGuard interface name";
    };

    wireguardListenPort = mkOption {
      type = types.port;
      default = 51820;
      description = "WireGuard listen port";
    };

    bridgeName = mkOption {
      type = types.str;
      default = "br-vpn";
      description = "Bridge interface name for VPN containers";
    };

    subnet = mkOption {
      type = types.str;
      default = "10.100.0.0/24";
      description = "Subnet CIDR for VPN bridge network";
    };

    hostAddress = mkOption {
      type = types.str;
      default = "10.100.0.1";
      description = "Host IP on the VPN bridge";
    };

    vpnAddress = mkOption {
      type = types.str;
      default = "10.100.0.2";
      description = "VPN container IP on the bridge";
    };

    proxyPort = mkOption {
      type = types.port;
      default = 8888;
      description = "Tinyproxy port for PIA API bootstrap";
    };

    containers = mkOption {
      type = types.attrsOf containerSubmodule;
      default = { };
      description = "Service containers that route through the VPN";
    };

    # Subnet prefix length derived from cfg.subnet (exposed for other submodules)
    # readOnly: only this default may define the value.
    subnetPrefixLen = mkOption {
      type = types.str;
      default = subnetPrefixLen;
      description = "Prefix length derived from subnet CIDR";
      readOnly = true;
    };

    # Shared host entries for all containers (host + VPN + service containers)
    containerHosts = mkOption {
      type = types.attrsOf (types.listOf types.str);
      internal = true;
      readOnly = true;
    };
  };

  config = mkIf cfg.enable {
    # Validate the container set before building anything from it.
    assertions =
      let
        forwardingContainers = filterAttrs (_: c: c.receiveForwardedPort != null) cfg.containers;
        containerIPs = mapAttrsToList (_: c: c.ip) cfg.containers;
      in
      [
        {
          assertion = length (attrNames forwardingContainers) <= 1;
          message = "At most one pia-vpn container may set receiveForwardedPort";
        }
        {
          assertion = length containerIPs == length (unique containerIPs);
          message = "pia-vpn container IPs must be unique";
        }
      ];

    # Enable systemd-networkd for bridge management
    systemd.network.enable = true;

    # TODO: re-enable once primary networking uses networkd
    systemd.network.wait-online.enable = false;

    # Tell NetworkManager to ignore VPN bridge and container interfaces
    networking.networkmanager.unmanaged = mkIf config.networking.networkmanager.enable [
      "interface-name:${cfg.bridgeName}"
      "interface-name:ve-*"
    ];

    # Bridge network device
    systemd.network.netdevs."20-${cfg.bridgeName}".netdevConfig = {
      Kind = "bridge";
      Name = cfg.bridgeName;
    };

    # Bridge network configuration — NO IPMasquerade (host must NOT be gateway)
    systemd.network.networks."20-${cfg.bridgeName}" = {
      matchConfig.Name = cfg.bridgeName;
      networkConfig = {
        Address = "${cfg.hostAddress}/${cfg.subnetPrefixLen}";
        DHCPServer = false;
      };
      linkConfig.RequiredForOnline = "no";
    };

    # Allow wireguard traffic through rpfilter
    networking.firewall.checkReversePath = "loose";

    # Block bridge → outside forwarding (prevents host from being a gateway for containers)
    networking.firewall.extraForwardRules = ''
      iifname "${cfg.bridgeName}" oifname != "${cfg.bridgeName}" drop
    '';

    # Allow tinyproxy from bridge (tinyproxy itself restricts to VPN container IP)
    networking.firewall.interfaces.${cfg.bridgeName}.allowedTCPPorts = [ cfg.proxyPort ];

    # Tinyproxy — runs on bridge IP so VPN container can bootstrap PIA auth
    services.tinyproxy = {
      enable = true;
      settings = {
        Listen = cfg.hostAddress;
        Port = cfg.proxyPort;
      };
    };
    # Make sure the proxy is listening before the VPN container starts.
    systemd.services.tinyproxy.before = [ "container@pia-vpn.service" ];

    # WireGuard interface creation (host-side oneshot)
    # Creates the interface in the host namespace so encrypted UDP stays in host netns.
    # The container takes ownership of the interface on startup via `interfaces = [ ... ]`.
    systemd.services.pia-vpn-wg-create = {
      description = "Create PIA VPN WireGuard interface";

      before = [ "container@pia-vpn.service" ];
      requiredBy = [ "container@pia-vpn.service" ];
      partOf = [ "container@pia-vpn.service" ];
      wantedBy = [ "multi-user.target" ];

      path = with pkgs; [ iproute2 ];

      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = true;
      };

      # Idempotent: exit 0 if the interface already exists.
      script = ''
        [[ -z $(ip link show dev ${cfg.interfaceName} 2>/dev/null) ]] || exit 0
        ip link add ${cfg.interfaceName} type wireguard
      '';

      preStop = ''
        ip link del ${cfg.interfaceName} 2>/dev/null || true
      '';
    };

    # Host entries for container hostnames — NixOS only auto-creates these for
    # hostAddress/localAddress containers, not hostBridge. Use the standard
    # {name}.containers convention.
    pia-vpn.containerHosts =
      { ${cfg.vpnAddress} = [ "pia-vpn.containers" ]; }
      // mapAttrs' (name: ctr: nameValuePair ctr.ip [ "${name}.containers" ]) cfg.containers;

    # Publish the shared entries in the host's /etc/hosts as well.
    networking.hosts = cfg.containerHosts;

    # PIA login secret (decrypted at runtime by agenix; the VPN container
    # reads it via its /run/agenix bind mount — see vpn-container.nix).
    age.secrets."pia-login.conf".file = ../../../secrets/pia-login.age;

    # IP forwarding needed for bridge traffic between containers
    networking.ip_forward = true;
  };
}
|
||||
209
common/network/pia-vpn/scripts.nix
Normal file
209
common/network/pia-vpn/scripts.nix
Normal file
@@ -0,0 +1,209 @@
|
||||
let
  # PIA CA certificate shipped alongside this module; interpolated into curl
  # --cacert arguments below (Nix copies it into the store at eval time).
  caPath = ./ca.rsa.4096.crt;
in

# Bash function library for PIA VPN WireGuard operations.
# All PIA API calls accept an optional $proxy variable:
#   proxy="http://10.100.0.1:8888" fetchPIAToken
# When $proxy is set, curl uses --proxy "$proxy"; otherwise direct connection.

# Reference materials:
# https://serverlist.piaservers.net/vpninfo/servers/v6
# https://github.com/pia-foss/manual-connections
# https://github.com/thrnz/docker-wireguard-pia/blob/master/extra/wg-gen.sh
# https://www.wireguard.com/netns/#ordinary-containerization

{
  # scriptCommon is a plain string of bash function definitions meant to be
  # sourced/prepended into the VPN container's service scripts. The functions
  # communicate through global shell variables rather than return values:
  #
  #   fetchPIAToken            → sets PIA_TOKEN (reads /run/agenix/pia-login.conf)
  #   choosePIAServer <region> → sets WG_HOSTNAME, WG_SERVER_IP, WG_SERVER_PORT
  #   generateWireguardKey     → sets PRIVATE_KEY, PUBLIC_KEY
  #   authorizeKeyWithPIAServer→ sets MY_IP, WG_SERVER_PUBLIC_KEY, WG_SERVER_PORT
  #   writeWireguardQuickFile / writeChosenServerToFile / writePortRenewalFile
  #                            → persist state to the given file path
  #   loadChosenServerFromFile / readPortRenewalFile
  #                            → restore the corresponding globals from disk
  #   cleanupVpn <iface>       → resets the WG interface and flushes nat/FORWARD
  #   connectToServer <wgFile> <iface> → applies config and brings the link up
  #   reservePortForward       → sets PORT_SIGNATURE, PORT_PAYLOAD, PORT
  #   refreshPIAPort           → re-binds the forwarded port (logs the response)
  #
  # NOTE(review): functions that talk to the WG server (addKey/getSignature/
  # bindPort) intentionally do NOT use proxy_args for the data path — only
  # reservePortForward/refreshPIAPort omit it entirely, presumably because they
  # must go through the established tunnel; confirm before changing.
  scriptCommon = ''
    proxy_args() {
      if [[ -n "''${proxy:-}" ]]; then
        echo "--proxy $proxy"
      fi
    }

    fetchPIAToken() {
      local PIA_USER PIA_PASS resp
      echo "Reading PIA credentials..."
      PIA_USER=$(sed '1q;d' /run/agenix/pia-login.conf)
      PIA_PASS=$(sed '2q;d' /run/agenix/pia-login.conf)
      echo "Requesting PIA authentication token..."
      resp=$(curl -s $(proxy_args) -u "$PIA_USER:$PIA_PASS" \
        "https://www.privateinternetaccess.com/gtoken/generateToken")
      PIA_TOKEN=$(echo "$resp" | jq -r '.token')
      if [[ -z "$PIA_TOKEN" || "$PIA_TOKEN" == "null" ]]; then
        echo "ERROR: Failed to fetch PIA token: $resp" >&2
        return 1
      fi
      echo "PIA token acquired"
    }

    choosePIAServer() {
      local serverLocation=$1
      local servers servers_json totalservers serverindex
      servers=$(mktemp)
      servers_json=$(mktemp)
      echo "Fetching PIA server list..."
      curl -s $(proxy_args) \
        "https://serverlist.piaservers.net/vpninfo/servers/v6" > "$servers"
      head -n 1 "$servers" | tr -d '\n' > "$servers_json"

      totalservers=$(jq -r \
        '.regions | .[] | select(.id=="'"$serverLocation"'") | .servers.wg | length' \
        "$servers_json")
      if ! [[ "$totalservers" =~ ^[0-9]+$ ]] || [ "$totalservers" -eq 0 ] 2>/dev/null; then
        echo "ERROR: Location \"$serverLocation\" not found." >&2
        rm -f "$servers_json" "$servers"
        return 1
      fi
      echo "Found $totalservers WireGuard servers in region '$serverLocation'"
      serverindex=$(( RANDOM % totalservers ))

      WG_HOSTNAME=$(jq -r \
        '.regions | .[] | select(.id=="'"$serverLocation"'") | .servers.wg | .['"$serverindex"'].cn' \
        "$servers_json")
      WG_SERVER_IP=$(jq -r \
        '.regions | .[] | select(.id=="'"$serverLocation"'") | .servers.wg | .['"$serverindex"'].ip' \
        "$servers_json")
      WG_SERVER_PORT=$(jq -r '.groups.wg | .[0] | .ports | .[0]' "$servers_json")

      rm -f "$servers_json" "$servers"
      echo "Selected server $serverindex/$totalservers: $WG_HOSTNAME ($WG_SERVER_IP:$WG_SERVER_PORT)"
    }

    generateWireguardKey() {
      PRIVATE_KEY=$(wg genkey)
      PUBLIC_KEY=$(echo "$PRIVATE_KEY" | wg pubkey)
      echo "Generated WireGuard keypair"
    }

    authorizeKeyWithPIAServer() {
      local addKeyResponse
      echo "Sending addKey request to $WG_HOSTNAME ($WG_SERVER_IP:$WG_SERVER_PORT)..."
      addKeyResponse=$(curl -s -G $(proxy_args) \
        --connect-to "$WG_HOSTNAME::$WG_SERVER_IP:" \
        --cacert "${caPath}" \
        --data-urlencode "pt=$PIA_TOKEN" \
        --data-urlencode "pubkey=$PUBLIC_KEY" \
        "https://$WG_HOSTNAME:$WG_SERVER_PORT/addKey")
      local status
      status=$(echo "$addKeyResponse" | jq -r '.status')
      if [[ "$status" != "OK" ]]; then
        echo "ERROR: addKey failed: $addKeyResponse" >&2
        return 1
      fi
      MY_IP=$(echo "$addKeyResponse" | jq -r '.peer_ip')
      WG_SERVER_PUBLIC_KEY=$(echo "$addKeyResponse" | jq -r '.server_key')
      WG_SERVER_PORT=$(echo "$addKeyResponse" | jq -r '.server_port')
      echo "Key authorized — assigned VPN IP: $MY_IP, server port: $WG_SERVER_PORT"
    }

    writeWireguardQuickFile() {
      local wgFile=$1
      local listenPort=$2
      rm -f "$wgFile"
      touch "$wgFile"
      chmod 700 "$wgFile"
      cat > "$wgFile" <<WGEOF
    [Interface]
    PrivateKey = $PRIVATE_KEY
    ListenPort = $listenPort
    [Peer]
    PersistentKeepalive = 25
    PublicKey = $WG_SERVER_PUBLIC_KEY
    AllowedIPs = 0.0.0.0/0
    Endpoint = $WG_SERVER_IP:$WG_SERVER_PORT
    WGEOF
      echo "Wrote WireGuard config to $wgFile (listen=$listenPort)"
    }

    writeChosenServerToFile() {
      local serverFile=$1
      jq -n \
        --arg hostname "$WG_HOSTNAME" \
        --arg ip "$WG_SERVER_IP" \
        --arg port "$WG_SERVER_PORT" \
        '{hostname: $hostname, ip: $ip, port: $port}' > "$serverFile"
      chmod 700 "$serverFile"
      echo "Wrote server info to $serverFile"
    }

    loadChosenServerFromFile() {
      local serverFile=$1
      WG_HOSTNAME=$(jq -r '.hostname' "$serverFile")
      WG_SERVER_IP=$(jq -r '.ip' "$serverFile")
      WG_SERVER_PORT=$(jq -r '.port' "$serverFile")
      echo "Loaded server info from $serverFile: $WG_HOSTNAME ($WG_SERVER_IP:$WG_SERVER_PORT)"
    }

    # Reset WG interface and tear down NAT/forwarding rules.
    # Called on startup (clear stale state) and on exit via trap.
    cleanupVpn() {
      local interfaceName=$1
      wg set "$interfaceName" listen-port 0 2>/dev/null || true
      ip -4 address flush dev "$interfaceName" 2>/dev/null || true
      ip route del default dev "$interfaceName" 2>/dev/null || true
      iptables -t nat -F 2>/dev/null || true
      iptables -F FORWARD 2>/dev/null || true
    }

    connectToServer() {
      local wgFile=$1
      local interfaceName=$2

      echo "Applying WireGuard config to $interfaceName..."
      wg setconf "$interfaceName" "$wgFile"
      ip -4 address add "$MY_IP" dev "$interfaceName"
      ip link set mtu 1420 up dev "$interfaceName"
      echo "WireGuard interface $interfaceName is up with IP $MY_IP"
    }

    reservePortForward() {
      local payload_and_signature
      echo "Requesting port forward signature from $WG_HOSTNAME..."
      payload_and_signature=$(curl -s -m 5 \
        --connect-to "$WG_HOSTNAME::$WG_SERVER_IP:" \
        --cacert "${caPath}" \
        -G --data-urlencode "token=$PIA_TOKEN" \
        "https://$WG_HOSTNAME:19999/getSignature")
      local status
      status=$(echo "$payload_and_signature" | jq -r '.status')
      if [[ "$status" != "OK" ]]; then
        echo "ERROR: getSignature failed: $payload_and_signature" >&2
        return 1
      fi
      PORT_SIGNATURE=$(echo "$payload_and_signature" | jq -r '.signature')
      PORT_PAYLOAD=$(echo "$payload_and_signature" | jq -r '.payload')
      PORT=$(echo "$PORT_PAYLOAD" | base64 -d | jq -r '.port')
      echo "Port forward reserved: port $PORT"
    }

    writePortRenewalFile() {
      local portRenewalFile=$1
      jq -n \
        --arg signature "$PORT_SIGNATURE" \
        --arg payload "$PORT_PAYLOAD" \
        '{signature: $signature, payload: $payload}' > "$portRenewalFile"
      chmod 700 "$portRenewalFile"
      echo "Wrote port renewal data to $portRenewalFile"
    }

    readPortRenewalFile() {
      local portRenewalFile=$1
      PORT_SIGNATURE=$(jq -r '.signature' "$portRenewalFile")
      PORT_PAYLOAD=$(jq -r '.payload' "$portRenewalFile")
      echo "Loaded port renewal data from $portRenewalFile"
    }

    refreshPIAPort() {
      local bindPortResponse
      echo "Refreshing port forward binding with $WG_HOSTNAME..."
      bindPortResponse=$(curl -Gs -m 5 \
        --connect-to "$WG_HOSTNAME::$WG_SERVER_IP:" \
        --cacert "${caPath}" \
        --data-urlencode "payload=$PORT_PAYLOAD" \
        --data-urlencode "signature=$PORT_SIGNATURE" \
        "https://$WG_HOSTNAME:19999/bindPort")
      echo "bindPort response: $bindPortResponse"
    }
  '';
}
|
||||
87
common/network/pia-vpn/service-container.nix
Normal file
87
common/network/pia-vpn/service-container.nix
Normal file
@@ -0,0 +1,87 @@
|
||||
{ config, lib, allModules, ... }:

# Generates service containers that route all traffic through the VPN container.
# Each container gets a static IP on the VPN bridge with default route → VPN container.
#
# Uses lazy mapAttrs inside fixed config keys to avoid infinite recursion.
# (mkMerge + mapAttrsToList at the top level forces eager evaluation of cfg.containers
# during module structure discovery, which creates a cycle with config evaluation.)

with lib;

let
  cfg = config.pia-vpn;

  # Build the `containers.<name>` definition for one entry of
  # `pia-vpn.containers`. `name` is the attr name, `ctr` the submodule value
  # (ip / mounts / config options declared in ./default.nix).
  mkContainer = name: ctr: {
    autoStart = true;
    ephemeral = true;
    privateNetwork = true;
    hostBridge = cfg.bridgeName;

    # Translate the module's `mounts` option into nixos-container bindMounts.
    bindMounts = mapAttrs
      (_: mount: {
        hostPath = mount.hostPath;
        isReadOnly = mount.isReadOnly;
      })
      ctr.mounts;

    config = { config, pkgs, lib, ... }: {
      imports = allModules ++ [ ctr.config ];

      # Static IP with gateway pointing to VPN container
      networking.useNetworkd = true;
      systemd.network.enable = true;
      networking.useDHCP = false;

      systemd.network.networks."20-eth0" = {
        matchConfig.Name = "eth0";
        networkConfig = {
          Address = "${ctr.ip}/${cfg.subnetPrefixLen}";
          Gateway = cfg.vpnAddress;
          DNS = [ cfg.vpnAddress ];
        };
      };

      # Same shared /etc/hosts entries the host and VPN container use.
      networking.hosts = cfg.containerHosts;

      # DNS through VPN container (queries go through WG tunnel = no DNS leak)
      networking.nameservers = [ cfg.vpnAddress ];

      # Wait for actual VPN connectivity before network-online.target.
      # Without this, services start before the VPN tunnel is ready and failures
      # can't be reported to ntfy (no outbound connectivity yet).
      systemd.services.wait-for-vpn = {
        description = "Wait for VPN connectivity";
        before = [ "network-online.target" ];
        wantedBy = [ "network-online.target" ];
        after = [ "systemd-networkd-wait-online.service" ];
        serviceConfig.Type = "oneshot";
        path = [ pkgs.iputils ];
        # Polls a public IP once per second until a ping succeeds.
        # NOTE(review): no overall timeout — relies on systemd's default
        # start timeout to bound this loop; confirm that is intended.
        script = ''
          until ping -c1 -W2 1.1.1.1 >/dev/null 2>&1; do
            echo "Waiting for VPN connectivity..."
            sleep 1
          done
        '';
      };

      # Trust the bridge interface (host reaches us directly for nginx)
      networking.firewall.trustedInterfaces = [ "eth0" ];

      # Disable host resolv.conf — we use our own networkd DNS config
      networking.useHostResolvConf = false;
    };
  };

  # Unit ordering: every service container starts after — and is stopped and
  # restarted together with — the VPN container's unit.
  mkContainerOrdering = name: _ctr: nameValuePair "container@${name}" {
    after = [ "container@pia-vpn.service" ];
    requires = [ "container@pia-vpn.service" ];
    partOf = [ "container@pia-vpn.service" ];
  };
in
{
  config = mkIf cfg.enable {
    containers = mapAttrs mkContainer cfg.containers;
    systemd.services = mapAttrs' mkContainerOrdering cfg.containers;
  };
}
|
||||
232
common/network/pia-vpn/vpn-container.nix
Normal file
232
common/network/pia-vpn/vpn-container.nix
Normal file
@@ -0,0 +1,232 @@
|
||||
{ config, lib, allModules, ... }:
|
||||
|
||||
# VPN container: runs all PIA logic, acts as WireGuard gateway + NAT for service containers.
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.pia-vpn;
|
||||
scripts = import ./scripts.nix;
|
||||
|
||||
# Port forwarding derived state
|
||||
forwardingContainers = filterAttrs (_: c: c.receiveForwardedPort != null) cfg.containers;
|
||||
portForwarding = forwardingContainers != { };
|
||||
forwardingContainerName = if portForwarding then head (attrNames forwardingContainers) else null;
|
||||
forwardingContainer = if portForwarding then forwardingContainers.${forwardingContainerName} else null;
|
||||
|
||||
serverFile = "/var/lib/pia-vpn/server.json";
|
||||
wgFile = "/var/lib/pia-vpn/wg.conf";
|
||||
portRenewalFile = "/var/lib/pia-vpn/port-renewal.json";
|
||||
proxy = "http://${cfg.hostAddress}:${toString cfg.proxyPort}";
|
||||
|
||||
# DNAT/forwarding rules for port forwarding
|
||||
dnatSetupScript = optionalString portForwarding (
|
||||
let
|
||||
fwd = forwardingContainer.receiveForwardedPort;
|
||||
targetIp = forwardingContainer.ip;
|
||||
dnatTarget = if fwd.port != null then "${targetIp}:${toString fwd.port}" else targetIp;
|
||||
targetPort = if fwd.port != null then toString fwd.port else "$PORT";
|
||||
tcpRules = optionalString (fwd.protocol == "tcp" || fwd.protocol == "both") ''
|
||||
echo "Setting up TCP DNAT: port $PORT → ${targetIp}:${targetPort}"
|
||||
iptables -t nat -A PREROUTING -i ${cfg.interfaceName} -p tcp --dport $PORT -j DNAT --to ${dnatTarget}
|
||||
iptables -A FORWARD -i ${cfg.interfaceName} -d ${targetIp} -p tcp --dport ${targetPort} -j ACCEPT
|
||||
'';
|
||||
udpRules = optionalString (fwd.protocol == "udp" || fwd.protocol == "both") ''
|
||||
echo "Setting up UDP DNAT: port $PORT → ${targetIp}:${targetPort}"
|
||||
iptables -t nat -A PREROUTING -i ${cfg.interfaceName} -p udp --dport $PORT -j DNAT --to ${dnatTarget}
|
||||
iptables -A FORWARD -i ${cfg.interfaceName} -d ${targetIp} -p udp --dport ${targetPort} -j ACCEPT
|
||||
'';
|
||||
onPortForwarded = optionalString (forwardingContainer.onPortForwarded != null) ''
|
||||
TARGET_IP="${targetIp}"
|
||||
export PORT TARGET_IP
|
||||
echo "Running onPortForwarded hook for ${forwardingContainerName} (port=$PORT, target=$TARGET_IP)"
|
||||
${forwardingContainer.onPortForwarded}
|
||||
'';
|
||||
in
|
||||
''
|
||||
if [ "$PORT" -lt 10000 ]; then
|
||||
echo "ERROR: PIA assigned low port $PORT (< 10000), refusing to set up DNAT" >&2
|
||||
else
|
||||
${tcpRules}
|
||||
${udpRules}
|
||||
${onPortForwarded}
|
||||
fi
|
||||
''
|
||||
);
|
||||
in
|
||||
{
|
||||
config = mkIf cfg.enable {
|
||||
# Give the container more time to boot (pia-vpn-setup retries can delay readiness)
|
||||
systemd.services."container@pia-vpn".serviceConfig.TimeoutStartSec = mkForce "180s";
|
||||
|
||||
containers.pia-vpn = {
|
||||
autoStart = true;
|
||||
ephemeral = true;
|
||||
privateNetwork = true;
|
||||
hostBridge = cfg.bridgeName;
|
||||
interfaces = [ cfg.interfaceName ];
|
||||
|
||||
bindMounts."/run/agenix" = {
|
||||
hostPath = "/run/agenix";
|
||||
isReadOnly = true;
|
||||
};
|
||||
|
||||
config = { config, pkgs, lib, ... }:
|
||||
let
|
||||
scriptPkgs = with pkgs; [ wireguard-tools iproute2 curl jq iptables coreutils ];
|
||||
in
|
||||
{
|
||||
imports = allModules;
|
||||
|
||||
networking.hosts = cfg.containerHosts;
|
||||
|
||||
# Static IP on bridge — no gateway (VPN container routes via WG only)
|
||||
networking.useNetworkd = true;
|
||||
systemd.network.enable = true;
|
||||
networking.useDHCP = false;
|
||||
|
||||
systemd.network.networks."20-eth0" = {
|
||||
matchConfig.Name = "eth0";
|
||||
networkConfig = {
|
||||
Address = "${cfg.vpnAddress}/${cfg.subnetPrefixLen}";
|
||||
DHCPServer = false;
|
||||
};
|
||||
};
|
||||
|
||||
# Ignore WG interface for wait-online (it's configured manually, not by networkd)
|
||||
systemd.network.wait-online.ignoredInterfaces = [ cfg.interfaceName ];
|
||||
|
||||
# Route ntfy alerts through the host proxy (VPN container has no gateway on eth0)
|
||||
ntfy-alerts.curlExtraArgs = "--proxy http://${cfg.hostAddress}:${toString cfg.proxyPort}";
|
||||
|
||||
# Enable forwarding so bridge traffic can go through WG
|
||||
boot.kernel.sysctl."net.ipv4.ip_forward" = 1;
|
||||
|
||||
# Trust bridge interface
|
||||
networking.firewall.trustedInterfaces = [ "eth0" ];
|
||||
|
||||
# DNS: use systemd-resolved listening on bridge IP so service containers
|
||||
# can use VPN container as DNS server (queries go through WG tunnel = no DNS leak)
|
||||
services.resolved = {
|
||||
enable = true;
|
||||
settings.Resolve.DNSStubListenerExtra = cfg.vpnAddress;
|
||||
};
|
||||
|
||||
# Don't use host resolv.conf — resolved manages DNS
|
||||
networking.useHostResolvConf = false;
|
||||
|
||||
# State directory for PIA config files
|
||||
systemd.tmpfiles.rules = [
|
||||
"d /var/lib/pia-vpn 0700 root root -"
|
||||
];
|
||||
|
||||
# PIA VPN setup service — does all the PIA auth, WG config, and NAT setup
|
||||
systemd.services.pia-vpn-setup = {
|
||||
description = "PIA VPN WireGuard Setup";
|
||||
|
||||
wants = [ "network-online.target" ];
|
||||
after = [ "network.target" "network-online.target" "systemd-networkd.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
path = scriptPkgs;
|
||||
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
Restart = "always";
|
||||
RestartSec = "10s";
|
||||
RuntimeMaxSec = "30d";
|
||||
};
|
||||
|
||||
script = ''
|
||||
set -euo pipefail
|
||||
${scripts.scriptCommon}
|
||||
|
||||
trap 'cleanupVpn ${cfg.interfaceName}' EXIT
|
||||
cleanupVpn ${cfg.interfaceName}
|
||||
|
||||
proxy="${proxy}"
|
||||
|
||||
# 1. Authenticate with PIA via proxy (VPN container has no internet yet)
|
||||
echo "Choosing PIA server in region '${cfg.serverLocation}'..."
|
||||
choosePIAServer '${cfg.serverLocation}'
|
||||
|
||||
echo "Fetching PIA authentication token..."
|
||||
fetchPIAToken
|
||||
|
||||
# 2. Generate WG keys and authorize with PIA server
|
||||
echo "Generating WireGuard keypair..."
|
||||
generateWireguardKey
|
||||
|
||||
echo "Authorizing key with PIA server $WG_HOSTNAME..."
|
||||
authorizeKeyWithPIAServer
|
||||
|
||||
# 3. Configure WG interface (already created by host and moved into our namespace)
|
||||
echo "Configuring WireGuard interface ${cfg.interfaceName}..."
|
||||
writeWireguardQuickFile '${wgFile}' ${toString cfg.wireguardListenPort}
|
||||
writeChosenServerToFile '${serverFile}'
|
||||
connectToServer '${wgFile}' '${cfg.interfaceName}'
|
||||
|
||||
# 4. Default route through WG
|
||||
ip route replace default dev ${cfg.interfaceName}
|
||||
echo "Default route set through ${cfg.interfaceName}"
|
||||
|
||||
# 5. NAT: masquerade bridge → WG (so service containers' traffic appears to come from VPN IP)
|
||||
echo "Setting up NAT masquerade..."
|
||||
iptables -t nat -A POSTROUTING -o ${cfg.interfaceName} -j MASQUERADE
|
||||
iptables -A FORWARD -i eth0 -o ${cfg.interfaceName} -j ACCEPT
|
||||
iptables -A FORWARD -i ${cfg.interfaceName} -o eth0 -m state --state RELATED,ESTABLISHED -j ACCEPT
|
||||
|
||||
${optionalString portForwarding ''
|
||||
# 6. Port forwarding setup
|
||||
echo "Reserving port forward..."
|
||||
reservePortForward
|
||||
writePortRenewalFile '${portRenewalFile}'
|
||||
|
||||
# First bindPort triggers actual port allocation
|
||||
echo "Binding port $PORT..."
|
||||
refreshPIAPort
|
||||
|
||||
echo "PIA assigned port: $PORT"
|
||||
|
||||
# DNAT rules to forward PIA port to target container
|
||||
${dnatSetupScript}
|
||||
''}
|
||||
|
||||
echo "PIA VPN setup complete"
|
||||
exec sleep infinity
|
||||
'';
|
||||
|
||||
};
|
||||
|
||||
# Port refresh timer (every 10 min) — keeps PIA port forwarding alive
|
||||
systemd.services.pia-vpn-port-refresh = mkIf portForwarding {
|
||||
description = "PIA VPN Port Forward Refresh";
|
||||
after = [ "pia-vpn-setup.service" ];
|
||||
requires = [ "pia-vpn-setup.service" ];
|
||||
|
||||
path = scriptPkgs;
|
||||
|
||||
serviceConfig.Type = "oneshot";
|
||||
|
||||
script = ''
|
||||
set -euo pipefail
|
||||
${scripts.scriptCommon}
|
||||
loadChosenServerFromFile '${serverFile}'
|
||||
readPortRenewalFile '${portRenewalFile}'
|
||||
echo "Refreshing PIA port forward..."
|
||||
refreshPIAPort
|
||||
'';
|
||||
};
|
||||
|
||||
systemd.timers.pia-vpn-port-refresh = mkIf portForwarding {
|
||||
partOf = [ "pia-vpn-port-refresh.service" ];
|
||||
wantedBy = [ "timers.target" ];
|
||||
timerConfig = {
|
||||
OnCalendar = "*:0/10";
|
||||
RandomizedDelaySec = "1m";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
126
common/network/sandbox.nix
Normal file
126
common/network/sandbox.nix
Normal file
@@ -0,0 +1,126 @@
|
||||
{ config, lib, ... }:
|
||||
|
||||
# Network configuration for sandboxed workspaces (VMs and containers)
|
||||
# Creates a bridge network with NAT for isolated environments
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.networking.sandbox;
|
||||
in
|
||||
{
|
||||
options.networking.sandbox = {
|
||||
enable = mkEnableOption "sandboxed workspace network bridge";
|
||||
|
||||
bridgeName = mkOption {
|
||||
type = types.str;
|
||||
default = "sandbox-br";
|
||||
description = "Name of the bridge interface for sandboxed workspaces";
|
||||
};
|
||||
|
||||
subnet = mkOption {
|
||||
type = types.str;
|
||||
default = "192.168.83.0/24";
|
||||
description = "Subnet for sandboxed workspace network";
|
||||
};
|
||||
|
||||
hostAddress = mkOption {
|
||||
type = types.str;
|
||||
default = "192.168.83.1";
|
||||
description = "Host address on the sandbox bridge";
|
||||
};
|
||||
|
||||
upstreamInterface = mkOption {
|
||||
type = types.str;
|
||||
description = "Upstream network interface for NAT";
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
networking.ip_forward = true;
|
||||
|
||||
# Create the bridge interface
|
||||
systemd.network.netdevs."10-${cfg.bridgeName}" = {
|
||||
netdevConfig = {
|
||||
Kind = "bridge";
|
||||
Name = cfg.bridgeName;
|
||||
};
|
||||
};
|
||||
|
||||
systemd.network.networks."10-${cfg.bridgeName}" = {
|
||||
matchConfig.Name = cfg.bridgeName;
|
||||
networkConfig = {
|
||||
Address = "${cfg.hostAddress}/24";
|
||||
DHCPServer = false;
|
||||
IPv4Forwarding = true;
|
||||
IPv6Forwarding = false;
|
||||
IPMasquerade = "ipv4";
|
||||
};
|
||||
linkConfig.RequiredForOnline = "no";
|
||||
};
|
||||
|
||||
# Automatically attach VM tap interfaces to the bridge
|
||||
systemd.network.networks."11-vm" = {
|
||||
matchConfig.Name = "vm-*";
|
||||
networkConfig.Bridge = cfg.bridgeName;
|
||||
linkConfig.RequiredForOnline = "no";
|
||||
};
|
||||
|
||||
# Automatically attach container veth interfaces to the bridge
|
||||
systemd.network.networks."11-container" = {
|
||||
matchConfig.Name = "ve-*";
|
||||
networkConfig.Bridge = cfg.bridgeName;
|
||||
linkConfig.RequiredForOnline = "no";
|
||||
};
|
||||
|
||||
# NAT configuration for sandboxed workspaces
|
||||
networking.nat = {
|
||||
enable = true;
|
||||
internalInterfaces = [ cfg.bridgeName ];
|
||||
externalInterface = cfg.upstreamInterface;
|
||||
};
|
||||
|
||||
# Enable systemd-networkd (required for bridge setup)
|
||||
systemd.network.enable = true;
|
||||
|
||||
# When NetworkManager handles primary networking, disable systemd-networkd-wait-online.
|
||||
# The bridge is the only interface managed by systemd-networkd and it never reaches
|
||||
# "online" state without connected workspaces. NetworkManager-wait-online.service already
|
||||
# gates network-online.target for the primary interface.
|
||||
# On pure systemd-networkd systems (no NM), we just ignore the bridge.
|
||||
systemd.network.wait-online.enable =
|
||||
!config.networking.networkmanager.enable;
|
||||
systemd.network.wait-online.ignoredInterfaces =
|
||||
lib.mkIf (!config.networking.networkmanager.enable) [ cfg.bridgeName ];
|
||||
|
||||
# If NetworkManager is enabled, tell it to ignore sandbox interfaces
|
||||
# This allows systemd-networkd and NetworkManager to coexist
|
||||
networking.networkmanager.unmanaged = [
|
||||
"interface-name:${cfg.bridgeName}"
|
||||
"interface-name:vm-*"
|
||||
"interface-name:ve-*"
|
||||
"interface-name:veth*"
|
||||
];
|
||||
|
||||
# Make systemd-resolved listen on the bridge for workspace DNS queries.
|
||||
# By default resolved only listens on 127.0.0.53 (localhost).
|
||||
# DNSStubListenerExtra adds the bridge address so workspaces can use the host as DNS.
|
||||
services.resolved.settings.Resolve.DNSStubListenerExtra = cfg.hostAddress;
|
||||
|
||||
# Allow DNS traffic from workspaces to the host
|
||||
networking.firewall.interfaces.${cfg.bridgeName} = {
|
||||
allowedTCPPorts = [ 53 ];
|
||||
allowedUDPPorts = [ 53 ];
|
||||
};
|
||||
|
||||
# Block sandboxes from reaching the local network (private RFC1918 ranges)
|
||||
# while still allowing public internet access via NAT.
|
||||
# The sandbox subnet itself is allowed so workspaces can reach the host gateway.
|
||||
networking.firewall.extraForwardRules = ''
|
||||
iifname ${cfg.bridgeName} ip daddr ${cfg.hostAddress} accept
|
||||
iifname ${cfg.bridgeName} ip daddr 10.0.0.0/8 drop
|
||||
iifname ${cfg.bridgeName} ip daddr 172.16.0.0/12 drop
|
||||
iifname ${cfg.bridgeName} ip daddr 192.168.0.0/16 drop
|
||||
'';
|
||||
};
|
||||
}
|
||||
24
common/network/tailscale.nix
Normal file
24
common/network/tailscale.nix
Normal file
@@ -0,0 +1,24 @@
|
||||
{ config, lib, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.tailscale;
|
||||
in
|
||||
{
|
||||
options.services.tailscale.exitNode = mkEnableOption "Enable exit node support";
|
||||
|
||||
config.services.tailscale.enable = mkDefault (!config.boot.isContainer);
|
||||
|
||||
# Trust Tailscale interface - access control is handled by Tailscale ACLs.
|
||||
# Required because nftables (used by Incus) breaks Tailscale's automatic iptables rules.
|
||||
config.networking.firewall.trustedInterfaces = mkIf cfg.enable [ "tailscale0" ];
|
||||
|
||||
# MagicDNS
|
||||
config.networking.nameservers = mkIf cfg.enable [ "1.1.1.1" "8.8.8.8" ];
|
||||
config.networking.search = mkIf cfg.enable [ "koi-bebop.ts.net" ];
|
||||
|
||||
# exit node
|
||||
config.networking.firewall.checkReversePath = mkIf cfg.exitNode "loose";
|
||||
config.networking.ip_forward = mkIf cfg.exitNode true;
|
||||
}
|
||||
56
common/nix-builder.nix
Normal file
56
common/nix-builder.nix
Normal file
@@ -0,0 +1,56 @@
|
||||
{ config, lib, ... }:
|
||||
|
||||
let
|
||||
builderUserName = "nix-builder";
|
||||
|
||||
builderRole = "nix-builder";
|
||||
builders = config.machines.withRole.${builderRole} or [];
|
||||
thisMachineIsABuilder = config.thisMachine.hasRole.${builderRole} or false;
|
||||
|
||||
# builders don't include themselves as a remote builder
|
||||
otherBuilders = lib.filter (hostname: hostname != config.networking.hostName) builders;
|
||||
in
|
||||
lib.mkMerge [
|
||||
# configure builder
|
||||
(lib.mkIf (thisMachineIsABuilder && !config.boot.isContainer) {
|
||||
users.users.${builderUserName} = {
|
||||
description = "Distributed Nix Build User";
|
||||
group = builderUserName;
|
||||
isSystemUser = true;
|
||||
createHome = true;
|
||||
home = "/var/lib/nix-builder";
|
||||
useDefaultShell = true;
|
||||
openssh.authorizedKeys.keys = builtins.map
|
||||
(builderCfg: builderCfg.hostKey)
|
||||
(builtins.attrValues config.machines.hosts);
|
||||
};
|
||||
users.groups.${builderUserName} = { };
|
||||
|
||||
nix.settings.trusted-users = [
|
||||
builderUserName
|
||||
];
|
||||
})
|
||||
|
||||
# use each builder
|
||||
{
|
||||
nix.distributedBuilds = true;
|
||||
|
||||
nix.buildMachines = builtins.map
|
||||
(builderHostname: {
|
||||
hostName = builderHostname;
|
||||
system = config.machines.hosts.${builderHostname}.arch;
|
||||
protocol = "ssh-ng";
|
||||
sshUser = builderUserName;
|
||||
sshKey = "/etc/ssh/ssh_host_ed25519_key";
|
||||
maxJobs = 3;
|
||||
speedFactor = 10;
|
||||
supportedFeatures = [ "nixos-test" "benchmark" "big-parallel" "kvm" ];
|
||||
})
|
||||
otherBuilders;
|
||||
|
||||
# It is very likely that the builder's internet is faster or just as fast
|
||||
nix.extraOptions = ''
|
||||
builders-use-substitutes = true
|
||||
'';
|
||||
}
|
||||
]
|
||||
64
common/ntfy-alerts.nix
Normal file
64
common/ntfy-alerts.nix
Normal file
@@ -0,0 +1,64 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
let
|
||||
cfg = config.ntfy-alerts;
|
||||
in
|
||||
{
|
||||
options.ntfy-alerts = {
|
||||
serverUrl = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "https://ntfy.neet.dev";
|
||||
description = "Base URL of the ntfy server.";
|
||||
};
|
||||
|
||||
topic = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "service-failures";
|
||||
description = "ntfy topic to publish alerts to.";
|
||||
};
|
||||
|
||||
curlExtraArgs = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "";
|
||||
description = "Extra arguments to pass to curl (e.g. --proxy http://host:port).";
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.thisMachine.hasRole."ntfy" {
|
||||
age.secrets.ntfy-token.file = ../secrets/ntfy-token.age;
|
||||
|
||||
systemd.services."ntfy-failure@" = {
|
||||
description = "Send ntfy alert for failed unit %i";
|
||||
wants = [ "network-online.target" ];
|
||||
after = [ "network-online.target" ];
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
EnvironmentFile = "/run/agenix/ntfy-token";
|
||||
ExecStart = "${pkgs.writeShellScript "ntfy-failure-notify" ''
|
||||
unit="$1"
|
||||
${lib.getExe pkgs.curl} \
|
||||
--fail --silent --show-error \
|
||||
--max-time 30 --retry 3 \
|
||||
${cfg.curlExtraArgs} \
|
||||
-H "Authorization: Bearer $NTFY_TOKEN" \
|
||||
-H "Title: Service failure on ${config.networking.hostName}" \
|
||||
-H "Priority: high" \
|
||||
-H "Tags: rotating_light" \
|
||||
-d "Unit $unit failed at $(date +%c)" \
|
||||
"${cfg.serverUrl}/${cfg.topic}"
|
||||
''} %i";
|
||||
};
|
||||
};
|
||||
|
||||
# Apply OnFailure to all services via a systemd drop-in
|
||||
systemd.packages = [
|
||||
(pkgs.runCommand "ntfy-on-failure-dropin" { } ''
|
||||
mkdir -p $out/lib/systemd/system/service.d
|
||||
cat > $out/lib/systemd/system/service.d/ntfy-on-failure.conf <<'EOF'
|
||||
[Unit]
|
||||
OnFailure=ntfy-failure@%p.service
|
||||
EOF
|
||||
'')
|
||||
];
|
||||
};
|
||||
}
|
||||
@@ -1,8 +1,9 @@
|
||||
{ lib, config, pkgs, ... }:
|
||||
{ lib, config, ... }:
|
||||
|
||||
let
|
||||
cfg = config.de;
|
||||
in {
|
||||
in
|
||||
{
|
||||
config = lib.mkIf cfg.enable {
|
||||
# enable pulseaudio support for packages
|
||||
nixpkgs.config.pulseaudio = true;
|
||||
@@ -16,44 +17,14 @@ in {
|
||||
alsa.support32Bit = true;
|
||||
pulse.enable = true;
|
||||
jack.enable = true;
|
||||
};
|
||||
|
||||
# use the example session manager (no others are packaged yet so this is enabled by default,
|
||||
# no need to redefine it in your config for now)
|
||||
#media-session.enable = true;
|
||||
|
||||
config.pipewire = {
|
||||
"context.objects" = [
|
||||
{
|
||||
# A default dummy driver. This handles nodes marked with the "node.always-driver"
|
||||
# properyty when no other driver is currently active. JACK clients need this.
|
||||
factory = "spa-node-factory";
|
||||
args = {
|
||||
"factory.name" = "support.node.driver";
|
||||
"node.name" = "Dummy-Driver";
|
||||
"priority.driver" = 8000;
|
||||
};
|
||||
}
|
||||
{
|
||||
factory = "adapter";
|
||||
args = {
|
||||
"factory.name" = "support.null-audio-sink";
|
||||
"node.name" = "Microphone-Proxy";
|
||||
"node.description" = "Microphone";
|
||||
"media.class" = "Audio/Source/Virtual";
|
||||
"audio.position" = "MONO";
|
||||
};
|
||||
}
|
||||
{
|
||||
factory = "adapter";
|
||||
args = {
|
||||
"factory.name" = "support.null-audio-sink";
|
||||
"node.name" = "Main-Output-Proxy";
|
||||
"node.description" = "Main Output";
|
||||
"media.class" = "Audio/Sink";
|
||||
"audio.position" = "FL,FR";
|
||||
};
|
||||
}
|
||||
];
|
||||
services.pipewire.extraConfig.pipewire."92-fix-wine-audio" = {
|
||||
context.properties = {
|
||||
default.clock.rate = 48000;
|
||||
default.clock.quantum = 256;
|
||||
default.clock.min-quantum = 256;
|
||||
default.clock.max-quantum = 2048;
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
@@ -17,39 +17,8 @@ let
|
||||
"PREFIX=$(out)"
|
||||
];
|
||||
};
|
||||
|
||||
nvidia-vaapi-driver = pkgs.stdenv.mkDerivation rec {
|
||||
pname = "nvidia-vaapi-driver";
|
||||
version = "0.0.5";
|
||||
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "elFarto";
|
||||
repo = pname;
|
||||
rev = "v${version}";
|
||||
sha256 = "2bycqKolVoaHK64XYcReteuaON9TjzrFhaG5kty28YY=";
|
||||
};
|
||||
|
||||
patches = [
|
||||
./use-meson-v57.patch
|
||||
];
|
||||
|
||||
nativeBuildInputs = with pkgs; [
|
||||
meson
|
||||
cmake
|
||||
ninja
|
||||
pkg-config
|
||||
];
|
||||
|
||||
buildInputs = with pkgs; [
|
||||
nv-codec-headers-11-1-5-1
|
||||
libva
|
||||
gst_all_1.gstreamer
|
||||
gst_all_1.gst-plugins-bad
|
||||
libglvnd
|
||||
];
|
||||
};
|
||||
|
||||
in {
|
||||
in
|
||||
{
|
||||
config = lib.mkIf cfg.enable {
|
||||
# chromium with specific extensions + settings
|
||||
programs.chromium = {
|
||||
@@ -60,7 +29,6 @@ in {
|
||||
"oboonakemofpalcgghocfoadofidjkkk" # keepassxc plugin
|
||||
"cimiefiiaegbelhefglklhhakcgmhkai" # plasma integration
|
||||
"hkgfoiooedgoejojocmhlaklaeopbecg" # picture in picture
|
||||
"fihnjjcciajhdojfnbdddfaoknhalnja" # I don't care about cookies
|
||||
"mnjggcdmjocbbbhaepdhchncahnbgone" # SponsorBlock
|
||||
"dhdgffkkebhmkfjojejmpbldmpobfkfo" # Tampermonkey
|
||||
# "ehpdicggenhgapiikfpnmppdonadlnmp" # Disable Scroll Jacking
|
||||
@@ -73,33 +41,28 @@ in {
|
||||
"SpellcheckLanguage" = [ "en-US" ];
|
||||
};
|
||||
defaultSearchProviderSuggestURL = null;
|
||||
defaultSearchProviderSearchURL = " https://duckduckgo.com/?q={searchTerms}&kp=-1&kl=us-en";
|
||||
defaultSearchProviderSearchURL = "https://duckduckgo.com/?q={searchTerms}&kp=-1&kl=us-en";
|
||||
};
|
||||
|
||||
# hardware accelerated video playback (on intel)
|
||||
nixpkgs.config.packageOverrides = pkgs: {
|
||||
vaapiIntel = pkgs.vaapiIntel.override { enableHybridCodec = true; };
|
||||
chromium = pkgs.chromium.override {
|
||||
gnomeKeyringSupport = true;
|
||||
enableWideVine = true;
|
||||
# ungoogled = true;
|
||||
# --enable-native-gpu-memory-buffers # fails on AMD APU
|
||||
# --enable-webrtc-vp9-support
|
||||
commandLineArgs = "--use-vulkan --use-gl=desktop --enable-zero-copy --enable-hardware-overlays --enable-features=VaapiVideoDecoder,CanvasOopRasterization --ignore-gpu-blocklist --enable-accelerated-mjpeg-decode --enable-accelerated-video --enable-gpu-rasterization";
|
||||
commandLineArgs = "--use-vulkan";
|
||||
};
|
||||
};
|
||||
# todo vulkan in chrome
|
||||
# todo video encoding in chrome
|
||||
hardware.opengl = {
|
||||
hardware.graphics = {
|
||||
enable = true;
|
||||
extraPackages = with pkgs; [
|
||||
intel-media-driver # LIBVA_DRIVER_NAME=iHD
|
||||
vaapiIntel # LIBVA_DRIVER_NAME=i965 (older but works better for Firefox/Chromium)
|
||||
# vaapiVdpau
|
||||
libvdpau-va-gl
|
||||
nvidia-vaapi-driver
|
||||
];
|
||||
extraPackages32 = with pkgs.pkgsi686Linux; [ vaapiIntel ];
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
@@ -2,22 +2,21 @@
|
||||
|
||||
let
|
||||
cfg = config.de;
|
||||
in {
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
./kde.nix
|
||||
./xfce.nix
|
||||
./yubikey.nix
|
||||
./chromium.nix
|
||||
# ./firefox.nix
|
||||
./firefox.nix
|
||||
./audio.nix
|
||||
# ./torbrowser.nix
|
||||
./pithos.nix
|
||||
./spotify.nix
|
||||
./vscodium.nix
|
||||
./discord.nix
|
||||
./steam.nix
|
||||
./touchpad.nix
|
||||
./mount-samba.nix
|
||||
./udev.nix
|
||||
./virtualisation.nix
|
||||
];
|
||||
|
||||
options.de = {
|
||||
@@ -25,9 +24,10 @@ in {
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
# vulkan
|
||||
hardware.opengl.driSupport = true;
|
||||
hardware.opengl.driSupport32Bit = true;
|
||||
environment.systemPackages = with pkgs; [
|
||||
# https://github.com/NixOS/nixpkgs/pull/328086#issuecomment-2235384618
|
||||
gparted
|
||||
];
|
||||
|
||||
# Applications
|
||||
users.users.googlebot.packages = with pkgs; [
|
||||
@@ -36,39 +36,81 @@ in {
|
||||
mumble
|
||||
tigervnc
|
||||
bluez-tools
|
||||
vscodium
|
||||
element-desktop
|
||||
mpv
|
||||
nextcloud-client
|
||||
signal-desktop
|
||||
minecraft
|
||||
sauerbraten
|
||||
gparted
|
||||
libreoffice-fresh
|
||||
thunderbird
|
||||
spotifyd
|
||||
spotify-qt
|
||||
spotify
|
||||
arduino
|
||||
yt-dlp
|
||||
joplin-desktop
|
||||
config.inputs.deploy-rs.packages.${config.currentSystem}.deploy-rs
|
||||
lxqt.pavucontrol-qt
|
||||
deskflow
|
||||
file-roller
|
||||
android-tools
|
||||
logseq
|
||||
|
||||
# For Nix IDE
|
||||
nixpkgs-fmt
|
||||
nixd
|
||||
nil
|
||||
|
||||
godot-mono
|
||||
];
|
||||
|
||||
# Networking
|
||||
networking.networkmanager.enable = true;
|
||||
users.users.googlebot.extraGroups = [ "networkmanager" ];
|
||||
|
||||
# Printing
|
||||
services.printing.enable = true;
|
||||
services.printing.drivers = with pkgs; [
|
||||
gutenprint
|
||||
];
|
||||
# Printer discovery
|
||||
services.avahi.enable = true;
|
||||
services.avahi.nssmdns = true;
|
||||
|
||||
programs.file-roller.enable = true;
|
||||
# Scanning
|
||||
hardware.sane.enable = true;
|
||||
hardware.sane.extraBackends = with pkgs; [
|
||||
# Enable support for "driverless" scanners
|
||||
# Check for support here: https://mfi.apple.com/account/airprint-search
|
||||
sane-airscan
|
||||
];
|
||||
|
||||
# Printer/Scanner discovery
|
||||
services.avahi.enable = true;
|
||||
services.avahi.nssmdns4 = true;
|
||||
|
||||
# Security
|
||||
services.gnome.gnome-keyring.enable = true;
|
||||
security.pam.services.googlebot.enableGnomeKeyring = true;
|
||||
|
||||
# Mount personal SMB stores
|
||||
services.mount-samba.enable = true;
|
||||
|
||||
# allow building ARM derivations
|
||||
boot.binfmt.emulatedSystems = [ "aarch64-linux" ];
|
||||
|
||||
# for luks onlock over tor
|
||||
services.tor.enable = true;
|
||||
services.tor.client.enable = true;
|
||||
|
||||
# Enable wayland support in various chromium based applications
|
||||
environment.sessionVariables.NIXOS_OZONE_WL = "1";
|
||||
|
||||
fonts.packages = with pkgs; [ nerd-fonts.symbols-only ];
|
||||
|
||||
# SSH Ask pass
|
||||
programs.ssh.enableAskPassword = true;
|
||||
programs.ssh.askPassword = "${pkgs.kdePackages.ksshaskpass}/bin/ksshaskpass";
|
||||
|
||||
users.users.googlebot.extraGroups = [
|
||||
# Networking
|
||||
"networkmanager"
|
||||
# Scanning
|
||||
"scanner"
|
||||
"lp"
|
||||
];
|
||||
};
|
||||
}
|
||||
|
||||
@@ -2,7 +2,8 @@
|
||||
|
||||
let
|
||||
cfg = config.de;
|
||||
in {
|
||||
in
|
||||
{
|
||||
config = lib.mkIf cfg.enable {
|
||||
users.users.googlebot.packages = [
|
||||
pkgs.discord
|
||||
|
||||
@@ -20,31 +20,6 @@ let
|
||||
};
|
||||
|
||||
firefox = pkgs.wrapFirefox somewhatPrivateFF {
|
||||
desktopName = "Sneed Browser";
|
||||
|
||||
nixExtensions = [
|
||||
(pkgs.fetchFirefoxAddon {
|
||||
name = "ublock-origin";
|
||||
url = "https://addons.mozilla.org/firefox/downloads/file/3719054/ublock_origin-1.33.2-an+fx.xpi";
|
||||
sha256 = "XDpe9vW1R1iVBTI4AmNgAg1nk7BVQdIAMuqd0cnK5FE=";
|
||||
})
|
||||
(pkgs.fetchFirefoxAddon {
|
||||
name = "sponsorblock";
|
||||
url = "https://addons.mozilla.org/firefox/downloads/file/3720594/sponsorblock_skip_sponsorships_on_youtube-2.0.12.3-an+fx.xpi";
|
||||
sha256 = "HRtnmZWyXN3MKo4AvSYgNJGkBEsa2RaMamFbkz+YzQg=";
|
||||
})
|
||||
(pkgs.fetchFirefoxAddon {
|
||||
name = "KeePassXC-Browser";
|
||||
url = "https://addons.mozilla.org/firefox/downloads/file/3720664/keepassxc_browser-1.7.6-fx.xpi";
|
||||
sha256 = "3K404/eq3amHhIT0WhzQtC892he5I0kp2SvbzE9dbZg=";
|
||||
})
|
||||
(pkgs.fetchFirefoxAddon {
|
||||
name = "https-everywhere";
|
||||
url = "https://addons.mozilla.org/firefox/downloads/file/3716461/https_everywhere-2021.1.27-an+fx.xpi";
|
||||
sha256 = "2gSXSLunKCwPjAq4Wsj0lOeV551r3G+fcm1oeqjMKh8=";
|
||||
})
|
||||
];
|
||||
|
||||
extraPolicies = {
|
||||
CaptivePortal = false;
|
||||
DisableFirefoxStudies = true;
|
||||
@@ -71,14 +46,8 @@ let
|
||||
TopSites = false;
|
||||
};
|
||||
UserMessaging = {
|
||||
ExtensionRecommendations = false;
|
||||
SkipOnboarding = true;
|
||||
};
|
||||
WebsiteFilter = {
|
||||
Block = [
|
||||
"http://paradigminteractive.io/"
|
||||
"https://paradigminteractive.io/"
|
||||
];
|
||||
ExtensionRecommendations = false;
|
||||
SkipOnboarding = true;
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
@@ -2,22 +2,21 @@
|
||||
|
||||
let
|
||||
cfg = config.de;
|
||||
in {
|
||||
in
|
||||
{
|
||||
config = lib.mkIf cfg.enable {
|
||||
# kde plasma
|
||||
services.xserver = {
|
||||
enable = true;
|
||||
desktopManager.plasma5.enable = true;
|
||||
displayManager.sddm.enable = true;
|
||||
};
|
||||
services.displayManager.sddm.enable = true;
|
||||
services.displayManager.sddm.wayland.enable = true;
|
||||
services.desktopManager.plasma6.enable = true;
|
||||
|
||||
# kde apps
|
||||
nixpkgs.config.firefox.enablePlasmaBrowserIntegration = true;
|
||||
users.users.googlebot.packages = with pkgs; [
|
||||
# akonadi
|
||||
# kmail
|
||||
# plasma5Packages.kmail-account-wizard
|
||||
kate
|
||||
kdePackages.kate
|
||||
kdePackages.kdeconnect-kde
|
||||
kdePackages.skanpage
|
||||
];
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,36 +1,50 @@
|
||||
# mounts the samba share on s0 over zeroteir
|
||||
# mounts the samba share on s0 over tailscale
|
||||
|
||||
{ config, lib, ... }:
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
let
|
||||
cfg = config.services.mount-samba;
|
||||
|
||||
# prevents hanging on network split
|
||||
network_opts = "x-systemd.automount,noauto,x-systemd.idle-timeout=60,x-systemd.device-timeout=5s,x-systemd.mount-timeout=5s";
|
||||
# prevents hanging on network split and other similar niceties to ensure a stable connection
|
||||
network_opts = "nostrictsync,cache=strict,handlecache,handletimeout=30000,rwpidforward,mapposix,soft,resilienthandles,echo_interval=10,noblocksend,fsc";
|
||||
|
||||
systemd_opts = "x-systemd.automount,noauto,x-systemd.idle-timeout=60,x-systemd.device-timeout=5s,x-systemd.mount-timeout=5s";
|
||||
user_opts = "uid=${toString config.users.users.googlebot.uid},file_mode=0660,dir_mode=0770,user";
|
||||
auth_opts = "credentials=/run/agenix/smb-secrets";
|
||||
version_opts = "vers=2.1";
|
||||
auth_opts = "sec=ntlmv2i,credentials=/run/agenix/smb-secrets";
|
||||
version_opts = "vers=3.1.1";
|
||||
|
||||
opts = "${network_opts},${user_opts},${version_opts}";
|
||||
in {
|
||||
public_user_opts = "gid=${toString config.users.groups.users.gid}";
|
||||
|
||||
opts = "${systemd_opts},${network_opts},${user_opts},${version_opts},${auth_opts}";
|
||||
in
|
||||
{
|
||||
options.services.mount-samba = {
|
||||
enable = lib.mkEnableOption "enable mounting samba shares";
|
||||
};
|
||||
|
||||
config = lib.mkIf (cfg.enable && config.services.zerotierone.enable) {
|
||||
config = lib.mkIf (cfg.enable && config.services.tailscale.enable) {
|
||||
fileSystems."/mnt/public" = {
|
||||
device = "//s0.zt.neet.dev/public";
|
||||
fsType = "cifs";
|
||||
options = ["guest,${opts}"];
|
||||
device = "//s0.koi-bebop.ts.net/public";
|
||||
fsType = "cifs";
|
||||
options = [ "${opts},${public_user_opts}" ];
|
||||
};
|
||||
|
||||
fileSystems."/mnt/private" = {
|
||||
device = "//s0.zt.neet.dev/googlebot";
|
||||
fsType = "cifs";
|
||||
options = ["${auth_opts},${opts}"];
|
||||
device = "//s0.koi-bebop.ts.net/googlebot";
|
||||
fsType = "cifs";
|
||||
options = [ opts ];
|
||||
};
|
||||
|
||||
age.secrets.smb-secrets.file = ../../secrets/smb-secrets.age;
|
||||
|
||||
environment.shellAliases = {
|
||||
# remount storage
|
||||
remount_public = "sudo systemctl restart mnt-public.mount";
|
||||
remount_private = "sudo systemctl restart mnt-private.mount";
|
||||
|
||||
# Encrypted Vault
|
||||
vault_unlock = "${pkgs.gocryptfs}/bin/gocryptfs /mnt/private/.vault/ /mnt/vault/";
|
||||
vault_lock = "umount /mnt/vault/";
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -2,7 +2,8 @@
|
||||
|
||||
let
|
||||
cfg = config.de;
|
||||
in {
|
||||
in
|
||||
{
|
||||
config = lib.mkIf cfg.enable {
|
||||
nixpkgs.overlays = [
|
||||
(self: super: {
|
||||
@@ -11,7 +12,7 @@ in {
|
||||
version = "1.5.1";
|
||||
src = super.fetchFromGitHub {
|
||||
owner = pname;
|
||||
repo = pname;
|
||||
repo = pname;
|
||||
rev = version;
|
||||
sha256 = "il7OAALpHFZ6wjco9Asp04zWHCD8Ni+iBdiJWcMiQA4=";
|
||||
};
|
||||
|
||||
@@ -1,86 +0,0 @@
|
||||
{ lib, config, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.spotifyd;
|
||||
toml = pkgs.formats.toml {};
|
||||
spotifydConf = toml.generate "spotify.conf" cfg.settings;
|
||||
in
|
||||
{
|
||||
disabledModules = [
|
||||
"services/audio/spotifyd.nix"
|
||||
];
|
||||
|
||||
options = {
|
||||
services.spotifyd = {
|
||||
enable = mkEnableOption "spotifyd, a Spotify playing daemon";
|
||||
|
||||
settings = mkOption {
|
||||
default = {};
|
||||
type = toml.type;
|
||||
example = { global.bitrate = 320; };
|
||||
description = ''
|
||||
Configuration for Spotifyd. For syntax and directives, see
|
||||
<link xlink:href="https://github.com/Spotifyd/spotifyd#Configuration"/>.
|
||||
'';
|
||||
};
|
||||
|
||||
users = mkOption {
|
||||
type = with types; listOf str;
|
||||
default = [];
|
||||
description = ''
|
||||
Usernames to be added to the "spotifyd" group, so that they
|
||||
can start and interact with the userspace daemon.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
# username specific stuff because i'm lazy...
|
||||
services.spotifyd.users = [ "googlebot" ];
|
||||
users.users.googlebot.packages = with pkgs; [
|
||||
spotify
|
||||
spotify-tui
|
||||
];
|
||||
|
||||
users.groups.spotifyd = {
|
||||
members = cfg.users;
|
||||
};
|
||||
|
||||
age.secrets.spotifyd = {
|
||||
file = ../../secrets/spotifyd.age;
|
||||
group = "spotifyd";
|
||||
mode = "0440"; # group can read
|
||||
};
|
||||
|
||||
# spotifyd to read secrets and run as user service
|
||||
services.spotifyd = {
|
||||
settings.global = {
|
||||
username_cmd = "sed '1q;d' /run/agenix/spotifyd";
|
||||
password_cmd = "sed '2q;d' /run/agenix/spotifyd";
|
||||
bitrate = 320;
|
||||
backend = "pulseaudio";
|
||||
device_name = config.networking.hostName;
|
||||
device_type = "computer";
|
||||
# on_song_change_hook = "command_to_run_on_playback_events"
|
||||
autoplay = true;
|
||||
};
|
||||
};
|
||||
|
||||
systemd.user.services.spotifyd-daemon = {
|
||||
enable = true;
|
||||
wantedBy = [ "graphical-session.target" ];
|
||||
partOf = [ "graphical-session.target" ];
|
||||
description = "spotifyd, a Spotify playing daemon";
|
||||
environment.SHELL = "/bin/sh";
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.spotifyd}/bin/spotifyd --no-daemon --config-path ${spotifydConf}";
|
||||
Restart = "always";
|
||||
CacheDirectory = "spotifyd";
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -2,7 +2,8 @@
|
||||
|
||||
let
|
||||
cfg = config.de;
|
||||
in {
|
||||
in
|
||||
{
|
||||
config = lib.mkIf cfg.enable {
|
||||
programs.steam.enable = true;
|
||||
hardware.steam-hardware.enable = true; # steam controller
|
||||
|
||||
@@ -1,24 +0,0 @@
|
||||
{ lib, config, pkgs, ... }:
|
||||
|
||||
let
|
||||
cfg = config.de;
|
||||
in {
|
||||
config = lib.mkIf cfg.enable {
|
||||
nixpkgs.overlays = [
|
||||
(self: super: {
|
||||
tor-browser-bundle-bin = super.tor-browser-bundle-bin.overrideAttrs (old: rec {
|
||||
version = "10.0.10";
|
||||
lang = "en-US";
|
||||
src = pkgs.fetchurl {
|
||||
url = "https://dist.torproject.org/torbrowser/${version}/tor-browser-linux64-${version}_${lang}.tar.xz";
|
||||
sha256 = "vYWZ+NsGN8YH5O61+zrUjlFv3rieaBqjBQ+a18sQcZg=";
|
||||
};
|
||||
});
|
||||
})
|
||||
];
|
||||
|
||||
users.users.googlebot.packages = with pkgs; [
|
||||
tor-browser-bundle-bin
|
||||
];
|
||||
};
|
||||
}
|
||||
@@ -1,14 +1,11 @@
|
||||
{ lib, config, pkgs, ... }:
|
||||
{ lib, config, ... }:
|
||||
|
||||
let
|
||||
cfg = config.de.touchpad;
|
||||
in {
|
||||
options.de.touchpad = {
|
||||
enable = lib.mkEnableOption "enable touchpad";
|
||||
};
|
||||
|
||||
cfg = config.de;
|
||||
in
|
||||
{
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.xserver.libinput.enable = true;
|
||||
services.xserver.libinput.touchpad.naturalScrolling = true;
|
||||
services.libinput.enable = true;
|
||||
services.libinput.touchpad.naturalScrolling = true;
|
||||
};
|
||||
}
|
||||
|
||||
25
common/pc/udev.nix
Normal file
25
common/pc/udev.nix
Normal file
@@ -0,0 +1,25 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
let
|
||||
cfg = config.de;
|
||||
in
|
||||
{
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.udev.extraRules = ''
|
||||
# depthai
|
||||
SUBSYSTEM=="usb", ATTRS{idVendor}=="03e7", MODE="0666"
|
||||
|
||||
# Moonlander
|
||||
# Rules for Oryx web flashing and live training
|
||||
KERNEL=="hidraw*", ATTRS{idVendor}=="16c0", MODE="0664", GROUP="plugdev"
|
||||
KERNEL=="hidraw*", ATTRS{idVendor}=="3297", MODE="0664", GROUP="plugdev"
|
||||
# Wally Flashing rules for the Moonlander and Planck EZ
|
||||
SUBSYSTEMS=="usb", ATTRS{idVendor}=="0483", ATTRS{idProduct}=="df11", MODE:="0666", SYMLINK+="stm32_dfu"
|
||||
'';
|
||||
services.udev.packages = [ pkgs.platformio ];
|
||||
|
||||
users.groups.plugdev = {
|
||||
members = [ "googlebot" ];
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,22 +0,0 @@
|
||||
diff --git a/meson.build b/meson.build
|
||||
index dace367..8c0e290 100644
|
||||
--- a/meson.build
|
||||
+++ b/meson.build
|
||||
@@ -8,7 +8,7 @@ project(
|
||||
'warning_level=0',
|
||||
],
|
||||
license: 'MIT',
|
||||
- meson_version: '>= 0.58.0',
|
||||
+ meson_version: '>= 0.57.0',
|
||||
)
|
||||
|
||||
cc = meson.get_compiler('c')
|
||||
@@ -47,8 +47,3 @@ shared_library(
|
||||
gnu_symbol_visibility: 'hidden',
|
||||
)
|
||||
|
||||
-meson.add_devenv(environment({
|
||||
- 'NVD_LOG': '1',
|
||||
- 'LIBVA_DRIVER_NAME': 'nvidia',
|
||||
- 'LIBVA_DRIVERS_PATH': meson.project_build_root(),
|
||||
-}))
|
||||
23
common/pc/virtualisation.nix
Normal file
23
common/pc/virtualisation.nix
Normal file
@@ -0,0 +1,23 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
let
|
||||
cfg = config.de;
|
||||
in
|
||||
{
|
||||
config = lib.mkIf cfg.enable {
|
||||
# AppVMs
|
||||
virtualisation.appvm.enable = true;
|
||||
virtualisation.appvm.user = "googlebot";
|
||||
|
||||
# Use podman instead of docker
|
||||
virtualisation.podman.enable = true;
|
||||
virtualisation.podman.dockerCompat = true;
|
||||
|
||||
# virt-manager
|
||||
virtualisation.libvirtd.enable = true;
|
||||
programs.dconf.enable = true;
|
||||
virtualisation.spiceUSBRedirection.enable = true;
|
||||
environment.systemPackages = with pkgs; [ virt-manager ];
|
||||
users.users.googlebot.extraGroups = [ "libvirtd" "adbusers" ];
|
||||
};
|
||||
}
|
||||
@@ -1,22 +0,0 @@
|
||||
{ lib, config, pkgs, ... }:
|
||||
|
||||
let
|
||||
cfg = config.de;
|
||||
|
||||
extensions = with pkgs.vscode-extensions; [
|
||||
# bbenoist.Nix # nix syntax support
|
||||
# arrterian.nix-env-selector # nix dev envs
|
||||
];
|
||||
|
||||
vscodium-with-extensions = pkgs.vscode-with-extensions.override {
|
||||
vscode = pkgs.vscodium;
|
||||
vscodeExtensions = extensions;
|
||||
};
|
||||
in
|
||||
{
|
||||
config = lib.mkIf cfg.enable {
|
||||
users.users.googlebot.packages = [
|
||||
vscodium-with-extensions
|
||||
];
|
||||
};
|
||||
}
|
||||
@@ -1,22 +0,0 @@
|
||||
{ lib, config, pkgs, ... }:
|
||||
|
||||
let
|
||||
cfg = config.de;
|
||||
in {
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.xserver = {
|
||||
enable = true;
|
||||
desktopManager = {
|
||||
xterm.enable = false;
|
||||
xfce.enable = true;
|
||||
};
|
||||
displayManager.sddm.enable = true;
|
||||
};
|
||||
|
||||
# xfce apps
|
||||
# TODO for some reason whiskermenu needs to be global for it to work
|
||||
environment.systemPackages = with pkgs; [
|
||||
xfce.xfce4-whiskermenu-plugin
|
||||
];
|
||||
};
|
||||
}
|
||||
@@ -2,7 +2,8 @@
|
||||
|
||||
let
|
||||
cfg = config.de;
|
||||
in {
|
||||
in
|
||||
{
|
||||
config = lib.mkIf cfg.enable {
|
||||
# yubikey
|
||||
services.pcscd.enable = true;
|
||||
|
||||
108
common/pia.nix
108
common/pia.nix
@@ -1,108 +0,0 @@
|
||||
{ config, pkgs, lib, ... }:
|
||||
|
||||
let
|
||||
cfg = config.pia;
|
||||
vpnfailsafe = pkgs.stdenv.mkDerivation {
|
||||
pname = "vpnfailsafe";
|
||||
version = "0.0.1";
|
||||
src = ./.;
|
||||
installPhase = ''
|
||||
mkdir -p $out
|
||||
cp vpnfailsafe.sh $out/vpnfailsafe.sh
|
||||
sed -i 's|getent|${pkgs.getent}/bin/getent|' $out/vpnfailsafe.sh
|
||||
'';
|
||||
};
|
||||
in
|
||||
{
|
||||
options.pia = {
|
||||
enable = lib.mkEnableOption "Enable private internet access";
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.openvpn = {
|
||||
servers = {
|
||||
pia = {
|
||||
config = ''
|
||||
client
|
||||
dev tun
|
||||
proto udp
|
||||
remote us-washingtondc.privacy.network 1198
|
||||
resolv-retry infinite
|
||||
nobind
|
||||
persist-key
|
||||
persist-tun
|
||||
cipher aes-128-cbc
|
||||
auth sha1
|
||||
tls-client
|
||||
remote-cert-tls server
|
||||
|
||||
auth-user-pass
|
||||
compress
|
||||
verb 1
|
||||
reneg-sec 0
|
||||
<crl-verify>
|
||||
-----BEGIN X509 CRL-----
|
||||
MIICWDCCAUAwDQYJKoZIhvcNAQENBQAwgegxCzAJBgNVBAYTAlVTMQswCQYDVQQI
|
||||
EwJDQTETMBEGA1UEBxMKTG9zQW5nZWxlczEgMB4GA1UEChMXUHJpdmF0ZSBJbnRl
|
||||
cm5ldCBBY2Nlc3MxIDAeBgNVBAsTF1ByaXZhdGUgSW50ZXJuZXQgQWNjZXNzMSAw
|
||||
HgYDVQQDExdQcml2YXRlIEludGVybmV0IEFjY2VzczEgMB4GA1UEKRMXUHJpdmF0
|
||||
ZSBJbnRlcm5ldCBBY2Nlc3MxLzAtBgkqhkiG9w0BCQEWIHNlY3VyZUBwcml2YXRl
|
||||
aW50ZXJuZXRhY2Nlc3MuY29tFw0xNjA3MDgxOTAwNDZaFw0zNjA3MDMxOTAwNDZa
|
||||
MCYwEQIBARcMMTYwNzA4MTkwMDQ2MBECAQYXDDE2MDcwODE5MDA0NjANBgkqhkiG
|
||||
9w0BAQ0FAAOCAQEAQZo9X97ci8EcPYu/uK2HB152OZbeZCINmYyluLDOdcSvg6B5
|
||||
jI+ffKN3laDvczsG6CxmY3jNyc79XVpEYUnq4rT3FfveW1+Ralf+Vf38HdpwB8EW
|
||||
B4hZlQ205+21CALLvZvR8HcPxC9KEnev1mU46wkTiov0EKc+EdRxkj5yMgv0V2Re
|
||||
ze7AP+NQ9ykvDScH4eYCsmufNpIjBLhpLE2cuZZXBLcPhuRzVoU3l7A9lvzG9mjA
|
||||
5YijHJGHNjlWFqyrn1CfYS6koa4TGEPngBoAziWRbDGdhEgJABHrpoaFYaL61zqy
|
||||
MR6jC0K2ps9qyZAN74LEBedEfK7tBOzWMwr58A==
|
||||
-----END X509 CRL-----
|
||||
</crl-verify>
|
||||
|
||||
<ca>
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIFqzCCBJOgAwIBAgIJAKZ7D5Yv87qDMA0GCSqGSIb3DQEBDQUAMIHoMQswCQYD
|
||||
VQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNVBAcTCkxvc0FuZ2VsZXMxIDAeBgNV
|
||||
BAoTF1ByaXZhdGUgSW50ZXJuZXQgQWNjZXNzMSAwHgYDVQQLExdQcml2YXRlIElu
|
||||
dGVybmV0IEFjY2VzczEgMB4GA1UEAxMXUHJpdmF0ZSBJbnRlcm5ldCBBY2Nlc3Mx
|
||||
IDAeBgNVBCkTF1ByaXZhdGUgSW50ZXJuZXQgQWNjZXNzMS8wLQYJKoZIhvcNAQkB
|
||||
FiBzZWN1cmVAcHJpdmF0ZWludGVybmV0YWNjZXNzLmNvbTAeFw0xNDA0MTcxNzM1
|
||||
MThaFw0zNDA0MTIxNzM1MThaMIHoMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0Ex
|
||||
EzARBgNVBAcTCkxvc0FuZ2VsZXMxIDAeBgNVBAoTF1ByaXZhdGUgSW50ZXJuZXQg
|
||||
QWNjZXNzMSAwHgYDVQQLExdQcml2YXRlIEludGVybmV0IEFjY2VzczEgMB4GA1UE
|
||||
AxMXUHJpdmF0ZSBJbnRlcm5ldCBBY2Nlc3MxIDAeBgNVBCkTF1ByaXZhdGUgSW50
|
||||
ZXJuZXQgQWNjZXNzMS8wLQYJKoZIhvcNAQkBFiBzZWN1cmVAcHJpdmF0ZWludGVy
|
||||
bmV0YWNjZXNzLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAPXD
|
||||
L1L9tX6DGf36liA7UBTy5I869z0UVo3lImfOs/GSiFKPtInlesP65577nd7UNzzX
|
||||
lH/P/CnFPdBWlLp5ze3HRBCc/Avgr5CdMRkEsySL5GHBZsx6w2cayQ2EcRhVTwWp
|
||||
cdldeNO+pPr9rIgPrtXqT4SWViTQRBeGM8CDxAyTopTsobjSiYZCF9Ta1gunl0G/
|
||||
8Vfp+SXfYCC+ZzWvP+L1pFhPRqzQQ8k+wMZIovObK1s+nlwPaLyayzw9a8sUnvWB
|
||||
/5rGPdIYnQWPgoNlLN9HpSmsAcw2z8DXI9pIxbr74cb3/HSfuYGOLkRqrOk6h4RC
|
||||
OfuWoTrZup1uEOn+fw8CAwEAAaOCAVQwggFQMB0GA1UdDgQWBBQv63nQ/pJAt5tL
|
||||
y8VJcbHe22ZOsjCCAR8GA1UdIwSCARYwggESgBQv63nQ/pJAt5tLy8VJcbHe22ZO
|
||||
sqGB7qSB6zCB6DELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRMwEQYDVQQHEwpM
|
||||
b3NBbmdlbGVzMSAwHgYDVQQKExdQcml2YXRlIEludGVybmV0IEFjY2VzczEgMB4G
|
||||
A1UECxMXUHJpdmF0ZSBJbnRlcm5ldCBBY2Nlc3MxIDAeBgNVBAMTF1ByaXZhdGUg
|
||||
SW50ZXJuZXQgQWNjZXNzMSAwHgYDVQQpExdQcml2YXRlIEludGVybmV0IEFjY2Vz
|
||||
czEvMC0GCSqGSIb3DQEJARYgc2VjdXJlQHByaXZhdGVpbnRlcm5ldGFjY2Vzcy5j
|
||||
b22CCQCmew+WL/O6gzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBDQUAA4IBAQAn
|
||||
a5PgrtxfwTumD4+3/SYvwoD66cB8IcK//h1mCzAduU8KgUXocLx7QgJWo9lnZ8xU
|
||||
ryXvWab2usg4fqk7FPi00bED4f4qVQFVfGfPZIH9QQ7/48bPM9RyfzImZWUCenK3
|
||||
7pdw4Bvgoys2rHLHbGen7f28knT2j/cbMxd78tQc20TIObGjo8+ISTRclSTRBtyC
|
||||
GohseKYpTS9himFERpUgNtefvYHbn70mIOzfOJFTVqfrptf9jXa9N8Mpy3ayfodz
|
||||
1wiqdteqFXkTYoSDctgKMiZ6GdocK9nMroQipIQtpnwd4yBDWIyC6Bvlkrq5TQUt
|
||||
YDQ8z9v+DMO6iwyIDRiU
|
||||
-----END CERTIFICATE-----
|
||||
</ca>
|
||||
|
||||
disable-occ
|
||||
auth-user-pass /run/agenix/pia-login.conf
|
||||
'';
|
||||
autoStart = true;
|
||||
up = "${vpnfailsafe}/vpnfailsafe.sh";
|
||||
down = "${vpnfailsafe}/vpnfailsafe.sh";
|
||||
};
|
||||
};
|
||||
};
|
||||
age.secrets."pia-login.conf".file = ../secrets/pia-login.conf;
|
||||
};
|
||||
}
|
||||
155
common/sandboxed-workspace/base.nix
Normal file
155
common/sandboxed-workspace/base.nix
Normal file
@@ -0,0 +1,155 @@
|
||||
{ hostConfig, workspaceName, ip, networkInterface }:
|
||||
|
||||
# Base configuration shared by all sandboxed workspaces (VMs and containers)
|
||||
# This provides common settings for networking, SSH, users, and packages
|
||||
#
|
||||
# Parameters:
|
||||
# hostConfig - The host's NixOS config (for inputs, ssh keys, etc.)
|
||||
# workspaceName - Name of the workspace (used as hostname)
|
||||
# ip - Static IP address for the workspace
|
||||
# networkInterface - Match config for systemd-networkd (e.g., { Type = "ether"; } or { Name = "host0"; })
|
||||
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
let
|
||||
claudeConfigFile = pkgs.writeText "claude-config.json" (builtins.toJSON {
|
||||
hasCompletedOnboarding = true;
|
||||
theme = "dark";
|
||||
projects = {
|
||||
"/home/googlebot/workspace" = {
|
||||
hasTrustDialogAccepted = true;
|
||||
};
|
||||
};
|
||||
});
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
../shell.nix
|
||||
hostConfig.inputs.home-manager.nixosModules.home-manager
|
||||
hostConfig.inputs.nix-index-database.nixosModules.default
|
||||
hostConfig.inputs.agenix.nixosModules.default
|
||||
];
|
||||
|
||||
nixpkgs.overlays = [
|
||||
hostConfig.inputs.claude-code-nix.overlays.default
|
||||
];
|
||||
|
||||
# Basic system configuration
|
||||
system.stateVersion = "25.11";
|
||||
|
||||
# Set hostname to match the workspace name
|
||||
networking.hostName = workspaceName;
|
||||
|
||||
# Networking with systemd-networkd
|
||||
networking.useNetworkd = true;
|
||||
systemd.network.enable = true;
|
||||
|
||||
# Enable resolved to populate /etc/resolv.conf from networkd's DNS settings
|
||||
services.resolved.enable = true;
|
||||
|
||||
# Basic networking configuration
|
||||
networking.useDHCP = false;
|
||||
|
||||
# Static IP configuration
|
||||
# Uses the host as DNS server (host forwards to upstream DNS)
|
||||
systemd.network.networks."20-workspace" = {
|
||||
matchConfig = networkInterface;
|
||||
networkConfig = {
|
||||
Address = "${ip}/24";
|
||||
Gateway = hostConfig.networking.sandbox.hostAddress;
|
||||
DNS = [ hostConfig.networking.sandbox.hostAddress ];
|
||||
};
|
||||
};
|
||||
|
||||
# Disable firewall inside workspaces (we're behind NAT)
|
||||
networking.firewall.enable = false;
|
||||
|
||||
# Enable SSH for access
|
||||
services.openssh = {
|
||||
enable = true;
|
||||
settings = {
|
||||
PasswordAuthentication = false;
|
||||
PermitRootLogin = "prohibit-password";
|
||||
};
|
||||
};
|
||||
|
||||
# Use persistent SSH host keys from shared directory
|
||||
services.openssh.hostKeys = lib.mkForce [
|
||||
{
|
||||
path = "/etc/ssh-host-keys/ssh_host_ed25519_key";
|
||||
type = "ed25519";
|
||||
}
|
||||
];
|
||||
|
||||
# Basic system packages
|
||||
environment.systemPackages = with pkgs; [
|
||||
claude-code
|
||||
kakoune
|
||||
vim
|
||||
git
|
||||
htop
|
||||
wget
|
||||
curl
|
||||
tmux
|
||||
dnsutils
|
||||
];
|
||||
|
||||
# User configuration
|
||||
users.mutableUsers = false;
|
||||
users.users.googlebot = {
|
||||
isNormalUser = true;
|
||||
extraGroups = [ "wheel" ];
|
||||
shell = pkgs.fish;
|
||||
openssh.authorizedKeys.keys = hostConfig.machines.ssh.userKeys;
|
||||
};
|
||||
|
||||
security.doas.enable = true;
|
||||
security.sudo.enable = false;
|
||||
security.doas.extraRules = [
|
||||
{ groups = [ "wheel" ]; noPass = true; }
|
||||
];
|
||||
|
||||
# Minimal locale settings
|
||||
i18n.defaultLocale = "en_US.UTF-8";
|
||||
time.timeZone = "America/Los_Angeles";
|
||||
|
||||
# Enable flakes
|
||||
nix.settings.experimental-features = [ "nix-command" "flakes" ];
|
||||
nix.settings.trusted-users = [ "googlebot" ];
|
||||
|
||||
# Binary cache configuration (inherited from host's common/binary-cache.nix)
|
||||
nix.settings.substituters = hostConfig.nix.settings.substituters;
|
||||
nix.settings.trusted-public-keys = hostConfig.nix.settings.trusted-public-keys;
|
||||
nix.settings.fallback = true;
|
||||
nix.settings.netrc-file = config.age.secrets.attic-netrc.path;
|
||||
age.secrets.attic-netrc.file = ../../secrets/attic-netrc.age;
|
||||
|
||||
# Make nixpkgs available in NIX_PATH and registry (like the NixOS ISO)
|
||||
# This allows `nix-shell -p`, `nix repl '<nixpkgs>'`, etc. to work
|
||||
nix.nixPath = [ "nixpkgs=${hostConfig.inputs.nixpkgs}" ];
|
||||
nix.registry.nixpkgs.flake = hostConfig.inputs.nixpkgs;
|
||||
|
||||
# Enable fish shell
|
||||
programs.fish.enable = true;
|
||||
|
||||
# Initialize Claude Code config on first boot (skips onboarding, trusts workspace)
|
||||
systemd.services.claude-config-init = {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
User = "googlebot";
|
||||
Group = "users";
|
||||
};
|
||||
script = ''
|
||||
if [ ! -f /home/googlebot/claude-config/.claude.json ]; then
|
||||
cp ${claudeConfigFile} /home/googlebot/claude-config/.claude.json
|
||||
fi
|
||||
'';
|
||||
};
|
||||
|
||||
# Home Manager configuration
|
||||
home-manager.useGlobalPkgs = true;
|
||||
home-manager.useUserPackages = true;
|
||||
home-manager.users.googlebot = import ./home.nix;
|
||||
}
|
||||
74
common/sandboxed-workspace/container.nix
Normal file
74
common/sandboxed-workspace/container.nix
Normal file
@@ -0,0 +1,74 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
# Container-specific configuration for sandboxed workspaces using systemd-nspawn
|
||||
# This module is imported by default.nix for workspaces with type = "container"
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.sandboxed-workspace;
|
||||
hostConfig = config;
|
||||
|
||||
# Filter for container-type workspaces only
|
||||
containerWorkspaces = filterAttrs (n: ws: ws.type == "container") cfg.workspaces;
|
||||
in
|
||||
{
|
||||
config = mkIf (cfg.enable && containerWorkspaces != { }) {
|
||||
# NixOS container module only sets restartIfChanged when autoStart=true
|
||||
# Work around this by setting it directly on the systemd service
|
||||
systemd.services = mapAttrs'
|
||||
(name: ws: nameValuePair "container@${name}" {
|
||||
restartIfChanged = lib.mkForce true;
|
||||
restartTriggers = [
|
||||
config.containers.${name}.path
|
||||
config.environment.etc."nixos-containers/${name}.conf".source
|
||||
];
|
||||
})
|
||||
containerWorkspaces;
|
||||
|
||||
# Convert container workspace configs to NixOS containers format
|
||||
containers = mapAttrs
|
||||
(name: ws: {
|
||||
autoStart = ws.autoStart;
|
||||
privateNetwork = true;
|
||||
ephemeral = true;
|
||||
restartIfChanged = true;
|
||||
|
||||
# Attach container's veth to the sandbox bridge
|
||||
# This creates the veth pair and attaches host side to the bridge
|
||||
hostBridge = config.networking.sandbox.bridgeName;
|
||||
|
||||
bindMounts = {
|
||||
"/home/googlebot/workspace" = {
|
||||
hostPath = "/home/googlebot/sandboxed/${name}/workspace";
|
||||
isReadOnly = false;
|
||||
};
|
||||
"/etc/ssh-host-keys" = {
|
||||
hostPath = "/home/googlebot/sandboxed/${name}/ssh-host-keys";
|
||||
isReadOnly = false;
|
||||
};
|
||||
"/home/googlebot/claude-config" = {
|
||||
hostPath = "/home/googlebot/sandboxed/${name}/claude-config";
|
||||
isReadOnly = false;
|
||||
};
|
||||
};
|
||||
|
||||
config = { config, lib, pkgs, ... }: {
|
||||
imports = [
|
||||
(import ./base.nix {
|
||||
inherit hostConfig;
|
||||
workspaceName = name;
|
||||
ip = ws.ip;
|
||||
networkInterface = { Name = "eth0"; };
|
||||
})
|
||||
(import ws.config)
|
||||
];
|
||||
|
||||
networking.useHostResolvConf = false;
|
||||
|
||||
nixpkgs.config.allowUnfree = true;
|
||||
};
|
||||
})
|
||||
containerWorkspaces;
|
||||
};
|
||||
}
|
||||
164
common/sandboxed-workspace/default.nix
Normal file
164
common/sandboxed-workspace/default.nix
Normal file
@@ -0,0 +1,164 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
# Unified sandboxed workspace module supporting both VMs and containers
|
||||
# This module provides isolated development environments with shared configuration
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.sandboxed-workspace;
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
./vm.nix
|
||||
./container.nix
|
||||
./incus.nix
|
||||
];
|
||||
|
||||
options.sandboxed-workspace = {
|
||||
enable = mkEnableOption "sandboxed workspace management";
|
||||
|
||||
workspaces = mkOption {
|
||||
type = types.attrsOf (types.submodule {
|
||||
options = {
|
||||
type = mkOption {
|
||||
type = types.enum [ "vm" "container" "incus" ];
|
||||
description = ''
|
||||
Backend type for this workspace:
|
||||
- "vm": microVM with cloud-hypervisor (more isolation, uses virtiofs)
|
||||
- "container": systemd-nspawn via NixOS containers (less overhead, uses bind mounts)
|
||||
- "incus": Incus/LXD container (unprivileged, better security than NixOS containers)
|
||||
'';
|
||||
};
|
||||
|
||||
config = mkOption {
|
||||
type = types.path;
|
||||
description = "Path to the workspace configuration file";
|
||||
};
|
||||
|
||||
ip = mkOption {
|
||||
type = types.str;
|
||||
example = "192.168.83.10";
|
||||
description = ''
|
||||
Static IP address for this workspace on the microvm bridge network.
|
||||
Configures the workspace's network interface and adds an entry to /etc/hosts
|
||||
on the host so the workspace can be accessed by name (e.g., ssh workspace-example).
|
||||
Must be in the 192.168.83.0/24 subnet (or whatever networking.sandbox.subnet is).
|
||||
'';
|
||||
};
|
||||
|
||||
hostKey = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
example = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAA...";
|
||||
description = ''
|
||||
SSH host public key for this workspace. If set, adds to programs.ssh.knownHosts
|
||||
so the host automatically trusts the workspace without prompting.
|
||||
Get the key from: ~/sandboxed/<name>/ssh-host-keys/ssh_host_ed25519_key.pub
|
||||
'';
|
||||
};
|
||||
|
||||
autoStart = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = "Whether to automatically start this workspace on boot";
|
||||
};
|
||||
|
||||
cid = mkOption {
|
||||
type = types.nullOr types.int;
|
||||
default = null;
|
||||
description = ''
|
||||
vsock Context Identifier for this workspace (VM-only, ignored for containers).
|
||||
If null, auto-generated from workspace name.
|
||||
Must be unique per host. Valid range: 3 to 4294967294.
|
||||
See: https://man7.org/linux/man-pages/man7/vsock.7.html
|
||||
'';
|
||||
};
|
||||
};
|
||||
});
|
||||
default = { };
|
||||
description = "Sandboxed workspace configurations";
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
# Automatically enable sandbox networking when workspaces are defined
|
||||
networking.sandbox.enable = mkIf (cfg.workspaces != { }) true;
|
||||
|
||||
# Add workspace hostnames to /etc/hosts so they can be accessed by name
|
||||
networking.hosts = lib.mkMerge (lib.mapAttrsToList
|
||||
(name: ws: {
|
||||
${ws.ip} = [ "workspace-${name}" ];
|
||||
})
|
||||
cfg.workspaces);
|
||||
|
||||
# Add workspace SSH host keys to known_hosts so host trusts workspaces without prompting
|
||||
programs.ssh.knownHosts = lib.mkMerge (lib.mapAttrsToList
|
||||
(name: ws:
|
||||
lib.optionalAttrs (ws.hostKey != null) {
|
||||
"workspace-${name}" = {
|
||||
publicKey = ws.hostKey;
|
||||
extraHostNames = [ ws.ip ];
|
||||
};
|
||||
})
|
||||
cfg.workspaces);
|
||||
|
||||
# Shell aliases for workspace management
|
||||
environment.shellAliases = lib.mkMerge (lib.mapAttrsToList
|
||||
(name: ws:
|
||||
let
|
||||
serviceName =
|
||||
if ws.type == "vm" then "microvm@${name}"
|
||||
else if ws.type == "incus" then "incus-workspace-${name}"
|
||||
else "container@${name}";
|
||||
in
|
||||
{
|
||||
"workspace_${name}" = "ssh googlebot@workspace-${name}";
|
||||
"workspace_${name}_start" = "doas systemctl start ${serviceName}";
|
||||
"workspace_${name}_stop" = "doas systemctl stop ${serviceName}";
|
||||
"workspace_${name}_restart" = "doas systemctl restart ${serviceName}";
|
||||
"workspace_${name}_status" = "doas systemctl status ${serviceName}";
|
||||
})
|
||||
cfg.workspaces);
|
||||
|
||||
# Automatically generate SSH host keys and directories for all workspaces
|
||||
systemd.services = lib.mapAttrs'
|
||||
(name: ws:
|
||||
let
|
||||
serviceName =
|
||||
if ws.type == "vm" then "microvm@${name}"
|
||||
else if ws.type == "incus" then "incus-workspace-${name}"
|
||||
else "container@${name}";
|
||||
in
|
||||
lib.nameValuePair "workspace-${name}-setup" {
|
||||
description = "Setup directories and SSH keys for workspace ${name}";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
before = [ "${serviceName}.service" ];
|
||||
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
};
|
||||
|
||||
script = ''
|
||||
# Create directories if they don't exist
|
||||
mkdir -p /home/googlebot/sandboxed/${name}/workspace
|
||||
mkdir -p /home/googlebot/sandboxed/${name}/ssh-host-keys
|
||||
mkdir -p /home/googlebot/sandboxed/${name}/claude-config
|
||||
|
||||
# Fix ownership
|
||||
chown -R googlebot:users /home/googlebot/sandboxed/${name}
|
||||
|
||||
# Generate SSH host key if it doesn't exist
|
||||
if [ ! -f /home/googlebot/sandboxed/${name}/ssh-host-keys/ssh_host_ed25519_key ]; then
|
||||
${pkgs.openssh}/bin/ssh-keygen -t ed25519 -N "" \
|
||||
-f /home/googlebot/sandboxed/${name}/ssh-host-keys/ssh_host_ed25519_key
|
||||
chown googlebot:users /home/googlebot/sandboxed/${name}/ssh-host-keys/ssh_host_ed25519_key*
|
||||
echo "Generated SSH host key for workspace ${name}"
|
||||
fi
|
||||
'';
|
||||
}
|
||||
)
|
||||
cfg.workspaces;
|
||||
};
|
||||
}
|
||||
50
common/sandboxed-workspace/home.nix
Normal file
50
common/sandboxed-workspace/home.nix
Normal file
@@ -0,0 +1,50 @@
|
||||
{ lib, pkgs, ... }:
|
||||
|
||||
# Home Manager configuration for sandboxed workspace user environment
|
||||
# This sets up the shell and tools inside VMs and containers
|
||||
|
||||
{
|
||||
home.username = "googlebot";
|
||||
home.homeDirectory = "/home/googlebot";
|
||||
home.stateVersion = "24.11";
|
||||
|
||||
programs.home-manager.enable = true;
|
||||
|
||||
# Shell configuration
|
||||
programs.fish.enable = true;
|
||||
programs.starship.enable = true;
|
||||
programs.starship.enableFishIntegration = true;
|
||||
programs.starship.settings.container.disabled = true;
|
||||
|
||||
# Basic command-line tools
|
||||
programs.btop.enable = true;
|
||||
programs.ripgrep.enable = true;
|
||||
programs.eza.enable = true;
|
||||
|
||||
# Git configuration
|
||||
programs.git = {
|
||||
enable = true;
|
||||
settings = {
|
||||
user.name = lib.mkDefault "googlebot";
|
||||
user.email = lib.mkDefault "zuckerberg@neet.dev";
|
||||
};
|
||||
};
|
||||
|
||||
# Shell aliases
|
||||
home.shellAliases = {
|
||||
ls = "eza";
|
||||
la = "eza -la";
|
||||
ll = "eza -l";
|
||||
};
|
||||
|
||||
# Environment variables for Claude Code
|
||||
home.sessionVariables = {
|
||||
# Isolate Claude config to a specific directory on the host
|
||||
CLAUDE_CONFIG_DIR = "/home/googlebot/claude-config";
|
||||
};
|
||||
|
||||
# Additional packages for development
|
||||
home.packages = with pkgs; [
|
||||
# Add packages as needed per workspace
|
||||
];
|
||||
}
|
||||
182
common/sandboxed-workspace/incus.nix
Normal file
182
common/sandboxed-workspace/incus.nix
Normal file
@@ -0,0 +1,182 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
# Incus-specific configuration for sandboxed workspaces
|
||||
# Creates fully declarative Incus containers from NixOS configurations
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.sandboxed-workspace;
|
||||
hostConfig = config;
|
||||
|
||||
incusWorkspaces = filterAttrs (n: ws: ws.type == "incus") cfg.workspaces;
|
||||
|
||||
# Build a NixOS LXC image for a workspace
|
||||
mkContainerImage = name: ws:
|
||||
let
|
||||
nixpkgs = hostConfig.inputs.nixpkgs;
|
||||
containerSystem = nixpkgs.lib.nixosSystem {
|
||||
modules = [
|
||||
(import ./base.nix {
|
||||
inherit hostConfig;
|
||||
workspaceName = name;
|
||||
ip = ws.ip;
|
||||
networkInterface = { Name = "eth0"; };
|
||||
})
|
||||
|
||||
(import ws.config)
|
||||
|
||||
({ config, lib, pkgs, ... }: {
|
||||
nixpkgs.hostPlatform = hostConfig.currentSystem;
|
||||
boot.isContainer = true;
|
||||
networking.useHostResolvConf = false;
|
||||
nixpkgs.config.allowUnfree = true;
|
||||
|
||||
# Incus containers don't support the kernel features nix sandbox requires
|
||||
nix.settings.sandbox = false;
|
||||
|
||||
environment.systemPackages = [
|
||||
(lib.hiPrio (pkgs.writeShellScriptBin "claude" ''
|
||||
exec ${pkgs.claude-code}/bin/claude --dangerously-skip-permissions "$@"
|
||||
''))
|
||||
];
|
||||
})
|
||||
];
|
||||
};
|
||||
in
|
||||
{
|
||||
rootfs = containerSystem.config.system.build.images.lxc;
|
||||
metadata = containerSystem.config.system.build.images.lxc-metadata;
|
||||
toplevel = containerSystem.config.system.build.toplevel;
|
||||
};
|
||||
|
||||
mkIncusService = name: ws:
|
||||
let
|
||||
images = mkContainerImage name ws;
|
||||
hash = builtins.substring 0 12 (builtins.hashString "sha256" "${images.rootfs}");
|
||||
imageName = "nixos-workspace-${name}-${hash}";
|
||||
containerName = "workspace-${name}";
|
||||
|
||||
bridgeName = config.networking.sandbox.bridgeName;
|
||||
mac = lib.mkMac "incus-${name}";
|
||||
|
||||
addDevices = ''
|
||||
incus config device add ${containerName} eth0 nic nictype=bridged parent=${bridgeName} hwaddr=${mac}
|
||||
incus config device add ${containerName} workspace disk source=/home/googlebot/sandboxed/${name}/workspace path=/home/googlebot/workspace shift=true
|
||||
incus config device add ${containerName} ssh-keys disk source=/home/googlebot/sandboxed/${name}/ssh-host-keys path=/etc/ssh-host-keys shift=true
|
||||
incus config device add ${containerName} claude-config disk source=/home/googlebot/sandboxed/${name}/claude-config path=/home/googlebot/claude-config shift=true
|
||||
'';
|
||||
in
|
||||
{
|
||||
description = "Incus workspace ${name}";
|
||||
after = [ "incus.service" "incus-preseed.service" "workspace-${name}-setup.service" ];
|
||||
requires = [ "incus.service" ];
|
||||
wants = [ "workspace-${name}-setup.service" ];
|
||||
wantedBy = optional ws.autoStart "multi-user.target";
|
||||
|
||||
path = [ config.virtualisation.incus.package pkgs.gnutar pkgs.xz pkgs.util-linux ];
|
||||
|
||||
restartTriggers = [ images.rootfs images.metadata ];
|
||||
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
};
|
||||
|
||||
script = ''
|
||||
set -euo pipefail
|
||||
|
||||
# Serialize incus operations - concurrent container creation causes race conditions
|
||||
exec 9>/run/incus-workspace.lock
|
||||
flock -x 9
|
||||
|
||||
# Import image if not present
|
||||
if ! incus image list --format csv | grep -q "${imageName}"; then
|
||||
metadata_tarball=$(echo ${images.metadata}/tarball/*.tar.xz)
|
||||
rootfs_tarball=$(echo ${images.rootfs}/tarball/*.tar.xz)
|
||||
incus image import "$metadata_tarball" "$rootfs_tarball" --alias ${imageName}
|
||||
|
||||
# Clean up old images for this workspace
|
||||
incus image list --format csv | grep "nixos-workspace-${name}-" | grep -v "${imageName}" | cut -d, -f2 | while read old_image; do
|
||||
incus image delete "$old_image" || true
|
||||
done || true
|
||||
fi
|
||||
|
||||
# Always recreate container for ephemeral behavior
|
||||
incus stop ${containerName} --force 2>/dev/null || true
|
||||
incus delete ${containerName} --force 2>/dev/null || true
|
||||
|
||||
incus init ${imageName} ${containerName}
|
||||
${addDevices}
|
||||
incus start ${containerName}
|
||||
|
||||
# Wait for container to start
|
||||
for i in $(seq 1 30); do
|
||||
if incus list --format csv | grep -q "^${containerName},RUNNING"; then
|
||||
exit 0
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
|
||||
exit 1
|
||||
'';
|
||||
|
||||
preStop = ''
|
||||
exec 9>/run/incus-workspace.lock
|
||||
flock -x 9
|
||||
|
||||
incus stop ${containerName} --force 2>/dev/null || true
|
||||
incus delete ${containerName} --force 2>/dev/null || true
|
||||
|
||||
# Clean up all images for this workspace
|
||||
incus image list --format csv 2>/dev/null | grep "nixos-workspace-${name}-" | cut -d, -f2 | while read img; do
|
||||
incus image delete "$img" 2>/dev/null || true
|
||||
done
|
||||
'';
|
||||
};
|
||||
|
||||
|
||||
in
|
||||
{
|
||||
config = mkIf (cfg.enable && incusWorkspaces != { }) {
|
||||
|
||||
virtualisation.incus.enable = true;
|
||||
networking.nftables.enable = true;
|
||||
|
||||
virtualisation.incus.preseed = {
|
||||
storage_pools = [{
|
||||
name = "default";
|
||||
driver = "dir";
|
||||
config = {
|
||||
source = "/var/lib/incus/storage-pools/default";
|
||||
};
|
||||
}];
|
||||
|
||||
profiles = [{
|
||||
name = "default";
|
||||
config = {
|
||||
"security.privileged" = "false";
|
||||
"security.idmap.isolated" = "true";
|
||||
};
|
||||
devices = {
|
||||
root = {
|
||||
path = "/";
|
||||
pool = "default";
|
||||
type = "disk";
|
||||
};
|
||||
};
|
||||
}];
|
||||
};
|
||||
|
||||
systemd.services = mapAttrs'
|
||||
(name: ws: nameValuePair "incus-workspace-${name}" (mkIncusService name ws))
|
||||
incusWorkspaces;
|
||||
|
||||
# Extra alias for incus shell access (ssh is also available via default.nix aliases)
|
||||
environment.shellAliases = mkMerge (mapAttrsToList
|
||||
(name: ws: {
|
||||
"workspace_${name}_shell" = "doas incus exec workspace-${name} -- su -l googlebot";
|
||||
})
|
||||
incusWorkspaces);
|
||||
};
|
||||
}
|
||||
147
common/sandboxed-workspace/vm.nix
Normal file
147
common/sandboxed-workspace/vm.nix
Normal file
@@ -0,0 +1,147 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
# VM-specific configuration for sandboxed workspaces using microvm.nix
|
||||
# This module is imported by default.nix for workspaces with type = "vm"
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.sandboxed-workspace;
|
||||
hostConfig = config;
|
||||
|
||||
# Generate a deterministic vsock CID from workspace name.
|
||||
#
|
||||
# vsock (virtual sockets) enables host-VM communication without networking.
|
||||
# cloud-hypervisor uses vsock for systemd-notify integration: when a VM finishes
|
||||
# booting, systemd sends READY=1 to the host via vsock, allowing the host's
|
||||
# microvm@ service to accurately track VM boot status instead of guessing.
|
||||
#
|
||||
# Each VM needs a unique CID (Context Identifier). Reserved CIDs per vsock(7):
|
||||
# - VMADDR_CID_HYPERVISOR (0): reserved for hypervisor
|
||||
# - VMADDR_CID_LOCAL (1): loopback address
|
||||
# - VMADDR_CID_HOST (2): host address
|
||||
# See: https://man7.org/linux/man-pages/man7/vsock.7.html
|
||||
# https://docs.kernel.org/virt/kvm/vsock.html
|
||||
#
|
||||
# We auto-generate from SHA256 hash to ensure uniqueness without manual assignment.
|
||||
# Range: 100 - 16777315 (offset avoids reserved CIDs and leaves 3-99 for manual use)
|
||||
nameToCid = name:
|
||||
let
|
||||
hash = builtins.hashString "sha256" name;
|
||||
hexPart = builtins.substring 0 6 hash;
|
||||
in
|
||||
100 + (builtins.foldl'
|
||||
(acc: c: acc * 16 + (
|
||||
if c == "a" then 10
|
||||
else if c == "b" then 11
|
||||
else if c == "c" then 12
|
||||
else if c == "d" then 13
|
||||
else if c == "e" then 14
|
||||
else if c == "f" then 15
|
||||
else lib.strings.toInt c
|
||||
)) 0
|
||||
(lib.stringToCharacters hexPart));
|
||||
|
||||
# Filter for VM-type workspaces only
|
||||
vmWorkspaces = filterAttrs (n: ws: ws.type == "vm") cfg.workspaces;
|
||||
|
||||
# Generate VM configuration for a workspace
|
||||
mkVmConfig = name: ws: {
|
||||
inherit pkgs; # Use host's pkgs (includes allowUnfree)
|
||||
config = import ws.config;
|
||||
specialArgs = { inputs = hostConfig.inputs; };
|
||||
extraModules = [
|
||||
(import ./base.nix {
|
||||
inherit hostConfig;
|
||||
workspaceName = name;
|
||||
ip = ws.ip;
|
||||
networkInterface = { Type = "ether"; };
|
||||
})
|
||||
{
|
||||
environment.systemPackages = [
|
||||
(lib.hiPrio (pkgs.writeShellScriptBin "claude" ''
|
||||
exec ${pkgs.claude-code}/bin/claude --dangerously-skip-permissions "$@"
|
||||
''))
|
||||
];
|
||||
|
||||
# MicroVM specific configuration
|
||||
microvm = {
|
||||
# Use cloud-hypervisor for better performance
|
||||
hypervisor = lib.mkDefault "cloud-hypervisor";
|
||||
|
||||
# Resource allocation
|
||||
vcpu = 8;
|
||||
mem = 4096; # 4GB RAM
|
||||
|
||||
# Disk for writable overlay
|
||||
volumes = [{
|
||||
image = "overlay.img";
|
||||
mountPoint = "/nix/.rw-store";
|
||||
size = 8192; # 8GB
|
||||
}];
|
||||
|
||||
# Shared directories with host using virtiofs
|
||||
shares = [
|
||||
{
|
||||
# Share the host's /nix/store for accessing packages
|
||||
proto = "virtiofs";
|
||||
tag = "ro-store";
|
||||
source = "/nix/store";
|
||||
mountPoint = "/nix/.ro-store";
|
||||
}
|
||||
{
|
||||
proto = "virtiofs";
|
||||
tag = "workspace";
|
||||
source = "/home/googlebot/sandboxed/${name}/workspace";
|
||||
mountPoint = "/home/googlebot/workspace";
|
||||
}
|
||||
{
|
||||
proto = "virtiofs";
|
||||
tag = "ssh-host-keys";
|
||||
source = "/home/googlebot/sandboxed/${name}/ssh-host-keys";
|
||||
mountPoint = "/etc/ssh-host-keys";
|
||||
}
|
||||
{
|
||||
proto = "virtiofs";
|
||||
tag = "claude-config";
|
||||
source = "/home/googlebot/sandboxed/${name}/claude-config";
|
||||
mountPoint = "/home/googlebot/claude-config";
|
||||
}
|
||||
];
|
||||
|
||||
# Writeable overlay for /nix/store
|
||||
writableStoreOverlay = "/nix/.rw-store";
|
||||
|
||||
# TAP interface for bridged networking
|
||||
# The interface name "vm-*" matches the pattern in common/network/microvm.nix
|
||||
# which automatically attaches it to the microbr bridge
|
||||
interfaces = [{
|
||||
type = "tap";
|
||||
id = "vm-${name}";
|
||||
mac = lib.mkMac "vm-${name}";
|
||||
}];
|
||||
|
||||
# Enable vsock for systemd-notify integration
|
||||
vsock.cid =
|
||||
if ws.cid != null
|
||||
then ws.cid
|
||||
else nameToCid name;
|
||||
};
|
||||
}
|
||||
];
|
||||
autostart = ws.autoStart;
|
||||
};
|
||||
in
|
||||
{
|
||||
config = mkMerge [
|
||||
(mkIf (cfg.enable && vmWorkspaces != { }) {
|
||||
# Convert VM workspace configs to microvm.nix format
|
||||
microvm.vms = mapAttrs mkVmConfig vmWorkspaces;
|
||||
})
|
||||
|
||||
# microvm.nixosModules.host enables KSM, but /sys is read-only in containers
|
||||
(mkIf config.boot.isContainer {
|
||||
hardware.ksm.enable = false;
|
||||
})
|
||||
];
|
||||
}
|
||||
16
common/server/actualbudget.nix
Normal file
16
common/server/actualbudget.nix
Normal file
@@ -0,0 +1,16 @@
|
||||
{ config, lib, ... }:
|
||||
|
||||
let
|
||||
cfg = config.services.actual;
|
||||
in
|
||||
{
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.actual.settings = {
|
||||
port = 25448;
|
||||
};
|
||||
|
||||
backup.group."actual-budget".paths = [
|
||||
"/var/lib/actual"
|
||||
];
|
||||
};
|
||||
}
|
||||
62
common/server/atticd.nix
Normal file
62
common/server/atticd.nix
Normal file
@@ -0,0 +1,62 @@
|
||||
{ config, lib, ... }:
|
||||
|
||||
{
|
||||
config = lib.mkIf (config.thisMachine.hasRole."binary-cache" && !config.boot.isContainer) {
|
||||
services.atticd = {
|
||||
enable = true;
|
||||
environmentFile = config.age.secrets.atticd-credentials.path;
|
||||
settings = {
|
||||
listen = "[::]:28338";
|
||||
database.url = "postgresql:///atticd?host=/run/postgresql";
|
||||
require-proof-of-possession = false;
|
||||
|
||||
# Disable chunking — the dedup savings don't justify the CPU/IO
|
||||
# overhead for local storage, especially on ZFS which already
|
||||
# does block-level compression.
|
||||
chunking = {
|
||||
nar-size-threshold = 0;
|
||||
min-size = 16 * 1024;
|
||||
avg-size = 64 * 1024;
|
||||
max-size = 256 * 1024;
|
||||
};
|
||||
|
||||
# Let ZFS handle compression instead of double-compressing.
|
||||
compression.type = "none";
|
||||
|
||||
garbage-collection.default-retention-period = "6 months";
|
||||
};
|
||||
};
|
||||
|
||||
# PostgreSQL for atticd
|
||||
services.postgresql = {
|
||||
enable = true;
|
||||
ensureDatabases = [ "atticd" ];
|
||||
ensureUsers = [{
|
||||
name = "atticd";
|
||||
ensureDBOwnership = true;
|
||||
}];
|
||||
};
|
||||
|
||||
# Use a static user so the ZFS mountpoint at /var/lib/atticd works
|
||||
# (DynamicUser conflicts with ZFS mountpoints)
|
||||
users.users.atticd = {
|
||||
isSystemUser = true;
|
||||
group = "atticd";
|
||||
home = "/var/lib/atticd";
|
||||
};
|
||||
users.groups.atticd = { };
|
||||
|
||||
systemd.services.atticd = {
|
||||
after = [ "postgresql.service" ];
|
||||
requires = [ "postgresql.service" ];
|
||||
partOf = [ "postgresql.service" ];
|
||||
serviceConfig = {
|
||||
DynamicUser = lib.mkForce false;
|
||||
User = "atticd";
|
||||
Group = "atticd";
|
||||
};
|
||||
};
|
||||
|
||||
age.secrets.atticd-credentials.file = ../../secrets/atticd-credentials.age;
|
||||
};
|
||||
}
|
||||
@@ -1,43 +0,0 @@
|
||||
{ config, lib, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.ceph;
|
||||
in {
|
||||
options.ceph = {
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
# ceph.enable = true;
|
||||
|
||||
## S3 Object gateway
|
||||
#ceph.rgw.enable = true;
|
||||
#ceph.rgw.daemons = [
|
||||
#];
|
||||
|
||||
# https://docs.ceph.com/en/latest/start/intro/
|
||||
|
||||
# meta object storage daemon
|
||||
ceph.osd.enable = true;
|
||||
ceph.osd.daemons = [
|
||||
|
||||
];
|
||||
# monitor's ceph state
|
||||
ceph.mon.enable = true;
|
||||
ceph.mon.daemons = [
|
||||
|
||||
];
|
||||
# manage ceph
|
||||
ceph.mgr.enable = true;
|
||||
ceph.mgr.daemons = [
|
||||
|
||||
];
|
||||
# metadata server
|
||||
ceph.mds.enable = true;
|
||||
ceph.mds.daemons = [
|
||||
|
||||
];
|
||||
ceph.global.fsid = "925773DC-D95F-476C-BBCD-08E01BF0865F";
|
||||
|
||||
};
|
||||
}
|
||||
@@ -1,58 +0,0 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.cloudflared;
|
||||
settingsFormat = pkgs.formats.yaml { };
|
||||
in
|
||||
{
|
||||
meta.maintainers = with maintainers; [ pmc ];
|
||||
|
||||
options = {
|
||||
services.cloudflared = {
|
||||
enable = mkEnableOption "cloudflared";
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
default = pkgs.cloudflared;
|
||||
description = "The cloudflared package to use";
|
||||
example = literalExpression ''pkgs.cloudflared'';
|
||||
};
|
||||
config = mkOption {
|
||||
type = settingsFormat.type;
|
||||
description = "Contents of the config.yaml as an attrset; see https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/configuration/configuration-file for documentation on the contents";
|
||||
example = literalExpression ''
|
||||
{
|
||||
url = "http://localhost:3000";
|
||||
tunnel = "505c8dd1-e4fb-4ea4-b909-26b8f61ceaaf";
|
||||
credentials-file = "/var/lib/cloudflared/505c8dd1-e4fb-4ea4-b909-26b8f61ceaaf.json";
|
||||
}
|
||||
'';
|
||||
};
|
||||
|
||||
configFile = mkOption {
|
||||
type = types.path;
|
||||
description = "Path to cloudflared config.yaml.";
|
||||
example = literalExpression ''"/etc/cloudflared/config.yaml"'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable ({
|
||||
# Prefer the config file over settings if both are set.
|
||||
services.cloudflared.configFile = mkDefault (settingsFormat.generate "cloudflared.yaml" cfg.config);
|
||||
|
||||
systemd.services.cloudflared = {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
description = "Cloudflare Argo Tunnel";
|
||||
serviceConfig = {
|
||||
TimeoutStartSec = 0;
|
||||
Type = "notify";
|
||||
ExecStart = "${cfg.package}/bin/cloudflared --config ${cfg.configFile} --no-autoupdate tunnel run";
|
||||
Restart = "on-failure";
|
||||
RestartSec = "5s";
|
||||
};
|
||||
};
|
||||
});
|
||||
}
|
||||
@@ -1,18 +1,22 @@
|
||||
{ config, pkgs, ... }:
|
||||
{ ... }:
|
||||
|
||||
{
|
||||
imports = [
|
||||
./nginx.nix
|
||||
./thelounge.nix
|
||||
./mumble.nix
|
||||
./icecast.nix
|
||||
./nginx-stream.nix
|
||||
./matrix.nix
|
||||
./zerobin.nix
|
||||
./gitea.nix
|
||||
./privatebin/privatebin.nix
|
||||
./radio.nix
|
||||
./samba.nix
|
||||
./cloudflared.nix
|
||||
./owncast.nix
|
||||
./mailserver.nix
|
||||
./nextcloud.nix
|
||||
./gitea-actions-runner.nix
|
||||
./atticd.nix
|
||||
./librechat.nix
|
||||
./actualbudget.nix
|
||||
./unifi.nix
|
||||
./ntfy.nix
|
||||
./gatus.nix
|
||||
];
|
||||
}
|
||||
146
common/server/gatus.nix
Normal file
146
common/server/gatus.nix
Normal file
@@ -0,0 +1,146 @@
|
||||
{ lib, config, ... }:
|
||||
|
||||
let
|
||||
cfg = config.services.gatus;
|
||||
port = 31103;
|
||||
in
|
||||
{
|
||||
options.services.gatus = {
|
||||
hostname = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
example = "status.example.com";
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.gatus = {
|
||||
environmentFile = "/run/agenix/ntfy-token";
|
||||
settings = {
|
||||
storage = {
|
||||
type = "sqlite";
|
||||
path = "/var/lib/gatus/data.db";
|
||||
};
|
||||
|
||||
web = {
|
||||
address = "127.0.0.1";
|
||||
port = port;
|
||||
};
|
||||
|
||||
alerting.ntfy = {
|
||||
url = "https://ntfy.neet.dev";
|
||||
topic = "service-failures";
|
||||
priority = 4;
|
||||
default-alert = {
|
||||
enabled = true;
|
||||
failure-threshold = 3;
|
||||
success-threshold = 2;
|
||||
send-on-resolved = true;
|
||||
};
|
||||
token = "$NTFY_TOKEN";
|
||||
};
|
||||
|
||||
endpoints = [
|
||||
{
|
||||
name = "Gitea";
|
||||
group = "services";
|
||||
url = "https://git.neet.dev";
|
||||
interval = "5m";
|
||||
conditions = [
|
||||
"[STATUS] == 200"
|
||||
];
|
||||
alerts = [{ type = "ntfy"; }];
|
||||
}
|
||||
{
|
||||
name = "The Lounge";
|
||||
group = "services";
|
||||
url = "https://irc.neet.dev";
|
||||
interval = "5m";
|
||||
conditions = [
|
||||
"[STATUS] == 200"
|
||||
];
|
||||
alerts = [{ type = "ntfy"; }];
|
||||
}
|
||||
{
|
||||
name = "ntfy";
|
||||
group = "services";
|
||||
url = "https://ntfy.neet.dev/v1/health";
|
||||
interval = "5m";
|
||||
conditions = [
|
||||
"[STATUS] == 200"
|
||||
];
|
||||
alerts = [{ type = "ntfy"; }];
|
||||
}
|
||||
{
|
||||
name = "Librechat";
|
||||
group = "services";
|
||||
url = "https://chat.neet.dev";
|
||||
interval = "5m";
|
||||
conditions = [
|
||||
"[STATUS] == 200"
|
||||
];
|
||||
alerts = [{ type = "ntfy"; }];
|
||||
}
|
||||
{
|
||||
name = "Owncast";
|
||||
group = "services";
|
||||
url = "https://live.neet.dev";
|
||||
interval = "5m";
|
||||
conditions = [
|
||||
"[STATUS] == 200"
|
||||
];
|
||||
alerts = [{ type = "ntfy"; }];
|
||||
}
|
||||
{
|
||||
name = "Nextcloud";
|
||||
group = "services";
|
||||
url = "https://neet.cloud";
|
||||
interval = "5m";
|
||||
conditions = [
|
||||
"[STATUS] == any(200, 302)"
|
||||
];
|
||||
alerts = [{ type = "ntfy"; }];
|
||||
}
|
||||
{
|
||||
name = "Element Web";
|
||||
group = "services";
|
||||
url = "https://chat.neet.space";
|
||||
interval = "5m";
|
||||
conditions = [
|
||||
"[STATUS] == 200"
|
||||
];
|
||||
alerts = [{ type = "ntfy"; }];
|
||||
}
|
||||
{
|
||||
name = "Mumble";
|
||||
group = "services";
|
||||
url = "tcp://voice.neet.space:23563";
|
||||
interval = "5m";
|
||||
conditions = [
|
||||
"[CONNECTED] == true"
|
||||
];
|
||||
alerts = [{ type = "ntfy"; }];
|
||||
}
|
||||
{
|
||||
name = "Navidrome";
|
||||
group = "services";
|
||||
url = "https://navidrome.neet.cloud";
|
||||
interval = "5m";
|
||||
conditions = [
|
||||
"[STATUS] == 200"
|
||||
];
|
||||
alerts = [{ type = "ntfy"; }];
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
services.nginx.enable = true;
|
||||
services.nginx.virtualHosts.${cfg.hostname} = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations."/" = {
|
||||
proxyPass = "http://127.0.0.1:${toString port}";
|
||||
proxyWebsockets = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
86
common/server/gitea-actions-runner.nix
Normal file
86
common/server/gitea-actions-runner.nix
Normal file
@@ -0,0 +1,86 @@
|
||||
{ config, lib, ... }:
|
||||
|
||||
# Gitea Actions Runner inside a NixOS container.
|
||||
# The container shares the host's /nix/store (read-only) and nix-daemon socket,
|
||||
# so builds go through the host daemon and outputs land in the host store.
|
||||
# Warning: NixOS containers are not fully secure — do not run untrusted code.
|
||||
# To enable, assign a machine the 'gitea-actions-runner' system role.
|
||||
|
||||
let
|
||||
thisMachineIsARunner = config.thisMachine.hasRole."gitea-actions-runner";
|
||||
containerName = "gitea-runner";
|
||||
giteaRunnerUid = 991;
|
||||
giteaRunnerGid = 989;
|
||||
in
|
||||
{
|
||||
config = lib.mkIf (thisMachineIsARunner && !config.boot.isContainer) {
|
||||
|
||||
containers.${containerName} = {
|
||||
autoStart = true;
|
||||
ephemeral = true;
|
||||
|
||||
bindMounts = {
|
||||
"/run/agenix/gitea-actions-runner-token" = {
|
||||
hostPath = "/run/agenix/gitea-actions-runner-token";
|
||||
isReadOnly = true;
|
||||
};
|
||||
"/var/lib/gitea-runner" = {
|
||||
hostPath = "/var/lib/gitea-runner";
|
||||
isReadOnly = false;
|
||||
};
|
||||
};
|
||||
|
||||
config = { config, lib, pkgs, ... }: {
|
||||
system.stateVersion = "25.11";
|
||||
|
||||
services.gitea-actions-runner.instances.inst = {
|
||||
enable = true;
|
||||
name = containerName;
|
||||
url = "https://git.neet.dev/";
|
||||
tokenFile = "/run/agenix/gitea-actions-runner-token";
|
||||
labels = [ "nixos:host" ];
|
||||
};
|
||||
|
||||
# Disable dynamic user so runner state persists via bind mount
|
||||
assertions = [{
|
||||
assertion = config.systemd.services.gitea-runner-inst.enable;
|
||||
message = "Expected systemd service 'gitea-runner-inst' is not enabled — the gitea-actions-runner module may have changed its naming scheme.";
|
||||
}];
|
||||
systemd.services.gitea-runner-inst.serviceConfig.DynamicUser = lib.mkForce false;
|
||||
users.users.gitea-runner = {
|
||||
uid = giteaRunnerUid;
|
||||
home = "/var/lib/gitea-runner";
|
||||
group = "gitea-runner";
|
||||
isSystemUser = true;
|
||||
createHome = true;
|
||||
};
|
||||
users.groups.gitea-runner.gid = giteaRunnerGid;
|
||||
|
||||
nix.settings.experimental-features = [ "nix-command" "flakes" ];
|
||||
|
||||
environment.systemPackages = with pkgs; [
|
||||
git
|
||||
nodejs
|
||||
jq
|
||||
attic-client
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
# Needs to be outside of the container because container uses's the host's nix-daemon
|
||||
nix.settings.trusted-users = [ "gitea-runner" ];
|
||||
|
||||
# Matching user on host — the container's gitea-runner UID must be
|
||||
# recognized by the host's nix-daemon as trusted (shared UID namespace)
|
||||
users.users.gitea-runner = {
|
||||
uid = giteaRunnerUid;
|
||||
home = "/var/lib/gitea-runner";
|
||||
group = "gitea-runner";
|
||||
isSystemUser = true;
|
||||
createHome = true;
|
||||
};
|
||||
users.groups.gitea-runner.gid = giteaRunnerGid;
|
||||
|
||||
age.secrets.gitea-actions-runner-token.file = ../../secrets/gitea-actions-runner-token.age;
|
||||
};
|
||||
}
|
||||
@@ -2,7 +2,8 @@
|
||||
|
||||
let
|
||||
cfg = config.services.gitea;
|
||||
in {
|
||||
in
|
||||
{
|
||||
options.services.gitea = {
|
||||
hostname = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
@@ -11,29 +12,63 @@ in {
|
||||
};
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.gitea = {
|
||||
domain = cfg.hostname;
|
||||
rootUrl = "https://${cfg.hostname}/";
|
||||
appName = cfg.hostname;
|
||||
ssh.enable = true;
|
||||
# lfs.enable = true;
|
||||
dump.enable = true;
|
||||
cookieSecure = true;
|
||||
disableRegistration = true;
|
||||
lfs.enable = true;
|
||||
# dump.enable = true;
|
||||
settings = {
|
||||
server = {
|
||||
ROOT_URL = "https://${cfg.hostname}/";
|
||||
DOMAIN = cfg.hostname;
|
||||
};
|
||||
other = {
|
||||
SHOW_FOOTER_VERSION = false;
|
||||
};
|
||||
ui = {
|
||||
DEFAULT_THEME = "arc-green";
|
||||
DEFAULT_THEME = "gitea-dark";
|
||||
};
|
||||
service = {
|
||||
DISABLE_REGISTRATION = true;
|
||||
};
|
||||
session = {
|
||||
COOKIE_SECURE = true;
|
||||
PROVIDER = "db";
|
||||
SESSION_LIFE_TIME = 259200; # 3 days
|
||||
GC_INTERVAL_TIME = 259200; # 3 days
|
||||
};
|
||||
mailer = {
|
||||
ENABLED = true;
|
||||
MAILER_TYPE = "smtp";
|
||||
SMTP_ADDR = "mail.neet.dev";
|
||||
SMTP_PORT = "465";
|
||||
IS_TLS_ENABLED = true;
|
||||
USER = "robot@runyan.org";
|
||||
FROM = "no-reply@neet.dev";
|
||||
};
|
||||
actions = {
|
||||
ENABLED = true;
|
||||
};
|
||||
indexer = {
|
||||
REPO_INDEXER_ENABLED = true;
|
||||
};
|
||||
};
|
||||
mailerPasswordFile = "/run/agenix/robots-email-pw";
|
||||
};
|
||||
age.secrets.robots-email-pw = {
|
||||
file = ../../secrets/robots-email-pw.age;
|
||||
owner = config.services.gitea.user;
|
||||
};
|
||||
|
||||
# backups
|
||||
backup.group."gitea".paths = [
|
||||
config.services.gitea.stateDir
|
||||
];
|
||||
|
||||
services.nginx.enable = true;
|
||||
services.nginx.virtualHosts.${cfg.hostname} = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations."/" = {
|
||||
proxyPass = "http://localhost:${toString cfg.httpPort}";
|
||||
proxyPass = "http://localhost:${toString cfg.settings.server.HTTP_PORT}";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
@@ -1,42 +0,0 @@
|
||||
{ config, pkgs, ... }:
|
||||
|
||||
{
|
||||
services.gitlab = {
|
||||
enable = true;
|
||||
databasePasswordFile = "/var/keys/gitlab/db_password";
|
||||
initialRootPasswordFile = "/var/keys/gitlab/root_password";
|
||||
https = true;
|
||||
host = "git.neet.dev";
|
||||
port = 443;
|
||||
user = "git";
|
||||
group = "git";
|
||||
databaseUsername = "git";
|
||||
smtp = {
|
||||
enable = true;
|
||||
address = "localhost";
|
||||
port = 25;
|
||||
};
|
||||
secrets = {
|
||||
dbFile = "/var/keys/gitlab/db";
|
||||
secretFile = "/var/keys/gitlab/secret";
|
||||
otpFile = "/var/keys/gitlab/otp";
|
||||
jwsFile = "/var/keys/gitlab/jws";
|
||||
};
|
||||
extraConfig = {
|
||||
gitlab = {
|
||||
email_from = "gitlab-no-reply@neet.dev";
|
||||
email_display_name = "neet.dev GitLab";
|
||||
email_reply_to = "gitlab-no-reply@neet.dev";
|
||||
};
|
||||
};
|
||||
pagesExtraArgs = [ "-listen-proxy" "127.0.0.1:8090" ];
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts = {
|
||||
"git.neet.dev" = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations."/".proxyPass = "http://unix:/run/gitlab/gitlab-workhorse.socket";
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,25 +0,0 @@
|
||||
{ config, pkgs, ... }:
|
||||
|
||||
let
|
||||
domain = "hydra.neet.dev";
|
||||
port = 3000;
|
||||
notifyEmail = "hydra@neet.dev";
|
||||
in
|
||||
{
|
||||
services.nginx.virtualHosts."${domain}" = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations."/" = {
|
||||
proxyPass = "http://localhost:${toString port}";
|
||||
};
|
||||
};
|
||||
|
||||
services.hydra = {
|
||||
enable = true;
|
||||
inherit port;
|
||||
hydraURL = "https://${domain}";
|
||||
useSubstitutes = true;
|
||||
notificationSender = notifyEmail;
|
||||
buildMachinesFiles = [];
|
||||
};
|
||||
}
|
||||
@@ -1,64 +0,0 @@
|
||||
{ lib, config, ... }:
|
||||
|
||||
# configures icecast to only accept source from localhost
|
||||
# to a audio optimized stream on services.icecast.mount
|
||||
# made available via nginx for http access on
|
||||
# https://host/mount
|
||||
|
||||
let
|
||||
cfg = config.services.icecast;
|
||||
in {
|
||||
options.services.icecast = {
|
||||
mount = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
example = "stream.mp3";
|
||||
};
|
||||
fallback = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
example = "fallback.mp3";
|
||||
};
|
||||
nginx = lib.mkEnableOption "enable nginx";
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.icecast = {
|
||||
listen.address = "0.0.0.0";
|
||||
listen.port = 8001;
|
||||
admin.password = "hackme";
|
||||
extraConf = ''
|
||||
<authentication>
|
||||
<source-password>hackme</source-password>
|
||||
</authentication>
|
||||
<http-headers>
|
||||
<header type="cors" name="Access-Control-Allow-Origin" />
|
||||
</http-headers>
|
||||
<mount type="normal">
|
||||
<mount-name>/${cfg.mount}</mount-name>
|
||||
<max-listeners>30</max-listeners>
|
||||
<bitrate>64000</bitrate>
|
||||
<hidden>false</hidden>
|
||||
<public>false</public>
|
||||
<fallback-mount>/${cfg.fallback}</fallback-mount>
|
||||
<fallback-override>1</fallback-override>
|
||||
</mount>
|
||||
<mount type="normal">
|
||||
<mount-name>/${cfg.fallback}</mount-name>
|
||||
<max-listeners>30</max-listeners>
|
||||
<bitrate>64000</bitrate>
|
||||
<hidden>false</hidden>
|
||||
<public>false</public>
|
||||
</mount>
|
||||
'';
|
||||
};
|
||||
services.nginx.virtualHosts.${cfg.hostname} = lib.mkIf cfg.nginx {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations."/${cfg.mount}" = {
|
||||
proxyPass = "http://localhost:${toString cfg.listen.port}/${cfg.mount}";
|
||||
extraConfig = ''
|
||||
add_header Access-Control-Allow-Origin *;
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
69
common/server/librechat.nix
Normal file
69
common/server/librechat.nix
Normal file
@@ -0,0 +1,69 @@
|
||||
{ config, lib, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.librechat-container;
|
||||
in
|
||||
{
|
||||
options.services.librechat-container = {
|
||||
enable = mkEnableOption "librechat";
|
||||
port = mkOption {
|
||||
type = types.int;
|
||||
default = 3080;
|
||||
};
|
||||
host = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
example = "example.com";
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
virtualisation.oci-containers.containers = {
|
||||
librechat = {
|
||||
image = "ghcr.io/danny-avila/librechat:v0.8.1";
|
||||
environment = {
|
||||
HOST = "0.0.0.0";
|
||||
MONGO_URI = "mongodb://host.containers.internal:27017/LibreChat";
|
||||
ENDPOINTS = "openAI,google,bingAI,gptPlugins";
|
||||
OPENAI_MODELS = lib.concatStringsSep "," [
|
||||
"gpt-4o-mini"
|
||||
"o3-mini"
|
||||
"gpt-4o"
|
||||
"o1"
|
||||
];
|
||||
REFRESH_TOKEN_EXPIRY = toString (1000 * 60 * 60 * 24 * 30); # 30 days
|
||||
};
|
||||
environmentFiles = [
|
||||
"/run/agenix/librechat-env-file"
|
||||
];
|
||||
ports = [
|
||||
"${toString cfg.port}:3080"
|
||||
];
|
||||
};
|
||||
};
|
||||
age.secrets.librechat-env-file.file = ../../secrets/librechat-env-file.age;
|
||||
|
||||
services.mongodb.enable = true;
|
||||
services.mongodb.bind_ip = "0.0.0.0";
|
||||
|
||||
# easier podman maintenance
|
||||
virtualisation.oci-containers.backend = "podman";
|
||||
virtualisation.podman.dockerSocket.enable = true;
|
||||
virtualisation.podman.dockerCompat = true;
|
||||
|
||||
# For mongodb access
|
||||
networking.firewall.trustedInterfaces = [
|
||||
"podman0" # for librechat
|
||||
];
|
||||
|
||||
services.nginx.virtualHosts.${cfg.host} = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations."/" = {
|
||||
proxyPass = "http://localhost:${toString cfg.port}";
|
||||
proxyWebsockets = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
121
common/server/mailserver.nix
Normal file
121
common/server/mailserver.nix
Normal file
@@ -0,0 +1,121 @@
|
||||
{ config, pkgs, lib, ... }:
|
||||
|
||||
with builtins;
|
||||
|
||||
let
|
||||
cfg = config.mailserver;
|
||||
domains = [
|
||||
"neet.space"
|
||||
"neet.dev"
|
||||
"neet.cloud"
|
||||
"runyan.org"
|
||||
"runyan.rocks"
|
||||
"thunderhex.com"
|
||||
"tar.ninja"
|
||||
"bsd.ninja"
|
||||
"bsd.rocks"
|
||||
];
|
||||
in
|
||||
{
|
||||
config = lib.mkIf cfg.enable {
|
||||
# kresd doesn't work with tailscale MagicDNS
|
||||
mailserver.localDnsResolver = false;
|
||||
services.resolved.enable = true;
|
||||
|
||||
mailserver = {
|
||||
fqdn = "mail.neet.dev";
|
||||
dkimKeyBits = 2048;
|
||||
indexDir = "/var/lib/mailindex";
|
||||
enableManageSieve = true;
|
||||
fullTextSearch.enable = true;
|
||||
fullTextSearch.memoryLimit = 500;
|
||||
inherit domains;
|
||||
loginAccounts = {
|
||||
"jeremy@runyan.org" = {
|
||||
hashedPasswordFile = "/run/agenix/hashed-email-pw";
|
||||
# catchall for all domains
|
||||
aliases = map (domain: "@${domain}") domains;
|
||||
};
|
||||
"cris@runyan.org" = {
|
||||
hashedPasswordFile = "/run/agenix/cris-hashed-email-pw";
|
||||
aliases = [ "chris@runyan.org" ];
|
||||
};
|
||||
"robot@runyan.org" = {
|
||||
aliases = [
|
||||
"no-reply@neet.dev"
|
||||
"robot@neet.dev"
|
||||
];
|
||||
sendOnly = true;
|
||||
hashedPasswordFile = "/run/agenix/hashed-robots-email-pw";
|
||||
};
|
||||
};
|
||||
rejectRecipients = [
|
||||
"george@runyan.org"
|
||||
"joslyn@runyan.org"
|
||||
"damon@runyan.org"
|
||||
"jonas@runyan.org"
|
||||
"simon@neet.dev"
|
||||
"ellen@runyan.org"
|
||||
];
|
||||
forwards = {
|
||||
"amazon@runyan.org" = [
|
||||
"jeremy@runyan.org"
|
||||
"cris@runyan.org"
|
||||
];
|
||||
};
|
||||
x509.useACMEHost = config.mailserver.fqdn; # use let's encrypt for certs
|
||||
stateVersion = 3;
|
||||
};
|
||||
age.secrets.hashed-email-pw.file = ../../secrets/hashed-email-pw.age;
|
||||
age.secrets.cris-hashed-email-pw.file = ../../secrets/cris-hashed-email-pw.age;
|
||||
age.secrets.hashed-robots-email-pw.file = ../../secrets/hashed-robots-email-pw.age;
|
||||
|
||||
# Get let's encrypt cert
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
virtualHosts."${config.mailserver.fqdn}" = {
|
||||
forceSSL = true;
|
||||
enableACME = true;
|
||||
};
|
||||
};
|
||||
|
||||
# sendmail to use xxx@domain instead of xxx@mail.domain
|
||||
services.postfix.settings.main.myorigin = "$mydomain";
|
||||
|
||||
# relay sent mail through mailgun
|
||||
# https://www.howtoforge.com/community/threads/different-smtp-relays-for-different-domains-in-postfix.82711/#post-392620
|
||||
services.postfix.settings.main = {
|
||||
smtp_sasl_auth_enable = "yes";
|
||||
smtp_sasl_security_options = "noanonymous";
|
||||
smtp_sasl_password_maps = "hash:/var/lib/postfix/conf/sasl_relay_passwd";
|
||||
smtp_use_tls = "yes";
|
||||
sender_dependent_relayhost_maps = "hash:/var/lib/postfix/conf/sender_relay";
|
||||
smtp_sender_dependent_authentication = "yes";
|
||||
};
|
||||
services.postfix.mapFiles.sender_relay =
|
||||
let
|
||||
relayHost = "[smtp.mailgun.org]:587";
|
||||
in
|
||||
pkgs.writeText "sender_relay"
|
||||
(concatStringsSep "\n" (map (domain: "@${domain} ${relayHost}") domains));
|
||||
services.postfix.mapFiles.sasl_relay_passwd = "/run/agenix/sasl_relay_passwd";
|
||||
age.secrets.sasl_relay_passwd.file = ../../secrets/sasl_relay_passwd.age;
|
||||
|
||||
# webmail
|
||||
services.roundcube = {
|
||||
enable = true;
|
||||
hostName = config.mailserver.fqdn;
|
||||
extraConfig = ''
|
||||
# starttls needed for authentication, so the fqdn required to match the certificate
|
||||
$config['smtp_server'] = "tls://${config.mailserver.fqdn}";
|
||||
$config['smtp_user'] = "%u";
|
||||
$config['smtp_pass'] = "%p";
|
||||
'';
|
||||
};
|
||||
|
||||
# backups
|
||||
backup.group."email".paths = [
|
||||
config.mailserver.mailDirectory
|
||||
];
|
||||
};
|
||||
}
|
||||
@@ -3,7 +3,8 @@
|
||||
let
|
||||
cfg = config.services.matrix;
|
||||
certs = config.security.acme.certs;
|
||||
in {
|
||||
in
|
||||
{
|
||||
options.services.matrix = {
|
||||
enable = lib.mkEnableOption "enable matrix";
|
||||
element-web = {
|
||||
@@ -59,23 +60,25 @@ in {
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.matrix-synapse = {
|
||||
enable = true;
|
||||
server_name = cfg.host;
|
||||
enable_registration = cfg.enable_registration;
|
||||
listeners = [ {
|
||||
bind_address = "127.0.0.1";
|
||||
port = cfg.port;
|
||||
tls = false;
|
||||
resources = [ {
|
||||
compress = true;
|
||||
names = [ "client" "federation" ];
|
||||
} ];
|
||||
} ];
|
||||
turn_uris = [
|
||||
"turn:${cfg.turn.host}:${toString cfg.turn.port}?transport=udp"
|
||||
"turn:${cfg.turn.host}:${toString cfg.turn.port}?transport=tcp"
|
||||
];
|
||||
turn_shared_secret = cfg.turn.secret;
|
||||
turn_user_lifetime = "1h";
|
||||
settings = {
|
||||
server_name = cfg.host;
|
||||
enable_registration = cfg.enable_registration;
|
||||
listeners = [{
|
||||
bind_addresses = [ "127.0.0.1" ];
|
||||
port = cfg.port;
|
||||
tls = false;
|
||||
resources = [{
|
||||
compress = true;
|
||||
names = [ "client" "federation" ];
|
||||
}];
|
||||
}];
|
||||
turn_uris = [
|
||||
"turn:${cfg.turn.host}:${toString cfg.turn.port}?transport=udp"
|
||||
"turn:${cfg.turn.host}:${toString cfg.turn.port}?transport=tcp"
|
||||
];
|
||||
turn_shared_secret = cfg.turn.secret;
|
||||
turn_user_lifetime = "1h";
|
||||
};
|
||||
};
|
||||
|
||||
services.coturn = {
|
||||
@@ -118,7 +121,7 @@ in {
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
|
||||
virtualHosts.${cfg.host} = {
|
||||
virtualHosts.${cfg.host} = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
listen = [
|
||||
@@ -135,7 +138,8 @@ in {
|
||||
];
|
||||
locations."/".proxyPass = "http://localhost:${toString cfg.port}";
|
||||
};
|
||||
virtualHosts.${cfg.turn.host} = { # get TLS cert for TURN server
|
||||
virtualHosts.${cfg.turn.host} = {
|
||||
# get TLS cert for TURN server
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
};
|
||||
|
||||
@@ -3,7 +3,8 @@
|
||||
let
|
||||
cfg = config.services.murmur;
|
||||
certs = config.security.acme.certs;
|
||||
in {
|
||||
in
|
||||
{
|
||||
options.services.murmur.domain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
};
|
||||
|
||||
155
common/server/nextcloud.nix
Normal file
155
common/server/nextcloud.nix
Normal file
@@ -0,0 +1,155 @@
|
||||
{ config, pkgs, lib, ... }:
|
||||
|
||||
|
||||
let
|
||||
cfg = config.services.nextcloud;
|
||||
|
||||
nextcloudHostname = "runyan.org";
|
||||
collaboraOnlineHostname = "collabora.runyan.org";
|
||||
whiteboardHostname = "whiteboard.runyan.org";
|
||||
whiteboardPort = 3002; # Seems impossible to change
|
||||
|
||||
# Hardcoded public ip of ponyo... I wish I didn't need this...
|
||||
public_ip_address = "147.135.114.130";
|
||||
in
|
||||
{
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.nextcloud = {
|
||||
https = true;
|
||||
package = pkgs.nextcloud32;
|
||||
hostName = nextcloudHostname;
|
||||
config.dbtype = "sqlite";
|
||||
config.adminuser = "jeremy";
|
||||
config.adminpassFile = "/run/agenix/nextcloud-pw";
|
||||
|
||||
# Apps
|
||||
autoUpdateApps.enable = true;
|
||||
extraAppsEnable = true;
|
||||
extraApps = with config.services.nextcloud.package.packages.apps; {
|
||||
# Want
|
||||
inherit end_to_end_encryption mail spreed;
|
||||
|
||||
# For file and document editing (collabora online and excalidraw)
|
||||
inherit richdocuments whiteboard;
|
||||
|
||||
# Might use
|
||||
inherit calendar qownnotesapi;
|
||||
|
||||
# Try out
|
||||
# inherit bookmarks cookbook deck memories maps music news notes phonetrack polls forms;
|
||||
};
|
||||
|
||||
# Allows installing Apps from the UI (might remove later)
|
||||
appstoreEnable = true;
|
||||
};
|
||||
age.secrets.nextcloud-pw = {
|
||||
file = ../../secrets/nextcloud-pw.age;
|
||||
owner = "nextcloud";
|
||||
};
|
||||
|
||||
# backups
|
||||
backup.group."nextcloud".paths = [
|
||||
config.services.nextcloud.home
|
||||
];
|
||||
|
||||
services.nginx.virtualHosts.${config.services.nextcloud.hostName} = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
};
|
||||
|
||||
# collabora-online
|
||||
# https://diogotc.com/blog/collabora-nextcloud-nixos/
|
||||
services.collabora-online = {
|
||||
enable = true;
|
||||
port = 15972;
|
||||
settings = {
|
||||
# Rely on reverse proxy for SSL
|
||||
ssl = {
|
||||
enable = false;
|
||||
termination = true;
|
||||
};
|
||||
|
||||
# Listen on loopback interface only
|
||||
net = {
|
||||
listen = "loopback";
|
||||
post_allow.host = [ "localhost" ];
|
||||
};
|
||||
|
||||
# Restrict loading documents from WOPI Host
|
||||
storage.wopi = {
|
||||
"@allow" = true;
|
||||
host = [ config.services.nextcloud.hostName ];
|
||||
};
|
||||
|
||||
server_name = collaboraOnlineHostname;
|
||||
};
|
||||
};
|
||||
services.nginx.virtualHosts.${config.services.collabora-online.settings.server_name} = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations."/" = {
|
||||
proxyPass = "http://localhost:${toString config.services.collabora-online.port}";
|
||||
proxyWebsockets = true;
|
||||
};
|
||||
};
|
||||
systemd.services.nextcloud-config-collabora =
|
||||
let
|
||||
wopi_url = "http://localhost:${toString config.services.collabora-online.port}";
|
||||
public_wopi_url = "https://${collaboraOnlineHostname}";
|
||||
wopi_allowlist = lib.concatStringsSep "," [
|
||||
"127.0.0.1"
|
||||
"::1"
|
||||
public_ip_address
|
||||
];
|
||||
in
|
||||
{
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "nextcloud-setup.service" "coolwsd.service" ];
|
||||
requires = [ "coolwsd.service" ];
|
||||
path = [
|
||||
config.services.nextcloud.occ
|
||||
];
|
||||
script = ''
|
||||
nextcloud-occ config:app:set richdocuments wopi_url --value ${lib.escapeShellArg wopi_url}
|
||||
nextcloud-occ config:app:set richdocuments public_wopi_url --value ${lib.escapeShellArg public_wopi_url}
|
||||
nextcloud-occ config:app:set richdocuments wopi_allowlist --value ${lib.escapeShellArg wopi_allowlist}
|
||||
nextcloud-occ richdocuments:setup
|
||||
'';
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
};
|
||||
};
|
||||
|
||||
# Whiteboard
|
||||
services.nextcloud-whiteboard-server = {
|
||||
enable = true;
|
||||
settings.NEXTCLOUD_URL = "https://${nextcloudHostname}";
|
||||
secrets = [ "/run/agenix/whiteboard-server-jwt-secret" ];
|
||||
};
|
||||
systemd.services.nextcloud-config-whiteboard = {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "nextcloud-setup.service" ];
|
||||
requires = [ "coolwsd.service" ];
|
||||
path = [
|
||||
config.services.nextcloud.occ
|
||||
];
|
||||
script = ''
|
||||
nextcloud-occ config:app:set whiteboard collabBackendUrl --value="https://${whiteboardHostname}"
|
||||
nextcloud-occ config:app:set whiteboard jwt_secret_key --value="$JWT_SECRET_KEY"
|
||||
'';
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
EnvironmentFile = [ "/run/agenix/whiteboard-server-jwt-secret" ];
|
||||
};
|
||||
};
|
||||
age.secrets.whiteboard-server-jwt-secret.file = ../../secrets/whiteboard-server-jwt-secret.age;
|
||||
services.nginx.virtualHosts.${whiteboardHostname} = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations."/" = {
|
||||
proxyPass = "http://localhost:${toString whiteboardPort}";
|
||||
proxyWebsockets = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,75 +0,0 @@
|
||||
{ lib, config, pkgs, ... }:
|
||||
|
||||
let
|
||||
cfg = config.services.nginx.stream;
|
||||
nginxWithRTMP = pkgs.nginx.override {
|
||||
modules = [ pkgs.nginxModules.rtmp ];
|
||||
};
|
||||
in {
|
||||
options.services.nginx.stream = {
|
||||
enable = lib.mkEnableOption "enable nginx rtmp/hls/dash video streaming";
|
||||
port = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 1935;
|
||||
description = "rtmp injest/serve port";
|
||||
};
|
||||
rtmpName = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "live";
|
||||
description = "the name of the rtmp application";
|
||||
};
|
||||
hostname = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = "the http host to serve hls";
|
||||
};
|
||||
httpLocation = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "/tmp";
|
||||
description = "the path of the tmp http files";
|
||||
};
|
||||
};
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
|
||||
package = nginxWithRTMP;
|
||||
|
||||
virtualHosts.${cfg.hostname} = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations = {
|
||||
"/stream/hls".root = "${cfg.httpLocation}/hls";
|
||||
"/stream/dash".root = "${cfg.httpLocation}/dash";
|
||||
};
|
||||
extraConfig = ''
|
||||
location /stat {
|
||||
rtmp_stat all;
|
||||
}
|
||||
'';
|
||||
};
|
||||
|
||||
appendConfig = ''
|
||||
rtmp {
|
||||
server {
|
||||
listen ${toString cfg.port};
|
||||
chunk_size 4096;
|
||||
application ${cfg.rtmpName} {
|
||||
allow publish all;
|
||||
allow publish all;
|
||||
live on;
|
||||
record off;
|
||||
hls on;
|
||||
hls_path ${cfg.httpLocation}/hls;
|
||||
dash on;
|
||||
dash_path ${cfg.httpLocation}/dash;
|
||||
}
|
||||
}
|
||||
}
|
||||
'';
|
||||
};
|
||||
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
cfg.port
|
||||
];
|
||||
};
|
||||
}
|
||||
@@ -1,8 +1,13 @@
|
||||
{ lib, config, pkgs, ... }:
|
||||
{ lib, config, ... }:
|
||||
|
||||
let
|
||||
cfg = config.services.nginx;
|
||||
in {
|
||||
in
|
||||
{
|
||||
options.services.nginx = {
|
||||
openFirewall = lib.mkEnableOption "Open firewall ports 80 and 443";
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.nginx = {
|
||||
recommendedGzipSettings = true;
|
||||
@@ -11,6 +16,8 @@ in {
|
||||
recommendedTlsSettings = true;
|
||||
};
|
||||
|
||||
networking.firewall.allowedTCPPorts = [ 80 443 ];
|
||||
services.nginx.openFirewall = lib.mkDefault true;
|
||||
|
||||
networking.firewall.allowedTCPPorts = lib.mkIf cfg.openFirewall [ 80 443 ];
|
||||
};
|
||||
}
|
||||
38
common/server/ntfy.nix
Normal file
38
common/server/ntfy.nix
Normal file
@@ -0,0 +1,38 @@
|
||||
{ lib, config, ... }:
|
||||
|
||||
let
|
||||
cfg = config.services.ntfy-sh;
|
||||
in
|
||||
{
|
||||
options.services.ntfy-sh = {
|
||||
hostname = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
example = "ntfy.example.com";
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.ntfy-sh.settings = {
|
||||
base-url = "https://${cfg.hostname}";
|
||||
listen-http = "127.0.0.1:2586";
|
||||
auth-default-access = "deny-all";
|
||||
behind-proxy = true;
|
||||
enable-login = true;
|
||||
};
|
||||
|
||||
# backups
|
||||
backup.group."ntfy".paths = [
|
||||
"/var/lib/ntfy-sh"
|
||||
];
|
||||
|
||||
services.nginx.enable = true;
|
||||
services.nginx.virtualHosts.${cfg.hostname} = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations."/" = {
|
||||
proxyPass = "http://127.0.0.1:2586";
|
||||
proxyWebsockets = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
32
common/server/owncast.nix
Normal file
32
common/server/owncast.nix
Normal file
@@ -0,0 +1,32 @@
|
||||
{ lib, config, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.owncast;
|
||||
in
|
||||
{
|
||||
options.services.owncast = {
|
||||
hostname = lib.mkOption {
|
||||
type = types.str;
|
||||
example = "example.com";
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
services.owncast.listen = "127.0.0.1";
|
||||
services.owncast.port = 62419; # random port
|
||||
|
||||
networking.firewall.allowedTCPPorts = [ cfg.rtmp-port ];
|
||||
|
||||
services.nginx.enable = true;
|
||||
services.nginx.virtualHosts.${cfg.hostname} = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations."/" = {
|
||||
proxyPass = "http://localhost:${toString cfg.port}";
|
||||
proxyWebsockets = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,42 +0,0 @@
|
||||
;<?php http_response_code(403); /*
|
||||
[main]
|
||||
name = "Kode Paste"
|
||||
discussion = false
|
||||
opendiscussion = false
|
||||
password = true
|
||||
fileupload = false
|
||||
burnafterreadingselected = false
|
||||
defaultformatter = "plaintext"
|
||||
sizelimit = 10485760
|
||||
template = "bootstrap"
|
||||
languageselection = false
|
||||
|
||||
[expire]
|
||||
default = "1week"
|
||||
|
||||
[expire_options]
|
||||
5min = 300
|
||||
10min = 600
|
||||
1hour = 3600
|
||||
1day = 86400
|
||||
1week = 604800
|
||||
|
||||
[formatter_options]
|
||||
plaintext = "Plain Text"
|
||||
syntaxhighlighting = "Source Code"
|
||||
markdown = "Markdown"
|
||||
|
||||
[traffic]
|
||||
limit = 10
|
||||
dir = "/var/lib/privatebin"
|
||||
|
||||
[purge]
|
||||
limit = 300
|
||||
batchsize = 10
|
||||
dir = "/var/lib/privatebin"
|
||||
|
||||
[model]
|
||||
class = Filesystem
|
||||
|
||||
[model_options]
|
||||
dir = "/var/lib/privatebin"
|
||||
@@ -1,73 +0,0 @@
|
||||
{ config, pkgs, lib, ... }:
|
||||
|
||||
let
|
||||
cfg = config.services.privatebin;
|
||||
privateBinSrc = pkgs.stdenv.mkDerivation {
|
||||
name = "privatebin";
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "privatebin";
|
||||
repo = "privatebin";
|
||||
rev = "d65bf02d7819a530c3c2a88f6f9947651fe5258d";
|
||||
sha256 = "7ttAvEDL1ab0cUZcqZzXFkXwB2rF2t4eNpPxt48ap94=";
|
||||
};
|
||||
installPhase = ''
|
||||
cp -ar $src $out
|
||||
'';
|
||||
};
|
||||
in {
|
||||
options.services.privatebin = {
|
||||
enable = lib.mkEnableOption "enable privatebin";
|
||||
host = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
example = "example.com";
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
|
||||
users.users.privatebin = {
|
||||
description = "privatebin service user";
|
||||
group = "privatebin";
|
||||
isSystemUser = true;
|
||||
};
|
||||
users.groups.privatebin = {};
|
||||
|
||||
services.nginx.enable = true;
|
||||
services.nginx.virtualHosts.${cfg.host} = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations."/" = {
|
||||
root = privateBinSrc;
|
||||
index = "index.php";
|
||||
};
|
||||
locations."~ \.php$" = {
|
||||
root = privateBinSrc;
|
||||
extraConfig = ''
|
||||
fastcgi_pass unix:${config.services.phpfpm.pools.privatebin.socket};
|
||||
fastcgi_index index.php;
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"d '/var/lib/privatebin' 0750 privatebin privatebin - -"
|
||||
];
|
||||
|
||||
services.phpfpm.pools.privatebin = {
|
||||
user = "privatebin";
|
||||
group = "privatebin";
|
||||
phpEnv = {
|
||||
CONFIG_PATH = "${./conf.php}";
|
||||
};
|
||||
settings = {
|
||||
pm = "dynamic";
|
||||
"listen.owner" = config.services.nginx.user;
|
||||
"pm.max_children" = 5;
|
||||
"pm.start_servers" = 2;
|
||||
"pm.min_spare_servers" = 1;
|
||||
"pm.max_spare_servers" = 3;
|
||||
"pm.max_requests" = 500;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,74 +0,0 @@
|
||||
{ config, pkgs, lib, ... }:
|
||||
|
||||
let
|
||||
cfg = config.services.radio;
|
||||
radioPackage = config.inputs.radio.packages.${config.currentSystem}.radio;
|
||||
in {
|
||||
options.services.radio = {
|
||||
enable = lib.mkEnableOption "enable radio";
|
||||
user = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "radio";
|
||||
description = ''
|
||||
The user radio should run as
|
||||
'';
|
||||
};
|
||||
group = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "radio";
|
||||
description = ''
|
||||
The group radio should run as
|
||||
'';
|
||||
};
|
||||
dataDir = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "/var/lib/radio";
|
||||
description = ''
|
||||
Path to the radio data directory
|
||||
'';
|
||||
};
|
||||
host = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
description = ''
|
||||
Domain radio is hosted on
|
||||
'';
|
||||
};
|
||||
nginx = lib.mkEnableOption "enable nginx";
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.icecast = {
|
||||
enable = true;
|
||||
hostname = cfg.host;
|
||||
mount = "stream.mp3";
|
||||
fallback = "fallback.mp3";
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts.${cfg.host} = lib.mkIf cfg.nginx {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations."/".root = config.inputs.radio-web;
|
||||
};
|
||||
|
||||
users.users.${cfg.user} = {
|
||||
isSystemUser = true;
|
||||
group = cfg.group;
|
||||
home = cfg.dataDir;
|
||||
createHome = true;
|
||||
};
|
||||
users.groups.${cfg.group} = {};
|
||||
systemd.services.radio = {
|
||||
enable = true;
|
||||
after = ["network.target"];
|
||||
wantedBy = ["multi-user.target"];
|
||||
serviceConfig.ExecStart = "${radioPackage}/bin/radio ${config.services.icecast.listen.address}:${toString config.services.icecast.listen.port} ${config.services.icecast.mount} 5500";
|
||||
serviceConfig.User = cfg.user;
|
||||
serviceConfig.Group = cfg.group;
|
||||
serviceConfig.WorkingDirectory = cfg.dataDir;
|
||||
preStart = ''
|
||||
mkdir -p ${cfg.dataDir}
|
||||
chown ${cfg.user} ${cfg.dataDir}
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -5,33 +5,43 @@
|
||||
services.samba = {
|
||||
openFirewall = true;
|
||||
package = pkgs.sambaFull; # printer sharing
|
||||
securityType = "user";
|
||||
|
||||
# should this be on?
|
||||
nsswins = true;
|
||||
|
||||
extraConfig = ''
|
||||
workgroup = HOME
|
||||
server string = smbnix
|
||||
netbios name = smbnix
|
||||
security = user
|
||||
use sendfile = yes
|
||||
min protocol = smb2
|
||||
guest account = nobody
|
||||
map to guest = bad user
|
||||
settings = {
|
||||
global = {
|
||||
security = "user";
|
||||
workgroup = "HOME";
|
||||
"server string" = "smbnix";
|
||||
"netbios name" = "smbnix";
|
||||
"use sendfile" = "yes";
|
||||
"min protocol" = "smb2";
|
||||
"guest account" = "nobody";
|
||||
"map to guest" = "bad user";
|
||||
|
||||
# printing
|
||||
load printers = yes
|
||||
printing = cups
|
||||
printcap name = cups
|
||||
'';
|
||||
# printing
|
||||
"load printers" = "yes";
|
||||
printing = "cups";
|
||||
"printcap name" = "cups";
|
||||
|
||||
shares = {
|
||||
"hide files" = "/.nobackup/.DS_Store/._.DS_Store/";
|
||||
|
||||
# Samba 4.22+ enables SMB3 directory leases by default, allowing clients
|
||||
# to cache directory listings locally. When files are created locally on
|
||||
# the server (bypassing Samba), these cached listings go stale because
|
||||
# kernel oplocks — the mechanism that would break leases on local
|
||||
# changes — is incompatible with smb2 leases. Enabling kernel oplocks
|
||||
# would fix this but forces Samba to disable smb2 leases, durable
|
||||
# handles, and level2 oplocks, losing handle caching performance.
|
||||
# https://wiki.samba.org/index.php/Editing_files_locally_on_server:_interoperability
|
||||
"smb3 directory leases" = "no";
|
||||
};
|
||||
public = {
|
||||
path = "/data/samba/Public";
|
||||
browseable = "yes";
|
||||
"read only" = "no";
|
||||
"guest ok" = "yes";
|
||||
"guest ok" = "no";
|
||||
"create mask" = "0644";
|
||||
"directory mask" = "0755";
|
||||
"force user" = "public_data";
|
||||
@@ -73,6 +83,13 @@
|
||||
};
|
||||
};
|
||||
|
||||
# backups
|
||||
backup.group."samba".paths = [
|
||||
config.services.samba.settings.googlebot.path
|
||||
config.services.samba.settings.cris.path
|
||||
config.services.samba.settings.public.path
|
||||
];
|
||||
|
||||
# Windows discovery of samba server
|
||||
services.samba-wsdd = {
|
||||
enable = true;
|
||||
@@ -88,7 +105,7 @@
|
||||
# Printer discovery
|
||||
# (is this needed?)
|
||||
services.avahi.enable = true;
|
||||
services.avahi.nssmdns = true;
|
||||
services.avahi.nssmdns4 = true;
|
||||
|
||||
# printer sharing
|
||||
systemd.tmpfiles.rules = [
|
||||
@@ -106,6 +123,6 @@
|
||||
# samba user for share
|
||||
users.users.cris.isSystemUser = true;
|
||||
users.users.cris.group = "cris";
|
||||
users.groups.cris = {};
|
||||
users.groups.cris = { };
|
||||
};
|
||||
}
|
||||
@@ -2,7 +2,8 @@
|
||||
|
||||
let
|
||||
cfg = config.services.thelounge;
|
||||
in {
|
||||
in
|
||||
{
|
||||
options.services.thelounge = {
|
||||
fileUploadBaseUrl = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
@@ -23,12 +24,12 @@ in {
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.thelounge = {
|
||||
private = true;
|
||||
public = false;
|
||||
extraConfig = {
|
||||
reverseProxy = true;
|
||||
maxHistory = -1;
|
||||
https.enable = false;
|
||||
# theme = "thelounge-theme-solarized";
|
||||
# theme = "thelounge-theme-solarized";
|
||||
prefetch = false;
|
||||
prefetchStorage = false;
|
||||
fileUpload = {
|
||||
@@ -42,6 +43,10 @@ in {
|
||||
};
|
||||
};
|
||||
|
||||
backup.group."thelounge".paths = [
|
||||
"/var/lib/thelounge/"
|
||||
];
|
||||
|
||||
# the lounge client
|
||||
services.nginx.virtualHosts.${cfg.host} = {
|
||||
enableACME = true;
|
||||
|
||||
26
common/server/unifi.nix
Normal file
26
common/server/unifi.nix
Normal file
@@ -0,0 +1,26 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
let
|
||||
cfg = config.services.unifi;
|
||||
in
|
||||
{
|
||||
options.services.unifi = {
|
||||
# Open select Unifi ports instead of using openFirewall to avoid opening access to unifi's control panel
|
||||
openMinimalFirewall = lib.mkEnableOption "Open bare minimum firewall ports";
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.unifi.unifiPackage = pkgs.unifi;
|
||||
services.unifi.mongodbPackage = pkgs.mongodb-7_0;
|
||||
|
||||
networking.firewall = lib.mkIf cfg.openMinimalFirewall {
|
||||
allowedUDPPorts = [
|
||||
3478 # STUN
|
||||
10001 # used for device discovery.
|
||||
];
|
||||
allowedTCPPorts = [
|
||||
8080 # Used for device and application communication.
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,94 +0,0 @@
|
||||
{ config, pkgs, ... }:
|
||||
|
||||
let
|
||||
# external
|
||||
rtp-port = 8083;
|
||||
webrtc-peer-lower-port = 20000;
|
||||
webrtc-peer-upper-port = 20100;
|
||||
domain = "live.neet.space";
|
||||
|
||||
# internal
|
||||
ingest-port = 8084;
|
||||
web-port = 8085;
|
||||
webrtc-port = 8086;
|
||||
toStr = builtins.toString;
|
||||
in
|
||||
{
|
||||
networking.firewall.allowedUDPPorts = [ rtp-port ];
|
||||
networking.firewall.allowedTCPPortRanges = [ {
|
||||
from = webrtc-peer-lower-port;
|
||||
to = webrtc-peer-upper-port;
|
||||
} ];
|
||||
networking.firewall.allowedUDPPortRanges = [ {
|
||||
from = webrtc-peer-lower-port;
|
||||
to = webrtc-peer-upper-port;
|
||||
} ];
|
||||
|
||||
virtualisation.docker.enable = true;
|
||||
|
||||
services.nginx.virtualHosts.${domain} = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations = {
|
||||
"/" = {
|
||||
proxyPass = "http://localhost:${toStr web-port}";
|
||||
};
|
||||
"websocket" = {
|
||||
proxyPass = "http://localhost:${toStr webrtc-port}/websocket";
|
||||
proxyWebsockets = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
virtualisation.oci-containers = {
|
||||
backend = "docker";
|
||||
containers = {
|
||||
"lightspeed-ingest" = {
|
||||
workdir = "/var/lib/lightspeed-ingest";
|
||||
image = "projectlightspeed/ingest";
|
||||
ports = [
|
||||
"${toStr ingest-port}:8084"
|
||||
];
|
||||
# imageFile = pkgs.dockerTools.pullImage {
|
||||
# imageName = "projectlightspeed/ingest";
|
||||
# finalImageTag = "version-0.1.4";
|
||||
# imageDigest = "sha256:9fc51833b7c27a76d26e40f092b9cec1ac1c4bfebe452e94ad3269f1f73ff2fc";
|
||||
# sha256 = "19kxl02x0a3i6hlnsfcm49hl6qxnq2f3hfmyv1v8qdaz58f35kd5";
|
||||
# };
|
||||
};
|
||||
"lightspeed-react" = {
|
||||
workdir = "/var/lib/lightspeed-react";
|
||||
image = "projectlightspeed/react";
|
||||
ports = [
|
||||
"${toStr web-port}:80"
|
||||
];
|
||||
# imageFile = pkgs.dockerTools.pullImage {
|
||||
# imageName = "projectlightspeed/react";
|
||||
# finalImageTag = "version-0.1.3";
|
||||
# imageDigest = "sha256:b7c58425f1593f7b4304726b57aa399b6e216e55af9c0962c5c19333fae638b6";
|
||||
# sha256 = "0d2jh7mr20h7dxgsp7ml7cw2qd4m8ja9rj75dpy59zyb6v0bn7js";
|
||||
# };
|
||||
};
|
||||
"lightspeed-webrtc" = {
|
||||
workdir = "/var/lib/lightspeed-webrtc";
|
||||
image = "projectlightspeed/webrtc";
|
||||
ports = [
|
||||
"${toStr webrtc-port}:8080"
|
||||
"${toStr rtp-port}:65535/udp"
|
||||
"${toStr webrtc-peer-lower-port}-${toStr webrtc-peer-upper-port}:${toStr webrtc-peer-lower-port}-${toStr webrtc-peer-upper-port}/tcp"
|
||||
"${toStr webrtc-peer-lower-port}-${toStr webrtc-peer-upper-port}:${toStr webrtc-peer-lower-port}-${toStr webrtc-peer-upper-port}/udp"
|
||||
];
|
||||
cmd = [
|
||||
"lightspeed-webrtc" "--addr=0.0.0.0" "--ip=${domain}"
|
||||
"--ports=${toStr webrtc-peer-lower-port}-${toStr webrtc-peer-upper-port}" "run"
|
||||
];
|
||||
# imageFile = pkgs.dockerTools.pullImage {
|
||||
# imageName = "projectlightspeed/webrtc";
|
||||
# finalImageTag = "version-0.1.2";
|
||||
# imageDigest = "sha256:ddf8b3dd294485529ec11d1234a3fc38e365a53c4738998c6bc2c6930be45ecf";
|
||||
# sha256 = "1bdy4ak99fjdphj5bsk8rp13xxmbqdhfyfab14drbyffivg9ad2i";
|
||||
# };
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
Copyright 2020 Matthijs Steen
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
@@ -1,37 +0,0 @@
|
||||
# Visual Studio Code Server support in NixOS
|
||||
|
||||
Experimental support for VS Code Server in NixOS. The NodeJS by default supplied by VS Code cannot be used within NixOS due to missing hardcoded paths, so it is automatically replaced by a symlink to a compatible version of NodeJS that does work under NixOS.
|
||||
|
||||
## Installation
|
||||
|
||||
```nix
|
||||
{
|
||||
imports = [
|
||||
(fetchTarball "https://github.com/msteen/nixos-vscode-server/tarball/master")
|
||||
];
|
||||
|
||||
services.vscode-server.enable = true;
|
||||
}
|
||||
```
|
||||
|
||||
And then enable them for the relevant users:
|
||||
|
||||
```
|
||||
systemctl --user enable auto-fix-vscode-server.service
|
||||
```
|
||||
|
||||
### Home Manager
|
||||
|
||||
```nix
|
||||
{
|
||||
imports = [
|
||||
"${fetchTarball "https://github.com/msteen/nixos-vscode-server/tarball/master"}/modules/vscode-server/home.nix"
|
||||
];
|
||||
|
||||
services.vscode-server.enable = true;
|
||||
}
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
When the service is enabled and running it should simply work, there is nothing for you to do.
|
||||
@@ -1 +0,0 @@
|
||||
import ./modules/vscode-server
|
||||
@@ -1,8 +0,0 @@
|
||||
import ./module.nix ({ name, description, serviceConfig }:
|
||||
|
||||
{
|
||||
systemd.user.services.${name} = {
|
||||
inherit description serviceConfig;
|
||||
wantedBy = [ "default.target" ];
|
||||
};
|
||||
})
|
||||
@@ -1,15 +0,0 @@
|
||||
import ./module.nix ({ name, description, serviceConfig }:
|
||||
|
||||
{
|
||||
systemd.user.services.${name} = {
|
||||
Unit = {
|
||||
Description = description;
|
||||
};
|
||||
|
||||
Service = serviceConfig;
|
||||
|
||||
Install = {
|
||||
WantedBy = [ "default.target" ];
|
||||
};
|
||||
};
|
||||
})
|
||||
@@ -1,42 +0,0 @@
|
||||
moduleConfig:
|
||||
{ lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
{
|
||||
options.services.vscode-server.enable = with types; mkEnableOption "VS Code Server";
|
||||
|
||||
config = moduleConfig rec {
|
||||
name = "auto-fix-vscode-server";
|
||||
description = "Automatically fix the VS Code server used by the remote SSH extension";
|
||||
serviceConfig = {
|
||||
# When a monitored directory is deleted, it will stop being monitored.
|
||||
# Even if it is later recreated it will not restart monitoring it.
|
||||
# Unfortunately the monitor does not kill itself when it stops monitoring,
|
||||
# so rather than creating our own restart mechanism, we leverage systemd to do this for us.
|
||||
Restart = "always";
|
||||
RestartSec = 0;
|
||||
ExecStart = pkgs.writeShellScript "${name}.sh" ''
|
||||
set -euo pipefail
|
||||
PATH=${makeBinPath (with pkgs; [ coreutils inotify-tools ])}
|
||||
bin_dir=~/.vscode-server/bin
|
||||
[[ -e $bin_dir ]] &&
|
||||
find "$bin_dir" -mindepth 2 -maxdepth 2 -name node -type f -exec ln -sfT ${pkgs.nodejs-12_x}/bin/node {} \; ||
|
||||
mkdir -p "$bin_dir"
|
||||
while IFS=: read -r bin_dir event; do
|
||||
# A new version of the VS Code Server is being created.
|
||||
if [[ $event == 'CREATE,ISDIR' ]]; then
|
||||
# Create a trigger to know when their node is being created and replace it for our symlink.
|
||||
touch "$bin_dir/node"
|
||||
inotifywait -qq -e DELETE_SELF "$bin_dir/node"
|
||||
ln -sfT ${pkgs.nodejs-12_x}/bin/node "$bin_dir/node"
|
||||
# The monitored directory is deleted, e.g. when "Uninstall VS Code Server from Host" has been run.
|
||||
elif [[ $event == DELETE_SELF ]]; then
|
||||
# See the comments above Restart in the service config.
|
||||
exit 0
|
||||
fi
|
||||
done < <(inotifywait -q -m -e CREATE,ISDIR -e DELETE_SELF --format '%w%f:%e' "$bin_dir")
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,33 +0,0 @@
|
||||
{ config, pkgs, lib, ... }:
|
||||
|
||||
let
|
||||
cfg = config.services.zerobin;
|
||||
in {
|
||||
options.services.zerobin = {
|
||||
host = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
example = "example.com";
|
||||
};
|
||||
port = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 33422;
|
||||
};
|
||||
};
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.zerobin.listenPort = cfg.port;
|
||||
services.zerobin.listenAddress = "localhost";
|
||||
|
||||
services.nginx.virtualHosts.${cfg.host} = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations."/" = {
|
||||
proxyPass = "http://localhost:${toString cfg.port}";
|
||||
proxyWebsockets = true;
|
||||
};
|
||||
};
|
||||
|
||||
# zerobin service is broken in nixpkgs currently
|
||||
systemd.services.zerobin.serviceConfig.ExecStart = lib.mkForce
|
||||
"${pkgs.zerobin}/bin/zerobin --host=${cfg.listenAddress} --port=${toString cfg.listenPort} --data-dir=${cfg.dataDir}";
|
||||
};
|
||||
}
|
||||
35
common/shell.nix
Normal file
35
common/shell.nix
Normal file
@@ -0,0 +1,35 @@
|
||||
{ pkgs, ... }:
|
||||
|
||||
# Improvements to the default shell
|
||||
# - use nix-index for command-not-found
|
||||
# - disable fish's annoying greeting message
|
||||
# - add some handy shell commands
|
||||
|
||||
{
|
||||
# nix-index
|
||||
programs.nix-index.enable = true;
|
||||
programs.nix-index.enableFishIntegration = true;
|
||||
programs.command-not-found.enable = false;
|
||||
programs.nix-index-database.comma.enable = true;
|
||||
|
||||
programs.fish = {
|
||||
enable = true;
|
||||
|
||||
shellInit = ''
|
||||
# disable annoying fish shell greeting
|
||||
set fish_greeting
|
||||
'';
|
||||
};
|
||||
|
||||
environment.shellAliases = {
|
||||
myip = "dig +short myip.opendns.com @resolver1.opendns.com";
|
||||
|
||||
# https://linuxreviews.org/HOWTO_Test_Disk_I/O_Performance
|
||||
io_seq_read = "${pkgs.fio}/bin/fio --name TEST --eta-newline=5s --filename=temp.file --rw=read --size=2g --io_size=10g --blocksize=1024k --ioengine=libaio --fsync=10000 --iodepth=32 --direct=1 --numjobs=1 --runtime=60 --group_reporting; rm temp.file";
|
||||
io_seq_write = "${pkgs.fio}/bin/fio --name TEST --eta-newline=5s --filename=temp.file --rw=write --size=2g --io_size=10g --blocksize=1024k --ioengine=libaio --fsync=10000 --iodepth=32 --direct=1 --numjobs=1 --runtime=60 --group_reporting; rm temp.file";
|
||||
io_rand_read = "${pkgs.fio}/bin/fio --name TEST --eta-newline=5s --filename=temp.file --rw=randread --size=2g --io_size=10g --blocksize=4k --ioengine=libaio --fsync=1 --iodepth=1 --direct=1 --numjobs=32 --runtime=60 --group_reporting; rm temp.file";
|
||||
io_rand_write = "${pkgs.fio}/bin/fio --name TEST --eta-newline=5s --filename=temp.file --rw=randrw --size=2g --io_size=10g --blocksize=4k --ioengine=libaio --fsync=1 --iodepth=1 --direct=1 --numjobs=1 --runtime=60 --group_reporting; rm temp.file";
|
||||
|
||||
llsblk = "lsblk -o +uuid,fsType";
|
||||
};
|
||||
}
|
||||
@@ -1,63 +1,36 @@
|
||||
rec {
|
||||
users = [
|
||||
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMVR/R3ZOsv7TZbICGBCHdjh1NDT8SnswUyINeJOC7QG"
|
||||
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE0dcqL/FhHmv+a1iz3f9LJ48xubO7MZHy35rW9SZOYM"
|
||||
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO0VFnn3+Mh0nWeN92jov81qNE9fpzTAHYBphNoY7HUx" # reg
|
||||
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHSkKiRUUmnErOKGx81nyge/9KqjkPh8BfDk0D3oP586" # nat
|
||||
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFeTK1iARlNIKP/DS8/ObBm9yUM/3L1Ub4XI5A2r9OzP" # ray
|
||||
];
|
||||
system = {
|
||||
liza = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDY/pNyWedEfU7Tq9ikGbriRuF1ZWkHhegGS17L0Vcdl";
|
||||
ponyo = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMBBlTAIp38RhErU1wNNV5MBeb+WGH0mhF/dxh5RsAXN";
|
||||
ponyo-unlock = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIC9LQuuImgWlkjDhEEIbM1wOd+HqRv1RxvYZuLXPSdRi";
|
||||
ray = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDQM8hwKRgl8cZj7UVYATSLYu4LhG7I0WFJ9m2iWowiB";
|
||||
s0 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHkTQNPzrIhsKk3OpTHq8b7slIp9LktB49r1w/DKb/5b";
|
||||
n1 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPWlhd1Oid5Xf2zdcBrcdrR0TlhObutwcJ8piobRTpRt";
|
||||
n2 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJ7bRiRutnI7Bmyt/I238E3Fp5DqiClIXiVibsccipOr";
|
||||
n3 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB+rJEaRrFDGirQC2UoWQkmpzLg4qgTjGJgVqiipWiU5";
|
||||
n4 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINYm2ROIfCeGz6QtDwqAmcj2DX9tq2CZn0eLhskdvB4Z";
|
||||
n5 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE5Qhvwq3PiHEKf+2/4w5ZJkSMNzFLhIRrPOR98m7wW4";
|
||||
n6 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID/P/pa9+qhKAPfvvd8xSO2komJqDW0M1nCK7ZrP6PO7";
|
||||
n7 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPtOlOvTlMX2mxPaXDJ6VlMe5rmroUXpKmJVNxgV32xL";
|
||||
};
|
||||
{ config, lib, ... }:
|
||||
|
||||
# groups
|
||||
systems = with system; [
|
||||
liza
|
||||
ray
|
||||
s0
|
||||
n1
|
||||
n2
|
||||
n3
|
||||
n4
|
||||
n5
|
||||
n6
|
||||
n7
|
||||
];
|
||||
personal = with system; [
|
||||
ray
|
||||
];
|
||||
servers = with system; [
|
||||
liza
|
||||
s0
|
||||
n1
|
||||
n2
|
||||
n3
|
||||
n4
|
||||
n5
|
||||
n6
|
||||
n7
|
||||
];
|
||||
compute = with system; [
|
||||
n1
|
||||
n2
|
||||
n3
|
||||
n4
|
||||
n5
|
||||
n6
|
||||
n7
|
||||
];
|
||||
storage = with system; [
|
||||
s0
|
||||
{
|
||||
programs.ssh.knownHosts = lib.filterAttrs (n: v: v != null) (lib.concatMapAttrs
|
||||
(host: cfg: {
|
||||
${host} = {
|
||||
hostNames = cfg.hostNames;
|
||||
publicKey = cfg.hostKey;
|
||||
};
|
||||
"${host}-remote-unlock" =
|
||||
if cfg.remoteUnlock != null then {
|
||||
hostNames = builtins.filter (h: h != null) [ cfg.remoteUnlock.clearnetHost cfg.remoteUnlock.onionHost ];
|
||||
publicKey = cfg.remoteUnlock.hostKey;
|
||||
} else null;
|
||||
})
|
||||
config.machines.hosts);
|
||||
|
||||
# prebuilt cmds for easy ssh LUKS unlock
|
||||
environment.shellAliases =
|
||||
let
|
||||
unlockHosts = unlockType: lib.concatMapAttrs
|
||||
(host: cfg:
|
||||
if cfg.remoteUnlock != null && cfg.remoteUnlock.${unlockType} != null then {
|
||||
${host} = cfg.remoteUnlock.${unlockType};
|
||||
} else { })
|
||||
config.machines.hosts;
|
||||
in
|
||||
lib.concatMapAttrs (host: addr: { "unlock-over-tor_${host}" = "torsocks ssh root@${addr}"; }) (unlockHosts "onionHost")
|
||||
//
|
||||
lib.concatMapAttrs (host: addr: { "unlock_${host}" = "ssh root@${addr}"; }) (unlockHosts "clearnetHost");
|
||||
|
||||
# TODO: Old ssh keys I will remove some day...
|
||||
machines.ssh.userKeys = [
|
||||
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHSkKiRUUmnErOKGx81nyge/9KqjkPh8BfDk0D3oP586" # nat
|
||||
];
|
||||
}
|
||||
@@ -1,187 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -eEo pipefail
|
||||
|
||||
# $@ := ""
|
||||
set_route_vars() {
|
||||
local network_var
|
||||
local -a network_vars; read -ra network_vars <<<"${!route_network_*}"
|
||||
for network_var in "${network_vars[@]}"; do
|
||||
local -i i="${network_var#route_network_}"
|
||||
local -a vars=("route_network_$i" "route_netmask_$i" "route_gateway_$i" "route_metric_$i")
|
||||
route_networks[i]="${!vars[0]}"
|
||||
route_netmasks[i]="${!vars[1]:-255.255.255.255}"
|
||||
route_gateways[i]="${!vars[2]:-$route_vpn_gateway}"
|
||||
route_metrics[i]="${!vars[3]:-0}"
|
||||
done
|
||||
}
|
||||
|
||||
# Configuration.
|
||||
readonly prog="$(basename "$0")"
|
||||
readonly private_nets="127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
|
||||
declare -a remotes cnf_remote_domains cnf_remote_ips route_networks route_netmasks route_gateways route_metrics
|
||||
read -ra remotes <<<"$(env|grep -oP '^remote_[0-9]+=.*'|sort -n|cut -d= -f2|tr '\n' '\t')"
|
||||
read -ra cnf_remote_domains <<<"$(printf '%s\n' "${remotes[@]%%*[0-9]}"|sort -u|tr '\n' '\t')"
|
||||
read -ra cnf_remote_ips <<<"$(printf '%s\n' "${remotes[@]##*[!0-9.]*}"|sort -u|tr '\n' '\t')"
|
||||
set_route_vars
|
||||
read -ra numbered_vars <<<"${!foreign_option_*} ${!proto_*} ${!remote_*} ${!remote_port_*} \
|
||||
${!route_network_*} ${!route_netmask_*} ${!route_gateway_*} ${!route_metric_*}"
|
||||
readonly numbered_vars "${numbered_vars[@]}" dev ifconfig_local ifconfig_netmask ifconfig_remote \
|
||||
route_net_gateway route_vpn_gateway script_type trusted_ip trusted_port untrusted_ip untrusted_port \
|
||||
remotes cnf_remote_domains cnf_remote_ips route_networks route_netmasks route_gateways route_metrics
|
||||
readonly cur_remote_ip="${trusted_ip:-$untrusted_ip}"
|
||||
readonly cur_port="${trusted_port:-$untrusted_port}"
|
||||
|
||||
# $@ := ""
|
||||
update_hosts() {
|
||||
if remote_entries="$(getent -s dns hosts "${cnf_remote_domains[@]}"|grep -v :)"; then
|
||||
local -r beg="# VPNFAILSAFE BEGIN" end="# VPNFAILSAFE END"
|
||||
{
|
||||
sed -e "/^$beg/,/^$end/d" /etc/hosts
|
||||
echo -e "$beg\\n$remote_entries\\n$end"
|
||||
} >/etc/hosts.vpnfailsafe
|
||||
chmod --reference=/etc/hosts /etc/hosts.vpnfailsafe
|
||||
mv /etc/hosts.vpnfailsafe /etc/hosts
|
||||
fi
|
||||
}
|
||||
|
||||
# $@ := "up" | "down"
|
||||
update_routes() {
|
||||
local -a resolved_ips
|
||||
read -ra resolved_ips <<<"$(getent -s files hosts "${cnf_remote_domains[@]:-ENOENT}"|cut -d' ' -f1|tr '\n' '\t' || true)"
|
||||
local -ar remote_ips=("$cur_remote_ip" "${resolved_ips[@]}" "${cnf_remote_ips[@]}")
|
||||
if [[ "$*" == up ]]; then
|
||||
for remote_ip in "${remote_ips[@]}"; do
|
||||
if [[ -n "$remote_ip" && -z "$(ip route show "$remote_ip")" ]]; then
|
||||
ip route add "$remote_ip" via "$route_net_gateway"
|
||||
fi
|
||||
done
|
||||
for net in 0.0.0.0/1 128.0.0.0/1; do
|
||||
if [[ -z "$(ip route show "$net")" ]]; then
|
||||
ip route add "$net" via "$route_vpn_gateway"
|
||||
fi
|
||||
done
|
||||
for i in $(seq 1 "${#route_networks[@]}"); do
|
||||
if [[ -z "$(ip route show "${route_networks[i]}/${route_netmasks[i]}")" ]]; then
|
||||
ip route add "${route_networks[i]}/${route_netmasks[i]}" \
|
||||
via "${route_gateways[i]}" metric "${route_metrics[i]}" dev "$dev"
|
||||
fi
|
||||
done
|
||||
elif [[ "$*" == down ]]; then
|
||||
for route in "${remote_ips[@]}" 0.0.0.0/1 128.0.0.0/1; do
|
||||
if [[ -n "$route" && -n "$(ip route show "$route")" ]]; then
|
||||
ip route del "$route"
|
||||
fi
|
||||
done
|
||||
for i in $(seq 1 "${#route_networks[@]}"); do
|
||||
if [[ -n "$(ip route show "${route_networks[i]}/${route_netmasks[i]}")" ]]; then
|
||||
ip route del "${route_networks[i]}/${route_netmasks[i]}"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
}
|
||||
|
||||
# $@ := ""
|
||||
update_firewall() {
|
||||
# $@ := "INPUT" | "OUTPUT" | "FORWARD"
|
||||
insert_chain() {
|
||||
if iptables -C "$*" -j "VPNFAILSAFE_$*" 2>/dev/null; then
|
||||
iptables -D "$*" -j "VPNFAILSAFE_$*"
|
||||
for opt in F X; do
|
||||
iptables -"$opt" "VPNFAILSAFE_$*"
|
||||
done
|
||||
fi
|
||||
iptables -N "VPNFAILSAFE_$*"
|
||||
iptables -I "$*" -j "VPNFAILSAFE_$*"
|
||||
}
|
||||
|
||||
# $@ := "INPUT" | "OUTPUT"
|
||||
accept_remotes() {
|
||||
case "$@" in
|
||||
INPUT) local -r icmp_type=reply io=i sd=s states="";;
|
||||
OUTPUT) local -r icmp_type=request io=o sd=d states=NEW,;;
|
||||
esac
|
||||
local -r public_nic="$(ip route show "$cur_remote_ip"|cut -d' ' -f5)"
|
||||
local -ar suf=(-m conntrack --ctstate "$states"RELATED,ESTABLISHED -"$io" "${public_nic:?}" -j ACCEPT)
|
||||
icmp_rule() {
|
||||
iptables "$1" "$2" -p icmp --icmp-type "echo-$icmp_type" -"$sd" "$3" "${suf[@]/%ACCEPT/RETURN}"
|
||||
}
|
||||
for ((i=1; i <= ${#remotes[*]}; ++i)); do
|
||||
local port="remote_port_$i"
|
||||
local proto="proto_$i"
|
||||
iptables -A "VPNFAILSAFE_$*" -p "${!proto%-client}" -"$sd" "${remotes[i-1]}" --"$sd"port "${!port}" "${suf[@]}"
|
||||
if ! icmp_rule -C "VPNFAILSAFE_$*" "${remotes[i-1]}" 2>/dev/null; then
|
||||
icmp_rule -A "VPNFAILSAFE_$*" "${remotes[i-1]}"
|
||||
fi
|
||||
done
|
||||
if ! iptables -S|grep -q "^-A VPNFAILSAFE_$* .*-$sd $cur_remote_ip/32 .*-j ACCEPT$"; then
|
||||
for p in tcp udp; do
|
||||
iptables -A "VPNFAILSAFE_$*" -p "$p" -"$sd" "$cur_remote_ip" --"$sd"port "${cur_port}" "${suf[@]}"
|
||||
done
|
||||
icmp_rule -A "VPNFAILSAFE_$*" "$cur_remote_ip"
|
||||
fi
|
||||
}
|
||||
|
||||
# $@ := "OUTPUT" | "FORWARD"
|
||||
reject_dns() {
|
||||
for proto in udp tcp; do
|
||||
iptables -A "VPNFAILSAFE_$*" -p "$proto" --dport 53 ! -o "$dev" -j REJECT
|
||||
done
|
||||
}
|
||||
|
||||
# $@ := "INPUT" | "OUTPUT" | "FORWARD"
|
||||
pass_private_nets() {
|
||||
case "$@" in
|
||||
INPUT) local -r io=i sd=s;;&
|
||||
OUTPUT|FORWARD) local -r io=o sd=d;;&
|
||||
INPUT) local -r vpn="${ifconfig_remote:-$ifconfig_local}/${ifconfig_netmask:-32}"
|
||||
iptables -A "VPNFAILSAFE_$*" -"$sd" "$vpn" -"$io" "$dev" -j RETURN
|
||||
for i in $(seq 1 "${#route_networks[@]}"); do
|
||||
iptables -A "VPNFAILSAFE_$*" -"$sd" "${route_networks[i]}/${route_netmasks[i]}" -"$io" "$dev" -j RETURN
|
||||
done;;&
|
||||
*) iptables -A "VPNFAILSAFE_$*" -"$sd" "$private_nets" ! -"$io" "$dev" -j RETURN;;&
|
||||
INPUT) iptables -A "VPNFAILSAFE_$*" -s "$private_nets" -i "$dev" -j DROP;;&
|
||||
*) for iface in "$dev" lo+; do
|
||||
iptables -A "VPNFAILSAFE_$*" -"$io" "$iface" -j RETURN
|
||||
done;;
|
||||
esac
|
||||
}
|
||||
|
||||
# $@ := "INPUT" | "OUTPUT" | "FORWARD"
|
||||
drop_other() {
|
||||
iptables -A "VPNFAILSAFE_$*" -j DROP
|
||||
}
|
||||
|
||||
for chain in INPUT OUTPUT FORWARD; do
|
||||
insert_chain "$chain"
|
||||
[[ $chain == FORWARD ]] || accept_remotes "$chain"
|
||||
[[ $chain == INPUT ]] || reject_dns "$chain"
|
||||
pass_private_nets "$chain"
|
||||
drop_other "$chain"
|
||||
done
|
||||
}
|
||||
|
||||
# $@ := ""
|
||||
cleanup() {
|
||||
update_resolv down
|
||||
update_routes down
|
||||
}
|
||||
trap cleanup INT TERM
|
||||
|
||||
# $@ := line_number exit_code
|
||||
err_msg() {
|
||||
echo "$0:$1: \`$(sed -n "$1,+0{s/^\\s*//;p}" "$0")' returned $2" >&2
|
||||
cleanup
|
||||
}
|
||||
trap 'err_msg "$LINENO" "$?"' ERR
|
||||
|
||||
# $@ := ""
|
||||
main() {
|
||||
case "${script_type:-down}" in
|
||||
up) for f in hosts routes firewall; do "update_$f" up; done;;
|
||||
down) update_routes down
|
||||
update_resolv down;;
|
||||
esac
|
||||
}
|
||||
|
||||
main
|
||||
@@ -1,14 +0,0 @@
|
||||
{ lib, config, ... }:
|
||||
|
||||
let
|
||||
cfg = config.services.zerotierone;
|
||||
in {
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.zerotierone.joinNetworks = [
|
||||
"565799d8f6d654c0"
|
||||
];
|
||||
networking.firewall.allowedUDPPorts = [
|
||||
9993
|
||||
];
|
||||
};
|
||||
}
|
||||
87
common/zfs-alerts.nix
Normal file
87
common/zfs-alerts.nix
Normal file
@@ -0,0 +1,87 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
let
|
||||
cfg = config.ntfy-alerts;
|
||||
hasZfs = config.boot.supportedFilesystems.zfs or false;
|
||||
hasNtfy = config.thisMachine.hasRole."ntfy";
|
||||
|
||||
checkScript = pkgs.writeShellScript "zfs-health-check" ''
|
||||
PATH="${lib.makeBinPath [ pkgs.zfs pkgs.coreutils pkgs.gawk pkgs.curl ]}"
|
||||
|
||||
unhealthy=""
|
||||
|
||||
# Check pool health status
|
||||
while IFS=$'\t' read -r pool state; do
|
||||
if [ "$state" != "ONLINE" ]; then
|
||||
unhealthy="$unhealthy"$'\n'"Pool '$pool' is $state"
|
||||
fi
|
||||
done < <(zpool list -H -o name,health)
|
||||
|
||||
# Check for errors (read, write, checksum) on any vdev
|
||||
while IFS=$'\t' read -r pool errors; do
|
||||
if [ "$errors" != "No known data errors" ] && [ -n "$errors" ]; then
|
||||
unhealthy="$unhealthy"$'\n'"Pool '$pool' has errors: $errors"
|
||||
fi
|
||||
done < <(zpool status -x 2>/dev/null | awk '
|
||||
/pool:/ { pool=$2 }
|
||||
/errors:/ { sub(/^[[:space:]]*errors: /, ""); print pool "\t" $0 }
|
||||
')
|
||||
|
||||
# Check for any drives with non-zero error counts
|
||||
drive_errors=$(zpool status 2>/dev/null | awk '
|
||||
/DEGRADED|FAULTED|OFFLINE|UNAVAIL|REMOVED/ && !/pool:/ && !/state:/ {
|
||||
print " " $0
|
||||
}
|
||||
/[0-9]+[[:space:]]+[0-9]+[[:space:]]+[0-9]+/ {
|
||||
if ($3 > 0 || $4 > 0 || $5 > 0) {
|
||||
print " " $1 " (read:" $3 " write:" $4 " cksum:" $5 ")"
|
||||
}
|
||||
}
|
||||
')
|
||||
if [ -n "$drive_errors" ]; then
|
||||
unhealthy="$unhealthy"$'\n'"Device errors:"$'\n'"$drive_errors"
|
||||
fi
|
||||
|
||||
if [ -n "$unhealthy" ]; then
|
||||
message="ZFS health check failed on ${config.networking.hostName}:$unhealthy"
|
||||
|
||||
curl \
|
||||
--fail --silent --show-error \
|
||||
--max-time 30 --retry 3 \
|
||||
-H "Authorization: Bearer $NTFY_TOKEN" \
|
||||
-H "Title: ZFS issue on ${config.networking.hostName}" \
|
||||
-H "Priority: urgent" \
|
||||
-H "Tags: warning" \
|
||||
-d "$message" \
|
||||
"${cfg.serverUrl}/${cfg.topic}"
|
||||
|
||||
echo "$message" >&2
|
||||
fi
|
||||
|
||||
echo "All ZFS pools healthy"
|
||||
'';
|
||||
in
|
||||
{
|
||||
config = lib.mkIf (hasZfs && hasNtfy) {
|
||||
systemd.services.zfs-health-check = {
|
||||
description = "Check ZFS pool health and alert on issues";
|
||||
wants = [ "network-online.target" ];
|
||||
after = [ "network-online.target" "zfs.target" ];
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
EnvironmentFile = "/run/agenix/ntfy-token";
|
||||
ExecStart = checkScript;
|
||||
};
|
||||
};
|
||||
|
||||
systemd.timers.zfs-health-check = {
|
||||
description = "Periodic ZFS health check";
|
||||
wantedBy = [ "timers.target" ];
|
||||
timerConfig = {
|
||||
OnCalendar = "daily";
|
||||
Persistent = true;
|
||||
RandomizedDelaySec = "1h";
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user