Compare commits
2 Commits
pia-vpn-v2
...
e15402b8bc
| Author | SHA1 | Date | |
|---|---|---|---|
| e15402b8bc | |||
| 911e081680 |
@@ -1,29 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
set -euo pipefail
|
|
||||||
|
|
||||||
# Configure Attic cache
|
|
||||||
attic login local "$ATTIC_ENDPOINT" "$ATTIC_TOKEN"
|
|
||||||
attic use local:nixos
|
|
||||||
|
|
||||||
# Check flake
|
|
||||||
nix flake check --all-systems --print-build-logs --log-format raw --show-trace
|
|
||||||
|
|
||||||
# Build all systems
|
|
||||||
nix eval .#nixosConfigurations --apply 'cs: builtins.attrNames cs' --json \
|
|
||||||
| jq -r '.[]' \
|
|
||||||
| xargs -I{} nix build ".#nixosConfigurations.{}.config.system.build.toplevel" \
|
|
||||||
--no-link --print-build-logs --log-format raw
|
|
||||||
|
|
||||||
# Push to cache (only locally-built paths >= 0.5MB)
|
|
||||||
toplevels=$(nix eval .#nixosConfigurations \
|
|
||||||
--apply 'cs: map (n: "${cs.${n}.config.system.build.toplevel}") (builtins.attrNames cs)' \
|
|
||||||
--json | jq -r '.[]')
|
|
||||||
echo "Found $(echo "$toplevels" | wc -l) system toplevels"
|
|
||||||
paths=$(echo "$toplevels" \
|
|
||||||
| xargs nix path-info -r --json \
|
|
||||||
| jq -r '[to_entries[] | select(
|
|
||||||
(.value.signatures | all(startswith("cache.nixos.org") | not))
|
|
||||||
and .value.narSize >= 524288
|
|
||||||
) | .key] | unique[]')
|
|
||||||
echo "Pushing $(echo "$paths" | wc -l) unique paths to cache"
|
|
||||||
echo "$paths" | xargs attic push local:nixos
|
|
||||||
@@ -1,60 +0,0 @@
|
|||||||
name: Auto Update Flake
|
|
||||||
on:
|
|
||||||
schedule:
|
|
||||||
- cron: '0 6 * * *'
|
|
||||||
workflow_dispatch: {}
|
|
||||||
|
|
||||||
env:
|
|
||||||
DEBIAN_FRONTEND: noninteractive
|
|
||||||
PATH: /run/current-system/sw/bin/
|
|
||||||
XDG_CONFIG_HOME: ${{ runner.temp }}/.config
|
|
||||||
ATTIC_ENDPOINT: ${{ vars.ATTIC_ENDPOINT }}
|
|
||||||
ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }}
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
auto-update:
|
|
||||||
runs-on: nixos
|
|
||||||
steps:
|
|
||||||
- name: Checkout the repository
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
ref: master
|
|
||||||
token: ${{ secrets.PUSH_TOKEN }}
|
|
||||||
|
|
||||||
- name: Configure git identity
|
|
||||||
run: |
|
|
||||||
git config user.name "gitea-runner"
|
|
||||||
git config user.email "gitea-runner@neet.dev"
|
|
||||||
|
|
||||||
- name: Update flake inputs
|
|
||||||
id: update
|
|
||||||
run: |
|
|
||||||
nix flake update
|
|
||||||
if git diff --quiet flake.lock; then
|
|
||||||
echo "No changes to flake.lock, nothing to do"
|
|
||||||
echo "changed=false" >> "$GITHUB_OUTPUT"
|
|
||||||
else
|
|
||||||
git add flake.lock
|
|
||||||
git commit -m "flake.lock: update inputs"
|
|
||||||
echo "changed=true" >> "$GITHUB_OUTPUT"
|
|
||||||
fi
|
|
||||||
|
|
||||||
- name: Build and cache
|
|
||||||
if: steps.update.outputs.changed == 'true'
|
|
||||||
run: bash .gitea/scripts/build-and-cache.sh
|
|
||||||
|
|
||||||
- name: Push updated lockfile
|
|
||||||
if: steps.update.outputs.changed == 'true'
|
|
||||||
run: git push
|
|
||||||
|
|
||||||
- name: Notify on failure
|
|
||||||
if: failure() && steps.update.outputs.changed == 'true'
|
|
||||||
run: |
|
|
||||||
curl -s \
|
|
||||||
-H "Authorization: Bearer ${{ secrets.NTFY_TOKEN }}" \
|
|
||||||
-H "Title: Flake auto-update failed" \
|
|
||||||
-H "Priority: high" \
|
|
||||||
-H "Tags: warning" \
|
|
||||||
-d "Auto-update workflow failed. Check: ${{ gitea.server_url }}/${{ gitea.repository }}/actions/runs/${{ gitea.run_number }}" \
|
|
||||||
https://ntfy.neet.dev/nix-flake-updates
|
|
||||||
@@ -6,8 +6,6 @@ env:
|
|||||||
DEBIAN_FRONTEND: noninteractive
|
DEBIAN_FRONTEND: noninteractive
|
||||||
PATH: /run/current-system/sw/bin/
|
PATH: /run/current-system/sw/bin/
|
||||||
XDG_CONFIG_HOME: ${{ runner.temp }}/.config
|
XDG_CONFIG_HOME: ${{ runner.temp }}/.config
|
||||||
ATTIC_ENDPOINT: ${{ vars.ATTIC_ENDPOINT }}
|
|
||||||
ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }}
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
check-flake:
|
check-flake:
|
||||||
@@ -18,16 +16,34 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
- name: Build and cache
|
- name: Configure Attic cache
|
||||||
run: bash .gitea/scripts/build-and-cache.sh
|
|
||||||
|
|
||||||
- name: Notify on failure
|
|
||||||
if: failure()
|
|
||||||
run: |
|
run: |
|
||||||
curl -s \
|
attic login local "${{ vars.ATTIC_ENDPOINT }}" "${{ secrets.ATTIC_TOKEN }}"
|
||||||
-H "Authorization: Bearer ${{ secrets.NTFY_TOKEN }}" \
|
attic use local:nixos
|
||||||
-H "Title: Flake check failed" \
|
|
||||||
-H "Priority: high" \
|
- name: Check Flake
|
||||||
-H "Tags: warning" \
|
run: nix flake check --all-systems --print-build-logs --log-format raw --show-trace
|
||||||
-d "Check failed for ${{ gitea.ref_name }}. Check: ${{ gitea.server_url }}/${{ gitea.repository }}/actions/runs/${{ gitea.run_number }}" \
|
|
||||||
https://ntfy.neet.dev/nix-flake-updates
|
- name: Build all systems
|
||||||
|
run: |
|
||||||
|
nix eval .#nixosConfigurations --apply 'cs: builtins.attrNames cs' --json \
|
||||||
|
| jq -r '.[]' \
|
||||||
|
| xargs -I{} nix build ".#nixosConfigurations.{}.config.system.build.toplevel" --no-link --print-build-logs --log-format raw
|
||||||
|
|
||||||
|
- name: Push to cache
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
# Get all system toplevel store paths
|
||||||
|
toplevels=$(nix eval .#nixosConfigurations --apply 'cs: map (n: "${cs.${n}.config.system.build.toplevel}") (builtins.attrNames cs)' --json | jq -r '.[]')
|
||||||
|
echo "Found $(echo "$toplevels" | wc -l) system toplevels"
|
||||||
|
# Expand to full closures, deduplicate, and filter out paths that are:
|
||||||
|
# - already signed by cache.nixos.org (available upstream)
|
||||||
|
# - smaller than 0.5MB (insignificant build artifacts)
|
||||||
|
paths=$(echo "$toplevels" \
|
||||||
|
| xargs nix path-info -r --json \
|
||||||
|
| jq -r '[to_entries[] | select(
|
||||||
|
(.value.signatures | all(startswith("cache.nixos.org") | not))
|
||||||
|
and .value.narSize >= 524288
|
||||||
|
) | .key] | unique[]')
|
||||||
|
echo "Pushing $(echo "$paths" | wc -l) unique paths to cache"
|
||||||
|
echo "$paths" | xargs attic push local:nixos
|
||||||
|
|||||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -1,2 +1 @@
|
|||||||
result
|
result
|
||||||
.claude/worktrees
|
|
||||||
|
|||||||
20
CLAUDE.md
20
CLAUDE.md
@@ -67,12 +67,6 @@ IP allocation convention: VMs `.10-.49`, containers `.50-.89`, incus `.90-.129`
|
|||||||
|
|
||||||
`flake.nix` applies patches from `/patches/` to nixpkgs before building (workaround for nix#3920).
|
`flake.nix` applies patches from `/patches/` to nixpkgs before building (workaround for nix#3920).
|
||||||
|
|
||||||
### Service Dashboard & Monitoring
|
|
||||||
|
|
||||||
When adding or removing a web-facing service, update both:
|
|
||||||
- **Gatus** (`common/server/gatus.nix`) — add/remove the endpoint monitor
|
|
||||||
- **Dashy** — add/remove the service entry from the dashboard config
|
|
||||||
|
|
||||||
### Key Conventions
|
### Key Conventions
|
||||||
|
|
||||||
- Uses `doas` instead of `sudo` everywhere
|
- Uses `doas` instead of `sudo` everywhere
|
||||||
@@ -85,17 +79,3 @@ When adding or removing a web-facing service, update both:
|
|||||||
- Always use `--no-link` when running `nix build`
|
- Always use `--no-link` when running `nix build`
|
||||||
- Don't use `nix build --dry-run` unless you only need evaluation — it skips the actual build
|
- Don't use `nix build --dry-run` unless you only need evaluation — it skips the actual build
|
||||||
- Avoid `2>&1` on nix commands — it can cause error output to be missed
|
- Avoid `2>&1` on nix commands — it can cause error output to be missed
|
||||||
|
|
||||||
## Git Worktrees
|
|
||||||
|
|
||||||
When the user asks you to "start a worktree" or work in a worktree, **do not create one manually** with `git worktree add`. Instead, tell the user to start a new session with:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
claude --worktree <name>
|
|
||||||
```
|
|
||||||
|
|
||||||
This is the built-in Claude Code worktree workflow. It creates the worktree at `.claude/worktrees/<name>/` with a branch `worktree-<name>` and starts a new Claude session inside it. Cleanup is handled automatically on exit.
|
|
||||||
|
|
||||||
When instructed to work in a git worktree (e.g., via `isolation: "worktree"` on a subagent), you **MUST** do so. If you are unable to create or use a git worktree, you **MUST** stop work immediately and report the failure to the user. Do not fall back to working in the main working tree.
|
|
||||||
|
|
||||||
When applying work from a git worktree back to the main branch, commit in the worktree first, then use `git cherry-pick` from the main working tree to bring the commit over. Do not use `git checkout` or `git apply` to copy files directly. Do **not** automatically apply worktree work to the main branch — always ask the user for approval first.
|
|
||||||
|
|||||||
@@ -6,11 +6,11 @@
|
|||||||
substituters = [
|
substituters = [
|
||||||
"https://cache.nixos.org/"
|
"https://cache.nixos.org/"
|
||||||
"https://nix-community.cachix.org"
|
"https://nix-community.cachix.org"
|
||||||
"http://s0.neet.dev:28338/nixos"
|
"http://s0.koi-bebop.ts.net:28338/nixos"
|
||||||
];
|
];
|
||||||
trusted-public-keys = [
|
trusted-public-keys = [
|
||||||
"nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs="
|
"nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs="
|
||||||
"nixos:e5AMCUWWEX9MESWAAMjBkZdGUpl588NhgsUO3HsdhFw="
|
"nixos:SnTTQutdOJbAmxo6AQ3cbRt5w9f4byMXQODCieBH3PQ="
|
||||||
];
|
];
|
||||||
|
|
||||||
# Allow substituters to be offline
|
# Allow substituters to be offline
|
||||||
|
|||||||
@@ -6,8 +6,6 @@
|
|||||||
./binary-cache.nix
|
./binary-cache.nix
|
||||||
./flakes.nix
|
./flakes.nix
|
||||||
./auto-update.nix
|
./auto-update.nix
|
||||||
./ntfy-alerts.nix
|
|
||||||
./zfs-alerts.nix
|
|
||||||
./shell.nix
|
./shell.nix
|
||||||
./network
|
./network
|
||||||
./boot
|
./boot
|
||||||
@@ -96,11 +94,11 @@
|
|||||||
{ groups = [ "wheel" ]; persist = true; }
|
{ groups = [ "wheel" ]; persist = true; }
|
||||||
];
|
];
|
||||||
|
|
||||||
nix.gc.automatic = !config.boot.isContainer;
|
nix.gc.automatic = true;
|
||||||
|
|
||||||
security.acme.acceptTerms = true;
|
security.acme.acceptTerms = true;
|
||||||
security.acme.defaults.email = "zuckerberg@neet.dev";
|
security.acme.defaults.email = "zuckerberg@neet.dev";
|
||||||
|
|
||||||
# Enable Desktop Environment if this is a PC (machine role is "personal")
|
# Enable Desktop Environment if this is a PC (machine role is "personal")
|
||||||
de.enable = lib.mkDefault (config.thisMachine.hasRole."personal" && !config.boot.isContainer);
|
de.enable = lib.mkDefault (config.thisMachine.hasRole."personal");
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,8 +7,10 @@ let
|
|||||||
in
|
in
|
||||||
{
|
{
|
||||||
imports = [
|
imports = [
|
||||||
./pia-vpn
|
./pia-openvpn.nix
|
||||||
|
./pia-wireguard.nix
|
||||||
./tailscale.nix
|
./tailscale.nix
|
||||||
|
./vpn.nix
|
||||||
./sandbox.nix
|
./sandbox.nix
|
||||||
];
|
];
|
||||||
|
|
||||||
|
|||||||
113
common/network/pia-openvpn.nix
Normal file
113
common/network/pia-openvpn.nix
Normal file
@@ -0,0 +1,113 @@
|
|||||||
|
{ config, pkgs, lib, ... }:
|
||||||
|
|
||||||
|
let
|
||||||
|
cfg = config.pia.openvpn;
|
||||||
|
vpnfailsafe = pkgs.stdenv.mkDerivation {
|
||||||
|
pname = "vpnfailsafe";
|
||||||
|
version = "0.0.1";
|
||||||
|
src = ./.;
|
||||||
|
installPhase = ''
|
||||||
|
mkdir -p $out
|
||||||
|
cp vpnfailsafe.sh $out/vpnfailsafe.sh
|
||||||
|
sed -i 's|getent|${pkgs.getent}/bin/getent|' $out/vpnfailsafe.sh
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
in
|
||||||
|
{
|
||||||
|
options.pia.openvpn = {
|
||||||
|
enable = lib.mkEnableOption "Enable private internet access";
|
||||||
|
server = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
default = "us-washingtondc.privacy.network";
|
||||||
|
example = "swiss.privacy.network";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
config = lib.mkIf cfg.enable {
|
||||||
|
services.openvpn = {
|
||||||
|
servers = {
|
||||||
|
pia = {
|
||||||
|
config = ''
|
||||||
|
client
|
||||||
|
dev tun
|
||||||
|
proto udp
|
||||||
|
remote ${cfg.server} 1198
|
||||||
|
resolv-retry infinite
|
||||||
|
nobind
|
||||||
|
persist-key
|
||||||
|
persist-tun
|
||||||
|
cipher aes-128-cbc
|
||||||
|
auth sha1
|
||||||
|
tls-client
|
||||||
|
remote-cert-tls server
|
||||||
|
|
||||||
|
auth-user-pass
|
||||||
|
compress
|
||||||
|
verb 1
|
||||||
|
reneg-sec 0
|
||||||
|
<crl-verify>
|
||||||
|
-----BEGIN X509 CRL-----
|
||||||
|
MIICWDCCAUAwDQYJKoZIhvcNAQENBQAwgegxCzAJBgNVBAYTAlVTMQswCQYDVQQI
|
||||||
|
EwJDQTETMBEGA1UEBxMKTG9zQW5nZWxlczEgMB4GA1UEChMXUHJpdmF0ZSBJbnRl
|
||||||
|
cm5ldCBBY2Nlc3MxIDAeBgNVBAsTF1ByaXZhdGUgSW50ZXJuZXQgQWNjZXNzMSAw
|
||||||
|
HgYDVQQDExdQcml2YXRlIEludGVybmV0IEFjY2VzczEgMB4GA1UEKRMXUHJpdmF0
|
||||||
|
ZSBJbnRlcm5ldCBBY2Nlc3MxLzAtBgkqhkiG9w0BCQEWIHNlY3VyZUBwcml2YXRl
|
||||||
|
aW50ZXJuZXRhY2Nlc3MuY29tFw0xNjA3MDgxOTAwNDZaFw0zNjA3MDMxOTAwNDZa
|
||||||
|
MCYwEQIBARcMMTYwNzA4MTkwMDQ2MBECAQYXDDE2MDcwODE5MDA0NjANBgkqhkiG
|
||||||
|
9w0BAQ0FAAOCAQEAQZo9X97ci8EcPYu/uK2HB152OZbeZCINmYyluLDOdcSvg6B5
|
||||||
|
jI+ffKN3laDvczsG6CxmY3jNyc79XVpEYUnq4rT3FfveW1+Ralf+Vf38HdpwB8EW
|
||||||
|
B4hZlQ205+21CALLvZvR8HcPxC9KEnev1mU46wkTiov0EKc+EdRxkj5yMgv0V2Re
|
||||||
|
ze7AP+NQ9ykvDScH4eYCsmufNpIjBLhpLE2cuZZXBLcPhuRzVoU3l7A9lvzG9mjA
|
||||||
|
5YijHJGHNjlWFqyrn1CfYS6koa4TGEPngBoAziWRbDGdhEgJABHrpoaFYaL61zqy
|
||||||
|
MR6jC0K2ps9qyZAN74LEBedEfK7tBOzWMwr58A==
|
||||||
|
-----END X509 CRL-----
|
||||||
|
</crl-verify>
|
||||||
|
|
||||||
|
<ca>
|
||||||
|
-----BEGIN CERTIFICATE-----
|
||||||
|
MIIFqzCCBJOgAwIBAgIJAKZ7D5Yv87qDMA0GCSqGSIb3DQEBDQUAMIHoMQswCQYD
|
||||||
|
VQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNVBAcTCkxvc0FuZ2VsZXMxIDAeBgNV
|
||||||
|
BAoTF1ByaXZhdGUgSW50ZXJuZXQgQWNjZXNzMSAwHgYDVQQLExdQcml2YXRlIElu
|
||||||
|
dGVybmV0IEFjY2VzczEgMB4GA1UEAxMXUHJpdmF0ZSBJbnRlcm5ldCBBY2Nlc3Mx
|
||||||
|
IDAeBgNVBCkTF1ByaXZhdGUgSW50ZXJuZXQgQWNjZXNzMS8wLQYJKoZIhvcNAQkB
|
||||||
|
FiBzZWN1cmVAcHJpdmF0ZWludGVybmV0YWNjZXNzLmNvbTAeFw0xNDA0MTcxNzM1
|
||||||
|
MThaFw0zNDA0MTIxNzM1MThaMIHoMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0Ex
|
||||||
|
EzARBgNVBAcTCkxvc0FuZ2VsZXMxIDAeBgNVBAoTF1ByaXZhdGUgSW50ZXJuZXQg
|
||||||
|
QWNjZXNzMSAwHgYDVQQLExdQcml2YXRlIEludGVybmV0IEFjY2VzczEgMB4GA1UE
|
||||||
|
AxMXUHJpdmF0ZSBJbnRlcm5ldCBBY2Nlc3MxIDAeBgNVBCkTF1ByaXZhdGUgSW50
|
||||||
|
ZXJuZXQgQWNjZXNzMS8wLQYJKoZIhvcNAQkBFiBzZWN1cmVAcHJpdmF0ZWludGVy
|
||||||
|
bmV0YWNjZXNzLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAPXD
|
||||||
|
L1L9tX6DGf36liA7UBTy5I869z0UVo3lImfOs/GSiFKPtInlesP65577nd7UNzzX
|
||||||
|
lH/P/CnFPdBWlLp5ze3HRBCc/Avgr5CdMRkEsySL5GHBZsx6w2cayQ2EcRhVTwWp
|
||||||
|
cdldeNO+pPr9rIgPrtXqT4SWViTQRBeGM8CDxAyTopTsobjSiYZCF9Ta1gunl0G/
|
||||||
|
8Vfp+SXfYCC+ZzWvP+L1pFhPRqzQQ8k+wMZIovObK1s+nlwPaLyayzw9a8sUnvWB
|
||||||
|
/5rGPdIYnQWPgoNlLN9HpSmsAcw2z8DXI9pIxbr74cb3/HSfuYGOLkRqrOk6h4RC
|
||||||
|
OfuWoTrZup1uEOn+fw8CAwEAAaOCAVQwggFQMB0GA1UdDgQWBBQv63nQ/pJAt5tL
|
||||||
|
y8VJcbHe22ZOsjCCAR8GA1UdIwSCARYwggESgBQv63nQ/pJAt5tLy8VJcbHe22ZO
|
||||||
|
sqGB7qSB6zCB6DELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRMwEQYDVQQHEwpM
|
||||||
|
b3NBbmdlbGVzMSAwHgYDVQQKExdQcml2YXRlIEludGVybmV0IEFjY2VzczEgMB4G
|
||||||
|
A1UECxMXUHJpdmF0ZSBJbnRlcm5ldCBBY2Nlc3MxIDAeBgNVBAMTF1ByaXZhdGUg
|
||||||
|
SW50ZXJuZXQgQWNjZXNzMSAwHgYDVQQpExdQcml2YXRlIEludGVybmV0IEFjY2Vz
|
||||||
|
czEvMC0GCSqGSIb3DQEJARYgc2VjdXJlQHByaXZhdGVpbnRlcm5ldGFjY2Vzcy5j
|
||||||
|
b22CCQCmew+WL/O6gzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBDQUAA4IBAQAn
|
||||||
|
a5PgrtxfwTumD4+3/SYvwoD66cB8IcK//h1mCzAduU8KgUXocLx7QgJWo9lnZ8xU
|
||||||
|
ryXvWab2usg4fqk7FPi00bED4f4qVQFVfGfPZIH9QQ7/48bPM9RyfzImZWUCenK3
|
||||||
|
7pdw4Bvgoys2rHLHbGen7f28knT2j/cbMxd78tQc20TIObGjo8+ISTRclSTRBtyC
|
||||||
|
GohseKYpTS9himFERpUgNtefvYHbn70mIOzfOJFTVqfrptf9jXa9N8Mpy3ayfodz
|
||||||
|
1wiqdteqFXkTYoSDctgKMiZ6GdocK9nMroQipIQtpnwd4yBDWIyC6Bvlkrq5TQUt
|
||||||
|
YDQ8z9v+DMO6iwyIDRiU
|
||||||
|
-----END CERTIFICATE-----
|
||||||
|
</ca>
|
||||||
|
|
||||||
|
disable-occ
|
||||||
|
auth-user-pass /run/agenix/pia-login.conf
|
||||||
|
'';
|
||||||
|
autoStart = true;
|
||||||
|
up = "${vpnfailsafe}/vpnfailsafe.sh";
|
||||||
|
down = "${vpnfailsafe}/vpnfailsafe.sh";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
age.secrets."pia-login.conf".file = ../../secrets/pia-login.age;
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -1,89 +0,0 @@
|
|||||||
# PIA VPN Multi-Container Module
|
|
||||||
|
|
||||||
Routes service containers through a PIA WireGuard VPN using a shared bridge network.
|
|
||||||
|
|
||||||
## Architecture
|
|
||||||
|
|
||||||
```
|
|
||||||
internet
|
|
||||||
│
|
|
||||||
┌──────┴──────┐
|
|
||||||
│ Host │
|
|
||||||
│ tinyproxy │ ← PIA API bootstrap proxy
|
|
||||||
│ 10.100.0.1 │
|
|
||||||
└──────┬───────┘
|
|
||||||
│ br-vpn (no IPMasquerade)
|
|
||||||
┌────────────┼──────────────┐
|
|
||||||
│ │ │
|
|
||||||
┌──────┴──────┐ ┌───┴────┐ ┌─────┴──────┐
|
|
||||||
│ VPN ctr │ │ servarr│ │transmission│
|
|
||||||
│ 10.100.0.2 │ │ .11 │ │ .10 │
|
|
||||||
│ piaw (WG) │ │ │ │ │
|
|
||||||
│ gateway+NAT │ └────────┘ └────────────┘
|
|
||||||
└─────────────┘
|
|
||||||
```
|
|
||||||
|
|
||||||
- **Host** creates the WG interface (encrypted UDP stays in host netns) and runs tinyproxy on the bridge so the VPN container can bootstrap PIA auth before WG is up.
|
|
||||||
- **VPN container** authenticates with PIA via the proxy, configures WG, sets up NAT (masquerade bridge→WG) and optional port forwarding DNAT.
|
|
||||||
- **Service containers** default-route through the VPN container. No WG interface = no internet if VPN is down = leak-proof by topology.
|
|
||||||
- **Host** reaches containers directly on the bridge for nginx reverse proxying.
|
|
||||||
|
|
||||||
## Key design decisions
|
|
||||||
|
|
||||||
- **Bridge, not veth pairs**: All containers share one bridge (`br-vpn`), so the VPN container can act as a single gateway. The host does NOT masquerade bridge traffic — only the VPN container does (through WG).
|
|
||||||
- **Port forwarding is implicit**: If any container sets `receiveForwardedPort`, the VPN container automatically handles PIA port forwarding and DNAT. No separate toggle needed.
|
|
||||||
- **DNS through WG**: Service containers use the VPN container as their DNS server. The VPN container runs `systemd-resolved` listening on its bridge IP, forwarding queries through the WG tunnel.
|
|
||||||
- **Monthly renewal**: `pia-vpn-setup` uses `Type=simple` + `Restart=always` + `RuntimeMaxSec=30d` to periodically re-authenticate with PIA and get a fresh port forwarding signature (signatures expire after ~2 months). Service containers are unaffected during renewal.
|
|
||||||
|
|
||||||
## Files
|
|
||||||
|
|
||||||
| File | Purpose |
|
|
||||||
|---|---|
|
|
||||||
| `default.nix` | Options, bridge, tinyproxy, host firewall, WG interface creation, assertions |
|
|
||||||
| `vpn-container.nix` | VPN container: PIA auth, WG config, NAT, DNAT, port refresh timer |
|
|
||||||
| `service-container.nix` | Generates service containers with static IP and gateway→VPN |
|
|
||||||
| `scripts.nix` | Bash function library for PIA API calls and WG configuration |
|
|
||||||
| `ca.rsa.4096.crt` | PIA CA certificate for API TLS verification |
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
```nix
|
|
||||||
pia-vpn = {
|
|
||||||
enable = true;
|
|
||||||
serverLocation = "swiss";
|
|
||||||
|
|
||||||
containers.my-service = {
|
|
||||||
ip = "10.100.0.10";
|
|
||||||
mounts."/data".hostPath = "/data";
|
|
||||||
config = { services.my-app.enable = true; };
|
|
||||||
|
|
||||||
# Optional: receive PIA's forwarded port (at most one container)
|
|
||||||
receiveForwardedPort = { port = 8080; protocol = "both"; };
|
|
||||||
onPortForwarded = ''
|
|
||||||
echo "PIA assigned port $PORT, forwarding to $TARGET_IP:8080"
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
};
|
|
||||||
```
|
|
||||||
|
|
||||||
## Debugging
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Check VPN container status
|
|
||||||
machinectl shell pia-vpn
|
|
||||||
systemctl status pia-vpn-setup
|
|
||||||
journalctl -u pia-vpn-setup
|
|
||||||
|
|
||||||
# Verify WG tunnel
|
|
||||||
wg show
|
|
||||||
|
|
||||||
# Check NAT/DNAT rules
|
|
||||||
iptables -t nat -L -v
|
|
||||||
iptables -L FORWARD -v
|
|
||||||
|
|
||||||
# From a service container — verify VPN routing
|
|
||||||
curl ifconfig.me
|
|
||||||
|
|
||||||
# Port refresh logs
|
|
||||||
journalctl -u pia-vpn-port-refresh
|
|
||||||
```
|
|
||||||
@@ -1,279 +0,0 @@
|
|||||||
{ config, lib, pkgs, ... }:
|
|
||||||
|
|
||||||
# PIA VPN multi-container module.
|
|
||||||
#
|
|
||||||
# Architecture:
|
|
||||||
# Host creates WG interface, runs tinyproxy on bridge for PIA API bootstrap.
|
|
||||||
# VPN container does all PIA logic via proxy, configures WG, masquerades bridge→piaw.
|
|
||||||
# Service containers default route → VPN container (leak-proof by topology).
|
|
||||||
#
|
|
||||||
# Reference: https://www.wireguard.com/netns/#ordinary-containerization
|
|
||||||
|
|
||||||
with lib;
|
|
||||||
|
|
||||||
let
|
|
||||||
cfg = config.pia-vpn;
|
|
||||||
|
|
||||||
# Derive prefix length from subnet CIDR (e.g. "10.100.0.0/24" → "24")
|
|
||||||
subnetPrefixLen = last (splitString "/" cfg.subnet);
|
|
||||||
|
|
||||||
containerSubmodule = types.submodule ({ name, ... }: {
|
|
||||||
options = {
|
|
||||||
ip = mkOption {
|
|
||||||
type = types.str;
|
|
||||||
description = "Static IP address for this container on the VPN bridge";
|
|
||||||
};
|
|
||||||
|
|
||||||
config = mkOption {
|
|
||||||
type = types.anything;
|
|
||||||
default = { };
|
|
||||||
description = "NixOS configuration for this container";
|
|
||||||
};
|
|
||||||
|
|
||||||
mounts = mkOption {
|
|
||||||
type = types.attrsOf (types.submodule {
|
|
||||||
options = {
|
|
||||||
hostPath = mkOption {
|
|
||||||
type = types.str;
|
|
||||||
description = "Path on the host to bind mount";
|
|
||||||
};
|
|
||||||
isReadOnly = mkOption {
|
|
||||||
type = types.bool;
|
|
||||||
default = false;
|
|
||||||
description = "Whether the mount is read-only";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
});
|
|
||||||
default = { };
|
|
||||||
description = "Bind mounts for the container";
|
|
||||||
};
|
|
||||||
|
|
||||||
receiveForwardedPort = mkOption {
|
|
||||||
type = types.nullOr (types.submodule {
|
|
||||||
options = {
|
|
||||||
port = mkOption {
|
|
||||||
type = types.nullOr types.port;
|
|
||||||
default = null;
|
|
||||||
description = ''
|
|
||||||
Target port to forward to. If null, forwards to the same PIA-assigned port.
|
|
||||||
PIA-assigned ports below 10000 are rejected to avoid accidentally
|
|
||||||
forwarding traffic to other services.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
protocol = mkOption {
|
|
||||||
type = types.enum [ "tcp" "udp" "both" ];
|
|
||||||
default = "both";
|
|
||||||
description = "Protocol(s) to forward";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
});
|
|
||||||
default = null;
|
|
||||||
description = "Port forwarding configuration. At most one container may set this.";
|
|
||||||
};
|
|
||||||
|
|
||||||
onPortForwarded = mkOption {
|
|
||||||
type = types.nullOr types.lines;
|
|
||||||
default = null;
|
|
||||||
description = ''
|
|
||||||
Optional script run in the VPN container after port forwarding is established.
|
|
||||||
Available environment variables: $PORT (PIA-assigned port), $TARGET_IP (this container's IP).
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
};
|
|
||||||
});
|
|
||||||
|
|
||||||
# NOTE: All derivations of cfg.containers are kept INSIDE config = mkIf ... { }
|
|
||||||
# to avoid infinite recursion. The module system's pushDownProperties eagerly
|
|
||||||
# evaluates let bindings and mkMerge contents, so any top-level let binding
|
|
||||||
# that touches cfg.containers would force config evaluation during structure
|
|
||||||
# discovery, creating a cycle.
|
|
||||||
in
|
|
||||||
{
|
|
||||||
imports = [
|
|
||||||
./vpn-container.nix
|
|
||||||
./service-container.nix
|
|
||||||
];
|
|
||||||
|
|
||||||
options.pia-vpn = {
|
|
||||||
enable = mkEnableOption "PIA VPN multi-container setup";
|
|
||||||
|
|
||||||
serverLocation = mkOption {
|
|
||||||
type = types.str;
|
|
||||||
default = "swiss";
|
|
||||||
description = "PIA server region ID";
|
|
||||||
};
|
|
||||||
|
|
||||||
interfaceName = mkOption {
|
|
||||||
type = types.str;
|
|
||||||
default = "piaw";
|
|
||||||
description = "WireGuard interface name";
|
|
||||||
};
|
|
||||||
|
|
||||||
wireguardListenPort = mkOption {
|
|
||||||
type = types.port;
|
|
||||||
default = 51820;
|
|
||||||
description = "WireGuard listen port";
|
|
||||||
};
|
|
||||||
|
|
||||||
bridgeName = mkOption {
|
|
||||||
type = types.str;
|
|
||||||
default = "br-vpn";
|
|
||||||
description = "Bridge interface name for VPN containers";
|
|
||||||
};
|
|
||||||
|
|
||||||
subnet = mkOption {
|
|
||||||
type = types.str;
|
|
||||||
default = "10.100.0.0/24";
|
|
||||||
description = "Subnet CIDR for VPN bridge network";
|
|
||||||
};
|
|
||||||
|
|
||||||
hostAddress = mkOption {
|
|
||||||
type = types.str;
|
|
||||||
default = "10.100.0.1";
|
|
||||||
description = "Host IP on the VPN bridge";
|
|
||||||
};
|
|
||||||
|
|
||||||
vpnAddress = mkOption {
|
|
||||||
type = types.str;
|
|
||||||
default = "10.100.0.2";
|
|
||||||
description = "VPN container IP on the bridge";
|
|
||||||
};
|
|
||||||
|
|
||||||
proxyPort = mkOption {
|
|
||||||
type = types.port;
|
|
||||||
default = 8888;
|
|
||||||
description = "Tinyproxy port for PIA API bootstrap";
|
|
||||||
};
|
|
||||||
|
|
||||||
containers = mkOption {
|
|
||||||
type = types.attrsOf containerSubmodule;
|
|
||||||
default = { };
|
|
||||||
description = "Service containers that route through the VPN";
|
|
||||||
};
|
|
||||||
|
|
||||||
# Subnet prefix length derived from cfg.subnet (exposed for other submodules)
|
|
||||||
subnetPrefixLen = mkOption {
|
|
||||||
type = types.str;
|
|
||||||
default = subnetPrefixLen;
|
|
||||||
description = "Prefix length derived from subnet CIDR";
|
|
||||||
readOnly = true;
|
|
||||||
};
|
|
||||||
|
|
||||||
# Shared host entries for all containers (host + VPN + service containers)
|
|
||||||
containerHosts = mkOption {
|
|
||||||
type = types.attrsOf (types.listOf types.str);
|
|
||||||
internal = true;
|
|
||||||
readOnly = true;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
config = mkIf cfg.enable {
|
|
||||||
assertions =
|
|
||||||
let
|
|
||||||
forwardingContainers = filterAttrs (_: c: c.receiveForwardedPort != null) cfg.containers;
|
|
||||||
containerIPs = mapAttrsToList (_: c: c.ip) cfg.containers;
|
|
||||||
in
|
|
||||||
[
|
|
||||||
{
|
|
||||||
assertion = length (attrNames forwardingContainers) <= 1;
|
|
||||||
message = "At most one pia-vpn container may set receiveForwardedPort";
|
|
||||||
}
|
|
||||||
{
|
|
||||||
assertion = length containerIPs == length (unique containerIPs);
|
|
||||||
message = "pia-vpn container IPs must be unique";
|
|
||||||
}
|
|
||||||
];
|
|
||||||
|
|
||||||
# Enable systemd-networkd for bridge management
|
|
||||||
systemd.network.enable = true;
|
|
||||||
|
|
||||||
# TODO: re-enable once primary networking uses networkd
|
|
||||||
systemd.network.wait-online.enable = false;
|
|
||||||
|
|
||||||
# Tell NetworkManager to ignore VPN bridge and container interfaces
|
|
||||||
networking.networkmanager.unmanaged = mkIf config.networking.networkmanager.enable [
|
|
||||||
"interface-name:${cfg.bridgeName}"
|
|
||||||
"interface-name:ve-*"
|
|
||||||
];
|
|
||||||
|
|
||||||
# Bridge network device
|
|
||||||
systemd.network.netdevs."20-${cfg.bridgeName}".netdevConfig = {
|
|
||||||
Kind = "bridge";
|
|
||||||
Name = cfg.bridgeName;
|
|
||||||
};
|
|
||||||
|
|
||||||
# Bridge network configuration — NO IPMasquerade (host must NOT be gateway)
|
|
||||||
systemd.network.networks."20-${cfg.bridgeName}" = {
|
|
||||||
matchConfig.Name = cfg.bridgeName;
|
|
||||||
networkConfig = {
|
|
||||||
Address = "${cfg.hostAddress}/${cfg.subnetPrefixLen}";
|
|
||||||
DHCPServer = false;
|
|
||||||
};
|
|
||||||
linkConfig.RequiredForOnline = "no";
|
|
||||||
};
|
|
||||||
|
|
||||||
# Allow wireguard traffic through rpfilter
|
|
||||||
networking.firewall.checkReversePath = "loose";
|
|
||||||
|
|
||||||
# Block bridge → outside forwarding (prevents host from being a gateway for containers)
|
|
||||||
networking.firewall.extraForwardRules = ''
|
|
||||||
iifname "${cfg.bridgeName}" oifname != "${cfg.bridgeName}" drop
|
|
||||||
'';
|
|
||||||
|
|
||||||
# Allow tinyproxy from bridge (tinyproxy itself restricts to VPN container IP)
|
|
||||||
networking.firewall.interfaces.${cfg.bridgeName}.allowedTCPPorts = [ cfg.proxyPort ];
|
|
||||||
|
|
||||||
# Tinyproxy — runs on bridge IP so VPN container can bootstrap PIA auth
|
|
||||||
services.tinyproxy = {
|
|
||||||
enable = true;
|
|
||||||
settings = {
|
|
||||||
Listen = cfg.hostAddress;
|
|
||||||
Port = cfg.proxyPort;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
systemd.services.tinyproxy.before = [ "container@pia-vpn.service" ];
|
|
||||||
|
|
||||||
# WireGuard interface creation (host-side oneshot)
|
|
||||||
# Creates the interface in the host namespace so encrypted UDP stays in host netns.
|
|
||||||
# The container takes ownership of the interface on startup via `interfaces = [ ... ]`.
|
|
||||||
systemd.services.pia-vpn-wg-create = {
|
|
||||||
description = "Create PIA VPN WireGuard interface";
|
|
||||||
|
|
||||||
before = [ "container@pia-vpn.service" ];
|
|
||||||
requiredBy = [ "container@pia-vpn.service" ];
|
|
||||||
partOf = [ "container@pia-vpn.service" ];
|
|
||||||
wantedBy = [ "multi-user.target" ];
|
|
||||||
|
|
||||||
path = with pkgs; [ iproute2 ];
|
|
||||||
|
|
||||||
serviceConfig = {
|
|
||||||
Type = "oneshot";
|
|
||||||
RemainAfterExit = true;
|
|
||||||
};
|
|
||||||
|
|
||||||
script = ''
|
|
||||||
[[ -z $(ip link show dev ${cfg.interfaceName} 2>/dev/null) ]] || exit 0
|
|
||||||
ip link add ${cfg.interfaceName} type wireguard
|
|
||||||
'';
|
|
||||||
|
|
||||||
preStop = ''
|
|
||||||
ip link del ${cfg.interfaceName} 2>/dev/null || true
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
# Host entries for container hostnames — NixOS only auto-creates these for
|
|
||||||
# hostAddress/localAddress containers, not hostBridge. Use the standard
|
|
||||||
# {name}.containers convention.
|
|
||||||
pia-vpn.containerHosts =
|
|
||||||
{ ${cfg.vpnAddress} = [ "pia-vpn.containers" ]; }
|
|
||||||
// mapAttrs' (name: ctr: nameValuePair ctr.ip [ "${name}.containers" ]) cfg.containers;
|
|
||||||
|
|
||||||
networking.hosts = cfg.containerHosts;
|
|
||||||
|
|
||||||
# PIA login secret
|
|
||||||
age.secrets."pia-login.conf".file = ../../../secrets/pia-login.age;
|
|
||||||
|
|
||||||
# IP forwarding needed for bridge traffic between containers
|
|
||||||
networking.ip_forward = true;
|
|
||||||
};
|
|
||||||
}
|
|
||||||
@@ -1,209 +0,0 @@
|
|||||||
let
|
|
||||||
caPath = ./ca.rsa.4096.crt;
|
|
||||||
in
|
|
||||||
|
|
||||||
# Bash function library for PIA VPN WireGuard operations.
|
|
||||||
# All PIA API calls accept an optional $proxy variable:
|
|
||||||
# proxy="http://10.100.0.1:8888" fetchPIAToken
|
|
||||||
# When $proxy is set, curl uses --proxy "$proxy"; otherwise direct connection.
|
|
||||||
|
|
||||||
# Reference materials:
|
|
||||||
# https://serverlist.piaservers.net/vpninfo/servers/v6
|
|
||||||
# https://github.com/pia-foss/manual-connections
|
|
||||||
# https://github.com/thrnz/docker-wireguard-pia/blob/master/extra/wg-gen.sh
|
|
||||||
# https://www.wireguard.com/netns/#ordinary-containerization
|
|
||||||
|
|
||||||
{
|
|
||||||
scriptCommon = ''
|
|
||||||
proxy_args() {
|
|
||||||
if [[ -n "''${proxy:-}" ]]; then
|
|
||||||
echo "--proxy $proxy"
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
fetchPIAToken() {
|
|
||||||
local PIA_USER PIA_PASS resp
|
|
||||||
echo "Reading PIA credentials..."
|
|
||||||
PIA_USER=$(sed '1q;d' /run/agenix/pia-login.conf)
|
|
||||||
PIA_PASS=$(sed '2q;d' /run/agenix/pia-login.conf)
|
|
||||||
echo "Requesting PIA authentication token..."
|
|
||||||
resp=$(curl -s $(proxy_args) -u "$PIA_USER:$PIA_PASS" \
|
|
||||||
"https://www.privateinternetaccess.com/gtoken/generateToken")
|
|
||||||
PIA_TOKEN=$(echo "$resp" | jq -r '.token')
|
|
||||||
if [[ -z "$PIA_TOKEN" || "$PIA_TOKEN" == "null" ]]; then
|
|
||||||
echo "ERROR: Failed to fetch PIA token: $resp" >&2
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
echo "PIA token acquired"
|
|
||||||
}
|
|
||||||
|
|
||||||
choosePIAServer() {
|
|
||||||
local serverLocation=$1
|
|
||||||
local servers servers_json totalservers serverindex
|
|
||||||
servers=$(mktemp)
|
|
||||||
servers_json=$(mktemp)
|
|
||||||
echo "Fetching PIA server list..."
|
|
||||||
curl -s $(proxy_args) \
|
|
||||||
"https://serverlist.piaservers.net/vpninfo/servers/v6" > "$servers"
|
|
||||||
head -n 1 "$servers" | tr -d '\n' > "$servers_json"
|
|
||||||
|
|
||||||
totalservers=$(jq -r \
|
|
||||||
'.regions | .[] | select(.id=="'"$serverLocation"'") | .servers.wg | length' \
|
|
||||||
"$servers_json")
|
|
||||||
if ! [[ "$totalservers" =~ ^[0-9]+$ ]] || [ "$totalservers" -eq 0 ] 2>/dev/null; then
|
|
||||||
echo "ERROR: Location \"$serverLocation\" not found." >&2
|
|
||||||
rm -f "$servers_json" "$servers"
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
echo "Found $totalservers WireGuard servers in region '$serverLocation'"
|
|
||||||
serverindex=$(( RANDOM % totalservers ))
|
|
||||||
|
|
||||||
WG_HOSTNAME=$(jq -r \
|
|
||||||
'.regions | .[] | select(.id=="'"$serverLocation"'") | .servers.wg | .['"$serverindex"'].cn' \
|
|
||||||
"$servers_json")
|
|
||||||
WG_SERVER_IP=$(jq -r \
|
|
||||||
'.regions | .[] | select(.id=="'"$serverLocation"'") | .servers.wg | .['"$serverindex"'].ip' \
|
|
||||||
"$servers_json")
|
|
||||||
WG_SERVER_PORT=$(jq -r '.groups.wg | .[0] | .ports | .[0]' "$servers_json")
|
|
||||||
|
|
||||||
rm -f "$servers_json" "$servers"
|
|
||||||
echo "Selected server $serverindex/$totalservers: $WG_HOSTNAME ($WG_SERVER_IP:$WG_SERVER_PORT)"
|
|
||||||
}
|
|
||||||
|
|
||||||
generateWireguardKey() {
|
|
||||||
PRIVATE_KEY=$(wg genkey)
|
|
||||||
PUBLIC_KEY=$(echo "$PRIVATE_KEY" | wg pubkey)
|
|
||||||
echo "Generated WireGuard keypair"
|
|
||||||
}
|
|
||||||
|
|
||||||
authorizeKeyWithPIAServer() {
|
|
||||||
local addKeyResponse
|
|
||||||
echo "Sending addKey request to $WG_HOSTNAME ($WG_SERVER_IP:$WG_SERVER_PORT)..."
|
|
||||||
addKeyResponse=$(curl -s -G $(proxy_args) \
|
|
||||||
--connect-to "$WG_HOSTNAME::$WG_SERVER_IP:" \
|
|
||||||
--cacert "${caPath}" \
|
|
||||||
--data-urlencode "pt=$PIA_TOKEN" \
|
|
||||||
--data-urlencode "pubkey=$PUBLIC_KEY" \
|
|
||||||
"https://$WG_HOSTNAME:$WG_SERVER_PORT/addKey")
|
|
||||||
local status
|
|
||||||
status=$(echo "$addKeyResponse" | jq -r '.status')
|
|
||||||
if [[ "$status" != "OK" ]]; then
|
|
||||||
echo "ERROR: addKey failed: $addKeyResponse" >&2
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
MY_IP=$(echo "$addKeyResponse" | jq -r '.peer_ip')
|
|
||||||
WG_SERVER_PUBLIC_KEY=$(echo "$addKeyResponse" | jq -r '.server_key')
|
|
||||||
WG_SERVER_PORT=$(echo "$addKeyResponse" | jq -r '.server_port')
|
|
||||||
echo "Key authorized — assigned VPN IP: $MY_IP, server port: $WG_SERVER_PORT"
|
|
||||||
}
|
|
||||||
|
|
||||||
writeWireguardQuickFile() {
|
|
||||||
local wgFile=$1
|
|
||||||
local listenPort=$2
|
|
||||||
rm -f "$wgFile"
|
|
||||||
touch "$wgFile"
|
|
||||||
chmod 700 "$wgFile"
|
|
||||||
cat > "$wgFile" <<WGEOF
|
|
||||||
[Interface]
|
|
||||||
PrivateKey = $PRIVATE_KEY
|
|
||||||
ListenPort = $listenPort
|
|
||||||
[Peer]
|
|
||||||
PersistentKeepalive = 25
|
|
||||||
PublicKey = $WG_SERVER_PUBLIC_KEY
|
|
||||||
AllowedIPs = 0.0.0.0/0
|
|
||||||
Endpoint = $WG_SERVER_IP:$WG_SERVER_PORT
|
|
||||||
WGEOF
|
|
||||||
echo "Wrote WireGuard config to $wgFile (listen=$listenPort)"
|
|
||||||
}
|
|
||||||
|
|
||||||
writeChosenServerToFile() {
|
|
||||||
local serverFile=$1
|
|
||||||
jq -n \
|
|
||||||
--arg hostname "$WG_HOSTNAME" \
|
|
||||||
--arg ip "$WG_SERVER_IP" \
|
|
||||||
--arg port "$WG_SERVER_PORT" \
|
|
||||||
'{hostname: $hostname, ip: $ip, port: $port}' > "$serverFile"
|
|
||||||
chmod 700 "$serverFile"
|
|
||||||
echo "Wrote server info to $serverFile"
|
|
||||||
}
|
|
||||||
|
|
||||||
loadChosenServerFromFile() {
|
|
||||||
local serverFile=$1
|
|
||||||
WG_HOSTNAME=$(jq -r '.hostname' "$serverFile")
|
|
||||||
WG_SERVER_IP=$(jq -r '.ip' "$serverFile")
|
|
||||||
WG_SERVER_PORT=$(jq -r '.port' "$serverFile")
|
|
||||||
echo "Loaded server info from $serverFile: $WG_HOSTNAME ($WG_SERVER_IP:$WG_SERVER_PORT)"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Reset WG interface and tear down NAT/forwarding rules.
|
|
||||||
# Called on startup (clear stale state) and on exit via trap.
|
|
||||||
cleanupVpn() {
|
|
||||||
local interfaceName=$1
|
|
||||||
wg set "$interfaceName" listen-port 0 2>/dev/null || true
|
|
||||||
ip -4 address flush dev "$interfaceName" 2>/dev/null || true
|
|
||||||
ip route del default dev "$interfaceName" 2>/dev/null || true
|
|
||||||
iptables -t nat -F 2>/dev/null || true
|
|
||||||
iptables -F FORWARD 2>/dev/null || true
|
|
||||||
}
|
|
||||||
|
|
||||||
connectToServer() {
|
|
||||||
local wgFile=$1
|
|
||||||
local interfaceName=$2
|
|
||||||
|
|
||||||
echo "Applying WireGuard config to $interfaceName..."
|
|
||||||
wg setconf "$interfaceName" "$wgFile"
|
|
||||||
ip -4 address add "$MY_IP" dev "$interfaceName"
|
|
||||||
ip link set mtu 1420 up dev "$interfaceName"
|
|
||||||
echo "WireGuard interface $interfaceName is up with IP $MY_IP"
|
|
||||||
}
|
|
||||||
|
|
||||||
reservePortForward() {
|
|
||||||
local payload_and_signature
|
|
||||||
echo "Requesting port forward signature from $WG_HOSTNAME..."
|
|
||||||
payload_and_signature=$(curl -s -m 5 \
|
|
||||||
--connect-to "$WG_HOSTNAME::$WG_SERVER_IP:" \
|
|
||||||
--cacert "${caPath}" \
|
|
||||||
-G --data-urlencode "token=$PIA_TOKEN" \
|
|
||||||
"https://$WG_HOSTNAME:19999/getSignature")
|
|
||||||
local status
|
|
||||||
status=$(echo "$payload_and_signature" | jq -r '.status')
|
|
||||||
if [[ "$status" != "OK" ]]; then
|
|
||||||
echo "ERROR: getSignature failed: $payload_and_signature" >&2
|
|
||||||
return 1
|
|
||||||
fi
|
|
||||||
PORT_SIGNATURE=$(echo "$payload_and_signature" | jq -r '.signature')
|
|
||||||
PORT_PAYLOAD=$(echo "$payload_and_signature" | jq -r '.payload')
|
|
||||||
PORT=$(echo "$PORT_PAYLOAD" | base64 -d | jq -r '.port')
|
|
||||||
echo "Port forward reserved: port $PORT"
|
|
||||||
}
|
|
||||||
|
|
||||||
writePortRenewalFile() {
|
|
||||||
local portRenewalFile=$1
|
|
||||||
jq -n \
|
|
||||||
--arg signature "$PORT_SIGNATURE" \
|
|
||||||
--arg payload "$PORT_PAYLOAD" \
|
|
||||||
'{signature: $signature, payload: $payload}' > "$portRenewalFile"
|
|
||||||
chmod 700 "$portRenewalFile"
|
|
||||||
echo "Wrote port renewal data to $portRenewalFile"
|
|
||||||
}
|
|
||||||
|
|
||||||
readPortRenewalFile() {
|
|
||||||
local portRenewalFile=$1
|
|
||||||
PORT_SIGNATURE=$(jq -r '.signature' "$portRenewalFile")
|
|
||||||
PORT_PAYLOAD=$(jq -r '.payload' "$portRenewalFile")
|
|
||||||
echo "Loaded port renewal data from $portRenewalFile"
|
|
||||||
}
|
|
||||||
|
|
||||||
refreshPIAPort() {
|
|
||||||
local bindPortResponse
|
|
||||||
echo "Refreshing port forward binding with $WG_HOSTNAME..."
|
|
||||||
bindPortResponse=$(curl -Gs -m 5 \
|
|
||||||
--connect-to "$WG_HOSTNAME::$WG_SERVER_IP:" \
|
|
||||||
--cacert "${caPath}" \
|
|
||||||
--data-urlencode "payload=$PORT_PAYLOAD" \
|
|
||||||
--data-urlencode "signature=$PORT_SIGNATURE" \
|
|
||||||
"https://$WG_HOSTNAME:19999/bindPort")
|
|
||||||
echo "bindPort response: $bindPortResponse"
|
|
||||||
}
|
|
||||||
'';
|
|
||||||
}
|
|
||||||
@@ -1,87 +0,0 @@
|
|||||||
{ config, lib, allModules, ... }:
|
|
||||||
|
|
||||||
# Generates service containers that route all traffic through the VPN container.
|
|
||||||
# Each container gets a static IP on the VPN bridge with default route → VPN container.
|
|
||||||
#
|
|
||||||
# Uses lazy mapAttrs inside fixed config keys to avoid infinite recursion.
|
|
||||||
# (mkMerge + mapAttrsToList at the top level forces eager evaluation of cfg.containers
|
|
||||||
# during module structure discovery, which creates a cycle with config evaluation.)
|
|
||||||
|
|
||||||
with lib;
|
|
||||||
|
|
||||||
let
|
|
||||||
cfg = config.pia-vpn;
|
|
||||||
|
|
||||||
mkContainer = name: ctr: {
|
|
||||||
autoStart = true;
|
|
||||||
ephemeral = true;
|
|
||||||
privateNetwork = true;
|
|
||||||
hostBridge = cfg.bridgeName;
|
|
||||||
|
|
||||||
bindMounts = mapAttrs
|
|
||||||
(_: mount: {
|
|
||||||
hostPath = mount.hostPath;
|
|
||||||
isReadOnly = mount.isReadOnly;
|
|
||||||
})
|
|
||||||
ctr.mounts;
|
|
||||||
|
|
||||||
config = { config, pkgs, lib, ... }: {
|
|
||||||
imports = allModules ++ [ ctr.config ];
|
|
||||||
|
|
||||||
# Static IP with gateway pointing to VPN container
|
|
||||||
networking.useNetworkd = true;
|
|
||||||
systemd.network.enable = true;
|
|
||||||
networking.useDHCP = false;
|
|
||||||
|
|
||||||
systemd.network.networks."20-eth0" = {
|
|
||||||
matchConfig.Name = "eth0";
|
|
||||||
networkConfig = {
|
|
||||||
Address = "${ctr.ip}/${cfg.subnetPrefixLen}";
|
|
||||||
Gateway = cfg.vpnAddress;
|
|
||||||
DNS = [ cfg.vpnAddress ];
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
networking.hosts = cfg.containerHosts;
|
|
||||||
|
|
||||||
# DNS through VPN container (queries go through WG tunnel = no DNS leak)
|
|
||||||
networking.nameservers = [ cfg.vpnAddress ];
|
|
||||||
|
|
||||||
# Wait for actual VPN connectivity before network-online.target.
|
|
||||||
# Without this, services start before the VPN tunnel is ready and failures
|
|
||||||
# can't be reported to ntfy (no outbound connectivity yet).
|
|
||||||
systemd.services.wait-for-vpn = {
|
|
||||||
description = "Wait for VPN connectivity";
|
|
||||||
before = [ "network-online.target" ];
|
|
||||||
wantedBy = [ "network-online.target" ];
|
|
||||||
after = [ "systemd-networkd-wait-online.service" ];
|
|
||||||
serviceConfig.Type = "oneshot";
|
|
||||||
path = [ pkgs.iputils ];
|
|
||||||
script = ''
|
|
||||||
until ping -c1 -W2 1.1.1.1 >/dev/null 2>&1; do
|
|
||||||
echo "Waiting for VPN connectivity..."
|
|
||||||
sleep 1
|
|
||||||
done
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
# Trust the bridge interface (host reaches us directly for nginx)
|
|
||||||
networking.firewall.trustedInterfaces = [ "eth0" ];
|
|
||||||
|
|
||||||
# Disable host resolv.conf — we use our own networkd DNS config
|
|
||||||
networking.useHostResolvConf = false;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
mkContainerOrdering = name: _ctr: nameValuePair "container@${name}" {
|
|
||||||
after = [ "container@pia-vpn.service" ];
|
|
||||||
requires = [ "container@pia-vpn.service" ];
|
|
||||||
partOf = [ "container@pia-vpn.service" ];
|
|
||||||
};
|
|
||||||
in
|
|
||||||
{
|
|
||||||
config = mkIf cfg.enable {
|
|
||||||
containers = mapAttrs mkContainer cfg.containers;
|
|
||||||
systemd.services = mapAttrs' mkContainerOrdering cfg.containers;
|
|
||||||
};
|
|
||||||
}
|
|
||||||
@@ -1,232 +0,0 @@
|
|||||||
{ config, lib, allModules, ... }:
|
|
||||||
|
|
||||||
# VPN container: runs all PIA logic, acts as WireGuard gateway + NAT for service containers.
|
|
||||||
|
|
||||||
with lib;
|
|
||||||
|
|
||||||
let
|
|
||||||
cfg = config.pia-vpn;
|
|
||||||
scripts = import ./scripts.nix;
|
|
||||||
|
|
||||||
# Port forwarding derived state
|
|
||||||
forwardingContainers = filterAttrs (_: c: c.receiveForwardedPort != null) cfg.containers;
|
|
||||||
portForwarding = forwardingContainers != { };
|
|
||||||
forwardingContainerName = if portForwarding then head (attrNames forwardingContainers) else null;
|
|
||||||
forwardingContainer = if portForwarding then forwardingContainers.${forwardingContainerName} else null;
|
|
||||||
|
|
||||||
serverFile = "/var/lib/pia-vpn/server.json";
|
|
||||||
wgFile = "/var/lib/pia-vpn/wg.conf";
|
|
||||||
portRenewalFile = "/var/lib/pia-vpn/port-renewal.json";
|
|
||||||
proxy = "http://${cfg.hostAddress}:${toString cfg.proxyPort}";
|
|
||||||
|
|
||||||
# DNAT/forwarding rules for port forwarding
|
|
||||||
dnatSetupScript = optionalString portForwarding (
|
|
||||||
let
|
|
||||||
fwd = forwardingContainer.receiveForwardedPort;
|
|
||||||
targetIp = forwardingContainer.ip;
|
|
||||||
dnatTarget = if fwd.port != null then "${targetIp}:${toString fwd.port}" else targetIp;
|
|
||||||
targetPort = if fwd.port != null then toString fwd.port else "$PORT";
|
|
||||||
tcpRules = optionalString (fwd.protocol == "tcp" || fwd.protocol == "both") ''
|
|
||||||
echo "Setting up TCP DNAT: port $PORT → ${targetIp}:${targetPort}"
|
|
||||||
iptables -t nat -A PREROUTING -i ${cfg.interfaceName} -p tcp --dport $PORT -j DNAT --to ${dnatTarget}
|
|
||||||
iptables -A FORWARD -i ${cfg.interfaceName} -d ${targetIp} -p tcp --dport ${targetPort} -j ACCEPT
|
|
||||||
'';
|
|
||||||
udpRules = optionalString (fwd.protocol == "udp" || fwd.protocol == "both") ''
|
|
||||||
echo "Setting up UDP DNAT: port $PORT → ${targetIp}:${targetPort}"
|
|
||||||
iptables -t nat -A PREROUTING -i ${cfg.interfaceName} -p udp --dport $PORT -j DNAT --to ${dnatTarget}
|
|
||||||
iptables -A FORWARD -i ${cfg.interfaceName} -d ${targetIp} -p udp --dport ${targetPort} -j ACCEPT
|
|
||||||
'';
|
|
||||||
onPortForwarded = optionalString (forwardingContainer.onPortForwarded != null) ''
|
|
||||||
TARGET_IP="${targetIp}"
|
|
||||||
export PORT TARGET_IP
|
|
||||||
echo "Running onPortForwarded hook for ${forwardingContainerName} (port=$PORT, target=$TARGET_IP)"
|
|
||||||
${forwardingContainer.onPortForwarded}
|
|
||||||
'';
|
|
||||||
in
|
|
||||||
''
|
|
||||||
if [ "$PORT" -lt 10000 ]; then
|
|
||||||
echo "ERROR: PIA assigned low port $PORT (< 10000), refusing to set up DNAT" >&2
|
|
||||||
else
|
|
||||||
${tcpRules}
|
|
||||||
${udpRules}
|
|
||||||
${onPortForwarded}
|
|
||||||
fi
|
|
||||||
''
|
|
||||||
);
|
|
||||||
in
|
|
||||||
{
|
|
||||||
config = mkIf cfg.enable {
|
|
||||||
# Give the container more time to boot (pia-vpn-setup retries can delay readiness)
|
|
||||||
systemd.services."container@pia-vpn".serviceConfig.TimeoutStartSec = mkForce "180s";
|
|
||||||
|
|
||||||
containers.pia-vpn = {
|
|
||||||
autoStart = true;
|
|
||||||
ephemeral = true;
|
|
||||||
privateNetwork = true;
|
|
||||||
hostBridge = cfg.bridgeName;
|
|
||||||
interfaces = [ cfg.interfaceName ];
|
|
||||||
|
|
||||||
bindMounts."/run/agenix" = {
|
|
||||||
hostPath = "/run/agenix";
|
|
||||||
isReadOnly = true;
|
|
||||||
};
|
|
||||||
|
|
||||||
config = { config, pkgs, lib, ... }:
|
|
||||||
let
|
|
||||||
scriptPkgs = with pkgs; [ wireguard-tools iproute2 curl jq iptables coreutils ];
|
|
||||||
in
|
|
||||||
{
|
|
||||||
imports = allModules;
|
|
||||||
|
|
||||||
networking.hosts = cfg.containerHosts;
|
|
||||||
|
|
||||||
# Static IP on bridge — no gateway (VPN container routes via WG only)
|
|
||||||
networking.useNetworkd = true;
|
|
||||||
systemd.network.enable = true;
|
|
||||||
networking.useDHCP = false;
|
|
||||||
|
|
||||||
systemd.network.networks."20-eth0" = {
|
|
||||||
matchConfig.Name = "eth0";
|
|
||||||
networkConfig = {
|
|
||||||
Address = "${cfg.vpnAddress}/${cfg.subnetPrefixLen}";
|
|
||||||
DHCPServer = false;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
# Ignore WG interface for wait-online (it's configured manually, not by networkd)
|
|
||||||
systemd.network.wait-online.ignoredInterfaces = [ cfg.interfaceName ];
|
|
||||||
|
|
||||||
# Route ntfy alerts through the host proxy (VPN container has no gateway on eth0)
|
|
||||||
ntfy-alerts.curlExtraArgs = "--proxy http://${cfg.hostAddress}:${toString cfg.proxyPort}";
|
|
||||||
|
|
||||||
# Enable forwarding so bridge traffic can go through WG
|
|
||||||
boot.kernel.sysctl."net.ipv4.ip_forward" = 1;
|
|
||||||
|
|
||||||
# Trust bridge interface
|
|
||||||
networking.firewall.trustedInterfaces = [ "eth0" ];
|
|
||||||
|
|
||||||
# DNS: use systemd-resolved listening on bridge IP so service containers
|
|
||||||
# can use VPN container as DNS server (queries go through WG tunnel = no DNS leak)
|
|
||||||
services.resolved = {
|
|
||||||
enable = true;
|
|
||||||
settings.Resolve.DNSStubListenerExtra = cfg.vpnAddress;
|
|
||||||
};
|
|
||||||
|
|
||||||
# Don't use host resolv.conf — resolved manages DNS
|
|
||||||
networking.useHostResolvConf = false;
|
|
||||||
|
|
||||||
# State directory for PIA config files
|
|
||||||
systemd.tmpfiles.rules = [
|
|
||||||
"d /var/lib/pia-vpn 0700 root root -"
|
|
||||||
];
|
|
||||||
|
|
||||||
# PIA VPN setup service — does all the PIA auth, WG config, and NAT setup
|
|
||||||
systemd.services.pia-vpn-setup = {
|
|
||||||
description = "PIA VPN WireGuard Setup";
|
|
||||||
|
|
||||||
wants = [ "network-online.target" ];
|
|
||||||
after = [ "network.target" "network-online.target" "systemd-networkd.service" ];
|
|
||||||
wantedBy = [ "multi-user.target" ];
|
|
||||||
|
|
||||||
path = scriptPkgs;
|
|
||||||
|
|
||||||
serviceConfig = {
|
|
||||||
Type = "simple";
|
|
||||||
Restart = "always";
|
|
||||||
RestartSec = "10s";
|
|
||||||
RuntimeMaxSec = "30d";
|
|
||||||
};
|
|
||||||
|
|
||||||
script = ''
|
|
||||||
set -euo pipefail
|
|
||||||
${scripts.scriptCommon}
|
|
||||||
|
|
||||||
trap 'cleanupVpn ${cfg.interfaceName}' EXIT
|
|
||||||
cleanupVpn ${cfg.interfaceName}
|
|
||||||
|
|
||||||
proxy="${proxy}"
|
|
||||||
|
|
||||||
# 1. Authenticate with PIA via proxy (VPN container has no internet yet)
|
|
||||||
echo "Choosing PIA server in region '${cfg.serverLocation}'..."
|
|
||||||
choosePIAServer '${cfg.serverLocation}'
|
|
||||||
|
|
||||||
echo "Fetching PIA authentication token..."
|
|
||||||
fetchPIAToken
|
|
||||||
|
|
||||||
# 2. Generate WG keys and authorize with PIA server
|
|
||||||
echo "Generating WireGuard keypair..."
|
|
||||||
generateWireguardKey
|
|
||||||
|
|
||||||
echo "Authorizing key with PIA server $WG_HOSTNAME..."
|
|
||||||
authorizeKeyWithPIAServer
|
|
||||||
|
|
||||||
# 3. Configure WG interface (already created by host and moved into our namespace)
|
|
||||||
echo "Configuring WireGuard interface ${cfg.interfaceName}..."
|
|
||||||
writeWireguardQuickFile '${wgFile}' ${toString cfg.wireguardListenPort}
|
|
||||||
writeChosenServerToFile '${serverFile}'
|
|
||||||
connectToServer '${wgFile}' '${cfg.interfaceName}'
|
|
||||||
|
|
||||||
# 4. Default route through WG
|
|
||||||
ip route replace default dev ${cfg.interfaceName}
|
|
||||||
echo "Default route set through ${cfg.interfaceName}"
|
|
||||||
|
|
||||||
# 5. NAT: masquerade bridge → WG (so service containers' traffic appears to come from VPN IP)
|
|
||||||
echo "Setting up NAT masquerade..."
|
|
||||||
iptables -t nat -A POSTROUTING -o ${cfg.interfaceName} -j MASQUERADE
|
|
||||||
iptables -A FORWARD -i eth0 -o ${cfg.interfaceName} -j ACCEPT
|
|
||||||
iptables -A FORWARD -i ${cfg.interfaceName} -o eth0 -m state --state RELATED,ESTABLISHED -j ACCEPT
|
|
||||||
|
|
||||||
${optionalString portForwarding ''
|
|
||||||
# 6. Port forwarding setup
|
|
||||||
echo "Reserving port forward..."
|
|
||||||
reservePortForward
|
|
||||||
writePortRenewalFile '${portRenewalFile}'
|
|
||||||
|
|
||||||
# First bindPort triggers actual port allocation
|
|
||||||
echo "Binding port $PORT..."
|
|
||||||
refreshPIAPort
|
|
||||||
|
|
||||||
echo "PIA assigned port: $PORT"
|
|
||||||
|
|
||||||
# DNAT rules to forward PIA port to target container
|
|
||||||
${dnatSetupScript}
|
|
||||||
''}
|
|
||||||
|
|
||||||
echo "PIA VPN setup complete"
|
|
||||||
exec sleep infinity
|
|
||||||
'';
|
|
||||||
|
|
||||||
};
|
|
||||||
|
|
||||||
# Port refresh timer (every 10 min) — keeps PIA port forwarding alive
|
|
||||||
systemd.services.pia-vpn-port-refresh = mkIf portForwarding {
|
|
||||||
description = "PIA VPN Port Forward Refresh";
|
|
||||||
after = [ "pia-vpn-setup.service" ];
|
|
||||||
requires = [ "pia-vpn-setup.service" ];
|
|
||||||
|
|
||||||
path = scriptPkgs;
|
|
||||||
|
|
||||||
serviceConfig.Type = "oneshot";
|
|
||||||
|
|
||||||
script = ''
|
|
||||||
set -euo pipefail
|
|
||||||
${scripts.scriptCommon}
|
|
||||||
loadChosenServerFromFile '${serverFile}'
|
|
||||||
readPortRenewalFile '${portRenewalFile}'
|
|
||||||
echo "Refreshing PIA port forward..."
|
|
||||||
refreshPIAPort
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
systemd.timers.pia-vpn-port-refresh = mkIf portForwarding {
|
|
||||||
partOf = [ "pia-vpn-port-refresh.service" ];
|
|
||||||
wantedBy = [ "timers.target" ];
|
|
||||||
timerConfig = {
|
|
||||||
OnCalendar = "*:0/10";
|
|
||||||
RandomizedDelaySec = "1m";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
}
|
|
||||||
363
common/network/pia-wireguard.nix
Normal file
363
common/network/pia-wireguard.nix
Normal file
@@ -0,0 +1,363 @@
|
|||||||
|
{ config, lib, pkgs, ... }:
|
||||||
|
|
||||||
|
# Server list:
|
||||||
|
# https://serverlist.piaservers.net/vpninfo/servers/v6
|
||||||
|
# Reference materials:
|
||||||
|
# https://github.com/pia-foss/manual-connections
|
||||||
|
# https://github.com/thrnz/docker-wireguard-pia/blob/master/extra/wg-gen.sh
|
||||||
|
|
||||||
|
# TODO handle potential errors (or at least print status, success, and failures to the console)
|
||||||
|
# TODO parameterize names of systemd services so that multiple wg VPNs could coexist in theory easier
|
||||||
|
# TODO implement this module such that the wireguard VPN doesn't have to live in a container
|
||||||
|
# TODO don't add forward rules if the PIA port is the same as cfg.forwardedPort
|
||||||
|
# TODO verify signatures of PIA responses
|
||||||
|
# TODO `RuntimeMaxSec = "30d";` for pia-vpn-wireguard-init isn't allowed per the systemd logs. Find alternative.
|
||||||
|
|
||||||
|
with builtins;
|
||||||
|
with lib;
|
||||||
|
|
||||||
|
let
|
||||||
|
cfg = config.pia.wireguard;
|
||||||
|
|
||||||
|
getPIAToken = ''
|
||||||
|
PIA_USER=`sed '1q;d' /run/agenix/pia-login.conf`
|
||||||
|
PIA_PASS=`sed '2q;d' /run/agenix/pia-login.conf`
|
||||||
|
# PIA_TOKEN only lasts 24hrs
|
||||||
|
PIA_TOKEN=`curl -s -u "$PIA_USER:$PIA_PASS" https://www.privateinternetaccess.com/gtoken/generateToken | jq -r '.token'`
|
||||||
|
'';
|
||||||
|
|
||||||
|
chooseWireguardServer = ''
|
||||||
|
servers=$(mktemp)
|
||||||
|
servers_json=$(mktemp)
|
||||||
|
curl -s "https://serverlist.piaservers.net/vpninfo/servers/v6" > "$servers"
|
||||||
|
# extract json part only
|
||||||
|
head -n 1 "$servers" | tr -d '\n' > "$servers_json"
|
||||||
|
|
||||||
|
echo "Available location ids:" && jq '.regions | .[] | {name, id, port_forward}' "$servers_json"
|
||||||
|
|
||||||
|
# Some locations have multiple servers available. Pick a random one.
|
||||||
|
totalservers=$(jq -r '.regions | .[] | select(.id=="'${cfg.serverLocation}'") | .servers.wg | length' "$servers_json")
|
||||||
|
if ! [[ "$totalservers" =~ ^[0-9]+$ ]] || [ "$totalservers" -eq 0 ] 2>/dev/null; then
|
||||||
|
echo "Location \"${cfg.serverLocation}\" not found."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
serverindex=$(( RANDOM % totalservers))
|
||||||
|
WG_HOSTNAME=$(jq -r '.regions | .[] | select(.id=="'${cfg.serverLocation}'") | .servers.wg | .['$serverindex'].cn' "$servers_json")
|
||||||
|
WG_SERVER_IP=$(jq -r '.regions | .[] | select(.id=="'${cfg.serverLocation}'") | .servers.wg | .['$serverindex'].ip' "$servers_json")
|
||||||
|
WG_SERVER_PORT=$(jq -r '.groups.wg | .[0] | .ports | .[0]' "$servers_json")
|
||||||
|
|
||||||
|
# write chosen server
|
||||||
|
rm -f /tmp/${cfg.interfaceName}-server.conf
|
||||||
|
touch /tmp/${cfg.interfaceName}-server.conf
|
||||||
|
chmod 700 /tmp/${cfg.interfaceName}-server.conf
|
||||||
|
echo "$WG_HOSTNAME" >> /tmp/${cfg.interfaceName}-server.conf
|
||||||
|
echo "$WG_SERVER_IP" >> /tmp/${cfg.interfaceName}-server.conf
|
||||||
|
echo "$WG_SERVER_PORT" >> /tmp/${cfg.interfaceName}-server.conf
|
||||||
|
|
||||||
|
rm $servers_json $servers
|
||||||
|
'';
|
||||||
|
|
||||||
|
getChosenWireguardServer = ''
|
||||||
|
WG_HOSTNAME=`sed '1q;d' /tmp/${cfg.interfaceName}-server.conf`
|
||||||
|
WG_SERVER_IP=`sed '2q;d' /tmp/${cfg.interfaceName}-server.conf`
|
||||||
|
WG_SERVER_PORT=`sed '3q;d' /tmp/${cfg.interfaceName}-server.conf`
|
||||||
|
'';
|
||||||
|
|
||||||
|
refreshPIAPort = ''
|
||||||
|
${getChosenWireguardServer}
|
||||||
|
signature=`sed '1q;d' /tmp/${cfg.interfaceName}-port-renewal`
|
||||||
|
payload=`sed '2q;d' /tmp/${cfg.interfaceName}-port-renewal`
|
||||||
|
bind_port_response=`curl -Gs -m 5 --connect-to "$WG_HOSTNAME::$WG_SERVER_IP:" --cacert "${./ca.rsa.4096.crt}" --data-urlencode "payload=$payload" --data-urlencode "signature=$signature" "https://$WG_HOSTNAME:19999/bindPort"`
|
||||||
|
'';
|
||||||
|
|
||||||
|
portForwarding = cfg.forwardPortForTransmission || cfg.forwardedPort != null;
|
||||||
|
|
||||||
|
containerServiceName = "container@${config.vpn-container.containerName}.service";
|
||||||
|
in
|
||||||
|
{
|
||||||
|
options.pia.wireguard = {
|
||||||
|
enable = mkEnableOption "Enable private internet access";
|
||||||
|
badPortForwardPorts = mkOption {
|
||||||
|
type = types.listOf types.port;
|
||||||
|
description = ''
|
||||||
|
Ports that will not be accepted from PIA.
|
||||||
|
If PIA assigns a port from this list, the connection is aborted since we cannot ask for a different port.
|
||||||
|
This is used to guarantee we are not assigned a port that is used by a service we do not want exposed.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
wireguardListenPort = mkOption {
|
||||||
|
type = types.port;
|
||||||
|
description = "The port wireguard listens on for this VPN connection";
|
||||||
|
default = 51820;
|
||||||
|
};
|
||||||
|
serverLocation = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "swiss";
|
||||||
|
};
|
||||||
|
interfaceName = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "piaw";
|
||||||
|
};
|
||||||
|
forwardedPort = mkOption {
|
||||||
|
type = types.nullOr types.port;
|
||||||
|
description = "The port to redirect port forwarded TCP VPN traffic too";
|
||||||
|
default = null;
|
||||||
|
};
|
||||||
|
forwardPortForTransmission = mkEnableOption "PIA port forwarding for transmission should be performed.";
|
||||||
|
};
|
||||||
|
|
||||||
|
config = mkIf cfg.enable {
|
||||||
|
assertions = [
|
||||||
|
{
|
||||||
|
assertion = cfg.forwardPortForTransmission != (cfg.forwardedPort != null);
|
||||||
|
message = ''
|
||||||
|
The PIA forwarded port cannot simultaneously be used by transmission and redirected to another port.
|
||||||
|
'';
|
||||||
|
}
|
||||||
|
];
|
||||||
|
|
||||||
|
# mounts used to pass the connection parameters to the container
|
||||||
|
# the container doesn't have internet until it uses these parameters so it cannot fetch them itself
|
||||||
|
vpn-container.mounts = [
|
||||||
|
"/tmp/${cfg.interfaceName}.conf"
|
||||||
|
"/tmp/${cfg.interfaceName}-server.conf"
|
||||||
|
"/tmp/${cfg.interfaceName}-address.conf"
|
||||||
|
];
|
||||||
|
|
||||||
|
# The container takes ownership of the wireguard interface on its startup
|
||||||
|
containers.vpn.interfaces = [ cfg.interfaceName ];
|
||||||
|
|
||||||
|
# TODO: while this is much better than "loose" networking, it seems to have issues with firewall restarts
|
||||||
|
# allow traffic for wireguard interface to pass since wireguard trips up rpfilter
|
||||||
|
# networking.firewall = {
|
||||||
|
# extraCommands = ''
|
||||||
|
# ip46tables -t raw -I nixos-fw-rpfilter -p udp -m udp --sport ${toString cfg.wireguardListenPort} -j RETURN
|
||||||
|
# ip46tables -t raw -I nixos-fw-rpfilter -p udp -m udp --dport ${toString cfg.wireguardListenPort} -j RETURN
|
||||||
|
# '';
|
||||||
|
# extraStopCommands = ''
|
||||||
|
# ip46tables -t raw -D nixos-fw-rpfilter -p udp -m udp --sport ${toString cfg.wireguardListenPort} -j RETURN || true
|
||||||
|
# ip46tables -t raw -D nixos-fw-rpfilter -p udp -m udp --dport ${toString cfg.wireguardListenPort} -j RETURN || true
|
||||||
|
# '';
|
||||||
|
# };
|
||||||
|
networking.firewall.checkReversePath = "loose";
|
||||||
|
|
||||||
|
systemd.services.pia-vpn-wireguard-init = {
|
||||||
|
description = "Creates PIA VPN Wireguard Interface";
|
||||||
|
|
||||||
|
wants = [ "network-online.target" ];
|
||||||
|
after = [ "network.target" "network-online.target" ];
|
||||||
|
before = [ containerServiceName ];
|
||||||
|
requiredBy = [ containerServiceName ];
|
||||||
|
partOf = [ containerServiceName ];
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
|
||||||
|
path = with pkgs; [ wireguard-tools jq curl iproute2 iputils ];
|
||||||
|
|
||||||
|
serviceConfig = {
|
||||||
|
Type = "oneshot";
|
||||||
|
RemainAfterExit = true;
|
||||||
|
|
||||||
|
# restart once a month; PIA forwarded port expires after two months
|
||||||
|
# because the container is "PartOf" this unit, it gets restarted too
|
||||||
|
RuntimeMaxSec = "30d";
|
||||||
|
};
|
||||||
|
|
||||||
|
script = ''
|
||||||
|
echo Waiting for internet...
|
||||||
|
while ! ping -c 1 -W 1 1.1.1.1; do
|
||||||
|
sleep 1
|
||||||
|
done
|
||||||
|
|
||||||
|
# Prepare to connect by generating wg secrets and auth'ing with PIA since the container
|
||||||
|
# cannot do without internet to start with. NAT'ing the host's internet would address this
|
||||||
|
# issue but is not ideal because then leaking network outside of the VPN is more likely.
|
||||||
|
|
||||||
|
${chooseWireguardServer}
|
||||||
|
|
||||||
|
${getPIAToken}
|
||||||
|
|
||||||
|
# generate wireguard keys
|
||||||
|
privKey=$(wg genkey)
|
||||||
|
pubKey=$(echo "$privKey" | wg pubkey)
|
||||||
|
|
||||||
|
# authorize our WG keys with the PIA server we are about to connect to
|
||||||
|
wireguard_json=`curl -s -G --connect-to "$WG_HOSTNAME::$WG_SERVER_IP:" --cacert "${./ca.rsa.4096.crt}" --data-urlencode "pt=$PIA_TOKEN" --data-urlencode "pubkey=$pubKey" https://$WG_HOSTNAME:$WG_SERVER_PORT/addKey`
|
||||||
|
|
||||||
|
# create wg-quick config file
|
||||||
|
rm -f /tmp/${cfg.interfaceName}.conf /tmp/${cfg.interfaceName}-address.conf
|
||||||
|
touch /tmp/${cfg.interfaceName}.conf /tmp/${cfg.interfaceName}-address.conf
|
||||||
|
chmod 700 /tmp/${cfg.interfaceName}.conf /tmp/${cfg.interfaceName}-address.conf
|
||||||
|
echo "
|
||||||
|
[Interface]
|
||||||
|
# Address = $(echo "$wireguard_json" | jq -r '.peer_ip')
|
||||||
|
PrivateKey = $privKey
|
||||||
|
ListenPort = ${toString cfg.wireguardListenPort}
|
||||||
|
[Peer]
|
||||||
|
PersistentKeepalive = 25
|
||||||
|
PublicKey = $(echo "$wireguard_json" | jq -r '.server_key')
|
||||||
|
AllowedIPs = 0.0.0.0/0
|
||||||
|
Endpoint = $WG_SERVER_IP:$(echo "$wireguard_json" | jq -r '.server_port')
|
||||||
|
" >> /tmp/${cfg.interfaceName}.conf
|
||||||
|
|
||||||
|
# create file storing the VPN ip address PIA assigned to us
|
||||||
|
echo "$wireguard_json" | jq -r '.peer_ip' >> /tmp/${cfg.interfaceName}-address.conf
|
||||||
|
|
||||||
|
# Create wg interface now so it inherits from the namespace with internet access
|
||||||
|
# the container will handle actually connecting the interface since that info is
|
||||||
|
# not preserved upon moving into the container's networking namespace
|
||||||
|
# Roughly following this guide https://www.wireguard.com/netns/#ordinary-containerization
|
||||||
|
[[ -z $(ip link show dev ${cfg.interfaceName} 2>/dev/null) ]] || exit
|
||||||
|
ip link add ${cfg.interfaceName} type wireguard
|
||||||
|
'';
|
||||||
|
|
||||||
|
preStop = ''
|
||||||
|
# cleanup wireguard interface
|
||||||
|
ip link del ${cfg.interfaceName}
|
||||||
|
rm -f /tmp/${cfg.interfaceName}.conf /tmp/${cfg.interfaceName}-address.conf
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
vpn-container.config.systemd.services.pia-vpn-wireguard = {
|
||||||
|
description = "Initializes the PIA VPN WireGuard Tunnel";
|
||||||
|
|
||||||
|
wants = [ "network-online.target" ];
|
||||||
|
after = [ "network.target" "network-online.target" ];
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
|
||||||
|
path = with pkgs; [ wireguard-tools iproute2 curl jq iptables ];
|
||||||
|
|
||||||
|
serviceConfig = {
|
||||||
|
Type = "oneshot";
|
||||||
|
RemainAfterExit = true;
|
||||||
|
};
|
||||||
|
|
||||||
|
script = ''
|
||||||
|
# pseudo calls wg-quick
|
||||||
|
# Near equivalent of "wg-quick up /tmp/${cfg.interfaceName}.conf"
|
||||||
|
# cannot actually call wg-quick because the interface has to be already
|
||||||
|
# created before the container taken ownership of the interface
|
||||||
|
# Thus, assumes wg interface was already created:
|
||||||
|
# ip link add ${cfg.interfaceName} type wireguard
|
||||||
|
|
||||||
|
${getChosenWireguardServer}
|
||||||
|
|
||||||
|
myaddress=`cat /tmp/${cfg.interfaceName}-address.conf`
|
||||||
|
|
||||||
|
wg setconf ${cfg.interfaceName} /tmp/${cfg.interfaceName}.conf
|
||||||
|
ip -4 address add $myaddress dev ${cfg.interfaceName}
|
||||||
|
ip link set mtu 1420 up dev ${cfg.interfaceName}
|
||||||
|
wg set ${cfg.interfaceName} fwmark ${toString cfg.wireguardListenPort}
|
||||||
|
ip -4 route add 0.0.0.0/0 dev ${cfg.interfaceName} table ${toString cfg.wireguardListenPort}
|
||||||
|
|
||||||
|
# TODO is this needed?
|
||||||
|
ip -4 rule add not fwmark ${toString cfg.wireguardListenPort} table ${toString cfg.wireguardListenPort}
|
||||||
|
ip -4 rule add table main suppress_prefixlength 0
|
||||||
|
|
||||||
|
# The rest of the script is only for only for port forwarding skip if not needed
|
||||||
|
if [ ${boolToString portForwarding} == false ]; then exit 0; fi
|
||||||
|
|
||||||
|
# Reserve port
|
||||||
|
${getPIAToken}
|
||||||
|
payload_and_signature=`curl -s -m 5 --connect-to "$WG_HOSTNAME::$WG_SERVER_IP:" --cacert "${./ca.rsa.4096.crt}" -G --data-urlencode "token=$PIA_TOKEN" "https://$WG_HOSTNAME:19999/getSignature"`
|
||||||
|
signature=$(echo "$payload_and_signature" | jq -r '.signature')
|
||||||
|
payload=$(echo "$payload_and_signature" | jq -r '.payload')
|
||||||
|
port=$(echo "$payload" | base64 -d | jq -r '.port')
|
||||||
|
|
||||||
|
# Check if the port is acceptable
|
||||||
|
notallowed=(${concatStringsSep " " (map toString cfg.badPortForwardPorts)})
|
||||||
|
if [[ " ''${notallowed[*]} " =~ " $port " ]]; then
|
||||||
|
# the port PIA assigned is not allowed, kill the connection
|
||||||
|
wg-quick down /tmp/${cfg.interfaceName}.conf
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# write reserved port to file readable for all users
|
||||||
|
echo $port > /tmp/${cfg.interfaceName}-port
|
||||||
|
chmod 644 /tmp/${cfg.interfaceName}-port
|
||||||
|
|
||||||
|
# write payload and signature info needed to allow refreshing allocated forwarded port
|
||||||
|
rm -f /tmp/${cfg.interfaceName}-port-renewal
|
||||||
|
touch /tmp/${cfg.interfaceName}-port-renewal
|
||||||
|
chmod 700 /tmp/${cfg.interfaceName}-port-renewal
|
||||||
|
echo $signature >> /tmp/${cfg.interfaceName}-port-renewal
|
||||||
|
echo $payload >> /tmp/${cfg.interfaceName}-port-renewal
|
||||||
|
|
||||||
|
# Block all traffic from VPN interface except for traffic that is from the forwarded port
|
||||||
|
iptables -I nixos-fw -p tcp --dport $port -j nixos-fw-accept -i ${cfg.interfaceName}
|
||||||
|
iptables -I nixos-fw -p udp --dport $port -j nixos-fw-accept -i ${cfg.interfaceName}
|
||||||
|
|
||||||
|
# The first port refresh triggers the port to be actually allocated
|
||||||
|
${refreshPIAPort}
|
||||||
|
|
||||||
|
${optionalString (cfg.forwardedPort != null) ''
|
||||||
|
# redirect the fowarded port
|
||||||
|
iptables -A INPUT -i ${cfg.interfaceName} -p tcp --dport $port -j ACCEPT
|
||||||
|
iptables -A INPUT -i ${cfg.interfaceName} -p udp --dport $port -j ACCEPT
|
||||||
|
iptables -A INPUT -i ${cfg.interfaceName} -p tcp --dport ${toString cfg.forwardedPort} -j ACCEPT
|
||||||
|
iptables -A INPUT -i ${cfg.interfaceName} -p udp --dport ${toString cfg.forwardedPort} -j ACCEPT
|
||||||
|
iptables -A PREROUTING -t nat -i ${cfg.interfaceName} -p tcp --dport $port -j REDIRECT --to-port ${toString cfg.forwardedPort}
|
||||||
|
iptables -A PREROUTING -t nat -i ${cfg.interfaceName} -p udp --dport $port -j REDIRECT --to-port ${toString cfg.forwardedPort}
|
||||||
|
''}
|
||||||
|
|
||||||
|
${optionalString cfg.forwardPortForTransmission ''
|
||||||
|
# assumes no auth needed for transmission
|
||||||
|
curlout=$(curl localhost:9091/transmission/rpc 2>/dev/null)
|
||||||
|
regex='X-Transmission-Session-Id\: (\w*)'
|
||||||
|
if [[ $curlout =~ $regex ]]; then
|
||||||
|
sessionId=''${BASH_REMATCH[1]}
|
||||||
|
else
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# set the port in transmission
|
||||||
|
data='{"method": "session-set", "arguments": { "peer-port" :'$port' } }'
|
||||||
|
curl http://localhost:9091/transmission/rpc -d "$data" -H "X-Transmission-Session-Id: $sessionId"
|
||||||
|
''}
|
||||||
|
'';
|
||||||
|
|
||||||
|
preStop = ''
|
||||||
|
wg-quick down /tmp/${cfg.interfaceName}.conf
|
||||||
|
|
||||||
|
# The rest of the script is only for only for port forwarding skip if not needed
|
||||||
|
if [ ${boolToString portForwarding} == false ]; then exit 0; fi
|
||||||
|
|
||||||
|
${optionalString (cfg.forwardedPort != null) ''
|
||||||
|
# stop redirecting the forwarded port
|
||||||
|
iptables -D INPUT -i ${cfg.interfaceName} -p tcp --dport $port -j ACCEPT
|
||||||
|
iptables -D INPUT -i ${cfg.interfaceName} -p udp --dport $port -j ACCEPT
|
||||||
|
iptables -D INPUT -i ${cfg.interfaceName} -p tcp --dport ${toString cfg.forwardedPort} -j ACCEPT
|
||||||
|
iptables -D INPUT -i ${cfg.interfaceName} -p udp --dport ${toString cfg.forwardedPort} -j ACCEPT
|
||||||
|
iptables -D PREROUTING -t nat -i ${cfg.interfaceName} -p tcp --dport $port -j REDIRECT --to-port ${toString cfg.forwardedPort}
|
||||||
|
iptables -D PREROUTING -t nat -i ${cfg.interfaceName} -p udp --dport $port -j REDIRECT --to-port ${toString cfg.forwardedPort}
|
||||||
|
''}
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
vpn-container.config.systemd.services.pia-vpn-wireguard-forward-port = {
|
||||||
|
enable = portForwarding;
|
||||||
|
description = "PIA VPN WireGuard Tunnel Port Forwarding";
|
||||||
|
after = [ "pia-vpn-wireguard.service" ];
|
||||||
|
requires = [ "pia-vpn-wireguard.service" ];
|
||||||
|
|
||||||
|
path = with pkgs; [ curl ];
|
||||||
|
|
||||||
|
serviceConfig = {
|
||||||
|
Type = "oneshot";
|
||||||
|
};
|
||||||
|
|
||||||
|
script = refreshPIAPort;
|
||||||
|
};
|
||||||
|
|
||||||
|
vpn-container.config.systemd.timers.pia-vpn-wireguard-forward-port = {
|
||||||
|
enable = portForwarding;
|
||||||
|
partOf = [ "pia-vpn-wireguard-forward-port.service" ];
|
||||||
|
wantedBy = [ "timers.target" ];
|
||||||
|
timerConfig = {
|
||||||
|
OnCalendar = "*:0/10"; # 10 minutes
|
||||||
|
RandomizedDelaySec = "1m"; # vary by 1 min to give PIA servers some relief
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
age.secrets."pia-login.conf".file = ../../secrets/pia-login.age;
|
||||||
|
};
|
||||||
|
}
|
||||||
106
common/network/vpn.nix
Normal file
106
common/network/vpn.nix
Normal file
@@ -0,0 +1,106 @@
|
|||||||
|
# vpn-container: declares a NixOS container whose traffic goes through a PIA
# VPN connection — WireGuard by default, OpenVPN when `useOpenVPN` is set.
{ config, lib, allModules, ... }:

with lib;

let
  cfg = config.vpn-container;
in
{
  options.vpn-container = {
    enable = mkEnableOption "Enable VPN container";

    containerName = mkOption {
      type = types.str;
      default = "vpn";
      description = ''
        Name of the VPN container.
      '';
    };

    mounts = mkOption {
      type = types.listOf types.str;
      default = [ "/var/lib" ];
      # NOTE(review): option type is listOf str, but the example is a bare
      # string — probably meant [ "/home/example" ]; confirm before changing.
      example = "/home/example";
      description = ''
        List of mounts on the host to bind to the vpn container.
      '';
    };

    useOpenVPN = mkEnableOption "Uses OpenVPN instead of wireguard for PIA VPN connection";

    # Arbitrary NixOS configuration merged into the container.
    # (types.anything skips module-system type checking of the payload.)
    config = mkOption {
      type = types.anything;
      default = { };
      example = ''
        {
          services.nginx.enable = true;
        }
      '';
      description = ''
        NixOS config for the vpn container.
      '';
    };
  };

  config = mkIf cfg.enable {
    # WireGuard is the default transport; disabled when OpenVPN is chosen.
    pia.wireguard.enable = !cfg.useOpenVPN;
    pia.wireguard.forwardPortForTransmission = !cfg.useOpenVPN;

    containers.${cfg.containerName} = {
      ephemeral = true;
      autoStart = true;

      # Always bind the agenix secrets directory read-only, plus every
      # user-requested mount read-write.
      bindMounts = mkMerge ([{
        "/run/agenix" = {
          hostPath = "/run/agenix";
          isReadOnly = true;
        };
      }] ++ (lists.forEach cfg.mounts (mount:
        {
          "${mount}" = {
            hostPath = mount;
            isReadOnly = false;
          };
        }
      )));

      # OpenVPN needs a /dev/net/tun device inside the container.
      enableTun = cfg.useOpenVPN;
      privateNetwork = true;
      hostAddress = "172.16.100.1";
      localAddress = "172.16.100.2";

      config = {
        imports = allModules ++ [ cfg.config ];

        # networking.firewall.enable = mkForce false;
        networking.firewall.trustedInterfaces = [
          # completely trust internal interface to host
          "eth0"
        ];

        pia.openvpn.enable = cfg.useOpenVPN;
        pia.openvpn.server = "swiss.privacy.network"; # swiss vpn

        # TODO fix so it does run it's own resolver again
        # run it's own DNS resolver
        networking.useHostResolvConf = false;
        # services.resolved.enable = true;
        networking.nameservers = [ "1.1.1.1" "8.8.8.8" ];
      };
    };

    # load secrets the container needs
    age.secrets = config.containers.${cfg.containerName}.config.age.secrets;

    # forwarding for vpn container (only for OpenVPN)
    networking.nat.enable = mkIf cfg.useOpenVPN true;
    networking.nat.internalInterfaces = mkIf cfg.useOpenVPN [
      "ve-${cfg.containerName}"
    ];
    # NOTE(review): verify `networking.ip_forward` is a real option in the
    # NixOS version in use; forwarding is normally enabled via networking.nat
    # or boot.kernel.sysctl."net.ipv4.ip_forward".
    networking.ip_forward = mkIf cfg.useOpenVPN true;

    # assumes only one potential interface
    networking.usePredictableInterfaceNames = false;
    networking.nat.externalInterface = "eth0";
  };
}
|
||||||
187
common/network/vpnfailsafe.sh
Executable file
187
common/network/vpnfailsafe.sh
Executable file
@@ -0,0 +1,187 @@
|
|||||||
|
#!/usr/bin/env bash

# vpnfailsafe — OpenVPN up/down hook that pins server addresses in
# /etc/hosts, installs routes, and builds iptables chains so traffic cannot
# leak outside the VPN tunnel. Invoked by OpenVPN, which passes connection
# details via environment variables (script_type, remote_<i>, route_*, ...).

# -e: abort on unhandled errors; -E: ERR trap fires inside functions too;
# pipefail: a pipeline fails if any stage fails.
# NOTE(review): -u is absent — many OpenVPN-provided variables referenced
# below may legitimately be unset, so adding it would need a careful audit.
set -eEo pipefail
|
||||||
|
# $@ := ""
# Populate the route_networks/route_netmasks/route_gateways/route_metrics
# arrays from OpenVPN's numbered route_network_<i> (and friends) environment
# variables, applying defaults for the optional ones: host netmask, the VPN
# gateway, and metric 0.
set_route_vars() {
  local var_name
  for var_name in ${!route_network_*}; do
    local -i idx="${var_name#route_network_}"
    local net_var="route_network_$idx" mask_var="route_netmask_$idx"
    local gw_var="route_gateway_$idx" metric_var="route_metric_$idx"
    route_networks[idx]="${!net_var}"
    route_netmasks[idx]="${!mask_var:-255.255.255.255}"
    route_gateways[idx]="${!gw_var:-$route_vpn_gateway}"
    route_metrics[idx]="${!metric_var:-0}"
  done
}
|
||||||
|
|
||||||
|
# Configuration.
readonly prog="$(basename "$0")"
# RFC1918 + loopback ranges, used by the firewall rules below.
readonly private_nets="127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
declare -a remotes cnf_remote_domains cnf_remote_ips route_networks route_netmasks route_gateways route_metrics
# All remote_<i> endpoints from the OpenVPN config, in numeric order.
read -ra remotes <<<"$(env|grep -oP '^remote_[0-9]+=.*'|sort -n|cut -d= -f2|tr '\n' '\t')"
# Split remotes into domain names vs literal IPs:
#   %%*[0-9]   erases any entry ending in a digit (IP addresses) -> domains;
#   ##*[!0-9.]* erases any entry containing a non-digit/non-dot -> IPs.
# Entries erased to "" are dropped by word splitting in `read`.
read -ra cnf_remote_domains <<<"$(printf '%s\n' "${remotes[@]%%*[0-9]}"|sort -u|tr '\n' '\t')"
read -ra cnf_remote_ips <<<"$(printf '%s\n' "${remotes[@]##*[!0-9.]*}"|sort -u|tr '\n' '\t')"
set_route_vars
read -ra numbered_vars <<<"${!foreign_option_*} ${!proto_*} ${!remote_*} ${!remote_port_*} \
    ${!route_network_*} ${!route_netmask_*} ${!route_gateway_*} ${!route_metric_*}"
# Freeze every input the script depends on so later code cannot clobber it.
readonly numbered_vars "${numbered_vars[@]}" dev ifconfig_local ifconfig_netmask ifconfig_remote \
    route_net_gateway route_vpn_gateway script_type trusted_ip trusted_port untrusted_ip untrusted_port \
    remotes cnf_remote_domains cnf_remote_ips route_networks route_netmasks route_gateways route_metrics
# The server currently connected to (trusted_* once OpenVPN has authenticated).
readonly cur_remote_ip="${trusted_ip:-$untrusted_ip}"
readonly cur_port="${trusted_port:-$untrusted_port}"
|
||||||
|
|
||||||
|
# $@ := ""
# Resolve the configured remote domains via DNS and pin the IPv4 results in
# /etc/hosts between VPNFAILSAFE BEGIN/END markers, replacing any previous
# block. The file is rewritten via a temp copy that preserves its mode and is
# moved into place. IPv6 results (containing ':') are filtered out. If DNS
# resolution fails, /etc/hosts is left untouched.
update_hosts() {
  local resolved
  resolved="$(getent -s dns hosts "${cnf_remote_domains[@]}"|grep -v :)" || return 0
  local -r beg="# VPNFAILSAFE BEGIN" end="# VPNFAILSAFE END"
  {
    sed -e "/^$beg/,/^$end/d" /etc/hosts
    echo -e "$beg\\n$resolved\\n$end"
  } >/etc/hosts.vpnfailsafe
  chmod --reference=/etc/hosts /etc/hosts.vpnfailsafe
  mv /etc/hosts.vpnfailsafe /etc/hosts
}
|
||||||
|
|
||||||
|
# $@ := "up" | "down"
# "up": pin a direct (non-VPN) route to every known VPN server IP via the
# pre-existing default gateway, then cover the whole IPv4 space with two /1
# routes through the VPN gateway (more specific than any default route), and
# finally add any server-pushed routes. "down": remove all of the above.
# Existing routes are never duplicated or removed if absent.
update_routes() {
  local -a resolved_ips
  # IPs previously pinned into /etc/hosts by update_hosts (files source only,
  # so we see exactly what we wrote). ENOENT is a dummy lookup key used when
  # no domains are configured; `|| true` keeps a failed lookup non-fatal.
  read -ra resolved_ips <<<"$(getent -s files hosts "${cnf_remote_domains[@]:-ENOENT}"|cut -d' ' -f1|tr '\n' '\t' || true)"
  local -ar remote_ips=("$cur_remote_ip" "${resolved_ips[@]}" "${cnf_remote_ips[@]}")
  if [[ "$*" == up ]]; then
    # Keep the VPN server itself reachable outside the tunnel.
    for remote_ip in "${remote_ips[@]}"; do
      if [[ -n "$remote_ip" && -z "$(ip route show "$remote_ip")" ]]; then
        ip route add "$remote_ip" via "$route_net_gateway"
      fi
    done
    # 0.0.0.0/1 + 128.0.0.0/1 shadow the default route without replacing it.
    for net in 0.0.0.0/1 128.0.0.0/1; do
      if [[ -z "$(ip route show "$net")" ]]; then
        ip route add "$net" via "$route_vpn_gateway"
      fi
    done
    # Server-pushed routes collected by set_route_vars.
    # NOTE(review): assumes route_* indices are contiguous from 1 — true for
    # OpenVPN's numbering, but worth confirming if indices can have gaps.
    for i in $(seq 1 "${#route_networks[@]}"); do
      if [[ -z "$(ip route show "${route_networks[i]}/${route_netmasks[i]}")" ]]; then
        ip route add "${route_networks[i]}/${route_netmasks[i]}" \
          via "${route_gateways[i]}" metric "${route_metrics[i]}" dev "$dev"
      fi
    done
  elif [[ "$*" == down ]]; then
    for route in "${remote_ips[@]}" 0.0.0.0/1 128.0.0.0/1; do
      if [[ -n "$route" && -n "$(ip route show "$route")" ]]; then
        ip route del "$route"
      fi
    done
    for i in $(seq 1 "${#route_networks[@]}"); do
      if [[ -n "$(ip route show "${route_networks[i]}/${route_netmasks[i]}")" ]]; then
        ip route del "${route_networks[i]}/${route_netmasks[i]}"
      fi
    done
  fi
}
|
||||||
|
|
||||||
|
# $@ := ""
# (Re)build the VPNFAILSAFE_{INPUT,OUTPUT,FORWARD} iptables chains and hook
# them in front of the built-in chains. Policy: allow traffic to/from the
# configured VPN servers on the public NIC, reject DNS that is not going
# through the tunnel, pass traffic on the tunnel/loopback and private nets,
# and drop everything else.
update_firewall() {
  # $@ := "INPUT" | "OUTPUT" | "FORWARD"
  # Create chain VPNFAILSAFE_<chain> and jump to it from <chain>; if it
  # already exists, unhook (-D), flush (-F) and delete (-X) it first so the
  # rules below start from a clean slate.
  insert_chain() {
    if iptables -C "$*" -j "VPNFAILSAFE_$*" 2>/dev/null; then
      iptables -D "$*" -j "VPNFAILSAFE_$*"
      for opt in F X; do
        iptables -"$opt" "VPNFAILSAFE_$*"
      done
    fi
    iptables -N "VPNFAILSAFE_$*"
    iptables -I "$*" -j "VPNFAILSAFE_$*"
  }

  # $@ := "INPUT" | "OUTPUT"
  # Accept traffic to/from each configured VPN server on the public NIC.
  # Direction-dependent pieces: match on source (-s, -i) for INPUT, on
  # destination (-d, -o) for OUTPUT; only OUTPUT may open NEW conntrack state.
  accept_remotes() {
    case "$@" in
      INPUT) local -r icmp_type=reply io=i sd=s states="";;
      OUTPUT) local -r icmp_type=request io=o sd=d states=NEW,;;
    esac
    # NIC that currently routes to the VPN server (field 5 of `ip route show`).
    local -r public_nic="$(ip route show "$cur_remote_ip"|cut -d' ' -f5)"
    # Common rule suffix; ${public_nic:?} aborts if the NIC lookup failed.
    local -ar suf=(-m conntrack --ctstate "$states"RELATED,ESTABLISHED -"$io" "${public_nic:?}" -j ACCEPT)
    # Same rule shape for ping to/from a server, but RETURN instead of ACCEPT.
    icmp_rule() {
      iptables "$1" "$2" -p icmp --icmp-type "echo-$icmp_type" -"$sd" "$3" "${suf[@]/%ACCEPT/RETURN}"
    }
    for ((i=1; i <= ${#remotes[*]}; ++i)); do
      local port="remote_port_$i"
      local proto="proto_$i"
      # ${!proto%-client} maps e.g. "tcp-client" to "tcp".
      iptables -A "VPNFAILSAFE_$*" -p "${!proto%-client}" -"$sd" "${remotes[i-1]}" --"$sd"port "${!port}" "${suf[@]}"
      if ! icmp_rule -C "VPNFAILSAFE_$*" "${remotes[i-1]}" 2>/dev/null; then
        icmp_rule -A "VPNFAILSAFE_$*" "${remotes[i-1]}"
      fi
    done
    # Also allow the server we are actually connected to, unless an
    # equivalent ACCEPT rule is already present.
    if ! iptables -S|grep -q "^-A VPNFAILSAFE_$* .*-$sd $cur_remote_ip/32 .*-j ACCEPT$"; then
      for p in tcp udp; do
        iptables -A "VPNFAILSAFE_$*" -p "$p" -"$sd" "$cur_remote_ip" --"$sd"port "${cur_port}" "${suf[@]}"
      done
      icmp_rule -A "VPNFAILSAFE_$*" "$cur_remote_ip"
    fi
  }

  # $@ := "OUTPUT" | "FORWARD"
  # Reject DNS queries that would bypass the tunnel (prevents DNS leaks).
  reject_dns() {
    for proto in udp tcp; do
      iptables -A "VPNFAILSAFE_$*" -p "$proto" --dport 53 ! -o "$dev" -j REJECT
    done
  }

  # $@ := "INPUT" | "OUTPUT" | "FORWARD"
  # RETURN (i.e. fall through to the regular firewall) for tunnel, loopback
  # and private-network traffic; DROP private-net traffic arriving on the
  # tunnel itself. Uses ";;&" so every matching arm runs, in order.
  pass_private_nets() {
    case "$@" in
      INPUT) local -r io=i sd=s;;&
      OUTPUT|FORWARD) local -r io=o sd=d;;&
      INPUT) local -r vpn="${ifconfig_remote:-$ifconfig_local}/${ifconfig_netmask:-32}"
             iptables -A "VPNFAILSAFE_$*" -"$sd" "$vpn" -"$io" "$dev" -j RETURN
             for i in $(seq 1 "${#route_networks[@]}"); do
               iptables -A "VPNFAILSAFE_$*" -"$sd" "${route_networks[i]}/${route_netmasks[i]}" -"$io" "$dev" -j RETURN
             done;;&
      *) iptables -A "VPNFAILSAFE_$*" -"$sd" "$private_nets" ! -"$io" "$dev" -j RETURN;;&
      INPUT) iptables -A "VPNFAILSAFE_$*" -s "$private_nets" -i "$dev" -j DROP;;&
      *) for iface in "$dev" lo+; do
           iptables -A "VPNFAILSAFE_$*" -"$io" "$iface" -j RETURN
         done;;
    esac
  }

  # $@ := "INPUT" | "OUTPUT" | "FORWARD"
  # Final catch-all: anything not matched above is dropped.
  drop_other() {
    iptables -A "VPNFAILSAFE_$*" -j DROP
  }

  for chain in INPUT OUTPUT FORWARD; do
    insert_chain "$chain"
    [[ $chain == FORWARD ]] || accept_remotes "$chain"
    [[ $chain == INPUT ]] || reject_dns "$chain"
    pass_private_nets "$chain"
    drop_other "$chain"
  done
}
|
||||||
|
|
||||||
|
# $@ := ""
# Best-effort teardown on interruption: remove the routes this script added.
# FIX: this previously also called `update_resolv down`, but no update_resolv
# function exists anywhere in this script (only update_hosts / update_routes /
# update_firewall are defined — the upstream vpnfailsafe's resolv.conf
# handling was replaced by /etc/hosts pinning). The undefined call failed
# with "command not found" and aborted cleanup before routes were removed.
cleanup() {
  update_routes down
}
trap cleanup INT TERM
|
||||||
|
|
||||||
|
# $@ := line_number exit_code
# ERR-trap handler: report the offending source line (leading whitespace
# stripped) together with the exit status it produced, then tear everything
# down via cleanup. Diagnostics go to stderr.
err_msg() {
  local -r line="$1" status="$2"
  local offending
  offending="$(sed -n "$line,+0{s/^\\s*//;p}" "$0")"
  echo "$0:$line: \`$offending' returned $status" >&2
  cleanup
}
trap 'err_msg "$LINENO" "$?"' ERR
|
||||||
|
|
||||||
|
# $@ := ""
# Entry point. OpenVPN sets $script_type to "up" or "down"; when unset
# (e.g. manual invocation) we default to "down" so we only ever tear down.
main() {
  case "${script_type:-down}" in
    # Bring-up order matters: pin server IPs in /etc/hosts first, then
    # routes that reference them, then the firewall.
    up) for f in hosts routes firewall; do "update_$f" up; done;;
    # FIX: the down path previously also ran `update_resolv down`, but no
    # update_resolv function is defined in this script, so every "down"
    # event failed with "command not found" (tripping the ERR trap). Routes
    # are the only state that needs explicit rollback here.
    down) update_routes down;;
  esac
}

main
|
||||||
@@ -4,15 +4,15 @@ let
|
|||||||
builderUserName = "nix-builder";
|
builderUserName = "nix-builder";
|
||||||
|
|
||||||
builderRole = "nix-builder";
|
builderRole = "nix-builder";
|
||||||
builders = config.machines.withRole.${builderRole} or [];
|
builders = config.machines.withRole.${builderRole};
|
||||||
thisMachineIsABuilder = config.thisMachine.hasRole.${builderRole} or false;
|
thisMachineIsABuilder = config.thisMachine.hasRole.${builderRole};
|
||||||
|
|
||||||
# builders don't include themselves as a remote builder
|
# builders don't include themselves as a remote builder
|
||||||
otherBuilders = lib.filter (hostname: hostname != config.networking.hostName) builders;
|
otherBuilders = lib.filter (hostname: hostname != config.networking.hostName) builders;
|
||||||
in
|
in
|
||||||
lib.mkMerge [
|
lib.mkMerge [
|
||||||
# configure builder
|
# configure builder
|
||||||
(lib.mkIf (thisMachineIsABuilder && !config.boot.isContainer) {
|
(lib.mkIf thisMachineIsABuilder {
|
||||||
users.users.${builderUserName} = {
|
users.users.${builderUserName} = {
|
||||||
description = "Distributed Nix Build User";
|
description = "Distributed Nix Build User";
|
||||||
group = builderUserName;
|
group = builderUserName;
|
||||||
|
|||||||
@@ -1,64 +0,0 @@
|
|||||||
{ config, lib, pkgs, ... }:
|
|
||||||
|
|
||||||
let
|
|
||||||
cfg = config.ntfy-alerts;
|
|
||||||
in
|
|
||||||
{
|
|
||||||
options.ntfy-alerts = {
|
|
||||||
serverUrl = lib.mkOption {
|
|
||||||
type = lib.types.str;
|
|
||||||
default = "https://ntfy.neet.dev";
|
|
||||||
description = "Base URL of the ntfy server.";
|
|
||||||
};
|
|
||||||
|
|
||||||
topic = lib.mkOption {
|
|
||||||
type = lib.types.str;
|
|
||||||
default = "service-failures";
|
|
||||||
description = "ntfy topic to publish alerts to.";
|
|
||||||
};
|
|
||||||
|
|
||||||
curlExtraArgs = lib.mkOption {
|
|
||||||
type = lib.types.str;
|
|
||||||
default = "";
|
|
||||||
description = "Extra arguments to pass to curl (e.g. --proxy http://host:port).";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
config = lib.mkIf config.thisMachine.hasRole."ntfy" {
|
|
||||||
age.secrets.ntfy-token.file = ../secrets/ntfy-token.age;
|
|
||||||
|
|
||||||
systemd.services."ntfy-failure@" = {
|
|
||||||
description = "Send ntfy alert for failed unit %i";
|
|
||||||
wants = [ "network-online.target" ];
|
|
||||||
after = [ "network-online.target" ];
|
|
||||||
serviceConfig = {
|
|
||||||
Type = "oneshot";
|
|
||||||
EnvironmentFile = "/run/agenix/ntfy-token";
|
|
||||||
ExecStart = "${pkgs.writeShellScript "ntfy-failure-notify" ''
|
|
||||||
unit="$1"
|
|
||||||
${lib.getExe pkgs.curl} \
|
|
||||||
--fail --silent --show-error \
|
|
||||||
--max-time 30 --retry 3 \
|
|
||||||
${cfg.curlExtraArgs} \
|
|
||||||
-H "Authorization: Bearer $NTFY_TOKEN" \
|
|
||||||
-H "Title: Service failure on ${config.networking.hostName}" \
|
|
||||||
-H "Priority: high" \
|
|
||||||
-H "Tags: rotating_light" \
|
|
||||||
-d "Unit $unit failed at $(date +%c)" \
|
|
||||||
"${cfg.serverUrl}/${cfg.topic}"
|
|
||||||
''} %i";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
# Apply OnFailure to all services via a systemd drop-in
|
|
||||||
systemd.packages = [
|
|
||||||
(pkgs.runCommand "ntfy-on-failure-dropin" { } ''
|
|
||||||
mkdir -p $out/lib/systemd/system/service.d
|
|
||||||
cat > $out/lib/systemd/system/service.d/ntfy-on-failure.conf <<'EOF'
|
|
||||||
[Unit]
|
|
||||||
OnFailure=ntfy-failure@%p.service
|
|
||||||
EOF
|
|
||||||
'')
|
|
||||||
];
|
|
||||||
};
|
|
||||||
}
|
|
||||||
@@ -27,7 +27,6 @@ in
|
|||||||
../shell.nix
|
../shell.nix
|
||||||
hostConfig.inputs.home-manager.nixosModules.home-manager
|
hostConfig.inputs.home-manager.nixosModules.home-manager
|
||||||
hostConfig.inputs.nix-index-database.nixosModules.default
|
hostConfig.inputs.nix-index-database.nixosModules.default
|
||||||
hostConfig.inputs.agenix.nixosModules.default
|
|
||||||
];
|
];
|
||||||
|
|
||||||
nixpkgs.overlays = [
|
nixpkgs.overlays = [
|
||||||
@@ -117,13 +116,6 @@ in
|
|||||||
nix.settings.experimental-features = [ "nix-command" "flakes" ];
|
nix.settings.experimental-features = [ "nix-command" "flakes" ];
|
||||||
nix.settings.trusted-users = [ "googlebot" ];
|
nix.settings.trusted-users = [ "googlebot" ];
|
||||||
|
|
||||||
# Binary cache configuration (inherited from host's common/binary-cache.nix)
|
|
||||||
nix.settings.substituters = hostConfig.nix.settings.substituters;
|
|
||||||
nix.settings.trusted-public-keys = hostConfig.nix.settings.trusted-public-keys;
|
|
||||||
nix.settings.fallback = true;
|
|
||||||
nix.settings.netrc-file = config.age.secrets.attic-netrc.path;
|
|
||||||
age.secrets.attic-netrc.file = ../../secrets/attic-netrc.age;
|
|
||||||
|
|
||||||
# Make nixpkgs available in NIX_PATH and registry (like the NixOS ISO)
|
# Make nixpkgs available in NIX_PATH and registry (like the NixOS ISO)
|
||||||
# This allows `nix-shell -p`, `nix repl '<nixpkgs>'`, etc. to work
|
# This allows `nix-shell -p`, `nix repl '<nixpkgs>'`, etc. to work
|
||||||
nix.nixPath = [ "nixpkgs=${hostConfig.inputs.nixpkgs}" ];
|
nix.nixPath = [ "nixpkgs=${hostConfig.inputs.nixpkgs}" ];
|
||||||
|
|||||||
@@ -133,15 +133,8 @@ let
|
|||||||
};
|
};
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
config = mkMerge [
|
config = mkIf (cfg.enable && vmWorkspaces != { }) {
|
||||||
(mkIf (cfg.enable && vmWorkspaces != { }) {
|
# Convert VM workspace configs to microvm.nix format
|
||||||
# Convert VM workspace configs to microvm.nix format
|
microvm.vms = mapAttrs mkVmConfig vmWorkspaces;
|
||||||
microvm.vms = mapAttrs mkVmConfig vmWorkspaces;
|
};
|
||||||
})
|
|
||||||
|
|
||||||
# microvm.nixosModules.host enables KSM, but /sys is read-only in containers
|
|
||||||
(mkIf config.boot.isContainer {
|
|
||||||
hardware.ksm.enable = false;
|
|
||||||
})
|
|
||||||
];
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
{ config, lib, ... }:
|
{ config, lib, ... }:
|
||||||
|
|
||||||
{
|
{
|
||||||
config = lib.mkIf (config.thisMachine.hasRole."binary-cache" && !config.boot.isContainer) {
|
config = lib.mkIf (config.thisMachine.hasRole."binary-cache") {
|
||||||
services.atticd = {
|
services.atticd = {
|
||||||
enable = true;
|
enable = true;
|
||||||
environmentFile = config.age.secrets.atticd-credentials.path;
|
environmentFile = config.age.secrets.atticd-credentials.path;
|
||||||
@@ -49,7 +49,6 @@
|
|||||||
systemd.services.atticd = {
|
systemd.services.atticd = {
|
||||||
after = [ "postgresql.service" ];
|
after = [ "postgresql.service" ];
|
||||||
requires = [ "postgresql.service" ];
|
requires = [ "postgresql.service" ];
|
||||||
partOf = [ "postgresql.service" ];
|
|
||||||
serviceConfig = {
|
serviceConfig = {
|
||||||
DynamicUser = lib.mkForce false;
|
DynamicUser = lib.mkForce false;
|
||||||
User = "atticd";
|
User = "atticd";
|
||||||
|
|||||||
@@ -16,7 +16,5 @@
|
|||||||
./librechat.nix
|
./librechat.nix
|
||||||
./actualbudget.nix
|
./actualbudget.nix
|
||||||
./unifi.nix
|
./unifi.nix
|
||||||
./ntfy.nix
|
|
||||||
./gatus.nix
|
|
||||||
];
|
];
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,146 +0,0 @@
|
|||||||
{ lib, config, ... }:
|
|
||||||
|
|
||||||
let
|
|
||||||
cfg = config.services.gatus;
|
|
||||||
port = 31103;
|
|
||||||
in
|
|
||||||
{
|
|
||||||
options.services.gatus = {
|
|
||||||
hostname = lib.mkOption {
|
|
||||||
type = lib.types.str;
|
|
||||||
example = "status.example.com";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
config = lib.mkIf cfg.enable {
|
|
||||||
services.gatus = {
|
|
||||||
environmentFile = "/run/agenix/ntfy-token";
|
|
||||||
settings = {
|
|
||||||
storage = {
|
|
||||||
type = "sqlite";
|
|
||||||
path = "/var/lib/gatus/data.db";
|
|
||||||
};
|
|
||||||
|
|
||||||
web = {
|
|
||||||
address = "127.0.0.1";
|
|
||||||
port = port;
|
|
||||||
};
|
|
||||||
|
|
||||||
alerting.ntfy = {
|
|
||||||
url = "https://ntfy.neet.dev";
|
|
||||||
topic = "service-failures";
|
|
||||||
priority = 4;
|
|
||||||
default-alert = {
|
|
||||||
enabled = true;
|
|
||||||
failure-threshold = 3;
|
|
||||||
success-threshold = 2;
|
|
||||||
send-on-resolved = true;
|
|
||||||
};
|
|
||||||
token = "$NTFY_TOKEN";
|
|
||||||
};
|
|
||||||
|
|
||||||
endpoints = [
|
|
||||||
{
|
|
||||||
name = "Gitea";
|
|
||||||
group = "services";
|
|
||||||
url = "https://git.neet.dev";
|
|
||||||
interval = "5m";
|
|
||||||
conditions = [
|
|
||||||
"[STATUS] == 200"
|
|
||||||
];
|
|
||||||
alerts = [{ type = "ntfy"; }];
|
|
||||||
}
|
|
||||||
{
|
|
||||||
name = "The Lounge";
|
|
||||||
group = "services";
|
|
||||||
url = "https://irc.neet.dev";
|
|
||||||
interval = "5m";
|
|
||||||
conditions = [
|
|
||||||
"[STATUS] == 200"
|
|
||||||
];
|
|
||||||
alerts = [{ type = "ntfy"; }];
|
|
||||||
}
|
|
||||||
{
|
|
||||||
name = "ntfy";
|
|
||||||
group = "services";
|
|
||||||
url = "https://ntfy.neet.dev/v1/health";
|
|
||||||
interval = "5m";
|
|
||||||
conditions = [
|
|
||||||
"[STATUS] == 200"
|
|
||||||
];
|
|
||||||
alerts = [{ type = "ntfy"; }];
|
|
||||||
}
|
|
||||||
{
|
|
||||||
name = "Librechat";
|
|
||||||
group = "services";
|
|
||||||
url = "https://chat.neet.dev";
|
|
||||||
interval = "5m";
|
|
||||||
conditions = [
|
|
||||||
"[STATUS] == 200"
|
|
||||||
];
|
|
||||||
alerts = [{ type = "ntfy"; }];
|
|
||||||
}
|
|
||||||
{
|
|
||||||
name = "Owncast";
|
|
||||||
group = "services";
|
|
||||||
url = "https://live.neet.dev";
|
|
||||||
interval = "5m";
|
|
||||||
conditions = [
|
|
||||||
"[STATUS] == 200"
|
|
||||||
];
|
|
||||||
alerts = [{ type = "ntfy"; }];
|
|
||||||
}
|
|
||||||
{
|
|
||||||
name = "Nextcloud";
|
|
||||||
group = "services";
|
|
||||||
url = "https://neet.cloud";
|
|
||||||
interval = "5m";
|
|
||||||
conditions = [
|
|
||||||
"[STATUS] == any(200, 302)"
|
|
||||||
];
|
|
||||||
alerts = [{ type = "ntfy"; }];
|
|
||||||
}
|
|
||||||
{
|
|
||||||
name = "Element Web";
|
|
||||||
group = "services";
|
|
||||||
url = "https://chat.neet.space";
|
|
||||||
interval = "5m";
|
|
||||||
conditions = [
|
|
||||||
"[STATUS] == 200"
|
|
||||||
];
|
|
||||||
alerts = [{ type = "ntfy"; }];
|
|
||||||
}
|
|
||||||
{
|
|
||||||
name = "Mumble";
|
|
||||||
group = "services";
|
|
||||||
url = "tcp://voice.neet.space:23563";
|
|
||||||
interval = "5m";
|
|
||||||
conditions = [
|
|
||||||
"[CONNECTED] == true"
|
|
||||||
];
|
|
||||||
alerts = [{ type = "ntfy"; }];
|
|
||||||
}
|
|
||||||
{
|
|
||||||
name = "Navidrome";
|
|
||||||
group = "services";
|
|
||||||
url = "https://navidrome.neet.cloud";
|
|
||||||
interval = "5m";
|
|
||||||
conditions = [
|
|
||||||
"[STATUS] == 200"
|
|
||||||
];
|
|
||||||
alerts = [{ type = "ntfy"; }];
|
|
||||||
}
|
|
||||||
];
|
|
||||||
};
|
|
||||||
};
|
|
||||||
services.nginx.enable = true;
|
|
||||||
services.nginx.virtualHosts.${cfg.hostname} = {
|
|
||||||
enableACME = true;
|
|
||||||
forceSSL = true;
|
|
||||||
locations."/" = {
|
|
||||||
proxyPass = "http://127.0.0.1:${toString port}";
|
|
||||||
proxyWebsockets = true;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
}
|
|
||||||
@@ -1,38 +0,0 @@
|
|||||||
{ lib, config, ... }:
|
|
||||||
|
|
||||||
let
|
|
||||||
cfg = config.services.ntfy-sh;
|
|
||||||
in
|
|
||||||
{
|
|
||||||
options.services.ntfy-sh = {
|
|
||||||
hostname = lib.mkOption {
|
|
||||||
type = lib.types.str;
|
|
||||||
example = "ntfy.example.com";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
config = lib.mkIf cfg.enable {
|
|
||||||
services.ntfy-sh.settings = {
|
|
||||||
base-url = "https://${cfg.hostname}";
|
|
||||||
listen-http = "127.0.0.1:2586";
|
|
||||||
auth-default-access = "deny-all";
|
|
||||||
behind-proxy = true;
|
|
||||||
enable-login = true;
|
|
||||||
};
|
|
||||||
|
|
||||||
# backups
|
|
||||||
backup.group."ntfy".paths = [
|
|
||||||
"/var/lib/ntfy-sh"
|
|
||||||
];
|
|
||||||
|
|
||||||
services.nginx.enable = true;
|
|
||||||
services.nginx.virtualHosts.${cfg.hostname} = {
|
|
||||||
enableACME = true;
|
|
||||||
forceSSL = true;
|
|
||||||
locations."/" = {
|
|
||||||
proxyPass = "http://127.0.0.1:2586";
|
|
||||||
proxyWebsockets = true;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
}
|
|
||||||
@@ -26,16 +26,6 @@
|
|||||||
"printcap name" = "cups";
|
"printcap name" = "cups";
|
||||||
|
|
||||||
"hide files" = "/.nobackup/.DS_Store/._.DS_Store/";
|
"hide files" = "/.nobackup/.DS_Store/._.DS_Store/";
|
||||||
|
|
||||||
# Samba 4.22+ enables SMB3 directory leases by default, allowing clients
|
|
||||||
# to cache directory listings locally. When files are created locally on
|
|
||||||
# the server (bypassing Samba), these cached listings go stale because
|
|
||||||
# kernel oplocks — the mechanism that would break leases on local
|
|
||||||
# changes — is incompatible with smb2 leases. Enabling kernel oplocks
|
|
||||||
# would fix this but forces Samba to disable smb2 leases, durable
|
|
||||||
# handles, and level2 oplocks, losing handle caching performance.
|
|
||||||
# https://wiki.samba.org/index.php/Editing_files_locally_on_server:_interoperability
|
|
||||||
"smb3 directory leases" = "no";
|
|
||||||
};
|
};
|
||||||
public = {
|
public = {
|
||||||
path = "/data/samba/Public";
|
path = "/data/samba/Public";
|
||||||
|
|||||||
@@ -1,87 +0,0 @@
|
|||||||
{ config, lib, pkgs, ... }:
|
|
||||||
|
|
||||||
let
|
|
||||||
cfg = config.ntfy-alerts;
|
|
||||||
hasZfs = config.boot.supportedFilesystems.zfs or false;
|
|
||||||
hasNtfy = config.thisMachine.hasRole."ntfy";
|
|
||||||
|
|
||||||
checkScript = pkgs.writeShellScript "zfs-health-check" ''
|
|
||||||
PATH="${lib.makeBinPath [ pkgs.zfs pkgs.coreutils pkgs.gawk pkgs.curl ]}"
|
|
||||||
|
|
||||||
unhealthy=""
|
|
||||||
|
|
||||||
# Check pool health status
|
|
||||||
while IFS=$'\t' read -r pool state; do
|
|
||||||
if [ "$state" != "ONLINE" ]; then
|
|
||||||
unhealthy="$unhealthy"$'\n'"Pool '$pool' is $state"
|
|
||||||
fi
|
|
||||||
done < <(zpool list -H -o name,health)
|
|
||||||
|
|
||||||
# Check for errors (read, write, checksum) on any vdev
|
|
||||||
while IFS=$'\t' read -r pool errors; do
|
|
||||||
if [ "$errors" != "No known data errors" ] && [ -n "$errors" ]; then
|
|
||||||
unhealthy="$unhealthy"$'\n'"Pool '$pool' has errors: $errors"
|
|
||||||
fi
|
|
||||||
done < <(zpool status -x 2>/dev/null | awk '
|
|
||||||
/pool:/ { pool=$2 }
|
|
||||||
/errors:/ { sub(/^[[:space:]]*errors: /, ""); print pool "\t" $0 }
|
|
||||||
')
|
|
||||||
|
|
||||||
# Check for any drives with non-zero error counts
|
|
||||||
drive_errors=$(zpool status 2>/dev/null | awk '
|
|
||||||
/DEGRADED|FAULTED|OFFLINE|UNAVAIL|REMOVED/ && !/pool:/ && !/state:/ {
|
|
||||||
print " " $0
|
|
||||||
}
|
|
||||||
/[0-9]+[[:space:]]+[0-9]+[[:space:]]+[0-9]+/ {
|
|
||||||
if ($3 > 0 || $4 > 0 || $5 > 0) {
|
|
||||||
print " " $1 " (read:" $3 " write:" $4 " cksum:" $5 ")"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
')
|
|
||||||
if [ -n "$drive_errors" ]; then
|
|
||||||
unhealthy="$unhealthy"$'\n'"Device errors:"$'\n'"$drive_errors"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -n "$unhealthy" ]; then
|
|
||||||
message="ZFS health check failed on ${config.networking.hostName}:$unhealthy"
|
|
||||||
|
|
||||||
curl \
|
|
||||||
--fail --silent --show-error \
|
|
||||||
--max-time 30 --retry 3 \
|
|
||||||
-H "Authorization: Bearer $NTFY_TOKEN" \
|
|
||||||
-H "Title: ZFS issue on ${config.networking.hostName}" \
|
|
||||||
-H "Priority: urgent" \
|
|
||||||
-H "Tags: warning" \
|
|
||||||
-d "$message" \
|
|
||||||
"${cfg.serverUrl}/${cfg.topic}"
|
|
||||||
|
|
||||||
echo "$message" >&2
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "All ZFS pools healthy"
|
|
||||||
'';
|
|
||||||
in
|
|
||||||
{
|
|
||||||
config = lib.mkIf (hasZfs && hasNtfy) {
|
|
||||||
systemd.services.zfs-health-check = {
|
|
||||||
description = "Check ZFS pool health and alert on issues";
|
|
||||||
wants = [ "network-online.target" ];
|
|
||||||
after = [ "network-online.target" "zfs.target" ];
|
|
||||||
serviceConfig = {
|
|
||||||
Type = "oneshot";
|
|
||||||
EnvironmentFile = "/run/agenix/ntfy-token";
|
|
||||||
ExecStart = checkScript;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
systemd.timers.zfs-health-check = {
|
|
||||||
description = "Periodic ZFS health check";
|
|
||||||
wantedBy = [ "timers.target" ];
|
|
||||||
timerConfig = {
|
|
||||||
OnCalendar = "daily";
|
|
||||||
Persistent = true;
|
|
||||||
RandomizedDelaySec = "1h";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
}
|
|
||||||
18
flake.lock
generated
18
flake.lock
generated
@@ -228,11 +228,11 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1771756436,
|
"lastModified": 1771683283,
|
||||||
"narHash": "sha256-Tl2I0YXdhSTufGqAaD1ySh8x+cvVsEI1mJyJg12lxhI=",
|
"narHash": "sha256-WxAEkAbo8dP7qiyPM6VN4ZGAxfuBVlNBNPkrqkrXVEc=",
|
||||||
"owner": "nix-community",
|
"owner": "nix-community",
|
||||||
"repo": "home-manager",
|
"repo": "home-manager",
|
||||||
"rev": "5bd3589390b431a63072868a90c0f24771ff4cbb",
|
"rev": "c6ed3eab64d23520bcbb858aa53fe2b533725d4a",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
@@ -250,11 +250,11 @@
|
|||||||
"spectrum": "spectrum"
|
"spectrum": "spectrum"
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1771712688,
|
"lastModified": 1771365290,
|
||||||
"narHash": "sha256-Pf4CaRoOLQV02m2POPA+0EWvb3gVdpaiS0hNNVZhO3c=",
|
"narHash": "sha256-1XJOslVyF7yzf6yd/yl1VjGLywsbtwmQh3X1LuJcLI4=",
|
||||||
"owner": "astro",
|
"owner": "astro",
|
||||||
"repo": "microvm.nix",
|
"repo": "microvm.nix",
|
||||||
"rev": "a3abc020a3d8e624e145f4144ed40702f788ea32",
|
"rev": "789c90b164b55b4379e7a94af8b9c01489024c18",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
@@ -270,11 +270,11 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1771734689,
|
"lastModified": 1771130777,
|
||||||
"narHash": "sha256-/phvMgr1yutyAMjKnZlxkVplzxHiz60i4rc+gKzpwhg=",
|
"narHash": "sha256-UIKOwG0D9XVIJfNWg6+gENAvQP+7LO46eO0Jpe+ItJ0=",
|
||||||
"owner": "Mic92",
|
"owner": "Mic92",
|
||||||
"repo": "nix-index-database",
|
"repo": "nix-index-database",
|
||||||
"rev": "8f590b832326ab9699444f3a48240595954a4b10",
|
"rev": "efec7aaad8d43f8e5194df46a007456093c40f88",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
|||||||
@@ -175,10 +175,10 @@
|
|||||||
kexec = (mkEphemeral "x86_64-linux").config.system.build.images.kexec;
|
kexec = (mkEphemeral "x86_64-linux").config.system.build.images.kexec;
|
||||||
iso = (mkEphemeral "x86_64-linux").config.system.build.images.iso;
|
iso = (mkEphemeral "x86_64-linux").config.system.build.images.iso;
|
||||||
};
|
};
|
||||||
# "aarch64-linux" = {
|
"aarch64-linux" = {
|
||||||
# kexec = (mkEphemeral "aarch64-linux").config.system.build.images.kexec;
|
kexec = (mkEphemeral "aarch64-linux").config.system.build.images.kexec;
|
||||||
# iso = (mkEphemeral "aarch64-linux").config.system.build.images.iso;
|
iso = (mkEphemeral "aarch64-linux").config.system.build.images.iso;
|
||||||
# };
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
overlays.default = import ./overlays { inherit inputs; };
|
overlays.default = import ./overlays { inherit inputs; };
|
||||||
|
|||||||
@@ -8,7 +8,6 @@
|
|||||||
systemRoles = [
|
systemRoles = [
|
||||||
"personal"
|
"personal"
|
||||||
"dns-challenge"
|
"dns-challenge"
|
||||||
"ntfy"
|
|
||||||
];
|
];
|
||||||
|
|
||||||
hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID/Df5lG07Il7fizEgZR/T9bMlR0joESRJ7cqM9BkOyP";
|
hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID/Df5lG07Il7fizEgZR/T9bMlR0joESRJ7cqM9BkOyP";
|
||||||
|
|||||||
@@ -7,7 +7,6 @@
|
|||||||
|
|
||||||
systemRoles = [
|
systemRoles = [
|
||||||
"personal"
|
"personal"
|
||||||
"ntfy"
|
|
||||||
];
|
];
|
||||||
|
|
||||||
hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEQi3q8jU6vRruExAL60J7GFO1gS8HsmXVJuKRT4ljrG";
|
hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEQi3q8jU6vRruExAL60J7GFO1gS8HsmXVJuKRT4ljrG";
|
||||||
|
|||||||
9
machines/phil/default.nix
Normal file
9
machines/phil/default.nix
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
{ lib, ... }:
|
||||||
|
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
./hardware-configuration.nix
|
||||||
|
];
|
||||||
|
|
||||||
|
networking.hostName = "phil";
|
||||||
|
}
|
||||||
46
machines/phil/hardware-configuration.nix
Normal file
46
machines/phil/hardware-configuration.nix
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
# Do not modify this file! It was generated by ‘nixos-generate-config’
|
||||||
|
# and may be overwritten by future invocations. Please make changes
|
||||||
|
# to /etc/nixos/configuration.nix instead.
|
||||||
|
{ lib, modulesPath, ... }:
|
||||||
|
|
||||||
|
{
|
||||||
|
imports =
|
||||||
|
[
|
||||||
|
(modulesPath + "/profiles/qemu-guest.nix")
|
||||||
|
];
|
||||||
|
|
||||||
|
# because grub just doesn't work for some reason
|
||||||
|
boot.loader.systemd-boot.enable = true;
|
||||||
|
|
||||||
|
remoteLuksUnlock.enable = true;
|
||||||
|
remoteLuksUnlock.enableTorUnlock = false;
|
||||||
|
|
||||||
|
boot.initrd.availableKernelModules = [ "xhci_pci" ];
|
||||||
|
boot.initrd.kernelModules = [ "dm-snapshot" ];
|
||||||
|
boot.kernelModules = [ ];
|
||||||
|
boot.extraModulePackages = [ ];
|
||||||
|
|
||||||
|
boot.initrd.luks.devices."enc-pv" = {
|
||||||
|
device = "/dev/disk/by-uuid/d26c1820-4c39-4615-98c2-51442504e194";
|
||||||
|
allowDiscards = true;
|
||||||
|
};
|
||||||
|
|
||||||
|
fileSystems."/" =
|
||||||
|
{
|
||||||
|
device = "/dev/disk/by-uuid/851bfde6-93cd-439e-9380-de28aa87eda9";
|
||||||
|
fsType = "btrfs";
|
||||||
|
};
|
||||||
|
|
||||||
|
fileSystems."/boot" =
|
||||||
|
{
|
||||||
|
device = "/dev/disk/by-uuid/F185-C4E5";
|
||||||
|
fsType = "vfat";
|
||||||
|
};
|
||||||
|
|
||||||
|
swapDevices =
|
||||||
|
[{ device = "/dev/disk/by-uuid/d809e3a1-3915-405a-a200-4429c5efdf87"; }];
|
||||||
|
|
||||||
|
networking.interfaces.enp0s6.useDHCP = lib.mkDefault true;
|
||||||
|
|
||||||
|
nixpkgs.hostPlatform = lib.mkDefault "aarch64-linux";
|
||||||
|
}
|
||||||
20
machines/phil/properties.nix
Normal file
20
machines/phil/properties.nix
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
{
|
||||||
|
hostNames = [
|
||||||
|
"phil"
|
||||||
|
"phil.neet.dev"
|
||||||
|
];
|
||||||
|
|
||||||
|
arch = "aarch64-linux";
|
||||||
|
|
||||||
|
systemRoles = [
|
||||||
|
"server"
|
||||||
|
"nix-builder"
|
||||||
|
];
|
||||||
|
|
||||||
|
hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBlgRPpuUkZqe8/lHugRPm/m2vcN9psYhh5tENHZt9I2";
|
||||||
|
|
||||||
|
remoteUnlock = {
|
||||||
|
hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIK0RodotOXLMy/w70aa096gaNqPBnfgiXR5ZAH4+wGzd";
|
||||||
|
clearnetHost = "unlock.phil.neet.dev";
|
||||||
|
};
|
||||||
|
}
|
||||||
@@ -108,12 +108,4 @@
|
|||||||
# librechat
|
# librechat
|
||||||
services.librechat-container.enable = true;
|
services.librechat-container.enable = true;
|
||||||
services.librechat-container.host = "chat.neet.dev";
|
services.librechat-container.host = "chat.neet.dev";
|
||||||
|
|
||||||
# push notifications
|
|
||||||
services.ntfy-sh.enable = true;
|
|
||||||
services.ntfy-sh.hostname = "ntfy.neet.dev";
|
|
||||||
|
|
||||||
# uptime monitoring
|
|
||||||
services.gatus.enable = true;
|
|
||||||
services.gatus.hostname = "status.neet.dev";
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -15,7 +15,6 @@
|
|||||||
"dailybot"
|
"dailybot"
|
||||||
"gitea"
|
"gitea"
|
||||||
"librechat"
|
"librechat"
|
||||||
"ntfy"
|
|
||||||
];
|
];
|
||||||
|
|
||||||
hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMBBlTAIp38RhErU1wNNV5MBeb+WGH0mhF/dxh5RsAXN";
|
hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMBBlTAIp38RhErU1wNNV5MBeb+WGH0mhF/dxh5RsAXN";
|
||||||
|
|||||||
@@ -55,135 +55,123 @@
|
|||||||
users.users.googlebot.extraGroups = [ "transmission" ];
|
users.users.googlebot.extraGroups = [ "transmission" ];
|
||||||
users.groups.transmission.gid = config.ids.gids.transmission;
|
users.groups.transmission.gid = config.ids.gids.transmission;
|
||||||
|
|
||||||
pia-vpn = {
|
vpn-container.enable = true;
|
||||||
enable = true;
|
vpn-container.mounts = [
|
||||||
serverLocation = "swiss";
|
"/var/lib"
|
||||||
|
"/data/samba/Public"
|
||||||
containers.transmission = {
|
];
|
||||||
ip = "10.100.0.10";
|
vpn-container.config = {
|
||||||
mounts."/var/lib".hostPath = "/var/lib";
|
# servarr services
|
||||||
mounts."/data/samba/Public".hostPath = "/data/samba/Public";
|
services.prowlarr.enable = true;
|
||||||
receiveForwardedPort = { protocol = "both"; };
|
services.sonarr.enable = true;
|
||||||
onPortForwarded = ''
|
services.sonarr.user = "public_data";
|
||||||
# Notify Transmission of the PIA-assigned peer port via RPC
|
services.sonarr.group = "public_data";
|
||||||
for i in $(seq 1 30); do
|
services.bazarr.enable = true;
|
||||||
curlout=$(curl -s "http://transmission.containers:8080/transmission/rpc" 2>/dev/null) && break
|
services.bazarr.user = "public_data";
|
||||||
sleep 2
|
services.bazarr.group = "public_data";
|
||||||
done
|
services.radarr.enable = true;
|
||||||
regex='X-Transmission-Session-Id: (\w*)'
|
services.radarr.user = "public_data";
|
||||||
if [[ $curlout =~ $regex ]]; then
|
services.radarr.group = "public_data";
|
||||||
sessionId=''${BASH_REMATCH[1]}
|
services.lidarr.enable = true;
|
||||||
curl -s "http://transmission.containers:8080/transmission/rpc" \
|
services.lidarr.user = "public_data";
|
||||||
-d "{\"method\":\"session-set\",\"arguments\":{\"peer-port\":$PORT}}" \
|
services.lidarr.group = "public_data";
|
||||||
-H "X-Transmission-Session-Id: $sessionId"
|
services.recyclarr = {
|
||||||
fi
|
enable = true;
|
||||||
'';
|
configuration = {
|
||||||
config = {
|
radarr.radarr_main = {
|
||||||
services.transmission = {
|
api_key = {
|
||||||
enable = true;
|
_secret = "/run/credentials/recyclarr.service/radarr-api-key";
|
||||||
package = pkgs.transmission_4;
|
|
||||||
performanceNetParameters = true;
|
|
||||||
user = "public_data";
|
|
||||||
group = "public_data";
|
|
||||||
settings = {
|
|
||||||
"download-dir" = "/data/samba/Public/Media/Transmission";
|
|
||||||
"incomplete-dir" = "/var/lib/transmission/.incomplete";
|
|
||||||
"incomplete-dir-enabled" = true;
|
|
||||||
|
|
||||||
"rpc-enabled" = true;
|
|
||||||
"rpc-port" = 8080;
|
|
||||||
"rpc-bind-address" = "0.0.0.0";
|
|
||||||
"rpc-whitelist" = "127.0.0.1,10.100.*.*,192.168.*.*";
|
|
||||||
"rpc-host-whitelist-enabled" = false;
|
|
||||||
|
|
||||||
"port-forwarding-enabled" = true;
|
|
||||||
"peer-port" = 51413;
|
|
||||||
"peer-port-random-on-start" = false;
|
|
||||||
|
|
||||||
"encryption" = 1;
|
|
||||||
"lpd-enabled" = true;
|
|
||||||
"dht-enabled" = true;
|
|
||||||
"pex-enabled" = true;
|
|
||||||
|
|
||||||
"blocklist-enabled" = true;
|
|
||||||
"blocklist-updates-enabled" = true;
|
|
||||||
"blocklist-url" = "https://github.com/Naunter/BT_BlockLists/raw/master/bt_blocklists.gz";
|
|
||||||
|
|
||||||
"ratio-limit" = 3;
|
|
||||||
"ratio-limit-enabled" = true;
|
|
||||||
|
|
||||||
"download-queue-enabled" = true;
|
|
||||||
"download-queue-size" = 20;
|
|
||||||
};
|
};
|
||||||
};
|
base_url = "http://localhost:7878";
|
||||||
# https://github.com/NixOS/nixpkgs/issues/258793
|
|
||||||
systemd.services.transmission.serviceConfig = {
|
|
||||||
RootDirectoryStartOnly = lib.mkForce (lib.mkForce false);
|
|
||||||
RootDirectory = lib.mkForce (lib.mkForce "");
|
|
||||||
};
|
|
||||||
|
|
||||||
users.groups.public_data.gid = 994;
|
quality_definition.type = "movie";
|
||||||
users.users.public_data = {
|
};
|
||||||
isSystemUser = true;
|
sonarr.sonarr_main = {
|
||||||
group = "public_data";
|
api_key = {
|
||||||
uid = 994;
|
_secret = "/run/credentials/recyclarr.service/sonarr-api-key";
|
||||||
|
};
|
||||||
|
base_url = "http://localhost:8989";
|
||||||
|
|
||||||
|
quality_definition.type = "series";
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
containers.servarr = {
|
systemd.services.recyclarr.serviceConfig.LoadCredential = [
|
||||||
ip = "10.100.0.11";
|
"radarr-api-key:/run/agenix/radarr-api-key"
|
||||||
mounts."/var/lib".hostPath = "/var/lib";
|
"sonarr-api-key:/run/agenix/sonarr-api-key"
|
||||||
mounts."/data/samba/Public".hostPath = "/data/samba/Public";
|
];
|
||||||
mounts."/run/agenix" = { hostPath = "/run/agenix"; isReadOnly = true; };
|
|
||||||
config = {
|
|
||||||
services.prowlarr.enable = true;
|
|
||||||
services.sonarr.enable = true;
|
|
||||||
services.sonarr.user = "public_data";
|
|
||||||
services.sonarr.group = "public_data";
|
|
||||||
services.bazarr.enable = true;
|
|
||||||
services.bazarr.user = "public_data";
|
|
||||||
services.bazarr.group = "public_data";
|
|
||||||
services.radarr.enable = true;
|
|
||||||
services.radarr.user = "public_data";
|
|
||||||
services.radarr.group = "public_data";
|
|
||||||
services.lidarr.enable = true;
|
|
||||||
services.lidarr.user = "public_data";
|
|
||||||
services.lidarr.group = "public_data";
|
|
||||||
services.recyclarr = {
|
|
||||||
enable = true;
|
|
||||||
configuration = {
|
|
||||||
radarr.radarr_main = {
|
|
||||||
api_key = {
|
|
||||||
_secret = "/run/credentials/recyclarr.service/radarr-api-key";
|
|
||||||
};
|
|
||||||
base_url = "http://localhost:7878";
|
|
||||||
quality_definition.type = "movie";
|
|
||||||
};
|
|
||||||
sonarr.sonarr_main = {
|
|
||||||
api_key = {
|
|
||||||
_secret = "/run/credentials/recyclarr.service/sonarr-api-key";
|
|
||||||
};
|
|
||||||
base_url = "http://localhost:8989";
|
|
||||||
quality_definition.type = "series";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
systemd.services.recyclarr.serviceConfig.LoadCredential = [
|
services.transmission = {
|
||||||
"radarr-api-key:/run/agenix/radarr-api-key"
|
enable = true;
|
||||||
"sonarr-api-key:/run/agenix/sonarr-api-key"
|
package = pkgs.transmission_4;
|
||||||
];
|
performanceNetParameters = true;
|
||||||
|
user = "public_data";
|
||||||
|
group = "public_data";
|
||||||
|
settings = {
|
||||||
|
/* directory settings */
|
||||||
|
# "watch-dir" = "/srv/storage/Transmission/To-Download";
|
||||||
|
# "watch-dir-enabled" = true;
|
||||||
|
"download-dir" = "/data/samba/Public/Media/Transmission";
|
||||||
|
"incomplete-dir" = "/var/lib/transmission/.incomplete";
|
||||||
|
"incomplete-dir-enabled" = true;
|
||||||
|
|
||||||
users.groups.public_data.gid = 994;
|
/* web interface, accessible from local network */
|
||||||
users.users.public_data = {
|
"rpc-enabled" = true;
|
||||||
isSystemUser = true;
|
"rpc-bind-address" = "0.0.0.0";
|
||||||
group = "public_data";
|
"rpc-whitelist" = "127.0.0.1,192.168.*.*,172.16.*.*";
|
||||||
uid = 994;
|
"rpc-host-whitelist" = "void,192.168.*.*,172.16.*.*";
|
||||||
};
|
"rpc-host-whitelist-enabled" = false;
|
||||||
|
|
||||||
|
"port-forwarding-enabled" = true;
|
||||||
|
"peer-port" = 50023;
|
||||||
|
"peer-port-random-on-start" = false;
|
||||||
|
|
||||||
|
"encryption" = 1;
|
||||||
|
"lpd-enabled" = true; /* local peer discovery */
|
||||||
|
"dht-enabled" = true; /* dht peer discovery in swarm */
|
||||||
|
"pex-enabled" = true; /* peer exchange */
|
||||||
|
|
||||||
|
/* ip blocklist */
|
||||||
|
"blocklist-enabled" = true;
|
||||||
|
"blocklist-updates-enabled" = true;
|
||||||
|
"blocklist-url" = "https://github.com/Naunter/BT_BlockLists/raw/master/bt_blocklists.gz";
|
||||||
|
|
||||||
|
/* download speed settings */
|
||||||
|
# "speed-limit-down" = 1200;
|
||||||
|
# "speed-limit-down-enabled" = false;
|
||||||
|
# "speed-limit-up" = 500;
|
||||||
|
# "speed-limit-up-enabled" = true;
|
||||||
|
|
||||||
|
/* seeding limit */
|
||||||
|
"ratio-limit" = 3;
|
||||||
|
"ratio-limit-enabled" = true;
|
||||||
|
|
||||||
|
"download-queue-enabled" = true;
|
||||||
|
"download-queue-size" = 20; # gotta go fast
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
# https://github.com/NixOS/nixpkgs/issues/258793
|
||||||
|
systemd.services.transmission.serviceConfig = {
|
||||||
|
RootDirectoryStartOnly = lib.mkForce (lib.mkForce false);
|
||||||
|
RootDirectory = lib.mkForce (lib.mkForce "");
|
||||||
|
};
|
||||||
|
|
||||||
|
users.groups.public_data.gid = 994;
|
||||||
|
users.users.public_data = {
|
||||||
|
isSystemUser = true;
|
||||||
|
group = "public_data";
|
||||||
|
uid = 994;
|
||||||
|
};
|
||||||
};
|
};
|
||||||
|
pia.wireguard.badPortForwardPorts = [
|
||||||
|
9696 # prowlarr
|
||||||
|
8989 # sonarr
|
||||||
|
6767 # bazarr
|
||||||
|
7878 # radarr
|
||||||
|
8686 # lidarr
|
||||||
|
9091 # transmission web
|
||||||
|
];
|
||||||
age.secrets.radarr-api-key.file = ../../../secrets/radarr-api-key.age;
|
age.secrets.radarr-api-key.file = ../../../secrets/radarr-api-key.age;
|
||||||
age.secrets.sonarr-api-key.file = ../../../secrets/sonarr-api-key.age;
|
age.secrets.sonarr-api-key.file = ../../../secrets/sonarr-api-key.age;
|
||||||
|
|
||||||
@@ -227,12 +215,12 @@
|
|||||||
};
|
};
|
||||||
in
|
in
|
||||||
lib.mkMerge [
|
lib.mkMerge [
|
||||||
(mkVirtualHost "bazarr.s0.neet.dev" "http://servarr.containers:6767")
|
(mkVirtualHost "bazarr.s0.neet.dev" "http://vpn.containers:6767")
|
||||||
(mkVirtualHost "radarr.s0.neet.dev" "http://servarr.containers:7878")
|
(mkVirtualHost "radarr.s0.neet.dev" "http://vpn.containers:7878")
|
||||||
(mkVirtualHost "lidarr.s0.neet.dev" "http://servarr.containers:8686")
|
(mkVirtualHost "lidarr.s0.neet.dev" "http://vpn.containers:8686")
|
||||||
(mkVirtualHost "sonarr.s0.neet.dev" "http://servarr.containers:8989")
|
(mkVirtualHost "sonarr.s0.neet.dev" "http://vpn.containers:8989")
|
||||||
(mkVirtualHost "prowlarr.s0.neet.dev" "http://servarr.containers:9696")
|
(mkVirtualHost "prowlarr.s0.neet.dev" "http://vpn.containers:9696")
|
||||||
(mkVirtualHost "transmission.s0.neet.dev" "http://transmission.containers:8080")
|
(mkVirtualHost "transmission.s0.neet.dev" "http://vpn.containers:9091")
|
||||||
(mkVirtualHost "unifi.s0.neet.dev" "https://localhost:8443")
|
(mkVirtualHost "unifi.s0.neet.dev" "https://localhost:8443")
|
||||||
(mkVirtualHost "music.s0.neet.dev" "http://localhost:4533")
|
(mkVirtualHost "music.s0.neet.dev" "http://localhost:4533")
|
||||||
(mkVirtualHost "jellyfin.s0.neet.dev" "http://localhost:8096")
|
(mkVirtualHost "jellyfin.s0.neet.dev" "http://localhost:8096")
|
||||||
|
|||||||
@@ -45,6 +45,12 @@
|
|||||||
fsType = "zfs";
|
fsType = "zfs";
|
||||||
options = [ "zfsutil" "X-mount.mkdir" ];
|
options = [ "zfsutil" "X-mount.mkdir" ];
|
||||||
};
|
};
|
||||||
|
fileSystems."/var/lib/atticd" =
|
||||||
|
{
|
||||||
|
device = "rpool/nixos/var/lib/atticd";
|
||||||
|
fsType = "zfs";
|
||||||
|
options = [ "zfsutil" "X-mount.mkdir" ];
|
||||||
|
};
|
||||||
fileSystems."/var/log" =
|
fileSystems."/var/log" =
|
||||||
{
|
{
|
||||||
device = "rpool/nixos/var/log";
|
device = "rpool/nixos/var/log";
|
||||||
|
|||||||
@@ -18,7 +18,6 @@
|
|||||||
"linkwarden"
|
"linkwarden"
|
||||||
"outline"
|
"outline"
|
||||||
"dns-challenge"
|
"dns-challenge"
|
||||||
"ntfy"
|
|
||||||
];
|
];
|
||||||
|
|
||||||
hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAwiXcUFtAvZCayhu4+AIcF+Ktrdgv9ee/mXSIhJbp4q";
|
hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAwiXcUFtAvZCayhu4+AIcF+Ktrdgv9ee/mXSIhJbp4q";
|
||||||
|
|||||||
@@ -8,7 +8,6 @@
|
|||||||
systemRoles = [
|
systemRoles = [
|
||||||
"personal"
|
"personal"
|
||||||
"media-center"
|
"media-center"
|
||||||
"ntfy"
|
|
||||||
];
|
];
|
||||||
|
|
||||||
hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHvdC1EiLqSNVmk5L1p7cWRIrrlelbK+NMj6tEBrwqIq";
|
hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHvdC1EiLqSNVmk5L1p7cWRIrrlelbK+NMj6tEBrwqIq";
|
||||||
|
|||||||
@@ -1,19 +0,0 @@
|
|||||||
age-encryption.org/v1
|
|
||||||
-> ssh-ed25519 qEbiMg 5JtpNApPNiFqAB/gQcAsE1gz0Fg/uHW92f6Kx1J2ggQ
|
|
||||||
RzC1MQxyDYW1IuMo+OtSgcsND4v7XIRn0rCSkKCFA3A
|
|
||||||
-> ssh-ed25519 N7drjg mn6LWo+2zWEtUavbFQar966+j+g5su+lcBfWYz1aZDQ
|
|
||||||
EmKpdfkCSQao1+O/HJdOiam7UvBnDYcEEkgH6KrudQI
|
|
||||||
-> ssh-ed25519 jQaHAA an3Ukqz3BVoz0FEAA6/Lw1XKOkQWHwmTut+XD4E4vS8
|
|
||||||
9N2ePtXG2FPJSmOwcAO9p92MJKJJpTlEhKSmgMiinB0
|
|
||||||
-> ssh-ed25519 ZDy34A v98HzmBgwgOpUk2WrRuFsCdNR+nF2veVLzyT2pU2ZXY
|
|
||||||
o2pO5JbVEeaOFQ3beBvej6qgDdT9mgPCVHxmw2umhA0
|
|
||||||
-> ssh-ed25519 w3nu8g Uba2LWQueJ50Ds1/RjvkXI+VH7calMiM7dbL02sRJ3U
|
|
||||||
mFj5skmDXhJV9lK5iwUpebqxqVPAexdUntrbWJEix+Q
|
|
||||||
-> ssh-ed25519 evqvfg wLEEdTdmDRiFGYDrYFQjvRzc3zmGXgvztIy3UuFXnWg
|
|
||||||
CtCV/PpaxBmtDV+6InmcbKxNDmbPUTzyCm8tCf1Qw/4
|
|
||||||
-> ssh-ed25519 6AT2/g NaA1AhOdb+FGOCoWFEX0QN4cXS1CxlpFwpsH1L6vBA0
|
|
||||||
Z9aMYAofQ4Ath0zsg0YdZG3GTnCN2uQW7EG02bMZRsc
|
|
||||||
-> ssh-ed25519 hPp1nw +shAZrydcbjXfYxm1UW1YosKg5ZwBBKO6cct4HdotBo
|
|
||||||
GxnopKlmZQ/I6kMZPNurLgqwwkFHpUradaNYTPnlMFU
|
|
||||||
--- W6DrhCmU08IEIyPpHDiRV21xVeALNk1bDHrLYc2YcC4
|
|
||||||
'+Æ©<C386>CËl”i.Z£t;ýL²1Eãk#5ŸŠÑ£38‰?± šFWhö6?±_Ã=Âæ<C382>ãj/æÌRFÆáãåiÿóãÌZ{‘
|
|
||||||
@@ -9,7 +9,7 @@ let
|
|||||||
nobody = sshKeys.userKeys;
|
nobody = sshKeys.userKeys;
|
||||||
|
|
||||||
# For secrets that all machines need to know
|
# For secrets that all machines need to know
|
||||||
everyone = lib.unique (roles.personal ++ roles.server);
|
everyone = roles.personal ++ roles.server;
|
||||||
in
|
in
|
||||||
|
|
||||||
with roles;
|
with roles;
|
||||||
@@ -43,11 +43,8 @@ with roles;
|
|||||||
"linkwarden-environment.age".publicKeys = linkwarden;
|
"linkwarden-environment.age".publicKeys = linkwarden;
|
||||||
|
|
||||||
# backups
|
# backups
|
||||||
"backblaze-s3-backups.age".publicKeys = everyone;
|
"backblaze-s3-backups.age".publicKeys = personal ++ server;
|
||||||
"restic-password.age".publicKeys = everyone;
|
"restic-password.age".publicKeys = personal ++ server;
|
||||||
|
|
||||||
# ntfy alerts
|
|
||||||
"ntfy-token.age".publicKeys = everyone;
|
|
||||||
|
|
||||||
# gitea actions runner
|
# gitea actions runner
|
||||||
"gitea-actions-runner-token.age".publicKeys = gitea-actions-runner;
|
"gitea-actions-runner-token.age".publicKeys = gitea-actions-runner;
|
||||||
|
|||||||
Reference in New Issue
Block a user