Rewrite PIA VPN as multi-container bridge architecture
All checks were successful: Check Flake / check-flake (push) in 3m15s
Replace the single VPN container (veth pair, host-side auth scripts) with a multi-container setup on a shared bridge network:

- Dedicated VPN container handles all PIA auth, WireGuard config, NAT, and optional port-forwarding DNAT
- Service containers default-route through the VPN container (leak-proof by topology)
- Host runs tinyproxy on the bridge for PIA API bootstrap before WG is up
- WG interface is still created in the host netns, then moved into the VPN container's namespace (see the sketch below)
- Monthly renewal keeps the connection alive (PIA allows connections to last up to two months)
- Drop OpenVPN support entirely
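A minimal sketch of the interface handoff described above, assuming a container named pia-vpn and an interface named wg0; the unit name pia-wg-handoff and the exact ordering are illustrative, not taken from this commit:

{ pkgs, ... }:
{
  systemd.services.pia-wg-handoff = {
    description = "Create wg0 in the host netns and move it into the VPN container";
    after = [ "container@pia-vpn.service" ];
    requires = [ "container@pia-vpn.service" ];
    serviceConfig.Type = "oneshot";
    path = [ pkgs.iproute2 pkgs.systemd ];
    script = ''
      # Create the interface host-side, so the PIA API (reached through the
      # tinyproxy bootstrap) is available before any tunnel exists.
      ip link add wg0 type wireguard
      # A WireGuard interface keeps its UDP socket in the netns where it was
      # created, so encrypted traffic still egresses via the host after the
      # move, while plaintext is only visible inside the container.
      leader=$(machinectl show pia-vpn --property Leader --value)
      ip link set wg0 netns "$leader"
    '';
  };
}

This is the standard WireGuard namespace pattern: the cleartext side of the tunnel lives in the container, the encrypted side in the host.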
common/network/pia-vpn/service-container.nix (new file, 87 lines)
@@ -0,0 +1,87 @@
{ config, lib, allModules, ... }:

# Generates service containers that route all traffic through the VPN container.
# Each container gets a static IP on the VPN bridge with default route → VPN container.
#
# Uses lazy mapAttrs inside fixed config keys to avoid infinite recursion.
# (mkMerge + mapAttrsToList at the top level forces eager evaluation of cfg.containers
# during module structure discovery, which creates a cycle with config evaluation.)

with lib;

let
  cfg = config.pia-vpn;

  mkContainer = name: ctr: {
    autoStart = true;
    ephemeral = true;
    privateNetwork = true;
    hostBridge = cfg.bridgeName;

    bindMounts = mapAttrs
      (_: mount: {
        hostPath = mount.hostPath;
        isReadOnly = mount.isReadOnly;
      })
      ctr.mounts;

    config = { config, pkgs, lib, ... }: {
      imports = allModules ++ [ ctr.config ];

      # Static IP with gateway pointing to VPN container
      networking.useNetworkd = true;
      systemd.network.enable = true;
      networking.useDHCP = false;

      systemd.network.networks."20-eth0" = {
        matchConfig.Name = "eth0";
        networkConfig = {
          Address = "${ctr.ip}/${cfg.subnetPrefixLen}";
          Gateway = cfg.vpnAddress;
          DNS = [ cfg.vpnAddress ];
        };
      };

      networking.hosts = cfg.containerHosts;

      # DNS through VPN container (queries go through WG tunnel = no DNS leak)
      networking.nameservers = [ cfg.vpnAddress ];

      # Wait for actual VPN connectivity before network-online.target.
      # Without this, services start before the VPN tunnel is ready and failures
      # can't be reported to ntfy (no outbound connectivity yet).
      systemd.services.wait-for-vpn = {
        description = "Wait for VPN connectivity";
        before = [ "network-online.target" ];
        wantedBy = [ "network-online.target" ];
        after = [ "systemd-networkd-wait-online.service" ];
        serviceConfig.Type = "oneshot";
        path = [ pkgs.iputils ];
        script = ''
          until ping -c1 -W2 1.1.1.1 >/dev/null 2>&1; do
            echo "Waiting for VPN connectivity..."
            sleep 1
          done
        '';
      };

      # Trust the bridge interface (host reaches us directly for nginx)
      networking.firewall.trustedInterfaces = [ "eth0" ];

      # Disable host resolv.conf; we use our own networkd DNS config
      networking.useHostResolvConf = false;
    };
  };

  mkContainerOrdering = name: _ctr: nameValuePair "container@${name}" {
    after = [ "container@pia-vpn.service" ];
    requires = [ "container@pia-vpn.service" ];
    partOf = [ "container@pia-vpn.service" ];
  };
in
{
  config = mkIf cfg.enable {
    containers = mapAttrs mkContainer cfg.containers;
    systemd.services = mapAttrs' mkContainerOrdering cfg.containers;
  };
}
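For the recursion note at the top of the file, a minimal illustration of the two shapes; the helper name mkPiece is hypothetical, and the lazy shape is the one the module actually uses:

# Eager shape (recurses): which top-level attrs exist depends on evaluating
# cfg.containers, but cfg is computed from this very config, creating a cycle
# during module structure discovery.
config = mkMerge (mapAttrsToList (name: ctr: mkPiece name ctr) cfg.containers);

# Lazy shape (used above): the top-level keys are fixed literals, and only
# the attrset values read cfg.containers, so structure discovery never has
# to evaluate cfg.
config.containers = mapAttrs mkContainer cfg.containers;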
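A hypothetical consumer of this module, assuming the option declarations (pia-vpn.enable, bridgeName, vpnAddress, subnetPrefixLen, containers.<name>.{ip, mounts, config}) live in a sibling options module; the names mirror the fields mkContainer reads above, but the exact option types are not part of this diff:

{
  pia-vpn = {
    enable = true;
    bridgeName = "br-pia";       # assumption: bridge created by the VPN module
    vpnAddress = "10.100.0.1";   # VPN container's address on the bridge
    subnetPrefixLen = "24";      # a string, since it is interpolated directly
    containers.media = {
      ip = "10.100.0.11";
      mounts."/var/lib/media" = {
        hostPath = "/srv/media";
        isReadOnly = false;
      };
      config = ./media.nix;      # extra NixOS module for this container
    };
  };
}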