Compare commits
1 Commits
89c507bfae
...
pia-client
| Author | SHA1 | Date |
|---|---|---|
| | a0c199ba06 | |
@@ -1,38 +0,0 @@
name: Check Flake

on: [push]

env:
  DEBIAN_FRONTEND: noninteractive
  PATH: /run/current-system/sw/bin/:/nix/var/nix/profiles/per-user/gitea-runner/profile/bin

# defaults:
#   run:
#     shell: nix shell nixpkgs#nodejs-18_x

jobs:
  check-flake:
    runs-on: nixos
    steps:
      # - run: node --version
      # - name: Install basic dependencies
      #   run: apt-get update && apt-get install -y --no-install-recommends sudo curl ca-certificates xz-utils

      # - name: Install Nix
      #   uses: https://github.com/cachix/install-nix-action@v20
      #   with:
      #     github_access_token: ${{ secrets.__GITHUB_TOKEN }}

      - name: Install dependencies
        run: nix profile install nixpkgs#nodejs-18_x

      - name: Checkout the repository
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      # - name: Get ENV var names
      #   run: printenv | cut -d'=' -f1

      - name: Check Flake
        run: nix flake check --show-trace
@@ -3,9 +3,10 @@
### Source Layout
- `/common` - common configuration imported into all `/machines`
- `/boot` - config related to bootloaders, cpu microcode, and unlocking LUKS root disks over tor
- `/network` - config for tailscale, and NixOS container with automatic vpn tunneling via PIA
- `/network` - config for tailscale, zerotier, and NixOS container with automatic vpn tunneling via PIA
- `/pc` - config that a graphical desktop computer should have. Use `de.enable = true;` to enable everything.
- `/server` - config that creates new nixos services or extends existing ones to meet my needs
- `/ssh.nix` - all ssh public host and user keys for all `/machines`
- `/machines` - all my NixOS machines along with their machine unique configuration for hardware and services
- `/kexec` - a special machine for generating minimal kexec images. Does not import `/common`
- `/secrets` - encrypted shared secrets unlocked through `/machines` ssh host keys
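To make the layout above concrete, here is a minimal sketch (not part of this commit) of what a machine entry under `/machines` might look like; the file name, relative import path, and hostname are assumptions, while `de.enable` is the switch described in the `/pc` bullet above:

```nix
# machines/example/configuration.nix -- hypothetical example, not in this repo
{ config, pkgs, ... }:

{
  imports = [
    ./hardware-configuration.nix
    ../../common # shared config described in the layout above (assumed relative path)
  ];

  networking.hostName = "example";

  # pull in the graphical desktop config from /common/pc
  de.enable = true;
}
```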
TODO.md (51 lines changed)
@@ -10,12 +10,24 @@
- https://nixos.wiki/wiki/Comparison_of_NixOS_setups

### Housekeeping
- Format everything here using nixfmt
- Clean up the line between hardware-configuration.nix and configuration.nix in machine config
- CI https://gvolpe.com/blog/nixos-binary-cache-ci/
- remove `options.currentSystem`
- allow `hostname` option for webservices to be null to disable configuring nginx

### NAS
- helios64 extra led lights
- safely turn off NAS on power disconnect
- hardware de/encoding for rk3399 helios64 https://forum.pine64.org/showthread.php?tid=14018
- tor unlock

### bcachefs
- bcachefs health alerts via email
- bcachefs periodic snapshotting
- use mount.bcachefs command for mounting
- bcachefs native encryption
- just need a kernel module? https://github.com/firestack/bcachefs-tools-flake/blob/kf/dev/mvp/nixos/module/bcachefs.nix#L40

### Shell Commands
- tailexitnode = `sudo tailscale up --exit-node=<exit-node-ip> --exit-node-allow-lan-access=true`
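A sketch of how the `tailexitnode` command above could be wired up as a shell alias through `environment.shellAliases`, the same mechanism this config already uses for the samba remount aliases later in this diff. This is hypothetical and not part of the commit; `<exit-node-ip>` is left as the placeholder it is in the TODO item:

```nix
{
  # hypothetical alias; replace <exit-node-ip> with a real Tailscale exit node address
  environment.shellAliases = {
    tailexitnode = "sudo tailscale up --exit-node=<exit-node-ip> --exit-node-allow-lan-access=true";
  };
}
```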
@@ -40,7 +52,21 @@
- https://ampache.org/
- replace nextcloud with seafile

### VPN container
- use wireguard for vpn
- https://github.com/triffid/pia-wg/blob/master/pia-wg.sh
- https://github.com/pia-foss/manual-connections
- port forwarding for vpn
- transmission using forwarded port
- https://www.wireguard.com/netns/
- one way firewall for vpn container

### Networking
- tailscale for p2p connections
- remove all use of zerotier

### Archive
- https://www.backblaze.com/b2/cloud-storage.html
- email
- https://github.com/Disassembler0/dovecot-archive/blob/main/src/dovecot_archive.py
- http://kb.unixservertech.com/software/dovecot/archiveserver
@@ -49,32 +75,7 @@
- https://christine.website/blog/paranoid-nixos-2021-07-18
- https://nixos.wiki/wiki/Impermanence

# Setup CI
- CI
- hydra
- https://docs.cachix.org/continuous-integration-setup/
- Binary Cache
- Maybe use cachix https://gvolpe.com/blog/nixos-binary-cache-ci/
- Self hosted binary cache? https://www.tweag.io/blog/2019-11-21-untrusted-ci/
- https://github.com/edolstra/nix-serve
- https://nixos.wiki/wiki/Binary_Cache
- https://discourse.nixos.org/t/introducing-attic-a-self-hostable-nix-binary-cache-server/24343
- Both
- https://garnix.io/
- https://nixbuild.net

# Secrets
- consider using headscale
- Replace luks over tor for remote unlock with luks over tailscale using ephemeral keys
- Rollover luks FDE passwords
- /secrets on personal computers should only be readable using a trusted ssh key, preferably requiring a yubikey
- Rollover shared yubikey secrets
- offsite backup yubikey, pw db, and ssh key with /secrets access

### Misc
- for automated kernel upgrades on luks systems, need to kexec with initrd that contains luks key
- https://github.com/flowztul/keyexec/blob/master/etc/default/kexec-cryptroot
- https://github.com/pop-os/system76-scheduler
- improve email a little bit https://helloinbox.email
- remap razer keys https://github.com/sezanzeb/input-remapper
@@ -4,12 +4,11 @@
let
  cfg = config.system.autoUpgrade;
in
{
in {
  config = lib.mkIf cfg.enable {
    system.autoUpgrade = {
      flake = "git+https://git.neet.dev/zuckerberg/nix-config.git";
      flags = [ "--recreate-lock-file" "--no-write-lock-file" ]; # ignore lock file, just pull the latest
      flags = [ "--recreate-lock-file" ]; # ignore lock file, just pull the latest
    };
  };
}
@@ -1,78 +0,0 @@
{ config, lib, pkgs, ... }:

let
  cfg = config.backup;
  hostname = config.networking.hostName;

  mkRespository = group: "s3:s3.us-west-004.backblazeb2.com/D22TgIt0-main-backup/${group}";

  mkBackup = group: paths: {
    repository = mkRespository group;
    inherit paths;

    initialize = true;

    timerConfig = {
      OnCalendar = "daily";
      RandomizedDelaySec = "1h";
    };

    extraBackupArgs = [
      ''--exclude-if-present ".nobackup"''
    ];

    pruneOpts = [
      "--keep-daily 7" # one backup for each of the last n days
      "--keep-weekly 5" # one backup for each of the last n weeks
      "--keep-monthly 12" # one backup for each of the last n months
      "--keep-yearly 75" # one backup for each of the last n years
    ];

    environmentFile = "/run/agenix/backblaze-s3-backups";
    passwordFile = "/run/agenix/restic-password";
  };

  # example usage: "sudo restic_samba unlock" (removes lockfile)
  mkResticGroupCmd = group: pkgs.writeShellScriptBin "restic_${group}" ''
    if [ "$EUID" -ne 0 ]
      then echo "Run as root"
      exit
    fi
    . /run/agenix/backblaze-s3-backups
    export AWS_SECRET_ACCESS_KEY
    export AWS_ACCESS_KEY_ID
    export RESTIC_PASSWORD_FILE=/run/agenix/restic-password
    export RESTIC_REPOSITORY="${mkRespository group}"
    exec ${pkgs.restic}/bin/restic "$@"
  '';
in
{
  options.backup = {
    group = lib.mkOption {
      default = null;
      type = lib.types.nullOr (lib.types.attrsOf (lib.types.submodule {
        options = {
          paths = lib.mkOption {
            type = lib.types.listOf lib.types.str;
            description = ''
              Paths to backup
            '';
          };
        };
      }));
    };
  };

  config = lib.mkIf (cfg.group != null) {
    services.restic.backups = lib.concatMapAttrs
      (group: groupCfg: {
        ${group} = mkBackup group groupCfg.paths;
      })
      cfg.group;

    age.secrets.backblaze-s3-backups.file = ../secrets/backblaze-s3-backups.age;
    age.secrets.restic-password.file = ../secrets/restic-password.age;

    environment.systemPackages = map mkResticGroupCmd (builtins.attrNames cfg.group);
  };
}
@@ -1,17 +0,0 @@
{ config, lib, ... }:

{
  nix = {
    settings = {
      substituters = [
        "https://cache.nixos.org/"
        "https://nix-community.cachix.org"
        "http://s0.koi-bebop.ts.net:5000"
      ];
      trusted-public-keys = [
        "nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs="
        "s0.koi-bebop.ts.net:OjbzD86YjyJZpCp9RWaQKANaflcpKhtzBMNP8I2aPUU="
      ];
    };
  };
}
@@ -3,8 +3,7 @@
with lib;
let
  cfg = config.bios;
in
{
in {
  options.bios = {
    enable = mkEnableOption "enable bios boot";
    device = mkOption {
@@ -5,6 +5,6 @@
    ./firmware.nix
    ./efi.nix
    ./bios.nix
    ./remote-luks-unlock.nix
    ./luks.nix
  ];
}
@@ -3,8 +3,7 @@
with lib;
let
  cfg = config.efi;
in
{
in {
  options.efi = {
    enable = mkEnableOption "enable efi boot";
  };
@@ -3,8 +3,7 @@
with lib;
let
  cfg = config.firmware;
in
{
in {
  options.firmware.x86_64 = {
    enable = mkEnableOption "enable x86_64 firmware";
  };
common/boot/luks.nix (new file, 101 lines)
@@ -0,0 +1,101 @@
{ config, pkgs, lib, ... }:

let
  cfg = config.luks;
in {
  options.luks = {
    enable = lib.mkEnableOption "enable luks root remote decrypt over ssh/tor";
    device = {
      name = lib.mkOption {
        type = lib.types.str;
        default = "enc-pv";
      };
      path = lib.mkOption {
        type = lib.types.either lib.types.str lib.types.path;
      };
      allowDiscards = lib.mkOption {
        type = lib.types.bool;
        default = false;
      };
    };
    sshHostKeys = lib.mkOption {
      type = lib.types.listOf (lib.types.either lib.types.str lib.types.path);
      default = [
        "/secret/ssh_host_rsa_key"
        "/secret/ssh_host_ed25519_key"
      ];
    };
    sshAuthorizedKeys = lib.mkOption {
      type = lib.types.listOf lib.types.str;
      default = config.users.users.googlebot.openssh.authorizedKeys.keys;
    };
    onionConfig = lib.mkOption {
      type = lib.types.path;
      default = /secret/onion;
    };
    kernelModules = lib.mkOption {
      type = lib.types.listOf lib.types.str;
      default = [ "e1000" "e1000e" "virtio_pci" "r8169" ];
    };
  };

  config = lib.mkIf cfg.enable {
    boot.initrd.luks.devices.${cfg.device.name} = {
      device = cfg.device.path;
      allowDiscards = cfg.device.allowDiscards;
    };

    # Unlock LUKS disk over ssh
    boot.initrd.network.enable = true;
    boot.initrd.kernelModules = cfg.kernelModules;
    boot.initrd.network.ssh = {
      enable = true;
      port = 22;
      hostKeys = cfg.sshHostKeys;
      authorizedKeys = cfg.sshAuthorizedKeys;
    };

    boot.initrd.postDeviceCommands = ''
      echo 'waiting for root device to be opened...'
      mkfifo /crypt-ramfs/passphrase
      echo /crypt-ramfs/passphrase >> /dev/null
    '';

    # Make machine accessible over tor for boot unlock
    boot.initrd.secrets = {
      "/etc/tor/onion/bootup" = cfg.onionConfig;
    };
    boot.initrd.extraUtilsCommands = ''
      copy_bin_and_libs ${pkgs.tor}/bin/tor
      copy_bin_and_libs ${pkgs.haveged}/bin/haveged
    '';
    # start tor during boot process
    boot.initrd.network.postCommands = let
      torRc = (pkgs.writeText "tor.rc" ''
        DataDirectory /etc/tor
        SOCKSPort 127.0.0.1:9050 IsolateDestAddr
        SOCKSPort 127.0.0.1:9063
        HiddenServiceDir /etc/tor/onion/bootup
        HiddenServicePort 22 127.0.0.1:22
      '');
    in ''
      # Add nice prompt for giving LUKS passphrase over ssh
      echo 'read -s -p "Unlock Passphrase: " passphrase && echo $passphrase > /crypt-ramfs/passphrase && exit' >> /root/.profile

      echo "tor: preparing onion folder"
      # have to do this otherwise tor does not want to start
      chmod -R 700 /etc/tor

      echo "make sure localhost is up"
      ip a a 127.0.0.1/8 dev lo
      ip link set lo up

      echo "haveged: starting haveged"
      haveged -F &

      echo "tor: starting tor"
      tor -f ${torRc} --verify-config
      tor -f ${torRc} &
    '';
  };
}
@@ -1,99 +0,0 @@
{ config, pkgs, lib, ... }:

let
  cfg = config.remoteLuksUnlock;
in
{
  options.remoteLuksUnlock = {
    enable = lib.mkEnableOption "enable luks root remote decrypt over ssh/tor";
    enableTorUnlock = lib.mkOption {
      type = lib.types.bool;
      default = cfg.enable;
      description = "Make machine accessible over tor for ssh boot unlock";
    };
    sshHostKeys = lib.mkOption {
      type = lib.types.listOf (lib.types.either lib.types.str lib.types.path);
      default = [
        "/secret/ssh_host_rsa_key"
        "/secret/ssh_host_ed25519_key"
      ];
    };
    sshAuthorizedKeys = lib.mkOption {
      type = lib.types.listOf lib.types.str;
      default = config.users.users.googlebot.openssh.authorizedKeys.keys;
    };
    onionConfig = lib.mkOption {
      type = lib.types.path;
      default = /secret/onion;
    };
    kernelModules = lib.mkOption {
      type = lib.types.listOf lib.types.str;
      default = [ "e1000" "e1000e" "virtio_pci" "r8169" ];
    };
  };

  config = lib.mkIf cfg.enable {
    # boot.initrd.luks.devices.${cfg.device.name} = {
    #   device = cfg.device.path;
    #   allowDiscards = cfg.device.allowDiscards;
    # };

    # Unlock LUKS disk over ssh
    boot.initrd.network.enable = true;
    boot.initrd.kernelModules = cfg.kernelModules;
    boot.initrd.network.ssh = {
      enable = true;
      port = 22;
      hostKeys = cfg.sshHostKeys;
      authorizedKeys = cfg.sshAuthorizedKeys;
    };

    boot.initrd.postDeviceCommands = ''
      echo 'waiting for root device to be opened...'
      mkfifo /crypt-ramfs/passphrase
      echo /crypt-ramfs/passphrase >> /dev/null
    '';

    boot.initrd.secrets = lib.mkIf cfg.enableTorUnlock {
      "/etc/tor/onion/bootup" = cfg.onionConfig;
    };
    boot.initrd.extraUtilsCommands = lib.mkIf cfg.enableTorUnlock ''
      copy_bin_and_libs ${pkgs.tor}/bin/tor
      copy_bin_and_libs ${pkgs.haveged}/bin/haveged
    '';
    boot.initrd.network.postCommands = lib.mkMerge [
      (
        ''
          # Add nice prompt for giving LUKS passphrase over ssh
          echo 'read -s -p "Unlock Passphrase: " passphrase && echo $passphrase > /crypt-ramfs/passphrase && exit' >> /root/.profile
        ''
      )

      (
        let torRc = (pkgs.writeText "tor.rc" ''
          DataDirectory /etc/tor
          SOCKSPort 127.0.0.1:9050 IsolateDestAddr
          SOCKSPort 127.0.0.1:9063
          HiddenServiceDir /etc/tor/onion/bootup
          HiddenServicePort 22 127.0.0.1:22
        ''); in
        lib.mkIf cfg.enableTorUnlock ''
          echo "tor: preparing onion folder"
          # have to do this otherwise tor does not want to start
          chmod -R 700 /etc/tor

          echo "make sure localhost is up"
          ip a a 127.0.0.1/8 dev lo
          ip link set lo up

          echo "haveged: starting haveged"
          haveged -F &

          echo "tor: starting tor"
          tor -f ${torRc} --verify-config
          tor -f ${torRc} &
        ''
      )
    ];
  };
}
@@ -2,8 +2,6 @@
{
  imports = [
    ./backups.nix
    ./binary-cache.nix
    ./flakes.nix
    ./auto-update.nix
    ./shell.nix
@@ -11,9 +9,6 @@
    ./boot
    ./server
    ./pc
    ./machine-info
    ./nix-builder.nix
    ./ssh.nix
  ];

  nix.flakes.enable = true;
@@ -25,23 +20,17 @@
  networking.firewall.enable = true;
  networking.firewall.allowPing = true;

  time.timeZone = "America/Denver";
  time.timeZone = "America/New_York";
  i18n.defaultLocale = "en_US.UTF-8";

  services.openssh = {
    enable = true;
    settings = {
      PasswordAuthentication = false;
    };
  };
  services.openssh.enable = true;
  programs.mosh.enable = true;

  environment.systemPackages = with pkgs; [
    wget
    kakoune
    htop
    git
    git-lfs
    git git-lfs
    dnsutils
    tmux
    nethogs
@@ -53,8 +42,6 @@
    micro
    helix
    lm_sensors
    picocom
    lf
  ];

  nixpkgs.config.allowUnfree = true;
@@ -67,24 +54,11 @@
      "dialout" # serial
    ];
    shell = pkgs.fish;
    openssh.authorizedKeys.keys = config.machines.ssh.userKeys;
    openssh.authorizedKeys.keys = (import ./ssh.nix).users;
    hashedPassword = "$6$TuDO46rILr$gkPUuLKZe3psexhs8WFZMpzgEBGksE.c3Tjh1f8sD0KMC4oV89K2pqAABfl.Lpxu2jVdr5bgvR5cWnZRnji/r/";
    uid = 1000;
  };
  users.users.root = {
    openssh.authorizedKeys.keys = config.machines.ssh.deployKeys;
  };
  nix.settings = {
    trusted-users = [ "root" "googlebot" ];
  };

  # don't use sudo
  security.doas.enable = true;
  security.sudo.enable = false;
  security.doas.extraRules = [
    # don't ask for password every time
    { groups = [ "wheel" ]; persist = true; }
  ];
  nix.trustedUsers = [ "root" "googlebot" ];

  nix.gc.automatic = true;
@@ -2,8 +2,7 @@
with lib;
let
  cfg = config.nix.flakes;
in
{
in {
  options.nix.flakes = {
    enable = mkEnableOption "use nix flakes";
  };
@@ -1,200 +0,0 @@
# Gathers info about each machine to construct overall configuration
# Ex: Each machine already trusts each other's SSH fingerprint
 
{ config, lib, pkgs, ... }:

let
  machines = config.machines.hosts;
in
{
  imports = [
    ./ssh.nix
    ./roles.nix
  ];

  options.machines = {

    hosts = lib.mkOption {
      type = lib.types.attrsOf
        (lib.types.submodule {
          options = {

            hostNames = lib.mkOption {
              type = lib.types.listOf lib.types.str;
              description = ''
                List of hostnames for this machine. The first one is the default so it is the target of deployments.
                Used for automatically trusting hosts for ssh connections.
              '';
            };

            arch = lib.mkOption {
              type = lib.types.enum [ "x86_64-linux" "aarch64-linux" ];
              description = ''
                The architecture of this machine.
              '';
            };

            systemRoles = lib.mkOption {
              type = lib.types.listOf lib.types.str; # TODO: maybe use an enum?
              description = ''
                The set of roles this machine holds. Affects secrets available. (TODO add service config as well using this info)
              '';
            };

            hostKey = lib.mkOption {
              type = lib.types.str;
              description = ''
                The system ssh host key of this machine. Used for automatically trusting hosts for ssh connections
                and for decrypting secrets with agenix.
              '';
            };

            remoteUnlock = lib.mkOption {
              default = null;
              type = lib.types.nullOr (lib.types.submodule {
                options = {

                  hostKey = lib.mkOption {
                    type = lib.types.str;
                    description = ''
                      The system ssh host key of this machine used for luks boot unlocking only.
                    '';
                  };

                  clearnetHost = lib.mkOption {
                    default = null;
                    type = lib.types.nullOr lib.types.str;
                    description = ''
                      The hostname resolvable over clearnet used to luks boot unlock this machine
                    '';
                  };

                  onionHost = lib.mkOption {
                    default = null;
                    type = lib.types.nullOr lib.types.str;
                    description = ''
                      The hostname resolvable over tor used to luks boot unlock this machine
                    '';
                  };

                };
              });
            };

            userKeys = lib.mkOption {
              default = [ ];
              type = lib.types.listOf lib.types.str;
              description = ''
                The list of user keys. Each key here can be used to log into all other systems as `googlebot`.

                TODO: consider auto populating other programs that use ssh keys such as gitea
              '';
            };

            deployKeys = lib.mkOption {
              default = [ ];
              type = lib.types.listOf lib.types.str;
              description = ''
                The list of deployment keys. Each key here can be used to log into all other systems as `root`.
              '';
            };

            configurationPath = lib.mkOption {
              type = lib.types.path;
              description = ''
                The path to this machine's configuration directory.
              '';
            };

          };
        });
    };
  };

  config = {
    assertions = (lib.concatLists (lib.mapAttrsToList
      (
        name: cfg: [
          {
            assertion = builtins.length cfg.hostNames > 0;
            message = ''
              Error with config for ${name}
              There must be at least one hostname.
            '';
          }
          {
            assertion = builtins.length cfg.systemRoles > 0;
            message = ''
              Error with config for ${name}
              There must be at least one system role.
            '';
          }
          {
            assertion = cfg.remoteUnlock == null || cfg.remoteUnlock.hostKey != cfg.hostKey;
            message = ''
              Error with config for ${name}
              Unlock hostkey and hostkey cannot be the same because unlock hostkey is in /boot, unencrypted.
            '';
          }
          {
            assertion = cfg.remoteUnlock == null || (cfg.remoteUnlock.clearnetHost != null || cfg.remoteUnlock.onionHost != null);
            message = ''
              Error with config for ${name}
              At least one of clearnet host or onion host must be defined.
            '';
          }
          {
            assertion = cfg.remoteUnlock == null || cfg.remoteUnlock.clearnetHost == null || builtins.elem cfg.remoteUnlock.clearnetHost cfg.hostNames == false;
            message = ''
              Error with config for ${name}
              Clearnet unlock hostname cannot be in the list of hostnames for security reasons.
            '';
          }
          {
            assertion = cfg.remoteUnlock == null || cfg.remoteUnlock.onionHost == null || lib.strings.hasSuffix ".onion" cfg.remoteUnlock.onionHost;
            message = ''
              Error with config for ${name}
              Tor unlock hostname must be an onion address.
            '';
          }
          {
            assertion = builtins.elem "personal" cfg.systemRoles || builtins.length cfg.userKeys == 0;
            message = ''
              Error with config for ${name}
              There must be at least one userkey defined for personal machines.
            '';
          }
          {
            assertion = builtins.elem "deploy" cfg.systemRoles || builtins.length cfg.deployKeys == 0;
            message = ''
              Error with config for ${name}
              Only deploy machines are allowed to have deploy keys for security reasons.
            '';
          }
        ]
      )
      machines));

    # Set per machine properties automatically using each of their `properties.nix` files respectively
    machines.hosts =
      let
        properties = dir: lib.concatMapAttrs
          (name: path: {
            ${name} =
              import path
              //
              { configurationPath = builtins.dirOf path; };
          })
          (propertiesFiles dir);
        propertiesFiles = dir:
          lib.foldl (lib.mergeAttrs) { } (propertiesFiles' dir);
        propertiesFiles' = dir:
          let
            propFiles = lib.filter (p: baseNameOf p == "properties.nix") (lib.filesystem.listFilesRecursive dir);
            dirName = path: builtins.baseNameOf (builtins.dirOf path);
          in
          builtins.map (p: { "${dirName p}" = p; }) propFiles;
      in
      properties ../../machines;
  };
}
@@ -1,15 +0,0 @@
# Allows getting machine-info outside the scope of nixos configuration

{ nixpkgs ? import <nixpkgs> { }
, assertionsModule ? <nixpkgs/nixos/modules/misc/assertions.nix>
}:

{
  machines =
    (nixpkgs.lib.evalModules {
      modules = [
        ./default.nix
        assertionsModule
      ];
    }).config.machines;
}
@@ -1,19 +0,0 @@
{ config, lib, ... }:

# Maps roles to their hosts

{
  options.machines.roles = lib.mkOption {
    type = lib.types.attrsOf (lib.types.listOf lib.types.str);
  };

  config = {
    machines.roles = lib.zipAttrs
      (lib.mapAttrsToList
        (host: cfg:
          lib.foldl (lib.mergeAttrs) { }
            (builtins.map (role: { ${role} = host; })
              cfg.systemRoles))
        config.machines.hosts);
  };
}
@@ -1,44 +0,0 @@
{ config, lib, ... }:

let
  machines = config.machines;

  sshkeys = keyType: lib.foldl (l: cfg: l ++ cfg.${keyType}) [ ] (builtins.attrValues machines.hosts);
in
{
  options.machines.ssh = {
    userKeys = lib.mkOption {
      type = lib.types.listOf lib.types.str;
      description = ''
        List of user keys aggregated from all machines.
      '';
    };

    deployKeys = lib.mkOption {
      default = [ ];
      type = lib.types.listOf lib.types.str;
      description = ''
        List of deploy keys aggregated from all machines.
      '';
    };

    hostKeysByRole = lib.mkOption {
      type = lib.types.attrsOf (lib.types.listOf lib.types.str);
      description = ''
        Machine host keys divided into their roles.
      '';
    };
  };

  config = {
    machines.ssh.userKeys = sshkeys "userKeys";
    machines.ssh.deployKeys = sshkeys "deployKeys";

    machines.ssh.hostKeysByRole = lib.mapAttrs
      (role: hosts:
        builtins.map
          (host: machines.hosts.${host}.hostKey)
          hosts)
      machines.roles;
  };
}
@@ -7,11 +7,11 @@ let
in
{
  imports = [
    ./hosts.nix
    ./pia-openvpn.nix
    ./pia-wireguard.nix
    ./ping.nix
    ./tailscale.nix
    ./vpn.nix
    ./zerotier.nix
  ];

  options.networking.ip_forward = mkEnableOption "Enable ip forwarding";
common/network/hosts.nix (new file, 63 lines)
@@ -0,0 +1,63 @@
{ config, lib, ... }:

let
  system = (import ../ssh.nix).system;
in {
  networking.hosts = {
    # some DNS providers filter local ip results from DNS request
    "172.30.145.180" = [ "s0.zt.neet.dev" ];
    "172.30.109.9" = [ "ponyo.zt.neet.dev" ];
    "172.30.189.212" = [ "ray.zt.neet.dev" ];
  };

  programs.ssh.knownHosts = {
    liza = {
      hostNames = [ "liza" "liza.neet.dev" ];
      publicKey = system.liza;
    };
    ponyo = {
      hostNames = [ "ponyo" "ponyo.neet.dev" "ponyo.zt.neet.dev" "git.neet.dev" ];
      publicKey = system.ponyo;
    };
    ponyo-unlock = {
      hostNames = [ "unlock.ponyo.neet.dev" "cfamr6artx75qvt7ho3rrbsc7mkucmv5aawebwflsfuorusayacffryd.onion" ];
      publicKey = system.ponyo-unlock;
    };
    ray = {
      hostNames = [ "ray" "ray.zt.neet.dev" ];
      publicKey = system.ray;
    };
    s0 = {
      hostNames = [ "s0" "s0.zt.neet.dev" ];
      publicKey = system.s0;
    };
    n1 = {
      hostNames = [ "n1" ];
      publicKey = system.n1;
    };
    n2 = {
      hostNames = [ "n2" ];
      publicKey = system.n2;
    };
    n3 = {
      hostNames = [ "n3" ];
      publicKey = system.n3;
    };
    n4 = {
      hostNames = [ "n4" ];
      publicKey = system.n4;
    };
    n5 = {
      hostNames = [ "n5" ];
      publicKey = system.n5;
    };
    n6 = {
      hostNames = [ "n6" ];
      publicKey = system.n6;
    };
    n7 = {
      hostNames = [ "n7" ];
      publicKey = system.n7;
    };
  };
}
@@ -1,7 +1,7 @@
{ config, pkgs, lib, ... }:

let
  cfg = config.pia.openvpn;
  cfg = config.pia;
  vpnfailsafe = pkgs.stdenv.mkDerivation {
    pname = "vpnfailsafe";
    version = "0.0.1";
@@ -14,7 +14,7 @@ let
  };
in
{
  options.pia.openvpn = {
  options.pia = {
    enable = lib.mkEnableOption "Enable private internet access";
    server = lib.mkOption {
      type = lib.types.str;
@@ -108,6 +108,6 @@ in
        };
      };
    };
    age.secrets."pia-login.conf".file = ../../secrets/pia-login.age;
    age.secrets."pia-login.conf".file = ../../secrets/pia-login.conf;
  };
}
@@ -1,357 +0,0 @@
{ config, lib, pkgs, ... }:

# Server list:
# https://serverlist.piaservers.net/vpninfo/servers/v6
# Reference materials:
# https://github.com/pia-foss/manual-connections
# https://github.com/thrnz/docker-wireguard-pia/blob/master/extra/wg-gen.sh

# TODO handle potential errors (or at least print status, success, and failures to the console)
# TODO parameterize names of systemd services so that multiple wg VPNs could coexist in theory easier
# TODO implement this module such that the wireguard VPN doesn't have to live in a container
# TODO don't add forward rules if the PIA port is the same as cfg.forwardedPort
# TODO verify signatures of PIA responses

with builtins;
with lib;

let
  cfg = config.pia.wireguard;

  getPIAToken = ''
    PIA_USER=`sed '1q;d' /run/agenix/pia-login.conf`
    PIA_PASS=`sed '2q;d' /run/agenix/pia-login.conf`
    # PIA_TOKEN only lasts 24hrs
    PIA_TOKEN=`curl -s -u "$PIA_USER:$PIA_PASS" https://www.privateinternetaccess.com/gtoken/generateToken | jq -r '.token'`
  '';

  chooseWireguardServer = ''
    servers=$(mktemp)
    servers_json=$(mktemp)
    curl -s "https://serverlist.piaservers.net/vpninfo/servers/v6" > "$servers"
    # extract json part only
    head -n 1 "$servers" | tr -d '\n' > "$servers_json"

    echo "Available location ids:" && jq '.regions | .[] | {name, id, port_forward}' "$servers_json"

    # Some locations have multiple servers available. Pick a random one.
    totalservers=$(jq -r '.regions | .[] | select(.id=="'${cfg.serverLocation}'") | .servers.wg | length' "$servers_json")
    if ! [[ "$totalservers" =~ ^[0-9]+$ ]] || [ "$totalservers" -eq 0 ] 2>/dev/null; then
      echo "Location \"${cfg.serverLocation}\" not found."
      exit 1
    fi
    serverindex=$(( RANDOM % totalservers))
    WG_HOSTNAME=$(jq -r '.regions | .[] | select(.id=="'${cfg.serverLocation}'") | .servers.wg | .['$serverindex'].cn' "$servers_json")
    WG_SERVER_IP=$(jq -r '.regions | .[] | select(.id=="'${cfg.serverLocation}'") | .servers.wg | .['$serverindex'].ip' "$servers_json")
    WG_SERVER_PORT=$(jq -r '.groups.wg | .[0] | .ports | .[0]' "$servers_json")

    # write chosen server
    rm -f /tmp/${cfg.interfaceName}-server.conf
    touch /tmp/${cfg.interfaceName}-server.conf
    chmod 700 /tmp/${cfg.interfaceName}-server.conf
    echo "$WG_HOSTNAME" >> /tmp/${cfg.interfaceName}-server.conf
    echo "$WG_SERVER_IP" >> /tmp/${cfg.interfaceName}-server.conf
    echo "$WG_SERVER_PORT" >> /tmp/${cfg.interfaceName}-server.conf

    rm $servers_json $servers
  '';

  getChosenWireguardServer = ''
    WG_HOSTNAME=`sed '1q;d' /tmp/${cfg.interfaceName}-server.conf`
    WG_SERVER_IP=`sed '2q;d' /tmp/${cfg.interfaceName}-server.conf`
    WG_SERVER_PORT=`sed '3q;d' /tmp/${cfg.interfaceName}-server.conf`
  '';

  refreshPIAPort = ''
    ${getChosenWireguardServer}
    signature=`sed '1q;d' /tmp/${cfg.interfaceName}-port-renewal`
    payload=`sed '2q;d' /tmp/${cfg.interfaceName}-port-renewal`
    bind_port_response=`curl -Gs -m 5 --connect-to "$WG_HOSTNAME::$WG_SERVER_IP:" --cacert "${./ca.rsa.4096.crt}" --data-urlencode "payload=$payload" --data-urlencode "signature=$signature" "https://$WG_HOSTNAME:19999/bindPort"`
  '';

  portForwarding = cfg.forwardPortForTransmission || cfg.forwardedPort != null;

  containerServiceName = "container@${config.vpn-container.containerName}.service";
in
{
  options.pia.wireguard = {
    enable = mkEnableOption "Enable private internet access";
    badPortForwardPorts = mkOption {
      type = types.listOf types.port;
      description = ''
        Ports that will not be accepted from PIA.
        If PIA assigns a port from this list, the connection is aborted since we cannot ask for a different port.
        This is used to guarantee we are not assigned a port that is used by a service we do not want exposed.
      '';
    };
    wireguardListenPort = mkOption {
      type = types.port;
      description = "The port wireguard listens on for this VPN connection";
      default = 51820;
    };
    serverLocation = mkOption {
      type = types.str;
      default = "swiss";
    };
    interfaceName = mkOption {
      type = types.str;
      default = "piaw";
    };
    forwardedPort = mkOption {
      type = types.nullOr types.port;
      description = "The port to redirect port forwarded TCP VPN traffic to";
      default = null;
    };
    forwardPortForTransmission = mkEnableOption "PIA port forwarding for transmission should be performed.";
  };

  config = mkIf cfg.enable {
    assertions = [
      {
        assertion = cfg.forwardPortForTransmission != (cfg.forwardedPort != null);
        message = ''
          The PIA forwarded port cannot simultaneously be used by transmission and redirected to another port.
        '';
      }
    ];

    # mounts used to pass the connection parameters to the container
    # the container doesn't have internet until it uses these parameters so it cannot fetch them itself
    vpn-container.mounts = [
      "/tmp/${cfg.interfaceName}.conf"
      "/tmp/${cfg.interfaceName}-server.conf"
      "/tmp/${cfg.interfaceName}-address.conf"
    ];

    # The container takes ownership of the wireguard interface on its startup
    containers.vpn.interfaces = [ cfg.interfaceName ];

    # TODO: while this is much better than "loose" networking, it seems to have issues with firewall restarts
    # allow traffic for wireguard interface to pass since wireguard trips up rpfilter
    # networking.firewall = {
    #   extraCommands = ''
    #     ip46tables -t raw -I nixos-fw-rpfilter -p udp -m udp --sport ${toString cfg.wireguardListenPort} -j RETURN
    #     ip46tables -t raw -I nixos-fw-rpfilter -p udp -m udp --dport ${toString cfg.wireguardListenPort} -j RETURN
    #   '';
    #   extraStopCommands = ''
    #     ip46tables -t raw -D nixos-fw-rpfilter -p udp -m udp --sport ${toString cfg.wireguardListenPort} -j RETURN || true
    #     ip46tables -t raw -D nixos-fw-rpfilter -p udp -m udp --dport ${toString cfg.wireguardListenPort} -j RETURN || true
    #   '';
    # };
    networking.firewall.checkReversePath = "loose";

    systemd.services.pia-vpn-wireguard-init = {
      description = "Creates PIA VPN Wireguard Interface";

      requires = [ "network-online.target" ];
      after = [ "network.target" "network-online.target" ];
      before = [ containerServiceName ];
      requiredBy = [ containerServiceName ];
      partOf = [ containerServiceName ];
      wantedBy = [ "multi-user.target" ];

      path = with pkgs; [ wireguard-tools jq curl iproute ];

      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = true;

        # restart once a month; PIA forwarded port expires after two months
        # because the container is "PartOf" this unit, it gets restarted too
        RuntimeMaxSec = "30d";
      };

      script = ''
        # Prepare to connect by generating wg secrets and auth'ing with PIA since the container
        # cannot do without internet to start with. NAT'ing the host's internet would address this
        # issue but is not ideal because then leaking network outside of the VPN is more likely.

        ${chooseWireguardServer}

        ${getPIAToken}

        # generate wireguard keys
        privKey=$(wg genkey)
        pubKey=$(echo "$privKey" | wg pubkey)

        # authorize our WG keys with the PIA server we are about to connect to
        wireguard_json=`curl -s -G --connect-to "$WG_HOSTNAME::$WG_SERVER_IP:" --cacert "${./ca.rsa.4096.crt}" --data-urlencode "pt=$PIA_TOKEN" --data-urlencode "pubkey=$pubKey" https://$WG_HOSTNAME:$WG_SERVER_PORT/addKey`

        # create wg-quick config file
        rm -f /tmp/${cfg.interfaceName}.conf /tmp/${cfg.interfaceName}-address.conf
        touch /tmp/${cfg.interfaceName}.conf /tmp/${cfg.interfaceName}-address.conf
        chmod 700 /tmp/${cfg.interfaceName}.conf /tmp/${cfg.interfaceName}-address.conf
        echo "
        [Interface]
        # Address = $(echo "$wireguard_json" | jq -r '.peer_ip')
        PrivateKey = $privKey
        ListenPort = ${toString cfg.wireguardListenPort}
        [Peer]
        PersistentKeepalive = 25
        PublicKey = $(echo "$wireguard_json" | jq -r '.server_key')
        AllowedIPs = 0.0.0.0/0
        Endpoint = $WG_SERVER_IP:$(echo "$wireguard_json" | jq -r '.server_port')
        " >> /tmp/${cfg.interfaceName}.conf

        # create file storing the VPN ip address PIA assigned to us
        echo "$wireguard_json" | jq -r '.peer_ip' >> /tmp/${cfg.interfaceName}-address.conf

        # Create wg interface now so it inherits from the namespace with internet access
        # the container will handle actually connecting the interface since that info is
        # not preserved upon moving into the container's networking namespace
        # Roughly following this guide https://www.wireguard.com/netns/#ordinary-containerization
        [[ -z $(ip link show dev ${cfg.interfaceName} 2>/dev/null) ]] || exit
        ip link add ${cfg.interfaceName} type wireguard
      '';

      preStop = ''
        # cleanup wireguard interface
        ip link del ${cfg.interfaceName}
        rm -f /tmp/${cfg.interfaceName}.conf /tmp/${cfg.interfaceName}-address.conf
      '';
    };

    vpn-container.config.systemd.services.pia-vpn-wireguard = {
      description = "Initializes the PIA VPN WireGuard Tunnel";

      requires = [ "network-online.target" ];
      after = [ "network.target" "network-online.target" ];
      wantedBy = [ "multi-user.target" ];

      path = with pkgs; [ wireguard-tools iproute curl jq iptables ];

      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = true;
      };

      script = ''
        # pseudo calls wg-quick
        # Near equivalent of "wg-quick up /tmp/${cfg.interfaceName}.conf"
        # cannot actually call wg-quick because the interface has to be already
        # created before the container takes ownership of the interface
        # Thus, assumes wg interface was already created:
        # ip link add ${cfg.interfaceName} type wireguard

        ${getChosenWireguardServer}

        myaddress=`cat /tmp/${cfg.interfaceName}-address.conf`

        wg setconf ${cfg.interfaceName} /tmp/${cfg.interfaceName}.conf
        ip -4 address add $myaddress dev ${cfg.interfaceName}
        ip link set mtu 1420 up dev ${cfg.interfaceName}
        wg set ${cfg.interfaceName} fwmark ${toString cfg.wireguardListenPort}
        ip -4 route add 0.0.0.0/0 dev ${cfg.interfaceName} table ${toString cfg.wireguardListenPort}

        # TODO is this needed?
        ip -4 rule add not fwmark ${toString cfg.wireguardListenPort} table ${toString cfg.wireguardListenPort}
        ip -4 rule add table main suppress_prefixlength 0

        # The rest of the script is only for port forwarding; skip if not needed
        if [ ${boolToString portForwarding} == false ]; then exit 0; fi

        # Reserve port
        ${getPIAToken}
        payload_and_signature=`curl -s -m 5 --connect-to "$WG_HOSTNAME::$WG_SERVER_IP:" --cacert "${./ca.rsa.4096.crt}" -G --data-urlencode "token=$PIA_TOKEN" "https://$WG_HOSTNAME:19999/getSignature"`
        signature=$(echo "$payload_and_signature" | jq -r '.signature')
        payload=$(echo "$payload_and_signature" | jq -r '.payload')
        port=$(echo "$payload" | base64 -d | jq -r '.port')

        # Check if the port is acceptable
        notallowed=(${concatStringsSep " " (map toString cfg.badPortForwardPorts)})
        if [[ " ''${notallowed[*]} " =~ " $port " ]]; then
          # the port PIA assigned is not allowed, kill the connection
          wg-quick down /tmp/${cfg.interfaceName}.conf
          exit 1
        fi

        # write reserved port to file readable for all users
        echo $port > /tmp/${cfg.interfaceName}-port
        chmod 644 /tmp/${cfg.interfaceName}-port

        # write payload and signature info needed to allow refreshing allocated forwarded port
        rm -f /tmp/${cfg.interfaceName}-port-renewal
        touch /tmp/${cfg.interfaceName}-port-renewal
        chmod 700 /tmp/${cfg.interfaceName}-port-renewal
        echo $signature >> /tmp/${cfg.interfaceName}-port-renewal
        echo $payload >> /tmp/${cfg.interfaceName}-port-renewal

        # Block all traffic from VPN interface except for traffic that is from the forwarded port
        iptables -I nixos-fw -p tcp --dport $port -j nixos-fw-accept -i ${cfg.interfaceName}
        iptables -I nixos-fw -p udp --dport $port -j nixos-fw-accept -i ${cfg.interfaceName}

        # The first port refresh triggers the port to be actually allocated
        ${refreshPIAPort}

        ${optionalString (cfg.forwardedPort != null) ''
          # redirect the forwarded port
          iptables -A INPUT -i ${cfg.interfaceName} -p tcp --dport $port -j ACCEPT
          iptables -A INPUT -i ${cfg.interfaceName} -p udp --dport $port -j ACCEPT
          iptables -A INPUT -i ${cfg.interfaceName} -p tcp --dport ${toString cfg.forwardedPort} -j ACCEPT
          iptables -A INPUT -i ${cfg.interfaceName} -p udp --dport ${toString cfg.forwardedPort} -j ACCEPT
          iptables -A PREROUTING -t nat -i ${cfg.interfaceName} -p tcp --dport $port -j REDIRECT --to-port ${toString cfg.forwardedPort}
          iptables -A PREROUTING -t nat -i ${cfg.interfaceName} -p udp --dport $port -j REDIRECT --to-port ${toString cfg.forwardedPort}
        ''}

        ${optionalString cfg.forwardPortForTransmission ''
          # assumes no auth needed for transmission
          curlout=$(curl localhost:9091/transmission/rpc 2>/dev/null)
          regex='X-Transmission-Session-Id\: (\w*)'
          if [[ $curlout =~ $regex ]]; then
            sessionId=''${BASH_REMATCH[1]}
          else
            exit 1
          fi

          # set the port in transmission
          data='{"method": "session-set", "arguments": { "peer-port" :'$port' } }'
          curl http://localhost:9091/transmission/rpc -d "$data" -H "X-Transmission-Session-Id: $sessionId"
        ''}
      '';

      preStop = ''
        wg-quick down /tmp/${cfg.interfaceName}.conf

        # The rest of the script is only for port forwarding; skip if not needed
        if [ ${boolToString portForwarding} == false ]; then exit 0; fi

        ${optionalString (cfg.forwardedPort != null) ''
          # stop redirecting the forwarded port
          iptables -D INPUT -i ${cfg.interfaceName} -p tcp --dport $port -j ACCEPT
          iptables -D INPUT -i ${cfg.interfaceName} -p udp --dport $port -j ACCEPT
          iptables -D INPUT -i ${cfg.interfaceName} -p tcp --dport ${toString cfg.forwardedPort} -j ACCEPT
          iptables -D INPUT -i ${cfg.interfaceName} -p udp --dport ${toString cfg.forwardedPort} -j ACCEPT
          iptables -D PREROUTING -t nat -i ${cfg.interfaceName} -p tcp --dport $port -j REDIRECT --to-port ${toString cfg.forwardedPort}
          iptables -D PREROUTING -t nat -i ${cfg.interfaceName} -p udp --dport $port -j REDIRECT --to-port ${toString cfg.forwardedPort}
        ''}
      '';
    };

    vpn-container.config.systemd.services.pia-vpn-wireguard-forward-port = {
      enable = portForwarding;
      description = "PIA VPN WireGuard Tunnel Port Forwarding";
      after = [ "pia-vpn-wireguard.service" ];
      requires = [ "pia-vpn-wireguard.service" ];

      path = with pkgs; [ curl ];

      serviceConfig = {
        Type = "oneshot";
      };

      script = refreshPIAPort;
    };

    vpn-container.config.systemd.timers.pia-vpn-wireguard-forward-port = {
      enable = portForwarding;
      partOf = [ "pia-vpn-wireguard-forward-port.service" ];
      wantedBy = [ "timers.target" ];
      timerConfig = {
        OnCalendar = "*:0/10"; # 10 minutes
        RandomizedDelaySec = "1m"; # vary by 1 min to give PIA servers some relief
      };
    };

    age.secrets."pia-login.conf".file = ../../secrets/pia-login.age;
  };
}
@@ -1,59 +0,0 @@
{ config, pkgs, lib, ... }:

# keeps peer to peer connections alive with a periodic ping

with lib;
with builtins;

# todo auto restart

let
  cfg = config.keepalive-ping;

  serviceTemplate = host:
    {
      "keepalive-ping@${host}" = {
        description = "Periodic ping keep alive for ${host} connection";

        requires = [ "network-online.target" ];
        after = [ "network.target" "network-online.target" ];
        wantedBy = [ "multi-user.target" ];
        serviceConfig.Restart = "always";

        path = with pkgs; [ iputils ];

        script = ''
          ping -i ${cfg.delay} ${host} &>/dev/null
        '';
      };
    };

  combineAttrs = foldl recursiveUpdate { };

  serviceList = map serviceTemplate cfg.hosts;

  services = combineAttrs serviceList;
in
{
  options.keepalive-ping = {
    enable = mkEnableOption "Enable keep alive ping task";
    hosts = mkOption {
      type = types.listOf types.str;
      default = [ ];
      description = ''
        Hosts to ping periodically
      '';
    };
    delay = mkOption {
      type = types.str;
      default = "60";
      description = ''
        Ping interval in seconds of periodic ping per host being pinged
      '';
    };
  };

  config = mkIf cfg.enable {
    systemd.services = services;
  };
}
@@ -8,11 +8,7 @@ in
{
  options.services.tailscale.exitNode = mkEnableOption "Enable exit node support";

  config.services.tailscale.enable = mkDefault (!config.boot.isContainer);

  # MagicDNS
  config.networking.nameservers = mkIf cfg.enable [ "1.1.1.1" "8.8.8.8" ];
  config.networking.search = mkIf cfg.enable [ "koi-bebop.ts.net" ];
  config.services.tailscale.enable = !config.boot.isContainer;

  # exit node
  config.networking.firewall.checkReversePath = mkIf cfg.exitNode "loose";
@@ -26,8 +26,6 @@ in
    '';
  };

  useOpenVPN = mkEnableOption "Uses OpenVPN instead of wireguard for PIA VPN connection";

  config = mkOption {
    type = types.anything;
    default = {};
@@ -43,9 +41,6 @@ in
  };

  config = mkIf cfg.enable {
    pia.wireguard.enable = !cfg.useOpenVPN;
    pia.wireguard.forwardPortForTransmission = !cfg.useOpenVPN;

    containers.${cfg.containerName} = {
      ephemeral = true;
      autoStart = true;
@@ -64,7 +59,7 @@ in
        }
      )));

      enableTun = cfg.useOpenVPN;
      enableTun = true;
      privateNetwork = true;
      hostAddress = "172.16.100.1";
      localAddress = "172.16.100.2";
@@ -72,35 +67,28 @@ in
      config = {
        imports = allModules ++ [cfg.config];

        # speeds up evaluation
        nixpkgs.pkgs = pkgs;

        # networking.firewall.enable = mkForce false;
        networking.firewall.trustedInterfaces = [
          # completely trust internal interface to host
          "eth0"
        ];
        networking.firewall.enable = mkForce false;

        pia.openvpn.enable = cfg.useOpenVPN;
        pia.openvpn.server = "swiss.privacy.network"; # swiss vpn
        pia.enable = true;
        pia.server = "swiss.privacy.network"; # swiss vpn

        # TODO fix so it does run its own resolver again
        # run its own DNS resolver
        networking.useHostResolvConf = false;
        # services.resolved.enable = true;
        networking.nameservers = [ "1.1.1.1" "8.8.8.8" ];
        services.resolved.enable = true;
      };
    };

    # load secrets the container needs
    age.secrets = config.containers.${cfg.containerName}.config.age.secrets;

    # forwarding for vpn container (only for OpenVPN)
    networking.nat.enable = mkIf cfg.useOpenVPN true;
    networking.nat.internalInterfaces = mkIf cfg.useOpenVPN [
    # forwarding for vpn container
    networking.nat.enable = true;
    networking.nat.internalInterfaces = [
      "ve-${cfg.containerName}"
    ];
    networking.ip_forward = mkIf cfg.useOpenVPN true;
    networking.ip_forward = true;

    # assumes only one potential interface
    networking.usePredictableInterfaceNames = false;
common/network/zerotier.nix (new file, 14 lines)
@@ -0,0 +1,14 @@
{ lib, config, ... }:

let
  cfg = config.services.zerotierone;
in {
  config = lib.mkIf cfg.enable {
    services.zerotierone.joinNetworks = [
      "565799d8f6d654c0"
    ];
    networking.firewall.allowedUDPPorts = [
      9993
    ];
  };
}
@@ -1,60 +0,0 @@
{ config, lib, ... }:

let
  builderRole = "nix-builder";
  builderUserName = "nix-builder";

  machinesByRole = role: lib.filterAttrs (hostname: cfg: builtins.elem role cfg.systemRoles) config.machines.hosts;
  otherMachinesByRole = role: lib.filterAttrs (hostname: cfg: hostname != config.networking.hostName) (machinesByRole role);
  thisMachineHasRole = role: builtins.hasAttr config.networking.hostName (machinesByRole role);

  builders = machinesByRole builderRole;
  thisMachineIsABuilder = thisMachineHasRole builderRole;

  # builders don't include themselves as a remote builder
  otherBuilders = lib.filterAttrs (hostname: cfg: hostname != config.networking.hostName) builders;
in
lib.mkMerge [
  # configure builder
  (lib.mkIf thisMachineIsABuilder {
    users.users.${builderUserName} = {
      description = "Distributed Nix Build User";
      group = builderUserName;
      isSystemUser = true;
      createHome = true;
      home = "/var/lib/nix-builder";
      useDefaultShell = true;
      openssh.authorizedKeys.keys = builtins.map
        (builderCfg: builderCfg.hostKey)
        (builtins.attrValues config.machines.hosts);
    };
    users.groups.${builderUserName} = { };

    nix.settings.trusted-users = [
      builderUserName
    ];
  })

  # use each builder
  {
    nix.distributedBuilds = true;

    nix.buildMachines = builtins.map
      (builderCfg: {
        hostName = builtins.elemAt builderCfg.hostNames 0;
        system = builderCfg.arch;
        protocol = "ssh-ng";
        sshUser = builderUserName;
        sshKey = "/etc/ssh/ssh_host_ed25519_key";
        maxJobs = 3;
        speedFactor = 10;
        supportedFeatures = [ "nixos-test" "benchmark" "big-parallel" "kvm" ];
      })
      (builtins.attrValues otherBuilders);

    # It is very likely that the builder's internet is faster or just as fast
    nix.extraOptions = ''
      builders-use-substitutes = true
    '';
  }
]
@@ -2,8 +2,7 @@
let
  cfg = config.de;
in
{
in {
  config = lib.mkIf cfg.enable {
    # enable pulseaudio support for packages
    nixpkgs.config.pulseaudio = true;
@@ -17,6 +16,45 @@ in
      alsa.support32Bit = true;
      pulse.enable = true;
      jack.enable = true;

      # use the example session manager (no others are packaged yet so this is enabled by default,
      # no need to redefine it in your config for now)
      #media-session.enable = true;

      config.pipewire = {
        "context.objects" = [
          {
            # A default dummy driver. This handles nodes marked with the "node.always-driver"
            # property when no other driver is currently active. JACK clients need this.
            factory = "spa-node-factory";
            args = {
              "factory.name" = "support.node.driver";
              "node.name" = "Dummy-Driver";
              "priority.driver" = 8000;
            };
          }
          {
            factory = "adapter";
            args = {
              "factory.name" = "support.null-audio-sink";
              "node.name" = "Microphone-Proxy";
              "node.description" = "Microphone";
              "media.class" = "Audio/Source/Virtual";
              "audio.position" = "MONO";
            };
          }
          {
            factory = "adapter";
            args = {
              "factory.name" = "support.null-audio-sink";
              "node.name" = "Main-Output-Proxy";
              "node.description" = "Main Output";
              "media.class" = "Audio/Sink";
              "audio.position" = "FL,FR";
            };
          }
        ];
      };
    };

    users.users.googlebot.extraGroups = [ "audio" ];
@@ -49,8 +49,7 @@ let
    ];
  };

in
{
in {
  config = lib.mkIf cfg.enable {
    # chromium with specific extensions + settings
    programs.chromium = {
@@ -2,8 +2,7 @@
let
  cfg = config.de;
in
{
in {
  imports = [
    ./kde.nix
    ./xfce.nix
@@ -37,6 +36,7 @@ in
    mumble
    tigervnc
    bluez-tools
    vscodium
    element-desktop
    mpv
    nextcloud-client
@@ -50,12 +50,6 @@ in
    arduino
    yt-dlp
    jellyfin-media-player
    joplin-desktop
    config.inputs.deploy-rs.packages.${config.currentSystem}.deploy-rs

    # For Nix IDE
    nixpkgs-fmt
    rnix-lsp
  ];

  # Networking
||||
@@ -2,8 +2,7 @@
|
||||
|
||||
let
|
||||
cfg = config.de;
|
||||
in
|
||||
{
|
||||
in {
|
||||
config = lib.mkIf cfg.enable {
|
||||
users.users.googlebot.packages = [
|
||||
pkgs.discord
|
||||
|
||||
@@ -2,8 +2,7 @@
|
||||
|
||||
let
|
||||
cfg = config.de;
|
||||
in
|
||||
{
|
||||
in {
|
||||
config = lib.mkIf cfg.enable {
|
||||
# kde plasma
|
||||
services.xserver = {
|
||||
|
||||
@@ -1,48 +1,36 @@
|
||||
# mounts the samba share on s0 over tailscale
|
||||
# mounts the samba share on s0 over zeroteir
|
||||
|
||||
{ config, lib, pkgs, ... }:
|
||||
{ config, lib, ... }:
|
||||
|
||||
let
|
||||
cfg = config.services.mount-samba;
|
||||
|
||||
# prevents hanging on network split and other similar niceties to ensure a stable connection
|
||||
network_opts = "nostrictsync,cache=strict,handlecache,handletimeout=30000,rwpidforward,mapposix,soft,resilienthandles,echo_interval=10,noblocksend,fsc";
|
||||
# prevents hanging on network split
|
||||
network_opts = "x-systemd.automount,noauto,x-systemd.idle-timeout=60,x-systemd.device-timeout=5s,x-systemd.mount-timeout=5s,nostrictsync,cache=loose,handlecache,handletimeout=30000,rwpidforward,mapposix,soft,resilienthandles,echo_interval=10,noblocksend";
|
||||
|
||||
systemd_opts = "x-systemd.automount,noauto,x-systemd.idle-timeout=60,x-systemd.device-timeout=5s,x-systemd.mount-timeout=5s";
|
||||
user_opts = "uid=${toString config.users.users.googlebot.uid},file_mode=0660,dir_mode=0770,user";
|
||||
auth_opts = "sec=ntlmv2i,credentials=/run/agenix/smb-secrets";
|
||||
version_opts = "vers=3.1.1";
|
||||
auth_opts = "credentials=/run/agenix/smb-secrets";
|
||||
version_opts = "vers=2.1";
|
||||
|
||||
opts = "${systemd_opts},${network_opts},${user_opts},${version_opts},${auth_opts}";
|
||||
in
|
||||
{
|
||||
opts = "${network_opts},${user_opts},${version_opts},${auth_opts}";
|
||||
in {
|
||||
options.services.mount-samba = {
|
||||
enable = lib.mkEnableOption "enable mounting samba shares";
|
||||
};
|
||||
|
||||
config = lib.mkIf (cfg.enable && config.services.tailscale.enable) {
|
||||
config = lib.mkIf (cfg.enable && config.services.zerotierone.enable) {
|
||||
fileSystems."/mnt/public" = {
|
||||
device = "//s0.koi-bebop.ts.net/public";
|
||||
device = "//s0.zt.neet.dev/public";
|
||||
fsType = "cifs";
|
||||
options = [ opts ];
|
||||
};
|
||||
|
||||
fileSystems."/mnt/private" = {
|
||||
device = "//s0.koi-bebop.ts.net/googlebot";
|
||||
device = "//s0.zt.neet.dev/googlebot";
|
||||
fsType = "cifs";
|
||||
options = [ opts ];
|
||||
};
|
||||
|
||||
age.secrets.smb-secrets.file = ../../secrets/smb-secrets.age;
|
||||
|
||||
environment.shellAliases = {
|
||||
# remount storage
|
||||
remount_public = "sudo systemctl restart mnt-public.mount";
|
||||
remount_private = "sudo systemctl restart mnt-private.mount";
|
||||
|
||||
# Encrypted Vault
|
||||
vault_unlock = "${pkgs.gocryptfs}/bin/gocryptfs /mnt/private/.vault/ /mnt/vault/";
|
||||
vault_lock = "umount /mnt/vault/";
|
||||
};
|
||||
};
|
||||
}
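Note: the options above combine systemd automount behaviour (mount on first access, give up quickly if the server is away) with CIFS robustness flags. A minimal standalone sketch of the same pattern, with a placeholder share name and the agenix credentials path assumed from this module:

  fileSystems."/mnt/example" = {
    device = "//fileserver.example/share";   # placeholder host, not from this repo
    fsType = "cifs";
    options = [
      "x-systemd.automount,noauto,x-systemd.idle-timeout=60"
      "x-systemd.device-timeout=5s,x-systemd.mount-timeout=5s"
      "credentials=/run/agenix/smb-secrets,vers=2.1,soft,echo_interval=10"
    ];
  };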
|
||||
76
common/pc/pia/default.nix
Normal file
@@ -0,0 +1,76 @@
|
||||
{ lib, config, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.pia;
|
||||
in {
|
||||
imports = [
|
||||
./pia.nix
|
||||
];
|
||||
|
||||
options.services.pia = {
|
||||
enable = lib.mkEnableOption "Enable PIA Client";
|
||||
|
||||
dataDir = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "/var/lib/pia";
|
||||
description = ''
|
||||
Path to the pia data directory
|
||||
'';
|
||||
};
|
||||
|
||||
user = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "root";
|
||||
description = ''
|
||||
The user pia should run as
|
||||
'';
|
||||
};
|
||||
|
||||
group = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "piagrp";
|
||||
description = ''
|
||||
The group pia should run as
|
||||
'';
|
||||
};
|
||||
|
||||
users = mkOption {
|
||||
type = with types; listOf str;
|
||||
default = [];
|
||||
description = ''
|
||||
Usernames to be added to the pia group, so that they
|
||||
can start and interact with the userspace daemon.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
# users.users.${cfg.user} =
|
||||
# if cfg.user == "pia" then {
|
||||
# isSystemUser = true;
|
||||
# group = cfg.group;
|
||||
# home = cfg.dataDir;
|
||||
# createHome = true;
|
||||
# }
|
||||
# else {};
|
||||
users.groups.${cfg.group}.members = cfg.users;
|
||||
|
||||
systemd.services.pia-daemon = {
|
||||
enable = true;
|
||||
after = [ "network.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig.ExecStart = "${pkgs.pia-daemon}/bin/pia-daemon";
|
||||
serviceConfig.PrivateTmp = "yes";
|
||||
serviceConfig.User = cfg.user;
|
||||
serviceConfig.Group = cfg.group;
|
||||
preStart = ''
|
||||
mkdir -p ${cfg.dataDir}
|
||||
chown ${cfg.user}:${cfg.group} ${cfg.dataDir}
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
}
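A hedged example of enabling this module from a machine configuration (the username is illustrative):

  services.pia = {
    enable = true;
    users = [ "googlebot" ];  # added to the pia group so they can talk to pia-daemon
  };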
|
||||
147
common/pc/pia/fix-pia.patch
Normal file
@@ -0,0 +1,147 @@
|
||||
diff --git a/Rakefile b/Rakefile
|
||||
index fa6d771..bcd6fb1 100644
|
||||
--- a/Rakefile
|
||||
+++ b/Rakefile
|
||||
@@ -151,41 +151,6 @@ end
|
||||
# Install LICENSE.txt
|
||||
stage.install('LICENSE.txt', :res)
|
||||
|
||||
-# Download server lists to ship preloaded copies with the app. These tasks
|
||||
-# depend on version.txt so they're refreshed periodically (whenver a new commit
|
||||
-# is made), but not for every build.
|
||||
-#
|
||||
-# SERVER_DATA_DIR can be set to use existing files instead of downloading them;
|
||||
-# this is primarily intended for reproducing a build.
|
||||
-#
|
||||
-# Create a probe for SERVER_DATA_DIR so these are updated if it changes.
|
||||
-serverDataProbe = Probe.new('serverdata')
|
||||
-serverDataProbe.file('serverdata.txt', "#{ENV['SERVER_DATA_DIR']}")
|
||||
-# JSON resource build directory
|
||||
-jsonFetched = Build.new('json-fetched')
|
||||
-# These are the assets we need to fetch and the URIs we get them from
|
||||
-{
|
||||
- 'modern_shadowsocks.json': 'https://serverlist.piaservers.net/shadow_socks',
|
||||
- 'modern_servers.json': 'https://serverlist.piaservers.net/vpninfo/servers/v6',
|
||||
- 'modern_region_meta.json': 'https://serverlist.piaservers.net/vpninfo/regions/v2'
|
||||
-}.each do |k, v|
|
||||
- fetchedFile = jsonFetched.artifact(k.to_s)
|
||||
- serverDataDir = ENV['SERVER_DATA_DIR']
|
||||
- file fetchedFile => [version.artifact('version.txt'),
|
||||
- serverDataProbe.artifact('serverdata.txt'),
|
||||
- jsonFetched.componentDir] do |t|
|
||||
- if(serverDataDir)
|
||||
- # Use the copy provided instead of fetching (for reproducing a build)
|
||||
- File.copy(File.join(serverDataDir, k), fetchedFile)
|
||||
- else
|
||||
- # Fetch from the web API (write with "binary" mode so LF is not
|
||||
- # converted to CRLF on Windows)
|
||||
- File.binwrite(t.name, Net::HTTP.get(URI(v)))
|
||||
- end
|
||||
- end
|
||||
- stage.install(fetchedFile, :res)
|
||||
-end
|
||||
-
|
||||
# Install version/brand/arch info in case an upgrade needs to know what is
|
||||
# currently installed
|
||||
stage.install(version.artifact('version.txt'), :res)
|
||||
diff --git a/common/src/posix/unixsignalhandler.cpp b/common/src/posix/unixsignalhandler.cpp
|
||||
index f820a6d..e1b6c33 100644
|
||||
--- a/common/src/posix/unixsignalhandler.cpp
|
||||
+++ b/common/src/posix/unixsignalhandler.cpp
|
||||
@@ -132,7 +132,7 @@ void UnixSignalHandler::_signalHandler(int, siginfo_t *info, void *)
|
||||
// we checked it, we can't even log because the logger is not reentrant.
|
||||
auto pThis = instance();
|
||||
if(pThis)
|
||||
- ::write(pThis->_sigFd[0], info, sizeof(siginfo_t));
|
||||
+ auto _ = ::write(pThis->_sigFd[0], info, sizeof(siginfo_t));
|
||||
}
|
||||
template<int Signal>
|
||||
void UnixSignalHandler::setAbortAction()
|
||||
diff --git a/daemon/src/linux/linux_nl.cpp b/daemon/src/linux/linux_nl.cpp
|
||||
index fd3aced..2367a5e 100644
|
||||
--- a/daemon/src/linux/linux_nl.cpp
|
||||
+++ b/daemon/src/linux/linux_nl.cpp
|
||||
@@ -642,6 +642,6 @@ LinuxNl::~LinuxNl()
|
||||
unsigned char term = 0;
|
||||
PosixFd killSocket = _workerKillSocket.get();
|
||||
if(killSocket)
|
||||
- ::write(killSocket.get(), &term, sizeof(term));
|
||||
+ auto _ = ::write(killSocket.get(), &term, sizeof(term));
|
||||
_workerThread.join();
|
||||
}
|
||||
diff --git a/extras/support-tool/launcher/linux-launcher.cpp b/extras/support-tool/launcher/linux-launcher.cpp
|
||||
index 3f63ac2..420d54d 100644
|
||||
--- a/extras/support-tool/launcher/linux-launcher.cpp
|
||||
+++ b/extras/support-tool/launcher/linux-launcher.cpp
|
||||
@@ -48,7 +48,7 @@ int fork_execv(gid_t gid, char *filename, char *const argv[])
|
||||
if(forkResult == 0)
|
||||
{
|
||||
// Apply gid as both real and effective
|
||||
- setregid(gid, gid);
|
||||
+ auto _ = setregid(gid, gid);
|
||||
|
||||
int execErr = execv(filename, argv);
|
||||
std::cerr << "exec err: " << execErr << " / " << errno << " - "
|
||||
diff --git a/rake/model/qt.rb b/rake/model/qt.rb
|
||||
index c8cd362..a6abe59 100644
|
||||
--- a/rake/model/qt.rb
|
||||
+++ b/rake/model/qt.rb
|
||||
@@ -171,12 +171,7 @@ class Qt
|
||||
end
|
||||
|
||||
def getQtRoot(qtVersion, arch)
|
||||
- qtToolchainPtns = getQtToolchainPatterns(arch)
|
||||
- qtRoots = FileList[*Util.joinPaths([[qtVersion], qtToolchainPtns])]
|
||||
- # Explicitly filter for existing paths - if the pattern has wildcards
|
||||
- # we only get existing directories, but if the patterns are just
|
||||
- # alternates with no wildcards, we can get directories that don't exist
|
||||
- qtRoots.find_all { |r| File.exist?(r) }.max
|
||||
+ ENV['QTROOT']
|
||||
end
|
||||
|
||||
def getQtVersionScore(minor, patch)
|
||||
@@ -192,12 +187,7 @@ class Qt
|
||||
end
|
||||
|
||||
def getQtPathVersion(path)
|
||||
- verMatch = path.match('^.*/Qt[^/]*/5\.(\d+)\.?(\d*)$')
|
||||
- if(verMatch == nil)
|
||||
- nil
|
||||
- else
|
||||
- [verMatch[1].to_i, verMatch[2].to_i]
|
||||
- end
|
||||
+ [ENV['QT_MAJOR'].to_i, ENV['QT_MINOR'].to_i]
|
||||
end
|
||||
|
||||
# Build a component definition with the defaults. The "Core" component will
|
||||
diff --git a/rake/product/linux.rb b/rake/product/linux.rb
|
||||
index f43fb3e..83505af 100644
|
||||
--- a/rake/product/linux.rb
|
||||
+++ b/rake/product/linux.rb
|
||||
@@ -18,8 +18,7 @@ module PiaLinux
|
||||
QT_BINARIES = %w(pia-client pia-daemon piactl pia-support-tool)
|
||||
|
||||
# Version of libicu (needed to determine lib*.so.## file names in deployment)
|
||||
- ICU_VERSION = FileList[File.join(Executable::Qt.targetQtRoot, 'lib', 'libicudata.so.*')]
|
||||
- .first.match(/libicudata\.so\.(\d+)(\..*|)/)[1]
|
||||
+ ICU_VERSION = ENV['ICU_MAJOR'].to_i;
|
||||
|
||||
# Copy a directory recursively, excluding *.debug files (debugging symbols)
|
||||
def self.copyWithoutDebug(sourceDir, destDir)
|
||||
@@ -220,16 +219,5 @@ module PiaLinux
|
||||
# Since these are just development workflow tools, they can be skipped if
|
||||
# specific dependencies are not available.
|
||||
def self.defineTools(toolsStage)
|
||||
- # Test if we have libthai-dev, for the Thai word breaking utility
|
||||
- if(Executable::Tc.sysHeaderAvailable?('thai/thwbrk.h'))
|
||||
- Executable.new('thaibreak')
|
||||
- .source('tools/thaibreak')
|
||||
- .lib('thai')
|
||||
- .install(toolsStage, :bin)
|
||||
- toolsStage.install('tools/thaibreak/thai_ts.sh', :bin)
|
||||
- toolsStage.install('tools/onesky_import/import_translations.sh', :bin)
|
||||
- else
|
||||
- puts "skipping thaibreak utility, install libthai-dev to build thaibreak"
|
||||
- end
|
||||
end
|
||||
end
|
||||
139
common/pc/pia/pia.nix
Normal file
@@ -0,0 +1,139 @@
|
||||
{ pkgs, lib, config, ... }:
|
||||
|
||||
{
|
||||
nixpkgs.overlays = [
|
||||
(self: super:
|
||||
|
||||
with self;
|
||||
|
||||
let
|
||||
# arch = builtins.elemAt (lib.strings.splitString "-" builtins.currentSystem) 0;
|
||||
arch = "x86_64";
|
||||
|
||||
pia-desktop = clangStdenv.mkDerivation rec {
|
||||
pname = "pia-desktop";
|
||||
version = "3.3.0";
|
||||
|
||||
src = fetchgit {
|
||||
url = "https://github.com/pia-foss/desktop";
|
||||
rev = version;
|
||||
fetchLFS = true;
|
||||
sha256 = "D9txL5MUWyRYTnsnhlQdYT4dGVpj8PFsVa5hkrb36cw=";
|
||||
};
|
||||
|
||||
patches = [
|
||||
./fix-pia.patch
|
||||
];
|
||||
|
||||
nativeBuildInputs = [
|
||||
cmake
|
||||
rake
|
||||
];
|
||||
|
||||
prePatch = ''
|
||||
sed -i 's|/usr/include/libnl3|${libnl.dev}/include/libnl3|' Rakefile
|
||||
'';
|
||||
|
||||
installPhase = ''
|
||||
mkdir -p $out/bin $out/lib $out/share
|
||||
cp -r ../out/pia_release_${arch}/stage/bin $out
|
||||
cp -r ../out/pia_release_${arch}/stage/lib $out
|
||||
cp -r ../out/pia_release_${arch}/stage/share $out
|
||||
'';
|
||||
|
||||
cmakeFlags = [
|
||||
"-DCMAKE_BUILD_TYPE=Release"
|
||||
];
|
||||
|
||||
QTROOT = "${qt5.full}";
|
||||
QT_MAJOR = lib.versions.minor (lib.strings.parseDrvName qt5.full.name).version;
|
||||
QT_MINOR = lib.versions.patch (lib.strings.parseDrvName qt5.full.name).version;
|
||||
ICU_MAJOR = lib.versions.major (lib.strings.parseDrvName icu.name).version;
|
||||
|
||||
buildInputs = [
|
||||
mesa
|
||||
libsForQt5.qt5.qtquickcontrols
|
||||
libsForQt5.qt5.qtquickcontrols2
|
||||
icu
|
||||
libnl
|
||||
];
|
||||
|
||||
dontWrapQtApps = true;
|
||||
};
|
||||
in rec {
|
||||
openvpn-updown = buildFHSUserEnv {
|
||||
name = "openvpn-updown";
|
||||
targetPkgs = pkgs: (with pkgs; [ pia-desktop ]);
|
||||
runScript = "openvpn-updown.sh";
|
||||
};
|
||||
|
||||
pia-client = buildFHSUserEnv {
|
||||
name = "pia-client";
|
||||
targetPkgs = pkgs: (with pkgs; [
|
||||
pia-desktop
|
||||
xorg.libXau
|
||||
xorg.libXdmcp
|
||||
]);
|
||||
runScript = "pia-client";
|
||||
};
|
||||
|
||||
piactl = buildFHSUserEnv {
|
||||
name = "piactl";
|
||||
targetPkgs = pkgs: (with pkgs; [ pia-desktop ]);
|
||||
runScript = "piactl";
|
||||
};
|
||||
|
||||
pia-daemon = buildFHSUserEnv {
|
||||
name = "pia-daemon";
|
||||
targetPkgs = pkgs: (with pkgs; [ pia-desktop ]);
|
||||
runScript = "pia-daemon";
|
||||
};
|
||||
|
||||
pia-hnsd = buildFHSUserEnv {
|
||||
name = "pia-hnsd";
|
||||
targetPkgs = pkgs: (with pkgs; [ pia-desktop ]);
|
||||
runScript = "pia-hnsd";
|
||||
};
|
||||
|
||||
pia-openvpn = buildFHSUserEnv {
|
||||
name = "pia-openvpn";
|
||||
targetPkgs = pkgs: (with pkgs; [ pia-desktop ]);
|
||||
runScript = "pia-openvpn";
|
||||
};
|
||||
|
||||
pia-ss-local = buildFHSUserEnv {
|
||||
name = "pia-ss-local";
|
||||
targetPkgs = pkgs: (with pkgs; [ pia-desktop ]);
|
||||
runScript = "pia-ss-local";
|
||||
};
|
||||
|
||||
pia-support-tool = buildFHSUserEnv {
|
||||
name = "pia-support-tool";
|
||||
targetPkgs = pkgs: (with pkgs; [
|
||||
pia-desktop
|
||||
xorg.libXau
|
||||
xorg.libXdmcp
|
||||
]);
|
||||
runScript = "pia-support-tool";
|
||||
};
|
||||
|
||||
pia-unbound = buildFHSUserEnv {
|
||||
name = "pia-unbound";
|
||||
targetPkgs = pkgs: (with pkgs; [ pia-desktop ]);
|
||||
runScript = "pia-unbound";
|
||||
};
|
||||
|
||||
pia-wireguard-go = buildFHSUserEnv {
|
||||
name = "pia-wireguard-go";
|
||||
targetPkgs = pkgs: (with pkgs; [ pia-desktop ]);
|
||||
runScript = "pia-wireguard-go";
|
||||
};
|
||||
|
||||
support-tool-launcher = buildFHSUserEnv {
|
||||
name = "support-tool-launcher";
|
||||
targetPkgs = pkgs: (with pkgs; [ pia-desktop ]);
|
||||
runScript = "support-tool-launcher";
|
||||
};
|
||||
})
|
||||
];
|
||||
}
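The overlay above only defines the FHS wrappers; nothing here puts them on a PATH. A hedged sketch of exposing the client and control tool to a user once the overlay is in effect:

  users.users.googlebot.packages = with pkgs; [
    pia-client  # GUI client wrapper defined by the overlay
    piactl      # command-line control wrapper defined by the overlay
  ];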
|
||||
@@ -2,8 +2,7 @@
|
||||
|
||||
let
|
||||
cfg = config.de;
|
||||
in
|
||||
{
|
||||
in {
|
||||
config = lib.mkIf cfg.enable {
|
||||
nixpkgs.overlays = [
|
||||
(self: super: {
|
||||
|
||||
@@ -2,8 +2,7 @@
|
||||
|
||||
let
|
||||
cfg = config.de;
|
||||
in
|
||||
{
|
||||
in {
|
||||
config = lib.mkIf cfg.enable {
|
||||
programs.steam.enable = true;
|
||||
hardware.steam-hardware.enable = true; # steam controller
|
||||
|
||||
@@ -2,8 +2,7 @@
|
||||
|
||||
let
|
||||
cfg = config.de;
|
||||
in
|
||||
{
|
||||
in {
|
||||
config = lib.mkIf cfg.enable {
|
||||
nixpkgs.overlays = [
|
||||
(self: super: {
|
||||
|
||||
@@ -2,8 +2,7 @@
|
||||
|
||||
let
|
||||
cfg = config.de.touchpad;
|
||||
in
|
||||
{
|
||||
in {
|
||||
options.de.touchpad = {
|
||||
enable = lib.mkEnableOption "enable touchpad";
|
||||
};
|
||||
|
||||
@@ -4,19 +4,8 @@ let
|
||||
cfg = config.de;
|
||||
|
||||
extensions = with pkgs.vscode-extensions; [
|
||||
bbenoist.nix # nix syntax support
|
||||
arrterian.nix-env-selector # nix dev envs
|
||||
dart-code.dart-code
|
||||
dart-code.flutter
|
||||
golang.go
|
||||
jnoortheen.nix-ide
|
||||
] ++ pkgs.vscode-utils.extensionsFromVscodeMarketplace [
|
||||
{
|
||||
name = "platformio-ide";
|
||||
publisher = "platformio";
|
||||
version = "3.1.1";
|
||||
sha256 = "fwEct7Tj8bfTOLRozSZJGWoLzWRSvYz/KxcnfpO8Usg=";
|
||||
}
|
||||
# bbenoist.Nix # nix syntax support
|
||||
# arrterian.nix-env-selector # nix dev envs
|
||||
];
|
||||
|
||||
vscodium-with-extensions = pkgs.vscode-with-extensions.override {
|
||||
|
||||
@@ -2,8 +2,7 @@
|
||||
|
||||
let
|
||||
cfg = config.de;
|
||||
in
|
||||
{
|
||||
in {
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.xserver = {
|
||||
enable = true;
|
||||
|
||||
@@ -2,8 +2,7 @@
|
||||
|
||||
let
|
||||
cfg = config.de;
|
||||
in
|
||||
{
|
||||
in {
|
||||
config = lib.mkIf cfg.enable {
|
||||
# yubikey
|
||||
services.pcscd.enable = true;
|
||||
|
||||
@@ -3,9 +3,9 @@
|
||||
with lib;
|
||||
let
|
||||
cfg = config.ceph;
|
||||
in
|
||||
{
|
||||
options.ceph = { };
|
||||
in {
|
||||
options.ceph = {
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
# ceph.enable = true;
|
||||
|
||||
@@ -10,14 +10,9 @@
|
||||
./matrix.nix
|
||||
./zerobin.nix
|
||||
./gitea.nix
|
||||
./gitea-runner.nix
|
||||
./privatebin/privatebin.nix
|
||||
./radio.nix
|
||||
./samba.nix
|
||||
./owncast.nix
|
||||
./mailserver.nix
|
||||
./nextcloud.nix
|
||||
./iodine.nix
|
||||
./searx.nix
|
||||
];
|
||||
}
|
||||
@@ -1,52 +0,0 @@
|
||||
{ config, pkgs, lib, ... }:
|
||||
|
||||
let
|
||||
cfg = config.services.gitea-runner;
|
||||
in
|
||||
{
|
||||
options.services.gitea-runner = {
|
||||
enable = lib.mkEnableOption "Enables gitea runner";
|
||||
dataDir = lib.mkOption {
|
||||
default = "/var/lib/gitea-runner";
|
||||
type = lib.types.str;
|
||||
description = lib.mdDoc "gitea runner data directory.";
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
virtualisation.docker.enable = true;
|
||||
|
||||
users.users.gitea-runner = {
|
||||
description = "Gitea Runner Service";
|
||||
home = cfg.dataDir;
|
||||
useDefaultShell = true;
|
||||
group = "gitea-runner";
|
||||
isSystemUser = true;
|
||||
createHome = true;
|
||||
extraGroups = [
|
||||
"docker" # allow creating docker containers
|
||||
];
|
||||
};
|
||||
users.groups.gitea-runner = { };
|
||||
|
||||
systemd.services.gitea-runner = {
|
||||
description = "Gitea Runner";
|
||||
|
||||
serviceConfig = {
|
||||
WorkingDirectory = cfg.dataDir;
|
||||
User = "gitea-runner";
|
||||
Group = "gitea-runner";
|
||||
};
|
||||
|
||||
requires = [ "network-online.target" ];
|
||||
after = [ "network.target" "network-online.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
path = with pkgs; [ gitea-actions-runner ];
|
||||
|
||||
script = ''
|
||||
exec act_runner daemon
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -1,9 +1,8 @@
|
||||
{ lib, pkgs, config, ... }:
|
||||
{ lib, config, ... }:
|
||||
|
||||
let
|
||||
cfg = config.services.gitea;
|
||||
in
|
||||
{
|
||||
in {
|
||||
options.services.gitea = {
|
||||
hostname = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
@@ -15,8 +14,11 @@ in
|
||||
domain = cfg.hostname;
|
||||
rootUrl = "https://${cfg.hostname}/";
|
||||
appName = cfg.hostname;
|
||||
ssh.enable = true;
|
||||
# lfs.enable = true;
|
||||
# dump.enable = true;
|
||||
dump.enable = true;
|
||||
cookieSecure = true;
|
||||
disableRegistration = true;
|
||||
settings = {
|
||||
other = {
|
||||
SHOW_FOOTER_VERSION = false;
|
||||
@@ -24,37 +26,8 @@ in
|
||||
ui = {
|
||||
DEFAULT_THEME = "arc-green";
|
||||
};
|
||||
service = {
|
||||
DISABLE_REGISTRATION = true;
|
||||
};
|
||||
session = {
|
||||
COOKIE_SECURE = true;
|
||||
};
|
||||
mailer = {
|
||||
ENABLED = true;
|
||||
MAILER_TYPE = "smtp";
|
||||
SMTP_ADDR = "mail.neet.dev";
|
||||
SMTP_PORT = "465";
|
||||
IS_TLS_ENABLED = true;
|
||||
USER = "robot@runyan.org";
|
||||
FROM = "no-reply@neet.dev";
|
||||
};
|
||||
actions = {
|
||||
ENABLED = true;
|
||||
};
|
||||
};
|
||||
mailerPasswordFile = "/run/agenix/robots-email-pw";
|
||||
};
|
||||
age.secrets.robots-email-pw = {
|
||||
file = ../../secrets/robots-email-pw.age;
|
||||
owner = config.services.gitea.user;
|
||||
};
|
||||
|
||||
# backups
|
||||
backup.group."gitea".paths = [
|
||||
config.services.gitea.stateDir
|
||||
];
|
||||
|
||||
services.nginx.enable = true;
|
||||
services.nginx.virtualHosts.${cfg.hostname} = {
|
||||
enableACME = true;
|
||||
|
||||
@@ -7,8 +7,7 @@
|
||||
|
||||
let
|
||||
cfg = config.services.icecast;
|
||||
in
|
||||
{
|
||||
in {
|
||||
options.services.icecast = {
|
||||
mount = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
{ config, pkgs, lib, ... }:
|
||||
|
||||
let
|
||||
cfg = config.services.iodine.server;
|
||||
in
|
||||
{
|
||||
config = lib.mkIf cfg.enable {
|
||||
# iodine DNS-based vpn
|
||||
services.iodine.server = {
|
||||
ip = "192.168.99.1";
|
||||
domain = "tun.neet.dev";
|
||||
passwordFile = "/run/agenix/iodine";
|
||||
};
|
||||
age.secrets.iodine.file = ../../secrets/iodine.age;
|
||||
networking.firewall.allowedUDPPorts = [ 53 ];
|
||||
|
||||
networking.nat.internalInterfaces = [
|
||||
"dns0" # iodine
|
||||
];
|
||||
};
|
||||
}
|
||||
@@ -1,111 +0,0 @@
|
||||
{ config, pkgs, lib, ... }:
|
||||
|
||||
with builtins;
|
||||
|
||||
let
|
||||
cfg = config.mailserver;
|
||||
domains = [
|
||||
"neet.space"
|
||||
"neet.dev"
|
||||
"neet.cloud"
|
||||
"runyan.org"
|
||||
"runyan.rocks"
|
||||
"thunderhex.com"
|
||||
"tar.ninja"
|
||||
"bsd.ninja"
|
||||
"bsd.rocks"
|
||||
];
|
||||
in
|
||||
{
|
||||
config = lib.mkIf cfg.enable {
|
||||
# kresd doesn't work with tailscale MagicDNS
|
||||
mailserver.localDnsResolver = false;
|
||||
services.resolved.enable = true;
|
||||
|
||||
mailserver = {
|
||||
fqdn = "mail.neet.dev";
|
||||
dkimKeyBits = 2048;
|
||||
indexDir = "/var/lib/mailindex";
|
||||
enableManageSieve = true;
|
||||
fullTextSearch.enable = true;
|
||||
fullTextSearch.indexAttachments = true;
|
||||
fullTextSearch.memoryLimit = 500;
|
||||
inherit domains;
|
||||
loginAccounts = {
|
||||
"jeremy@runyan.org" = {
|
||||
hashedPasswordFile = "/run/agenix/hashed-email-pw";
|
||||
# catchall for all domains
|
||||
aliases = map (domain: "@${domain}") domains;
|
||||
};
|
||||
"cris@runyan.org" = {
|
||||
hashedPasswordFile = "/run/agenix/cris-hashed-email-pw";
|
||||
aliases = [ "chris@runyan.org" ];
|
||||
};
|
||||
"robot@runyan.org" = {
|
||||
aliases = [
|
||||
"no-reply@neet.dev"
|
||||
"robot@neet.dev"
|
||||
];
|
||||
sendOnly = true;
|
||||
hashedPasswordFile = "/run/agenix/hashed-robots-email-pw";
|
||||
};
|
||||
};
|
||||
rejectRecipients = [
|
||||
"george@runyan.org"
|
||||
"joslyn@runyan.org"
|
||||
"damon@runyan.org"
|
||||
"jonas@runyan.org"
|
||||
];
|
||||
forwards = {
|
||||
"amazon@runyan.org" = [
|
||||
"jeremy@runyan.org"
|
||||
"cris@runyan.org"
|
||||
];
|
||||
};
|
||||
certificateScheme = 3; # use let's encrypt for certs
|
||||
};
|
||||
age.secrets.hashed-email-pw.file = ../../secrets/hashed-email-pw.age;
|
||||
age.secrets.cris-hashed-email-pw.file = ../../secrets/cris-hashed-email-pw.age;
|
||||
age.secrets.hashed-robots-email-pw.file = ../../secrets/hashed-robots-email-pw.age;
|
||||
|
||||
# sendmail to use xxx@domain instead of xxx@mail.domain
|
||||
services.postfix.origin = "$mydomain";
|
||||
|
||||
# relay sent mail through mailgun
|
||||
# https://www.howtoforge.com/community/threads/different-smtp-relays-for-different-domains-in-postfix.82711/#post-392620
|
||||
services.postfix.config = {
|
||||
smtp_sasl_auth_enable = "yes";
|
||||
smtp_sasl_security_options = "noanonymous";
|
||||
smtp_sasl_password_maps = "hash:/var/lib/postfix/conf/sasl_relay_passwd";
|
||||
smtp_use_tls = "yes";
|
||||
sender_dependent_relayhost_maps = "hash:/var/lib/postfix/conf/sender_relay";
|
||||
smtp_sender_dependent_authentication = "yes";
|
||||
};
|
||||
services.postfix.mapFiles.sender_relay =
|
||||
let
|
||||
relayHost = "[smtp.mailgun.org]:587";
|
||||
in
|
||||
pkgs.writeText "sender_relay"
|
||||
(concatStringsSep "\n" (map (domain: "@${domain} ${relayHost}") domains));
|
||||
services.postfix.mapFiles.sasl_relay_passwd = "/run/agenix/sasl_relay_passwd";
|
||||
age.secrets.sasl_relay_passwd.file = ../../secrets/sasl_relay_passwd.age;
|
||||
|
||||
# webmail
|
||||
services.nginx.enable = true;
|
||||
services.roundcube = {
|
||||
enable = true;
|
||||
hostName = config.mailserver.fqdn;
|
||||
extraConfig = ''
|
||||
# starttls needed for authentication, so the fqdn required to match the certificate
|
||||
$config['smtp_server'] = "tls://${config.mailserver.fqdn}";
|
||||
$config['smtp_user'] = "%u";
|
||||
$config['smtp_pass'] = "%p";
|
||||
'';
|
||||
};
|
||||
|
||||
# backups
|
||||
backup.group."email".paths = [
|
||||
config.mailserver.mailDirectory
|
||||
];
|
||||
};
|
||||
}
|
||||
@@ -3,8 +3,7 @@
|
||||
let
|
||||
cfg = config.services.matrix;
|
||||
certs = config.security.acme.certs;
|
||||
in
|
||||
{
|
||||
in {
|
||||
options.services.matrix = {
|
||||
enable = lib.mkEnableOption "enable matrix";
|
||||
element-web = {
|
||||
@@ -138,8 +137,7 @@ in
|
||||
];
|
||||
locations."/".proxyPass = "http://localhost:${toString cfg.port}";
|
||||
};
|
||||
virtualHosts.${cfg.turn.host} = {
|
||||
# get TLS cert for TURN server
|
||||
virtualHosts.${cfg.turn.host} = { # get TLS cert for TURN server
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
};
|
||||
|
||||
@@ -3,8 +3,7 @@
|
||||
let
|
||||
cfg = config.services.murmur;
|
||||
certs = config.security.acme.certs;
|
||||
in
|
||||
{
|
||||
in {
|
||||
options.services.murmur.domain = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
};
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
{ config, pkgs, lib, ... }:
|
||||
|
||||
|
||||
let
|
||||
cfg = config.services.nextcloud;
|
||||
in
|
||||
{
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.nextcloud = {
|
||||
https = true;
|
||||
package = pkgs.nextcloud25;
|
||||
hostName = "neet.cloud";
|
||||
config.dbtype = "sqlite";
|
||||
config.adminuser = "jeremy";
|
||||
config.adminpassFile = "/run/agenix/nextcloud-pw";
|
||||
autoUpdateApps.enable = true;
|
||||
enableBrokenCiphersForSSE = false;
|
||||
};
|
||||
age.secrets.nextcloud-pw = {
|
||||
file = ../../secrets/nextcloud-pw.age;
|
||||
owner = "nextcloud";
|
||||
};
|
||||
|
||||
# backups
|
||||
backup.group."nextcloud".paths = [
|
||||
config.services.nextcloud.home
|
||||
];
|
||||
|
||||
services.nginx.virtualHosts.${config.services.nextcloud.hostName} = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -5,8 +5,7 @@ let
|
||||
nginxWithRTMP = pkgs.nginx.override {
|
||||
modules = [ pkgs.nginxModules.rtmp ];
|
||||
};
|
||||
in
|
||||
{
|
||||
in {
|
||||
options.services.nginx.stream = {
|
||||
enable = lib.mkEnableOption "enable nginx rtmp/hls/dash video streaming";
|
||||
port = lib.mkOption {
|
||||
|
||||
@@ -2,8 +2,7 @@
|
||||
|
||||
let
|
||||
cfg = config.services.nginx;
|
||||
in
|
||||
{
|
||||
in {
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.nginx = {
|
||||
recommendedGzipSettings = true;
|
||||
|
||||
@@ -4,8 +4,7 @@ with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.owncast;
|
||||
in
|
||||
{
|
||||
in {
|
||||
options.services.owncast = {
|
||||
hostname = lib.mkOption {
|
||||
type = types.str;
|
||||
|
||||
@@ -14,8 +14,7 @@ let
|
||||
cp -ar $src $out
|
||||
'';
|
||||
};
|
||||
in
|
||||
{
|
||||
in {
|
||||
options.services.privatebin = {
|
||||
enable = lib.mkEnableOption "enable privatebin";
|
||||
host = lib.mkOption {
|
||||
|
||||
@@ -3,8 +3,7 @@
|
||||
let
|
||||
cfg = config.services.radio;
|
||||
radioPackage = config.inputs.radio.packages.${config.currentSystem}.radio;
|
||||
in
|
||||
{
|
||||
in {
|
||||
options.services.radio = {
|
||||
enable = lib.mkEnableOption "enable radio";
|
||||
user = lib.mkOption {
|
||||
|
||||
@@ -25,7 +25,9 @@
|
||||
printing = cups
|
||||
printcap name = cups
|
||||
|
||||
hide files = /.nobackup/.DS_Store/._.DS_Store/
|
||||
# horrible files
|
||||
veto files = /._*/.DS_Store/ /._*/._.DS_Store/
|
||||
delete veto files = yes
|
||||
'';
|
||||
|
||||
shares = {
|
||||
@@ -75,13 +77,6 @@
|
||||
};
|
||||
};
|
||||
|
||||
# backups
|
||||
backup.group."samba".paths = [
|
||||
config.services.samba.shares.googlebot.path
|
||||
config.services.samba.shares.cris.path
|
||||
config.services.samba.shares.public.path
|
||||
];
|
||||
|
||||
# Windows discovery of samba server
|
||||
services.samba-wsdd = {
|
||||
enable = true;
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
{ config, pkgs, lib, ... }:
|
||||
|
||||
let
|
||||
cfg = config.services.searx;
|
||||
in
|
||||
{
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.searx = {
|
||||
environmentFile = "/run/agenix/searx";
|
||||
settings = {
|
||||
server.port = 43254;
|
||||
server.secret_key = "@SEARX_SECRET_KEY@";
|
||||
engines = [{
|
||||
name = "wolframalpha";
|
||||
shortcut = "wa";
|
||||
api_key = "@WOLFRAM_API_KEY@";
|
||||
engine = "wolframalpha_api";
|
||||
}];
|
||||
};
|
||||
};
|
||||
services.nginx.virtualHosts."search.neet.space" = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations."/" = {
|
||||
proxyPass = "http://localhost:${toString config.services.searx.settings.server.port}";
|
||||
};
|
||||
};
|
||||
age.secrets.searx.file = ../../secrets/searx.age;
|
||||
};
|
||||
}
|
||||
@@ -2,8 +2,7 @@
|
||||
|
||||
let
|
||||
cfg = config.services.thelounge;
|
||||
in
|
||||
{
|
||||
in {
|
||||
options.services.thelounge = {
|
||||
fileUploadBaseUrl = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
@@ -43,10 +42,6 @@ in
|
||||
};
|
||||
};
|
||||
|
||||
backup.group."thelounge".paths = [
|
||||
"/var/lib/thelounge/"
|
||||
];
|
||||
|
||||
# the lounge client
|
||||
services.nginx.virtualHosts.${cfg.host} = {
|
||||
enableACME = true;
|
||||
|
||||
@@ -79,11 +79,8 @@ in
|
||||
"${toStr webrtc-peer-lower-port}-${toStr webrtc-peer-upper-port}:${toStr webrtc-peer-lower-port}-${toStr webrtc-peer-upper-port}/udp"
|
||||
];
|
||||
cmd = [
|
||||
"lightspeed-webrtc"
|
||||
"--addr=0.0.0.0"
|
||||
"--ip=${domain}"
|
||||
"--ports=${toStr webrtc-peer-lower-port}-${toStr webrtc-peer-upper-port}"
|
||||
"run"
|
||||
"lightspeed-webrtc" "--addr=0.0.0.0" "--ip=${domain}"
|
||||
"--ports=${toStr webrtc-peer-lower-port}-${toStr webrtc-peer-upper-port}" "run"
|
||||
];
|
||||
# imageFile = pkgs.dockerTools.pullImage {
|
||||
# imageName = "projectlightspeed/webrtc";
|
||||
|
||||
@@ -2,8 +2,7 @@
|
||||
|
||||
let
|
||||
cfg = config.services.zerobin;
|
||||
in
|
||||
{
|
||||
in {
|
||||
options.services.zerobin = {
|
||||
host = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
|
||||
@@ -1,28 +1,36 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
{ config, pkgs, ... }:
|
||||
|
||||
# Improvements to the default shell
|
||||
# - use nix-index for command-not-found
|
||||
# - use nix-locate for command-not-found
|
||||
# - disable fish's annoying greeting message
|
||||
# - add some handy shell commands
|
||||
|
||||
{
|
||||
environment.systemPackages = with pkgs; [
|
||||
comma
|
||||
];
|
||||
|
||||
# nix-index
|
||||
programs.nix-index.enable = true;
|
||||
programs.nix-index.enableFishIntegration = true;
|
||||
let
|
||||
nix-locate = config.inputs.nix-locate.packages.${config.currentSystem}.default;
|
||||
in {
|
||||
programs.command-not-found.enable = false;
|
||||
|
||||
environment.systemPackages = [
|
||||
nix-locate
|
||||
];
|
||||
|
||||
programs.fish = {
|
||||
enable = true;
|
||||
|
||||
shellInit = ''
|
||||
shellInit = let
|
||||
wrapper = pkgs.writeScript "command-not-found" ''
|
||||
#!${pkgs.bash}/bin/bash
|
||||
source ${nix-locate}/etc/profile.d/command-not-found.sh
|
||||
command_not_found_handle "$@"
|
||||
'';
|
||||
in ''
|
||||
# use nix-locate for command-not-found functionality
|
||||
function __fish_command_not_found_handler --on-event fish_command_not_found
|
||||
${wrapper} $argv
|
||||
end
|
||||
|
||||
# disable annoying fish shell greeting
|
||||
set fish_greeting
|
||||
|
||||
alias sudo="doas"
|
||||
'';
|
||||
};
|
||||
|
||||
@@ -30,23 +38,9 @@
|
||||
myip = "dig +short myip.opendns.com @resolver1.opendns.com";
|
||||
|
||||
# https://linuxreviews.org/HOWTO_Test_Disk_I/O_Performance
|
||||
io_seq_read = "${pkgs.fio}/bin/fio --name TEST --eta-newline=5s --filename=temp.file --rw=read --size=2g --io_size=10g --blocksize=1024k --ioengine=libaio --fsync=10000 --iodepth=32 --direct=1 --numjobs=1 --runtime=60 --group_reporting; rm temp.file";
|
||||
io_seq_write = "${pkgs.fio}/bin/fio --name TEST --eta-newline=5s --filename=temp.file --rw=write --size=2g --io_size=10g --blocksize=1024k --ioengine=libaio --fsync=10000 --iodepth=32 --direct=1 --numjobs=1 --runtime=60 --group_reporting; rm temp.file";
|
||||
io_rand_read = "${pkgs.fio}/bin/fio --name TEST --eta-newline=5s --filename=temp.file --rw=randread --size=2g --io_size=10g --blocksize=4k --ioengine=libaio --fsync=1 --iodepth=1 --direct=1 --numjobs=32 --runtime=60 --group_reporting; rm temp.file";
|
||||
io_rand_write = "${pkgs.fio}/bin/fio --name TEST --eta-newline=5s --filename=temp.file --rw=randrw --size=2g --io_size=10g --blocksize=4k --ioengine=libaio --fsync=1 --iodepth=1 --direct=1 --numjobs=1 --runtime=60 --group_reporting; rm temp.file";
|
||||
io_seq_read = "nix run nixpkgs#fio -- --name TEST --eta-newline=5s --filename=temp.file --rw=read --size=2g --io_size=10g --blocksize=1024k --ioengine=libaio --fsync=10000 --iodepth=32 --direct=1 --numjobs=1 --runtime=60 --group_reporting; rm temp.file";
|
||||
io_seq_write = "nix run nixpkgs#fio -- --name TEST --eta-newline=5s --filename=temp.file --rw=write --size=2g --io_size=10g --blocksize=1024k --ioengine=libaio --fsync=10000 --iodepth=32 --direct=1 --numjobs=1 --runtime=60 --group_reporting; rm temp.file";
|
||||
io_rand_read = "nix run nixpkgs#fio -- --name TEST --eta-newline=5s --filename=temp.file --rw=randread --size=2g --io_size=10g --blocksize=4k --ioengine=libaio --fsync=1 --iodepth=1 --direct=1 --numjobs=32 --runtime=60 --group_reporting; rm temp.file";
|
||||
io_rand_write = "nix run nixpkgs#fio -- --name TEST --eta-newline=5s --filename=temp.file --rw=randrw --size=2g --io_size=10g --blocksize=4k --ioengine=libaio --fsync=1 --iodepth=1 --direct=1 --numjobs=1 --runtime=60 --group_reporting; rm temp.file";
|
||||
};
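Both alias styles above invoke the same fio benchmark; the difference is when fio is resolved. A hedged side-by-side sketch (flags trimmed for brevity):

  environment.shellAliases = {
    # store-path interpolation: fio is built into the system closure
    io_test_closure = "${pkgs.fio}/bin/fio --name TEST --filename=temp.file --rw=read --size=1g; rm temp.file";
    # flake-registry lookup: fio is fetched only when the alias is run
    io_test_nixrun = "nix run nixpkgs#fio -- --name TEST --filename=temp.file --rw=read --size=1g; rm temp.file";
  };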
|
||||
|
||||
nixpkgs.overlays = [
|
||||
(final: prev: {
|
||||
# comma uses the "nix-index" package built into nixpkgs by default.
|
||||
# That package doesn't use the prebuilt nix-index database so it needs to be changed.
|
||||
comma = prev.comma.overrideAttrs (old: {
|
||||
postInstall = ''
|
||||
wrapProgram $out/bin/comma \
|
||||
--prefix PATH : ${lib.makeBinPath [ prev.fzy config.programs.nix-index.package ]}
|
||||
ln -s $out/bin/comma $out/bin/,
|
||||
'';
|
||||
});
|
||||
})
|
||||
];
|
||||
}
|
||||
@@ -1,38 +1,65 @@
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
{
|
||||
programs.ssh.knownHosts = lib.filterAttrs (n: v: v != null) (lib.concatMapAttrs
|
||||
(host: cfg: {
|
||||
${host} = {
|
||||
hostNames = cfg.hostNames;
|
||||
publicKey = cfg.hostKey;
|
||||
};
|
||||
"${host}-remote-unlock" =
|
||||
if cfg.remoteUnlock != null then {
|
||||
hostNames = builtins.filter (h: h != null) [ cfg.remoteUnlock.clearnetHost cfg.remoteUnlock.onionHost ];
|
||||
publicKey = cfg.remoteUnlock.hostKey;
|
||||
} else null;
|
||||
})
|
||||
config.machines.hosts);
|
||||
|
||||
# prebuilt cmds for easy ssh LUKS unlock
|
||||
environment.shellAliases =
|
||||
let
|
||||
unlockHosts = unlockType: lib.concatMapAttrs
|
||||
(host: cfg:
|
||||
if cfg.remoteUnlock != null && cfg.remoteUnlock.${unlockType} != null then {
|
||||
${host} = cfg.remoteUnlock.${unlockType};
|
||||
} else { })
|
||||
config.machines.hosts;
|
||||
in
|
||||
lib.concatMapAttrs (host: addr: { "unlock-over-tor_${host}" = "torsocks ssh root@${addr}"; }) (unlockHosts "onionHost")
|
||||
//
|
||||
lib.concatMapAttrs (host: addr: { "unlock_${host}" = "ssh root@${addr}"; }) (unlockHosts "clearnetHost");
|
||||
|
||||
# TODO: Old ssh keys I will remove some day...
|
||||
machines.ssh.userKeys = [
|
||||
rec {
|
||||
users = [
|
||||
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMVR/R3ZOsv7TZbICGBCHdjh1NDT8SnswUyINeJOC7QG"
|
||||
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE0dcqL/FhHmv+a1iz3f9LJ48xubO7MZHy35rW9SZOYM"
|
||||
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO0VFnn3+Mh0nWeN92jov81qNE9fpzTAHYBphNoY7HUx" # reg
|
||||
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHSkKiRUUmnErOKGx81nyge/9KqjkPh8BfDk0D3oP586" # nat
|
||||
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFeTK1iARlNIKP/DS8/ObBm9yUM/3L1Ub4XI5A2r9OzP" # ray
|
||||
];
|
||||
system = {
|
||||
liza = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDY/pNyWedEfU7Tq9ikGbriRuF1ZWkHhegGS17L0Vcdl";
|
||||
ponyo = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMBBlTAIp38RhErU1wNNV5MBeb+WGH0mhF/dxh5RsAXN";
|
||||
ponyo-unlock = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIC9LQuuImgWlkjDhEEIbM1wOd+HqRv1RxvYZuLXPSdRi";
|
||||
ray = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDQM8hwKRgl8cZj7UVYATSLYu4LhG7I0WFJ9m2iWowiB";
|
||||
s0 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAwiXcUFtAvZCayhu4+AIcF+Ktrdgv9ee/mXSIhJbp4q";
|
||||
n1 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPWlhd1Oid5Xf2zdcBrcdrR0TlhObutwcJ8piobRTpRt";
|
||||
n2 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJ7bRiRutnI7Bmyt/I238E3Fp5DqiClIXiVibsccipOr";
|
||||
n3 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB+rJEaRrFDGirQC2UoWQkmpzLg4qgTjGJgVqiipWiU5";
|
||||
n4 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINYm2ROIfCeGz6QtDwqAmcj2DX9tq2CZn0eLhskdvB4Z";
|
||||
n5 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE5Qhvwq3PiHEKf+2/4w5ZJkSMNzFLhIRrPOR98m7wW4";
|
||||
n6 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID/P/pa9+qhKAPfvvd8xSO2komJqDW0M1nCK7ZrP6PO7";
|
||||
n7 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPtOlOvTlMX2mxPaXDJ6VlMe5rmroUXpKmJVNxgV32xL";
|
||||
};
|
||||
|
||||
# groups
|
||||
systems = with system; [
|
||||
liza
|
||||
ponyo
|
||||
ray
|
||||
s0
|
||||
n1
|
||||
n2
|
||||
n3
|
||||
n4
|
||||
n5
|
||||
n6
|
||||
n7
|
||||
];
|
||||
personal = with system; [
|
||||
ray
|
||||
];
|
||||
servers = with system; [
|
||||
liza
|
||||
ponyo
|
||||
s0
|
||||
n1
|
||||
n2
|
||||
n3
|
||||
n4
|
||||
n5
|
||||
n6
|
||||
n7
|
||||
];
|
||||
compute = with system; [
|
||||
n1
|
||||
n2
|
||||
n3
|
||||
n4
|
||||
n5
|
||||
n6
|
||||
n7
|
||||
];
|
||||
storage = with system; [
|
||||
s0
|
||||
];
|
||||
}
|
||||
103
flake.lock
generated
@@ -8,11 +8,11 @@
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1682101079,
|
||||
"narHash": "sha256-MdAhtjrLKnk2uiqun1FWABbKpLH090oeqCSiWemtuck=",
|
||||
"lastModified": 1675176355,
|
||||
"narHash": "sha256-Qjxh5cmN56siY97mzmBLI1+cdjXSPqmfPVsKxBvHmwI=",
|
||||
"owner": "ryantm",
|
||||
"repo": "agenix",
|
||||
"rev": "2994d002dcff5353ca1ac48ec584c7f6589fe447",
|
||||
"rev": "b7ffcfe77f817d9ee992640ba1f270718d197f28",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -105,31 +105,6 @@
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"deploy-rs": {
|
||||
"inputs": {
|
||||
"flake-compat": "flake-compat",
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
],
|
||||
"utils": [
|
||||
"simple-nixos-mailserver",
|
||||
"utils"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1683515103,
|
||||
"narHash": "sha256-vWlnZ0twW+ekOC6JuAHDfupv+u4QNvWawG7+DaQJ4VA=",
|
||||
"owner": "serokell",
|
||||
"repo": "deploy-rs",
|
||||
"rev": "64160276cd6569694131ed8864d4d35470a84ec3",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "serokell",
|
||||
"repo": "deploy-rs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-compat": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
@@ -147,15 +122,12 @@
|
||||
}
|
||||
},
|
||||
"flake-utils": {
|
||||
"inputs": {
|
||||
"systems": "systems"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1681202837,
|
||||
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
|
||||
"lastModified": 1667395993,
|
||||
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "cfacdce06f30d2b68473a46042957675eebb3401",
|
||||
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -164,38 +136,39 @@
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix-index-database": {
|
||||
"nix-locate": {
|
||||
"inputs": {
|
||||
"flake-compat": "flake-compat",
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1683638468,
|
||||
"narHash": "sha256-tQEaGZfZ2Hpw+XIVEHaJ8FaF1yNQyMDDhUyIQ7LTIEg=",
|
||||
"owner": "Mic92",
|
||||
"repo": "nix-index-database",
|
||||
"rev": "219067a5e3cf4b9581c8b4fcfc59ecd5af953d07",
|
||||
"lastModified": 1673969751,
|
||||
"narHash": "sha256-U6aYz3lqZ4NVEGEWiti1i0FyqEo4bUjnTAnA73DPnNU=",
|
||||
"owner": "bennofs",
|
||||
"repo": "nix-index",
|
||||
"rev": "5f98881b1ed27ab6656e6d71b534f88430f6823a",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "Mic92",
|
||||
"repo": "nix-index-database",
|
||||
"owner": "bennofs",
|
||||
"repo": "nix-index",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1683741689,
|
||||
"narHash": "sha256-VY6gjqAFQe0Xyz+olc979zbsW9dC4VG+mINGffFKVEw=",
|
||||
"lastModified": 1672580127,
|
||||
"narHash": "sha256-3lW3xZslREhJogoOkjeZtlBtvFMyxHku7I/9IVehhT8=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "f431ee4a85cb985075b4ed27596913e8087f4264",
|
||||
"rev": "0874168639713f547c05947c76124f78441ea46c",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "master",
|
||||
"ref": "nixos-22.05",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
@@ -215,16 +188,20 @@
|
||||
"type": "indirect"
|
||||
}
|
||||
},
|
||||
"nixpkgs-hostapd-pr": {
|
||||
"flake": false,
|
||||
"nixpkgs-unstable": {
|
||||
"locked": {
|
||||
"narHash": "sha256-35+g1EJMcDFhb3UP15fyR1aD4AX1ifz2EqaYItITZ7U=",
|
||||
"type": "file",
|
||||
"url": "https://github.com/NixOS/nixpkgs/pull/222536.patch"
|
||||
"lastModified": 1675835843,
|
||||
"narHash": "sha256-y1dSCQPcof4CWzRYRqDj4qZzbBl+raVPAko5Prdil28=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "32f914af34f126f54b45e482fb2da4ae78f3095f",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"type": "file",
|
||||
"url": "https://github.com/NixOS/nixpkgs/pull/222536.patch"
|
||||
"owner": "NixOS",
|
||||
"ref": "master",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"radio": {
|
||||
@@ -273,11 +250,10 @@
|
||||
"agenix": "agenix",
|
||||
"archivebox": "archivebox",
|
||||
"dailybuild_modules": "dailybuild_modules",
|
||||
"deploy-rs": "deploy-rs",
|
||||
"flake-utils": "flake-utils",
|
||||
"nix-index-database": "nix-index-database",
|
||||
"nix-locate": "nix-locate",
|
||||
"nixpkgs": "nixpkgs",
|
||||
"nixpkgs-hostapd-pr": "nixpkgs-hostapd-pr",
|
||||
"nixpkgs-unstable": "nixpkgs-unstable",
|
||||
"radio": "radio",
|
||||
"radio-web": "radio-web",
|
||||
"simple-nixos-mailserver": "simple-nixos-mailserver"
|
||||
@@ -307,21 +283,6 @@
|
||||
"type": "gitlab"
|
||||
}
|
||||
},
|
||||
"systems": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"utils": {
|
||||
"locked": {
|
||||
"lastModified": 1605370193,
|
||||
|
||||
109
flake.nix
@@ -1,11 +1,13 @@
|
||||
{
|
||||
inputs = {
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/master";
|
||||
# nixpkgs-patch-howdy.url = "https://github.com/NixOS/nixpkgs/pull/216245.diff";
|
||||
# nixpkgs-patch-howdy.flake = false;
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-22.05";
|
||||
nixpkgs-unstable.url = "github:NixOS/nixpkgs/master";
|
||||
|
||||
flake-utils.url = "github:numtide/flake-utils";
|
||||
|
||||
nix-locate.url = "github:bennofs/nix-index";
|
||||
nix-locate.inputs.nixpkgs.follows = "nixpkgs";
|
||||
|
||||
# mail server
|
||||
simple-nixos-mailserver.url = "gitlab:simple-nixos-mailserver/nixos-mailserver/nixos-22.05";
|
||||
simple-nixos-mailserver.inputs.nixpkgs.follows = "nixpkgs";
|
||||
@@ -30,69 +32,33 @@
|
||||
archivebox.url = "git+https://git.neet.dev/zuckerberg/archivebox.git";
|
||||
archivebox.inputs.nixpkgs.follows = "nixpkgs";
|
||||
archivebox.inputs.flake-utils.follows = "flake-utils";
|
||||
|
||||
# nixos config deployment
|
||||
deploy-rs.url = "github:serokell/deploy-rs";
|
||||
deploy-rs.inputs.nixpkgs.follows = "nixpkgs";
|
||||
deploy-rs.inputs.utils.follows = "simple-nixos-mailserver/utils";
|
||||
|
||||
# prebuilt nix-index database
|
||||
nix-index-database.url = "github:Mic92/nix-index-database";
|
||||
nix-index-database.inputs.nixpkgs.follows = "nixpkgs";
|
||||
|
||||
nixpkgs-hostapd-pr.url = "https://github.com/NixOS/nixpkgs/pull/222536.patch";
|
||||
nixpkgs-hostapd-pr.flake = false;
|
||||
};
|
||||
|
||||
outputs = { self, nixpkgs, ... }@inputs:
|
||||
let
|
||||
machines = (import ./common/machine-info/moduleless.nix
|
||||
{
|
||||
inherit nixpkgs;
|
||||
assertionsModule = "${nixpkgs}/nixos/modules/misc/assertions.nix";
|
||||
}).machines.hosts;
|
||||
in
|
||||
{
|
||||
outputs = { self, nixpkgs, nixpkgs-unstable, ... }@inputs: {
|
||||
|
||||
nixosConfigurations =
|
||||
let
|
||||
modules = system: hostname: with inputs; [
|
||||
modules = system: [
|
||||
./common
|
||||
simple-nixos-mailserver.nixosModule
|
||||
agenix.nixosModules.default
|
||||
dailybuild_modules.nixosModule
|
||||
archivebox.nixosModule
|
||||
nix-index-database.nixosModules.nix-index
|
||||
inputs.simple-nixos-mailserver.nixosModule
|
||||
inputs.agenix.nixosModules.default
|
||||
inputs.dailybuild_modules.nixosModule
|
||||
inputs.archivebox.nixosModule
|
||||
({ lib, ... }: {
|
||||
config = {
|
||||
environment.systemPackages = [
|
||||
agenix.packages.${system}.agenix
|
||||
config.environment.systemPackages = [
|
||||
inputs.agenix.packages.${system}.agenix
|
||||
];
|
||||
|
||||
networking.hostName = hostname;
|
||||
};
|
||||
|
||||
# because nixos specialArgs doesn't work for containers... need to pass in inputs a different way
|
||||
options.inputs = lib.mkOption { default = inputs; };
|
||||
options.currentSystem = lib.mkOption { default = system; };
|
||||
})
|
||||
];
|
||||
|
||||
mkSystem = system: nixpkgs: path: hostname:
|
||||
mkSystem = system: nixpkgs: path:
|
||||
let
|
||||
allModules = modules system hostname;
|
||||
|
||||
# allow patching nixpkgs, remove this hack once this is solved: https://github.com/NixOS/nix/issues/3920
|
||||
patchedNixpkgsSrc = nixpkgs.legacyPackages.${system}.applyPatches {
|
||||
name = "nixpkgs-patched";
|
||||
src = nixpkgs;
|
||||
patches = [
|
||||
inputs.nixpkgs-hostapd-pr
|
||||
];
|
||||
};
|
||||
patchedNixpkgs = nixpkgs.lib.fix (self: (import "${patchedNixpkgsSrc}/flake.nix").outputs { self = nixpkgs; });
|
||||
|
||||
in
|
||||
patchedNixpkgs.lib.nixosSystem {
|
||||
allModules = modules system;
|
||||
in nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
modules = allModules ++ [path];
|
||||
|
||||
@@ -101,13 +67,23 @@
|
||||
};
|
||||
};
|
||||
in
|
||||
nixpkgs.lib.mapAttrs
|
||||
(hostname: cfg:
|
||||
mkSystem cfg.arch nixpkgs cfg.configurationPath hostname)
|
||||
machines;
|
||||
{
|
||||
"reg" = mkSystem "x86_64-linux" nixpkgs ./machines/reg/configuration.nix;
|
||||
"ray" = mkSystem "x86_64-linux" nixpkgs-unstable ./machines/ray/configuration.nix;
|
||||
"nat" = mkSystem "aarch64-linux" nixpkgs ./machines/nat/configuration.nix;
|
||||
"liza" = mkSystem "x86_64-linux" nixpkgs ./machines/liza/configuration.nix;
|
||||
"ponyo" = mkSystem "x86_64-linux" nixpkgs ./machines/ponyo/configuration.nix;
|
||||
"s0" = mkSystem "aarch64-linux" nixpkgs-unstable ./machines/storage/s0/configuration.nix;
|
||||
"n1" = mkSystem "aarch64-linux" nixpkgs ./machines/compute/n1/configuration.nix;
|
||||
"n2" = mkSystem "aarch64-linux" nixpkgs ./machines/compute/n2/configuration.nix;
|
||||
"n3" = mkSystem "aarch64-linux" nixpkgs ./machines/compute/n3/configuration.nix;
|
||||
"n4" = mkSystem "aarch64-linux" nixpkgs ./machines/compute/n4/configuration.nix;
|
||||
"n5" = mkSystem "aarch64-linux" nixpkgs ./machines/compute/n5/configuration.nix;
|
||||
"n6" = mkSystem "aarch64-linux" nixpkgs ./machines/compute/n6/configuration.nix;
|
||||
"n7" = mkSystem "aarch64-linux" nixpkgs ./machines/compute/n7/configuration.nix;
|
||||
};
|
||||
|
||||
packages =
|
||||
let
|
||||
packages = let
|
||||
mkKexec = system:
|
||||
(nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
@@ -118,28 +94,11 @@
|
||||
inherit system;
|
||||
modules = [ ./machines/ephemeral/iso.nix ];
|
||||
}).config.system.build.isoImage;
|
||||
in
|
||||
{
|
||||
in {
|
||||
"x86_64-linux"."kexec" = mkKexec "x86_64-linux";
|
||||
"x86_64-linux"."iso" = mkIso "x86_64-linux";
|
||||
"aarch64-linux"."kexec" = mkKexec "aarch64-linux";
|
||||
"aarch64-linux"."iso" = mkIso "aarch64-linux";
|
||||
};
|
||||
|
||||
deploy.nodes =
|
||||
let
|
||||
mkDeploy = configName: arch: hostname: {
|
||||
inherit hostname;
|
||||
magicRollback = false;
|
||||
sshUser = "root";
|
||||
profiles.system.path = inputs.deploy-rs.lib.${arch}.activate.nixos self.nixosConfigurations.${configName};
|
||||
};
|
||||
in
|
||||
nixpkgs.lib.mapAttrs
|
||||
(hostname: cfg:
|
||||
mkDeploy hostname cfg.arch (builtins.head cfg.hostNames))
|
||||
machines;
|
||||
|
||||
checks = builtins.mapAttrs (system: deployLib: deployLib.deployChecks self.deploy) inputs.deploy-rs.lib;
|
||||
};
|
||||
}
|
||||
|
||||
24
machines/compute/common.nix
Normal file
@@ -0,0 +1,24 @@
|
||||
{ config, ... }:
|
||||
|
||||
{
|
||||
# NixOS wants to enable GRUB by default
|
||||
boot.loader.grub.enable = false;
|
||||
# Enables the generation of /boot/extlinux/extlinux.conf
|
||||
boot.loader.generic-extlinux-compatible.enable = true;
|
||||
|
||||
fileSystems = {
|
||||
"/" = {
|
||||
device = "/dev/disk/by-label/NIXOS_SD";
|
||||
fsType = "ext4";
|
||||
};
|
||||
};
|
||||
|
||||
system.autoUpgrade.enable = true;
|
||||
|
||||
networking.interfaces.eth0.useDHCP = true;
|
||||
|
||||
hardware.deviceTree.enable = true;
|
||||
hardware.deviceTree.overlays = [
|
||||
./sopine-baseboard-ethernet.dtbo # fix pine64 clusterboard ethernet
|
||||
];
|
||||
}
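The overlay list above references a prebuilt .dtbo. Depending on the NixOS release, hardware.deviceTree.overlays can also take the .dts source directly and compile it at build time; a hedged sketch, assuming that form of the option is available:

  hardware.deviceTree.overlays = [{
    name = "sopine-baseboard-ethernet";
    dtsFile = ./sopine-baseboard-ethernet.dts;  # compiled to a .dtbo by the module
  }];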
|
||||
9
machines/compute/n1/configuration.nix
Normal file
@@ -0,0 +1,9 @@
|
||||
{ config, ... }:
|
||||
|
||||
{
|
||||
imports = [
|
||||
../common.nix
|
||||
];
|
||||
|
||||
networking.hostName = "n1";
|
||||
}
|
||||
9
machines/compute/n2/configuration.nix
Normal file
@@ -0,0 +1,9 @@
|
||||
{ config, ... }:
|
||||
|
||||
{
|
||||
imports = [
|
||||
../common.nix
|
||||
];
|
||||
|
||||
networking.hostName = "n2";
|
||||
}
|
||||
9
machines/compute/n3/configuration.nix
Normal file
@@ -0,0 +1,9 @@
|
||||
{ config, ... }:
|
||||
|
||||
{
|
||||
imports = [
|
||||
../common.nix
|
||||
];
|
||||
|
||||
networking.hostName = "n3";
|
||||
}
|
||||
9
machines/compute/n4/configuration.nix
Normal file
@@ -0,0 +1,9 @@
|
||||
{ config, ... }:
|
||||
|
||||
{
|
||||
imports = [
|
||||
../common.nix
|
||||
];
|
||||
|
||||
networking.hostName = "n4";
|
||||
}
|
||||
9
machines/compute/n5/configuration.nix
Normal file
@@ -0,0 +1,9 @@
|
||||
{ config, ... }:
|
||||
|
||||
{
|
||||
imports = [
|
||||
../common.nix
|
||||
];
|
||||
|
||||
networking.hostName = "n5";
|
||||
}
|
||||
9
machines/compute/n6/configuration.nix
Normal file
@@ -0,0 +1,9 @@
|
||||
{ config, ... }:
|
||||
|
||||
{
|
||||
imports = [
|
||||
../common.nix
|
||||
];
|
||||
|
||||
networking.hostName = "n6";
|
||||
}
|
||||
9
machines/compute/n7/configuration.nix
Normal file
@@ -0,0 +1,9 @@
|
||||
{ config, ... }:
|
||||
|
||||
{
|
||||
imports = [
|
||||
../common.nix
|
||||
];
|
||||
|
||||
networking.hostName = "n7";
|
||||
}
|
||||
BIN
machines/compute/sopine-baseboard-ethernet.dtbo
Normal file
Binary file not shown.
15
machines/compute/sopine-baseboard-ethernet.dts
Normal file
@@ -0,0 +1,15 @@
|
||||
/dts-v1/;
|
||||
|
||||
/ {
|
||||
model = "SoPine with baseboard";
|
||||
compatible = "pine64,sopine-baseboard\0pine64,sopine\0allwinner,sun50i-a64";
|
||||
|
||||
fragment@0 {
|
||||
/* target = <&ethernet@1c30000>; */
|
||||
target-path = "/soc/ethernet@1c30000";
|
||||
__overlay__ {
|
||||
allwinner,tx-delay-ps = <500>;
|
||||
};
|
||||
};
|
||||
|
||||
};
|
||||
@@ -1,53 +1,28 @@
|
||||
{ config, pkgs, modulesPath, ... }:
|
||||
{ pkgs, ... }:
|
||||
|
||||
{
|
||||
imports = [
|
||||
(modulesPath + "/installer/cd-dvd/channel.nix")
|
||||
../../common/machine-info
|
||||
../../common/ssh.nix
|
||||
];
|
||||
|
||||
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "e1000" "e1000e" "virtio_pci" "r8169" ];
|
||||
boot.kernelParams = [
|
||||
"panic=30"
|
||||
"boot.panic_on_fail" # reboot the machine upon fatal boot issues
|
||||
"console=ttyS0,115200" # enable serial console
|
||||
"panic=30" "boot.panic_on_fail" # reboot the machine upon fatal boot issues
|
||||
"console=ttyS0" # enable serial console
|
||||
"console=tty1"
|
||||
];
|
||||
boot.kernel.sysctl."vm.overcommit_memory" = "1";
|
||||
|
||||
boot.kernelPackages = pkgs.linuxPackages_latest;
|
||||
|
||||
system.stateVersion = "21.11";
|
||||
|
||||
# hardware.enableAllFirmware = true;
|
||||
# nixpkgs.config.allowUnfree = true;
|
||||
|
||||
environment.systemPackages = with pkgs; [
|
||||
cryptsetup
|
||||
btrfs-progs
|
||||
git
|
||||
git-lfs
|
||||
wget
|
||||
htop
|
||||
dnsutils
|
||||
pciutils
|
||||
usbutils
|
||||
lm_sensors
|
||||
];
|
||||
|
||||
environment.variables.GC_INITIAL_HEAP_SIZE = "1M";
|
||||
|
||||
networking.useDHCP = true;
|
||||
|
||||
services.openssh = {
|
||||
enable = true;
|
||||
settings = {
|
||||
KbdInteractiveAuthentication = false;
|
||||
PasswordAuthentication = false;
|
||||
};
|
||||
challengeResponseAuthentication = false;
|
||||
passwordAuthentication = false;
|
||||
};
|
||||
|
||||
services.getty.autologinUser = "root";
|
||||
users.users.root.openssh.authorizedKeys.keys = config.machines.ssh.userKeys;
|
||||
users.users.root.openssh.authorizedKeys.keys = (import ../common/ssh.nix).users;
|
||||
}
|
||||
110
machines/liza/configuration.nix
Normal file
@@ -0,0 +1,110 @@
|
||||
{ config, pkgs, lib, ... }:
|
||||
|
||||
{
|
||||
imports = [
|
||||
./hardware-configuration.nix
|
||||
];
|
||||
|
||||
# 5synsrjgvfzywruomjsfvfwhhlgxqhyofkzeqt2eisyijvjvebnu2xyd.onion
|
||||
|
||||
firmware.x86_64.enable = true;
|
||||
bios = {
|
||||
enable = true;
|
||||
device = "/dev/sda";
|
||||
};
|
||||
|
||||
luks = {
|
||||
enable = true;
|
||||
device.path = "/dev/disk/by-uuid/2f736fba-8a0c-4fb5-8041-c849fb5e1297";
|
||||
};
|
||||
|
||||
system.autoUpgrade.enable = true;
|
||||
|
||||
networking.hostName = "liza";
|
||||
|
||||
networking.interfaces.enp1s0.useDHCP = true;
|
||||
|
||||
mailserver = {
|
||||
enable = true;
|
||||
fqdn = "mail.neet.dev";
|
||||
dkimKeyBits = 2048;
|
||||
indexDir = "/var/lib/mailindex";
|
||||
enableManageSieve = true;
|
||||
fullTextSearch.enable = true;
|
||||
fullTextSearch.indexAttachments = true;
|
||||
fullTextSearch.memoryLimit = 500;
|
||||
domains = [
|
||||
"neet.space" "neet.dev" "neet.cloud"
|
||||
"runyan.org" "runyan.rocks"
|
||||
"thunderhex.com" "tar.ninja"
|
||||
"bsd.ninja" "bsd.rocks"
|
||||
];
|
||||
loginAccounts = {
|
||||
"jeremy@runyan.org" = {
|
||||
hashedPasswordFile = "/run/agenix/email-pw";
|
||||
aliases = [
|
||||
"@neet.space" "@neet.cloud" "@neet.dev"
|
||||
"@runyan.org" "@runyan.rocks"
|
||||
"@thunderhex.com" "@tar.ninja"
|
||||
"@bsd.ninja" "@bsd.rocks"
|
||||
];
|
||||
};
|
||||
};
|
||||
rejectRecipients = [
|
||||
"george@runyan.org"
|
||||
"joslyn@runyan.org"
|
||||
"damon@runyan.org"
|
||||
"jonas@runyan.org"
|
||||
];
|
||||
certificateScheme = 3; # use let's encrypt for certs
|
||||
};
|
||||
age.secrets.email-pw.file = ../../secrets/email-pw.age;
|
||||
|
||||
# sendmail to use xxx@domain instead of xxx@mail.domain
|
||||
services.postfix.origin = "$mydomain";
|
||||
|
||||
# relay sent mail through mailgun
|
||||
# https://www.howtoforge.com/community/threads/different-smtp-relays-for-different-domains-in-postfix.82711/#post-392620
|
||||
services.postfix.config = {
|
||||
smtp_sasl_auth_enable = "yes";
|
||||
smtp_sasl_security_options = "noanonymous";
|
||||
smtp_sasl_password_maps = "hash:/var/lib/postfix/conf/sasl_relay_passwd";
|
||||
smtp_use_tls = "yes";
|
||||
sender_dependent_relayhost_maps = "hash:/var/lib/postfix/conf/sender_relay";
|
||||
smtp_sender_dependent_authentication = "yes";
|
||||
};
|
||||
services.postfix.mapFiles.sender_relay = let
|
||||
relayHost = "[smtp.mailgun.org]:587";
|
||||
in pkgs.writeText "sender_relay" ''
|
||||
@neet.space ${relayHost}
|
||||
@neet.cloud ${relayHost}
|
||||
@neet.dev ${relayHost}
|
||||
@runyan.org ${relayHost}
|
||||
@runyan.rocks ${relayHost}
|
||||
@thunderhex.com ${relayHost}
|
||||
@tar.ninja ${relayHost}
|
||||
@bsd.ninja ${relayHost}
|
||||
@bsd.rocks ${relayHost}
|
||||
'';
|
||||
services.postfix.mapFiles.sasl_relay_passwd = "/run/agenix/sasl_relay_passwd";
|
||||
age.secrets.sasl_relay_passwd.file = ../../secrets/sasl_relay_passwd.age;
|
||||
|
||||
services.nextcloud = {
|
||||
enable = true;
|
||||
https = true;
|
||||
package = pkgs.nextcloud22;
|
||||
hostName = "neet.cloud";
|
||||
config.dbtype = "sqlite";
|
||||
config.adminuser = "jeremy";
|
||||
config.adminpassFile = "/run/agenix/nextcloud-pw";
|
||||
autoUpdateApps.enable = true;
|
||||
};
|
||||
age.secrets.nextcloud-pw = {
|
||||
file = ../../secrets/nextcloud-pw.age;
|
||||
owner = "nextcloud";
|
||||
};
|
||||
services.nginx.virtualHosts.${config.services.nextcloud.hostName} = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
};
|
||||
}
|
||||
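The mailgun relay above reads SASL credentials from an agenix secret via `smtp_sasl_password_maps`, but the expected layout of that file is not shown. Postfix wants one `<relayhost or sender> <user>:<password>` entry per line; a hedged, unencrypted stand-in (the account and password are placeholders, the real data stays in `secrets/sasl_relay_passwd.age`):

```nix
services.postfix.mapFiles.sasl_relay_passwd = pkgs.writeText "sasl_relay_passwd" ''
  # keyed by relay host (or by sender address, since
  # smtp_sender_dependent_authentication = yes)
  [smtp.mailgun.org]:587 postmaster@mg.example.org:hypothetical-mailgun-smtp-password
'';
```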
36 machines/liza/hardware-configuration.nix Normal file
@@ -0,0 +1,36 @@
# Do not modify this file! It was generated by ‘nixos-generate-config’
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:

{
  imports =
    [ (modulesPath + "/profiles/qemu-guest.nix")
    ];

  boot.initrd.availableKernelModules = [ "ata_piix" "virtio_pci" "floppy" "sr_mod" "virtio_blk" ];
  boot.initrd.kernelModules = [ "dm-snapshot" ];
  boot.kernelModules = [ ];
  boot.extraModulePackages = [ ];

  fileSystems."/" =
    { device = "/dev/disk/by-uuid/b90eaf3c-2f91-499a-a066-861e0f4478df";
      fsType = "btrfs";
    };

  fileSystems."/home" =
    { device = "/dev/disk/by-uuid/b90eaf3c-2f91-499a-a066-861e0f4478df";
      fsType = "btrfs";
      options = [ "subvol=home" ];
    };

  fileSystems."/boot" =
    { device = "/dev/disk/by-uuid/2b8f6f6d-9358-4d30-8341-7426574e0819";
      fsType = "ext3";
    };

  swapDevices =
    [ { device = "/dev/disk/by-uuid/ef7a83db-4b33-41d1-85fc-cff69e480352"; }
    ];

}
@@ -10,6 +10,8 @@
  networking.hostName = "nat";
  networking.interfaces.ens160.useDHCP = true;

  services.zerotierone.enable = true;

  de.enable = true;
  de.touchpad.enable = true;
}

@@ -12,14 +12,12 @@
  boot.extraModulePackages = [ ];

  fileSystems."/" =
    {
      device = "/dev/disk/by-uuid/02a8c0c7-fd4e-4443-a83c-2d0b63848779";
    { device = "/dev/disk/by-uuid/02a8c0c7-fd4e-4443-a83c-2d0b63848779";
      fsType = "btrfs";
    };

  fileSystems."/boot" =
    {
      device = "/dev/disk/by-uuid/0C95-1290";
    { device = "/dev/disk/by-uuid/0C95-1290";
      fsType = "vfat";
    };

@@ -1,10 +0,0 @@
{ config, pkgs, lib, ... }:

{
  imports = [
    ./hardware-configuration.nix
  ];

  networking.hostName = "phil";
  services.gitea-runner.enable = true;
}
@@ -1,46 +0,0 @@
# Do not modify this file! It was generated by ‘nixos-generate-config’
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:

{
  imports =
    [
      (modulesPath + "/profiles/qemu-guest.nix")
    ];

  # because grub just doesn't work for some reason
  boot.loader.systemd-boot.enable = true;

  remoteLuksUnlock.enable = true;
  remoteLuksUnlock.enableTorUnlock = false;

  boot.initrd.availableKernelModules = [ "xhci_pci" ];
  boot.initrd.kernelModules = [ "dm-snapshot" ];
  boot.kernelModules = [ ];
  boot.extraModulePackages = [ ];

  boot.initrd.luks.devices."enc-pv" = {
    device = "/dev/disk/by-uuid/d26c1820-4c39-4615-98c2-51442504e194";
    allowDiscards = true;
  };

  fileSystems."/" =
    {
      device = "/dev/disk/by-uuid/851bfde6-93cd-439e-9380-de28aa87eda9";
      fsType = "btrfs";
    };

  fileSystems."/boot" =
    {
      device = "/dev/disk/by-uuid/F185-C4E5";
      fsType = "vfat";
    };

  swapDevices =
    [{ device = "/dev/disk/by-uuid/d809e3a1-3915-405a-a200-4429c5efdf87"; }];

  networking.interfaces.enp0s6.useDHCP = lib.mkDefault true;

  nixpkgs.hostPlatform = lib.mkDefault "aarch64-linux";
}
@@ -1,21 +0,0 @@
{
  hostNames = [
    "phil"
    "phil.neet.dev"
  ];

  arch = "aarch64-linux";

  systemRoles = [
    "server"
    "gitea-runner"
    "nix-builder"
  ];

  hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBlgRPpuUkZqe8/lHugRPm/m2vcN9psYhh5tENHZt9I2";

  remoteUnlock = {
    hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIK0RodotOXLMy/w70aa096gaNqPBnfgiXR5ZAH4+wGzd";
    clearnetHost = "unlock.phil.neet.dev";
  };
}
@@ -5,24 +5,29 @@
    ./hardware-configuration.nix
  ];

  networking.hostName = "ponyo";

  firmware.x86_64.enable = true;
  bios = {
    enable = true;
    device = "/dev/sda";
  };

  luks = {
    enable = true;
    device.path = "/dev/disk/by-uuid/4cc36be4-dbff-4afe-927d-69bf4637bae2";
  };

  system.autoUpgrade.enable = true;

  # p2p mesh network
  services.tailscale.exitNode = true;
  services.zerotierone.enable = true;

  # email server
  mailserver.enable = true;

  # nextcloud
  services.nextcloud.enable = true;

  # git
  services.gitea = {
    enable = true;
    hostname = "git.neet.dev";
    disableRegistration = true;
  };

  # IRC
  services.thelounge = {
    enable = true;
    port = 9000;
@@ -34,14 +39,12 @@
    };
  };

  # mumble
  services.murmur = {
    enable = true;
    port = 23563;
    domain = "voice.neet.space";
  };

  # IRC bot
  services.drastikbot = {
    enable = true;
    wolframAppIdFile = "/run/agenix/wolframalpha";
@@ -50,11 +53,8 @@
    file = ../../secrets/wolframalpha.age;
    owner = config.services.drastikbot.user;
  };
  backup.group."dailybot".paths = [
    config.services.drastikbot.dataDir
  ];

  # music radio
  # wrap radio in a VPN
  vpn-container.enable = true;
  vpn-container.config = {
    services.radio = {
@@ -62,7 +62,11 @@
      host = "radio.runyan.org";
    };
  };
  pia.wireguard.badPortForwardPorts = [ ];

  # tailscale
  services.tailscale.exitNode = true;

  # icecast endpoint + website
  services.nginx.virtualHosts."radio.runyan.org" = {
    enableACME = true;
    forceSSL = true;
@@ -77,7 +81,6 @@
    };
  };

  # matrix home server
  services.matrix = {
    enable = true;
    host = "neet.space";
@@ -95,36 +98,67 @@
      secret = "a8369a0e96922abf72494bb888c85831b";
    };
  };
  # pin postgresql for matrix (will need to migrate eventually)
  services.postgresql.package = pkgs.postgresql_11;

  # iodine DNS-based vpn
  services.iodine.server.enable = true;
  services.searx = {
    enable = true;
    environmentFile = "/run/agenix/searx";
    settings = {
      server.port = 43254;
      server.secret_key = "@SEARX_SECRET_KEY@";
      engines = [ {
        name = "wolframalpha";
        shortcut = "wa";
        api_key = "@WOLFRAM_API_KEY@";
        engine = "wolframalpha_api";
      } ];
    };
  };
  services.nginx.virtualHosts."search.neet.space" = {
    enableACME = true;
    forceSSL = true;
    locations."/" = {
      proxyPass = "http://localhost:${toString config.services.searx.settings.server.port}";
    };
  };
  age.secrets.searx.file = ../../secrets/searx.age;

  # iodine DNS-based vpn
  services.iodine.server = {
    enable = true;
    ip = "192.168.99.1";
    domain = "tun.neet.dev";
    passwordFile = "/run/agenix/iodine";
  };
  age.secrets.iodine.file = ../../secrets/iodine.age;
  networking.firewall.allowedUDPPorts = [ 53 ];

  networking.nat.internalInterfaces = [
    "dns0" # iodine
  ];

  # proxied web services
  services.nginx.enable = true;
  services.nginx.virtualHosts."jellyfin.neet.cloud" = {
    enableACME = true;
    forceSSL = true;
    locations."/" = {
      proxyPass = "http://s0.koi-bebop.ts.net";
      proxyPass = "http://s0.zt.neet.dev";
      proxyWebsockets = true;
    };
  };
  services.nginx.virtualHosts."navidrome.neet.cloud" = {
    enableACME = true;
    forceSSL = true;
    locations."/".proxyPass = "http://s0.koi-bebop.ts.net:4533";
    locations."/".proxyPass = "http://s0.zt.neet.dev:4533";
  };

  # TODO replace with a proper file hosting service
  services.nginx.virtualHosts."tmp.neet.dev" = {
    enableACME = true;
    forceSSL = true;
    root = "/var/www/tmp";
  };

  # redirect runyan.org to github
  # redirect to github
  services.nginx.virtualHosts."runyan.org" = {
    enableACME = true;
    forceSSL = true;
@@ -133,7 +167,6 @@
    '';
  };

  # owncast live streaming
  services.owncast.enable = true;
  services.owncast.hostname = "live.neet.dev";
}
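The searx block references `@SEARX_SECRET_KEY@` and `@WOLFRAM_API_KEY@`, which the NixOS searx module substitutes at runtime with values taken from `environmentFile`. A sketch of an unencrypted stand-in for `secrets/searx.age` (both values are made up; the real file is decrypted to `/run/agenix/searx`):

```nix
# systemd EnvironmentFile syntax: one KEY=value per line; sample values only
environment.etc."searx/env.sample".source = pkgs.writeText "searx-env-sample" ''
  SEARX_SECRET_KEY=some-long-random-string
  WOLFRAM_API_KEY=XXXXXX-XXXXXXXXXX
'';
```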
@@ -2,8 +2,7 @@

{
  imports =
    [
      (modulesPath + "/profiles/qemu-guest.nix")
    [ (modulesPath + "/profiles/qemu-guest.nix")
    ];

  boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "virtio_pci" "virtio_scsi" "sd_mod" ];
@@ -11,26 +10,13 @@
  boot.kernelModules = [ "kvm-intel" "nvme" ];
  boot.extraModulePackages = [ ];

  firmware.x86_64.enable = true;

  bios = {
    enable = true;
    device = "/dev/sda";
  };

  remoteLuksUnlock.enable = true;
  boot.initrd.luks.devices."enc-pv".device = "/dev/disk/by-uuid/4cc36be4-dbff-4afe-927d-69bf4637bae2";
  boot.initrd.luks.devices."enc-pv2".device = "/dev/disk/by-uuid/e52b01b3-81c8-4bb2-ae7e-a3d9c793cb00"; # expanded disk

  fileSystems."/" =
    {
      device = "/dev/mapper/enc-pv";
    { device = "/dev/mapper/enc-pv";
      fsType = "btrfs";
    };

  fileSystems."/boot" =
    {
      device = "/dev/disk/by-uuid/d3a3777d-1e70-47fa-a274-804dc70ee7fd";
    { device = "/dev/disk/by-uuid/d3a3777d-1e70-47fa-a274-804dc70ee7fd";
      fsType = "ext4";
    };

@@ -41,5 +27,11 @@
    }
  ];

  networking.interfaces.eth0.useDHCP = true;
  # The global useDHCP flag is deprecated, therefore explicitly set to false here.
  # Per-interface useDHCP will be mandatory in the future, so this generated config
  # replicates the default behaviour.
  networking.useDHCP = lib.mkDefault false;
  networking.interfaces.eth0.useDHCP = lib.mkDefault true;

  hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
}
@@ -1,28 +0,0 @@
{
  hostNames = [
    "ponyo"
    "ponyo.neet.dev"
    "git.neet.dev"
  ];

  arch = "x86_64-linux";

  systemRoles = [
    "server"
    "email-server"
    "iodine"
    "pia"
    "nextcloud"
    "dailybot"
    "gitea"
  ];

  hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMBBlTAIp38RhErU1wNNV5MBeb+WGH0mhF/dxh5RsAXN";

  remoteUnlock = {
    hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIC9LQuuImgWlkjDhEEIbM1wOd+HqRv1RxvYZuLXPSdRi";

    clearnetHost = "unlock.ponyo.neet.dev";
    onionHost = "cfamr6artx75qvt7ho3rrbsc7mkucmv5aawebwflsfuorusayacffryd.onion";
  };
}
119 machines/ray/configuration.nix Normal file
@@ -0,0 +1,119 @@
{ config, pkgs, lib, ... }:

{
  imports = [
    ./hardware-configuration.nix
  ];

  firmware.x86_64.enable = true;
  efi.enable = true;

  boot.initrd.luks.devices."enc-pv" = {
    device = "/dev/disk/by-uuid/c1822e5f-4137-44e1-885f-954e926583ce";
    allowDiscards = true;
  };

  boot.binfmt.emulatedSystems = [ "aarch64-linux" ];

  networking.hostName = "ray";

  hardware.enableAllFirmware = true;

  # depthai
  services.udev.extraRules = ''
    SUBSYSTEM=="usb", ATTRS{idVendor}=="03e7", MODE="0666"
  '';

  # gpu
  services.xserver.videoDrivers = [ "nvidia" ];
  hardware.nvidia = {
    modesetting.enable = true; # for nvidia-vaapi-driver
    prime = {
      reverseSync.enable = true;
      offload.enableOffloadCmd = true;
      nvidiaBusId = "PCI:1:0:0";
      amdgpuBusId = "PCI:4:0:0";
    };
  };

  # virt-manager
  virtualisation.libvirtd.enable = true;
  programs.dconf.enable = true;
  virtualisation.spiceUSBRedirection.enable = true;
  environment.systemPackages = with pkgs; [ virt-manager ];
  users.users.googlebot.extraGroups = [ "libvirtd" ];

  # vpn-container.enable = true;
  # containers.vpn.interfaces = [ "piaw" ];

  # allow traffic for wireguard interface to pass
  # networking.firewall = {
  #   # wireguard trips rpfilter up
  #   extraCommands = ''
  #     ip46tables -t raw -I nixos-fw-rpfilter -p udp -m udp --sport 51820 -j RETURN
  #     ip46tables -t raw -I nixos-fw-rpfilter -p udp -m udp --dport 51820 -j RETURN
  #   '';
  #   extraStopCommands = ''
  #     ip46tables -t raw -D nixos-fw-rpfilter -p udp -m udp --sport 51820 -j RETURN || true
  #     ip46tables -t raw -D nixos-fw-rpfilter -p udp -m udp --dport 51820 -j RETURN || true
  #   '';
  # };

  # systemd.services.pia-vpn-wireguard = {
  #   enable = true;
  #   description = "PIA VPN WireGuard Tunnel";
  #   requires = [ "network-online.target" ];
  #   after = [ "network.target" "network-online.target" ];
  #   wantedBy = [ "multi-user.target" ];
  #   environment.DEVICE = "piaw";
  #   path = with pkgs; [ kmod wireguard-tools jq curl ];

  #   serviceConfig = {
  #     Type = "oneshot";
  #     RemainAfterExit = true;
  #   };

  #   script = ''
  #     WG_HOSTNAME=zurich406
  #     WG_SERVER_IP=156.146.62.153

  #     PIA_USER=`sed '1q;d' /run/agenix/pia-login.conf`
  #     PIA_PASS=`sed '2q;d' /run/agenix/pia-login.conf`
  #     PIA_TOKEN=`curl -s -u "$PIA_USER:$PIA_PASS" https://www.privateinternetaccess.com/gtoken/generateToken | jq -r '.token'`
  #     privKey=$(wg genkey)
  #     pubKey=$(echo "$privKey" | wg pubkey)
  #     wireguard_json=`curl -s -G --connect-to "$WG_HOSTNAME::$WG_SERVER_IP:" --cacert "${./ca.rsa.4096.crt}" --data-urlencode "pt=$PIA_TOKEN" --data-urlencode "pubkey=$pubKey" https://$WG_HOSTNAME:1337/addKey`

  #     echo "
  #     [Interface]
  #     Address = $(echo "$wireguard_json" | jq -r '.peer_ip')
  #     PrivateKey = $privKey
  #     ListenPort = 51820
  #     [Peer]
  #     PersistentKeepalive = 25
  #     PublicKey = $(echo "$wireguard_json" | jq -r '.server_key')
  #     AllowedIPs = 0.0.0.0/0
  #     Endpoint = $WG_SERVER_IP:$(echo "$wireguard_json" | jq -r '.server_port')
  #     " > /tmp/piaw.conf

  #     # TODO make /tmp/piaw.conf ro to root

  #     ${lib.optionalString (!config.boot.isContainer) "modprobe wireguard"}
  #     wg-quick up /tmp/piaw.conf
  #   '';

  #   preStop = ''
  #     wg-quick down /tmp/piaw.conf
  #   '';
  # };
  # age.secrets."pia-login.conf".file = ../../secrets/pia-login.conf;

  virtualisation.docker.enable = true;

  services.zerotierone.enable = true;

  services.mount-samba.enable = true;

  de.enable = true;
  de.touchpad.enable = true;
}
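`prime.offload.enableOffloadCmd = true` above installs an `nvidia-offload` wrapper for running a single program on the discrete GPU. A small optional convenience for spot-checking that offload works (sketch only; the alias name is invented and `glxinfo` is assumed to come from `pkgs.glxinfo`):

```nix
environment.systemPackages = [ pkgs.glxinfo ];
programs.bash.shellAliases = {
  # should report the NVIDIA card as the OpenGL renderer when offload works
  check-offload = "nvidia-offload glxinfo | grep 'OpenGL renderer'";
};
```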
@@ -1,55 +0,0 @@
{ config, pkgs, lib, ... }:

{
  imports = [
    ./hardware-configuration.nix
  ];

  # for luks unlock over tor
  services.tor.enable = true;
  services.tor.client.enable = true;

  # services.howdy.enable = true;

  hardware.openrazer.enable = true;
  hardware.openrazer.users = [ "googlebot" ];
  hardware.openrazer.devicesOffOnScreensaver = false;
  users.users.googlebot.packages = [ pkgs.polychromatic ];

  services.udev.extraRules = ''
    # depthai
    SUBSYSTEM=="usb", ATTRS{idVendor}=="03e7", MODE="0666"

    # Moonlander
    # Rules for Oryx web flashing and live training
    KERNEL=="hidraw*", ATTRS{idVendor}=="16c0", MODE="0664", GROUP="plugdev"
    KERNEL=="hidraw*", ATTRS{idVendor}=="3297", MODE="0664", GROUP="plugdev"
    # Wally Flashing rules for the Moonlander and Planck EZ
    SUBSYSTEMS=="usb", ATTRS{idVendor}=="0483", ATTRS{idProduct}=="df11", MODE:="0666", SYMLINK+="stm32_dfu"
  '';
  users.groups.plugdev = {
    members = [ "googlebot" ];
  };

  # virt-manager
  virtualisation.libvirtd.enable = true;
  programs.dconf.enable = true;
  virtualisation.spiceUSBRedirection.enable = true;
  environment.systemPackages = with pkgs; [ virt-manager ];
  users.users.googlebot.extraGroups = [ "libvirtd" ];

  # allow building ARM derivations
  boot.binfmt.emulatedSystems = [ "aarch64-linux" ];

  services.spotifyd.enable = true;

  virtualisation.docker.enable = true;

  virtualisation.appvm.enable = true;
  virtualisation.appvm.user = "googlebot";

  services.mount-samba.enable = true;

  de.enable = true;
  de.touchpad.enable = true;
}
@@ -5,58 +5,37 @@

{
  imports =
    [
      (modulesPath + "/installer/scan/not-detected.nix")
    [ (modulesPath + "/installer/scan/not-detected.nix")
    ];

  # boot
  efi.enable = true;
  boot.initrd.availableKernelModules = [ "nvme" "xhci_pci" "usbhid" "usb_storage" "sd_mod" ];
  boot.initrd.kernelModules = [ "dm-snapshot" ];

  # kernel
  boot.kernelModules = [ "kvm-amd" ];
  boot.extraModulePackages = [ ];

  # firmware
  firmware.x86_64.enable = true;
  hardware.enableAllFirmware = true;

  # gpu
  services.xserver.videoDrivers = [ "nvidia" ];
  hardware.nvidia = {
    modesetting.enable = true; # for nvidia-vaapi-driver
    prime = {
      reverseSync.enable = true;
      offload.enableOffloadCmd = true;
      nvidiaBusId = "PCI:1:0:0";
      amdgpuBusId = "PCI:4:0:0";
    };
  };

  # disks
  remoteLuksUnlock.enable = true;
  boot.initrd.luks.devices."enc-pv" = {
    device = "/dev/disk/by-uuid/c1822e5f-4137-44e1-885f-954e926583ce";
    allowDiscards = true;
  };
  fileSystems."/" =
    {
      device = "/dev/vg/root";
    { device = "/dev/vg/root";
      fsType = "btrfs";
      options = [ "subvol=root" ];
    };

  fileSystems."/home" =
    {
      device = "/dev/vg/root";
    { device = "/dev/vg/root";
      fsType = "btrfs";
      options = [ "subvol=home" ];
    };

  fileSystems."/boot" =
    {
      device = "/dev/disk/by-uuid/2C85-2B59";
    { device = "/dev/disk/by-uuid/2C85-2B59";
      fsType = "vfat";
    };

  swapDevices =
    [{ device = "/dev/vg/swap"; }];
    [ { device = "/dev/vg/swap"; }
    ];

  hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;

  # high-resolution display
  hardware.video.hidpi.enable = lib.mkDefault true;
}

@@ -1,22 +0,0 @@
{
  hostNames = [
    "ray"
  ];

  arch = "x86_64-linux";

  systemRoles = [
    "personal"
    "deploy"
  ];

  hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDQM8hwKRgl8cZj7UVYATSLYu4LhG7I0WFJ9m2iWowiB";

  userKeys = [
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFeTK1iARlNIKP/DS8/ObBm9yUM/3L1Ub4XI5A2r9OzP"
  ];

  deployKeys = [
    "sk-ssh-ed25519@openssh.com AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAIEaGIwLiUa6wQLlEF+keQOIYy/tCmJvV6eENzUQjSqW2AAAABHNzaDo="
  ];
}
35 machines/reg/configuration.nix Normal file
@@ -0,0 +1,35 @@
{ config, pkgs, fetchurl, ... }:

{
  imports = [
    ./hardware-configuration.nix
  ];

  # smcxui7kwoyxpswwage4fkcppxnqzpw33xcmxmlhxvk5gcp5s6lrtfad.onion

  boot.kernelPackages = pkgs.linuxPackages_5_12;

  firmware.x86_64.enable = true;
  efi.enable = true;

  luks = {
    enable = true;
    device = {
      path = "/dev/disk/by-uuid/975d8427-2c6a-440d-a1d2-18dd15ba5bc2";
      allowDiscards = true;
    };
  };

  networking.hostName = "reg";

  de.enable = true;
  de.touchpad.enable = true;

  services.zerotierone.enable = true;

  # VNC
  networking.firewall.allowedTCPPorts = [ 5900 ];

  networking.interfaces.enp57s0f1.useDHCP = true;
}
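Opening TCP 5900 in the global firewall exposes the VNC endpoint on every interface. If the intent is access over the ZeroTier mesh only, a narrower variant could scope the rule to that interface (sketch; the interface name `ztabcdef1234` is hypothetical):

```nix
# accept VNC only on the ZeroTier interface instead of globally
networking.firewall.interfaces."ztabcdef1234".allowedTCPPorts = [ 5900 ];
```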
38 machines/reg/hardware-configuration.nix Normal file
@@ -0,0 +1,38 @@
# Do not modify this file! It was generated by ‘nixos-generate-config’
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:

{
  imports =
    [ (modulesPath + "/installer/scan/not-detected.nix")
    ];

  boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "nvme" "usb_storage" "sd_mod" "rtsx_pci_sdmmc" ];
  boot.initrd.kernelModules = [ "dm-snapshot" ];
  boot.kernelModules = [ "kvm-intel" ];
  boot.extraModulePackages = [ ];

  fileSystems."/" =
    { device = "/dev/disk/by-uuid/b3a2906b-e9a5-45bd-aac5-960297437fe9";
      fsType = "btrfs";
      options = [ "subvol=root" "noatime" "nodiratime" "discard" ];
    };

  fileSystems."/home" =
    { device = "/dev/disk/by-uuid/b3a2906b-e9a5-45bd-aac5-960297437fe9";
      fsType = "btrfs";
      options = [ "subvol=home" "noatime" "nodiratime" "discard" ];
    };

  fileSystems."/boot" =
    { device = "/dev/disk/by-uuid/6C41-24A0";
      fsType = "vfat";
    };

  swapDevices =
    [ { device = "/dev/disk/by-uuid/34ec322f-79c3-4993-a073-ef1da3c6ef51"; }
    ];

  powerManagement.cpuFreqGovernor = lib.mkDefault "powersave";
}
@@ -1,38 +0,0 @@
{ config, lib, pkgs, ... }:

{
  imports = [
    ./hardware-configuration.nix
    ./router.nix
  ];

  # https://dataswamp.org/~solene/2022-08-03-nixos-with-live-usb-router.html
  # https://github.com/mdlayher/homelab/blob/391cfc0de06434e4dee0abe2bec7a2f0637345ac/nixos/routnerr-2/configuration.nix
  # https://github.com/skogsbrus/os/blob/master/sys/router.nix
  # http://trac.gateworks.com/wiki/wireless/wifi

  system.autoUpgrade.enable = true;

  services.tailscale.exitNode = true;

  router.enable = true;
  router.privateSubnet = "192.168.3";

  services.iperf3.enable = true;

  # networking.useDHCP = lib.mkForce true;

  # TODO
  # networking.usePredictableInterfaceNames = true;

  powerManagement.cpuFreqGovernor = "ondemand";

  services.irqbalance.enable = true;

  # services.miniupnpd = {
  #   enable = true;
  #   externalInterface = "eth0";
  #   internalIPs = [ "br0" ];
  # };
}