1 Commits

Author SHA1 Message Date
b7549e63f5 prototype 2023-04-26 14:46:55 -06:00
159 changed files with 3068 additions and 4253 deletions

View File

@@ -1,160 +0,0 @@
---
name: create-workspace
description: >
Creates a new sandboxed workspace (isolated dev environment) by adding
NixOS configuration for a VM, container, or Incus instance. Use when
the user wants to create, set up, or add a new sandboxed workspace.
---
# Create Sandboxed Workspace
Creates an isolated development environment backed by a VM (microvm.nix), container (systemd-nspawn), or Incus instance. This produces:
1. A workspace config file at `machines/<machine>/workspaces/<name>.nix`
2. A registration entry in `machines/<machine>/default.nix`
## Step 1: Parse Arguments
Extract the workspace name and backend type from `$ARGUMENTS`. If either is missing, ask the user.
- **Name**: lowercase alphanumeric with hyphens (e.g., `my-project`)
- **Type**: one of `vm`, `container`, or `incus`
## Step 2: Detect Machine
Run `hostname` to get the current machine name. Verify that `machines/<hostname>/default.nix` exists.
If the machine directory doesn't exist, stop and tell the user this machine isn't managed by this flake.
## Step 3: Allocate IP Address
Read `machines/<hostname>/default.nix` to find existing `sandboxed-workspace.workspaces` entries and their IPs.
All IPs are in the `192.168.83.0/24` subnet. Use these ranges by convention:
| Type | IP Range |
|------|----------|
| vm | 192.168.83.10 - .49 |
| container | 192.168.83.50 - .89 |
| incus | 192.168.83.90 - .129 |
Pick the next available IP in the appropriate range. If no workspaces exist yet for that type, use the first IP in the range.
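For example, suppose the registry already contains these entries (hypothetical; the real file will differ):
```nix
sandboxed-workspace.workspaces = {
  api-test = { type = "container"; config = ./workspaces/api-test.nix; ip = "192.168.83.50"; };
  scratch = { type = "container"; config = ./workspaces/scratch.nix; ip = "192.168.83.51"; };
};
```
Here the next free container IP is `192.168.83.52`, while a new VM would still start at `192.168.83.10`.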
## Step 4: Create Workspace Config File
Create `machines/<hostname>/workspaces/<name>.nix`. Use this template:
```nix
{ config, lib, pkgs, ... }:
{
environment.systemPackages = with pkgs; [
# Add packages here
];
}
```
Ask the user if they want any packages pre-installed.
Create the `workspaces/` directory if it doesn't exist.
**Important:** After creating the file, run `git add` on it immediately. Nix flakes only see files tracked by git, so new files must be staged before `nix build` will work.
## Step 5: Register Workspace
Edit `machines/<hostname>/default.nix` to add the workspace entry inside the `sandboxed-workspace` block.
The entry should look like:
```nix
workspaces.<name> = {
type = "<type>";
config = ./workspaces/<name>.nix;
ip = "<allocated-ip>";
};
```
**If the `sandboxed-workspace` block doesn't exist yet**, add the full block:
```nix
sandboxed-workspace = {
enable = true;
workspaces.<name> = {
type = "<type>";
config = ./workspaces/<name>.nix;
ip = "<allocated-ip>";
};
};
```
The machine also needs `networking.sandbox.upstreamInterface` set. Check if it exists; if not, ask the user for their primary network interface name (they can find it with `ip route show default`).
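If it needs to be added, a one-line sketch (the interface name is an assumption; use whatever `ip route show default` reports):
```nix
networking.sandbox.upstreamInterface = "enp3s0"; # example interface name, machine-specific
```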
Do **not** set `hostKey` — it gets auto-generated on first boot and can be added later.
## Step 6: Verify Build
Run a build to check for configuration errors:
```
nix build .#nixosConfigurations.<hostname>.config.system.build.toplevel --no-link
```
If the build fails, fix the configuration and retry.
## Step 7: Deploy
Tell the user to deploy by running:
```
doas nixos-rebuild switch --flake .
```
**Never run this command yourself** — it requires privileges.
## Step 8: Post-Deploy Info
Tell the user to deploy and then start the workspace so the host key gets generated. Provide these instructions:
**Deploy:**
```
doas nixos-rebuild switch --flake .
```
**Starting the workspace:**
```
doas systemctl start <service>
```
Where `<service>` is:
- VM: `microvm@<name>`
- Container: `container@<name>`
- Incus: `incus-workspace-<name>`
Or use the auto-generated shell alias: `workspace_<name>_start`
**Connecting:**
```
ssh googlebot@workspace-<name>
```
Or use the alias: `workspace_<name>`
**Never run deploy or start commands yourself** — they require privileges.
## Step 9: Add Host Key
After the user has deployed and started the workspace, add the SSH host key to the workspace config. Do NOT skip this step — always wait for the user to confirm they've started the workspace, then proceed.
1. Read the host key from `~/sandboxed/<name>/ssh-host-keys/ssh_host_ed25519_key.pub`
2. Add `hostKey = "<contents>";` to the workspace entry in `machines/<hostname>/default.nix` (see the example below)
3. Run the build again to verify
4. Tell the user to redeploy with `doas nixos-rebuild switch --flake .`
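After these steps the completed entry might look like this (all values are hypothetical):
```nix
workspaces.my-project = {
  type = "container";
  config = ./workspaces/my-project.nix;
  ip = "192.168.83.52";
  hostKey = "ssh-ed25519 AAAAC3Nza... root@my-project";
};
```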
## Backend Reference
| | VM | Container | Incus |
|---|---|---|---|
| Isolation | Full kernel (cloud-hypervisor) | Shared kernel (systemd-nspawn) | Unprivileged container |
| Overhead | Higher (separate kernel) | Lower (bind mounts) | Medium |
| Filesystem | virtiofs shares | Bind mounts | Incus-managed |
| Use case | Untrusted code, kernel-level isolation | Fast dev environments | Better security than nspawn |

View File

@@ -1,42 +0,0 @@
name: Check Flake
on: [push]
env:
DEBIAN_FRONTEND: noninteractive
PATH: /run/current-system/sw/bin/
jobs:
check-flake:
runs-on: nixos
steps:
- name: Checkout the repository
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Check Flake
run: nix flake check --all-systems --print-build-logs --log-format raw --show-trace
- name: Build all systems
run: |
nix eval .#nixosConfigurations --apply 'cs: builtins.attrNames cs' --json \
| jq -r '.[]' \
| xargs -I{} nix build ".#nixosConfigurations.{}.config.system.build.toplevel" --no-link --print-build-logs --log-format raw
- name: Push to cache
env:
XDG_CONFIG_HOME: ${{ runner.temp }}/.config
run: |
set -euo pipefail
attic login local "${{ vars.ATTIC_ENDPOINT }}" "${{ secrets.ATTIC_TOKEN }}"
# Get all system toplevel store paths
toplevels=$(nix eval .#nixosConfigurations --apply 'cs: map (n: "${cs.${n}.config.system.build.toplevel}") (builtins.attrNames cs)' --json | jq -r '.[]')
echo "Found $(echo "$toplevels" | wc -l) system toplevels"
# Expand to full closures, deduplicate, and filter out paths already
# signed by cache.nixos.org — only our custom builds need caching
paths=$(echo "$toplevels" \
| xargs nix path-info -r --json \
| jq -r '[to_entries[] | select(.value.signatures | all(startswith("cache.nixos.org") | not)) | .key] | unique[]')
echo "Pushing $(echo "$paths" | wc -l) unique paths to cache"
echo "$paths" | xargs attic push local:nixos

View File

@@ -1,81 +0,0 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## What This Is
A NixOS flake managing multiple machines. All machines import `/common` for shared config, and each machine has its own directory under `/machines/<hostname>/` with a `default.nix` (machine-specific config), `hardware-configuration.nix`, and `properties.nix` (metadata: hostnames, arch, roles, SSH keys).
## Common Commands
```bash
# Build a machine config (check for errors without deploying)
nix build .#nixosConfigurations.<hostname>.config.system.build.toplevel --no-link
# Deploy to local machine (user must run this themselves - requires privileges)
doas nixos-rebuild switch --flake .
# Deploy to a remote machine (boot-only, no activate)
deploy --remote-build --boot --debug-logs --skip-checks .#<hostname>
# Deploy to a remote machine (activate immediately)
deploy --remote-build --debug-logs --skip-checks .#<hostname>
# Update flake lockfile
make update-lockfile
# Update a single flake input
make update-input <input-name>
# Edit an agenix secret
make edit-secret <secret-filename>
# Rekey all secrets (after adding/removing machine host keys)
make rekey-secrets
```
## Architecture
### Machine Discovery (Auto-Registration)
Machines are **not** listed in `flake.nix`. Instead, `common/machine-info/default.nix` recursively scans `/machines/` for any `properties.nix` file and auto-registers that directory as a machine. To add a machine, create `machines/<name>/properties.nix` and `machines/<name>/default.nix`.
`properties.nix` returns a plain attrset (no NixOS module args) with: `hostNames`, `arch`, `systemRoles`, `hostKey`, and optionally `userKeys`, `deployKeys`, `remoteUnlock`.
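A minimal `properties.nix` might look like this (values are illustrative, not copied from the repo):
```nix
{
  hostNames = [ "example" "example.neet.dev" ];
  arch = "x86_64-linux";
  systemRoles = [ "personal" ];
  hostKey = "ssh-ed25519 AAAAC3Nza... root@example";
}
```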
### Role System
Each machine declares `systemRoles` in its `properties.nix` (e.g., `["personal" "dns-challenge"]`). Roles drive conditional config:
- `config.thisMachine.hasRole.<role>` - boolean, used to conditionally enable features (e.g., `de.enable` for the `personal` role; see the sketch after this list)
- `config.machines.withRole.<role>` - list of hostnames with that role
- Roles also determine which machines can decrypt which agenix secrets (see `secrets/secrets.nix`)
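A sketch of the pattern (this mirrors the `de.enable` line in `common/default.nix`):
```nix
# Enable the desktop environment only on machines that hold the "personal" role
de.enable = lib.mkDefault (config.thisMachine.hasRole."personal");
```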
### Secrets (agenix)
Secrets in `/secrets/` are encrypted `.age` files. `secrets.nix` maps each secret to the SSH host keys (by role) that can decrypt it. After changing which machines have access, run `make rekey-secrets`.
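The entries follow the standard agenix shape, roughly like the sketch below (the role-to-keys helper is hypothetical; the real `secrets.nix` may organize this differently):
```nix
{
  # keysForRole is a hypothetical helper returning the host keys of machines with that role
  "backblaze-s3-backups.age".publicKeys = keysForRole "backups";
}
```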
### Sandboxed Workspaces
`common/sandboxed-workspace/` provides isolated dev environments. Three backends: `vm` (microvm/cloud-hypervisor), `container` (systemd-nspawn), `incus`. Workspaces are defined in machine `default.nix` files and their per-workspace config goes in `machines/<hostname>/workspaces/<name>.nix`. The base config (`base.nix`) handles networking, SSH, user setup, and Claude Code pre-configuration.
IP allocation convention: VMs `.10-.49`, containers `.50-.89`, incus `.90-.129` in `192.168.83.0/24`.
### Backups
`common/backups.nix` defines a `backup.group` option. Machines declare backup groups with paths; restic handles daily backups to Backblaze B2 with automatic ZFS/btrfs snapshot support. Each group gets a `restic_<group>` CLI wrapper for manual operations.
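Declaring a group looks roughly like this (group name and path are made up; the option shape follows `common/backups.nix`):
```nix
backup.group.documents = {
  paths = [ "/home/googlebot/documents" ];
};
```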
### Nixpkgs Patching
`flake.nix` applies patches from `/patches/` to nixpkgs before building (workaround for nix#3920).
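The usual workaround looks something like the following sketch (not the repo's exact code; the patch filename is illustrative):
```nix
# Produce a patched copy of the nixpkgs source, then import that instead of the raw input
patchedNixpkgsSrc = pkgs.applyPatches {
  name = "nixpkgs-patched";
  src = inputs.nixpkgs;
  patches = [ ./patches/example.patch ];
};
```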
### Key Conventions
- Uses `doas` instead of `sudo` everywhere
- Fish shell is the default user shell
- Home Manager is used for user-level config (`home/googlebot.nix`)
- `lib/default.nix` extends nixpkgs lib with custom utility functions (extends via `nixpkgs.lib.extend`)
- Overlays are in `/overlays/` and applied globally via `flake.nix`
- The Nix formatter for this project is `nixpkgs-fmt`
- Do not add "Co-Authored-By" lines to commit messages
- Always use `--no-link` when running `nix build`
- Don't use `nix build --dry-run` unless you only need evaluation — it skips the actual build
- Avoid `2>&1` on nix commands — it can cause error output to be missed

View File

@@ -1,52 +0,0 @@
# Lockfile utils
.PHONY: update-lockfile
update-lockfile:
nix flake update --commit-lock-file
.PHONY: update-lockfile-without-commit
update-lockfile-without-commit:
nix flake update
# Agenix utils
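# Note: $(filter-out $@,$(MAKECMDGOALS)) forwards the extra goal word on the
# command line (e.g. `make edit-secret foo.age`) to the recipe as its argument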
.PHONY: edit-secret
edit-secret:
cd secrets && agenix -e $(filter-out $@,$(MAKECMDGOALS))
.PHONY: rekey-secrets
rekey-secrets:
cd secrets && agenix -r
# NixOS utils
.PHONY: clean-old-nixos-profiles
clean-old-nixos-profiles:
doas nix-collect-garbage -d
# Garbage Collect
.PHONY: gc
gc:
nix store gc
# Update a flake input by name (ex: 'nixpkgs')
.PHONY: update-input
update-input:
nix flake update $(filter-out $@,$(MAKECMDGOALS))
# Build Custom Install ISO
.PHONY: iso
iso:
nix build .#packages.x86_64-linux.iso
# Build Custom kexec image
.PHONY: kexec-img
kexec-img:
nix build .#packages.x86_64-linux.kexec
# Deploy a host by name (ex: 's0') but don't activate
.PHONY: deploy
deploy:
deploy --remote-build --boot --debug-logs --skip-checks .#$(filter-out $@,$(MAKECMDGOALS))
# Deploy a host by name (ex: 's0')
.PHONY: deploy-activate
deploy-activate:
deploy --remote-build --debug-logs --skip-checks .#$(filter-out $@,$(MAKECMDGOALS))

View File

@@ -1,32 +1,11 @@
# NixOS Configuration
# My NixOS configurations
A NixOS flake managing multiple machines with role-based configuration, agenix secrets, and sandboxed dev workspaces.
## Layout
- `/common` - shared configuration imported by all machines
- `/boot` - bootloaders, CPU microcode, remote LUKS unlock over Tor
- `/network` - Tailscale, VPN tunneling via PIA
- `/pc` - desktop/graphical config (enabled by the `personal` role)
- `/server` - service definitions and extensions
- `/sandboxed-workspace` - isolated dev environments (VM, container, or Incus)
- `/machines` - per-machine config (`default.nix`, `hardware-configuration.nix`, `properties.nix`)
- `/secrets` - agenix-encrypted secrets, decryptable by machines based on their roles
- `/home` - Home Manager user config
- `/lib` - custom library functions extending nixpkgs lib
- `/overlays` - nixpkgs overlays applied globally
- `/patches` - patches applied to nixpkgs at build time
## Notable Features
**Auto-discovery & roles** — Machines register themselves by placing a `properties.nix` under `/machines/`. No manual listing in `flake.nix`. Roles declared per-machine (`"personal"`, `"dns-challenge"`, etc.) drive feature enablement via `config.thisMachine.hasRole.<role>` and control which agenix secrets each machine can decrypt.
**Machine properties module system** — `properties.nix` files form a separate lightweight module system (`machine-info`) for recording machine metadata (hostnames, architecture, roles, SSH keys). Since every machine's properties are visible to every other machine, each system can reflect on the properties of the entire fleet — enabling automatic SSH trust, role-based secret access, and cross-machine coordination without duplicating information.
**Remote LUKS unlock over Tor** — Machines with encrypted root disks can be unlocked remotely via SSH. An embedded Tor hidden service starts in the initrd so the machine is reachable even without a known IP, using a separate SSH host key for the boot environment.
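Under the hood this builds on the stock NixOS initrd SSH support (the repo wraps it in its own `remoteLuksUnlock` module); a rough sketch of the non-Tor half, with placeholder key paths:
```nix
boot.initrd.network.enable = true;
boot.initrd.network.ssh = {
  enable = true;
  port = 22;
  hostKeys = [ "/etc/secrets/initrd/ssh_host_ed25519_key" ]; # separate key, not the main host key
  authorizedKeys = [ "ssh-ed25519 AAAAC3Nza... user@laptop" ];
};
```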
**VPN containers** — A `vpn-container` module spins up an ephemeral NixOS container with a PIA WireGuard tunnel. The host creates the WireGuard interface and authenticates with PIA, then hands it off to the container's network namespace. The container never has direct internet access: with no network path other than the tunnel, traffic cannot leak around the VPN.
**Sandboxed workspaces** — Isolated dev environments backed by microVMs (cloud-hypervisor), systemd-nspawn containers, or Incus. Each workspace gets a static IP on a NAT'd bridge, auto-generated SSH host keys, shell aliases for management, and comes pre-configured with Claude Code. The sandbox network blocks access to the local LAN while allowing internet.
**Snapshot-aware backups** — Restic backups to Backblaze B2 automatically create ZFS snapshots or btrfs read-only snapshots before backing up, using mount namespaces to bind-mount frozen data over the original paths so restic records correct paths. Each backup group gets a `restic_<group>` CLI wrapper. Supports `.nobackup` marker files.
### Source Layout
- `/common` - common configuration imported into all `/machines`
- `/boot` - config related to bootloaders, cpu microcode, and unlocking LUKS root disks over tor
- `/network` - config for tailscale, and NixOS container with automatic vpn tunneling via PIA
- `/pc` - config that a graphical desktop computer should have. Use `de.enable = true;` to enable everything.
- `/server` - config that creates new nixos services or extends existing ones to meet my needs
- `/machines` - all my NixOS machines along with their machine unique configuration for hardware and services
- `/kexec` - a special machine for generating minimal kexec images. Does not import `/common`
- `/secrets` - encrypted shared secrets unlocked through `/machines` ssh host keys

View File

@@ -1,4 +1,4 @@
{ config, lib, ... }:
{ config, pkgs, lib, ... }:
# Modify auto-update so that it pulls a flake
@@ -6,10 +6,20 @@ let
cfg = config.system.autoUpgrade;
in
{
config = lib.mkIf cfg.enable {
system.autoUpgrade = {
flake = "git+https://git.neet.dev/zuckerberg/nix-config.git";
flags = [ "--recreate-lock-file" "--no-write-lock-file" ]; # ignore lock file, just pull the latest
};
};
config = lib.mkIf cfg.enable (lib.mkMerge [
{
system.autoUpgrade = {
flake = "git+https://git.neet.dev/zuckerberg/nix-config.git";
flags = [ "--recreate-lock-file" "--no-write-lock-file" ]; # ignore lock file, just pull the latest
# dates = "03:40";
# kexecWindow = lib.mkDefault { lower = "01:00"; upper = "05:00"; };
# randomizedDelaySec = "45min";
};
system.autoUpgrade.allowKexec = lib.mkDefault true;
luks.enableKexec = cfg.allowKexec && builtins.length config.luks.devices > 0;
}
]);
}

View File

@@ -2,101 +2,13 @@
let
cfg = config.backup;
hostname = config.networking.hostName;
mkRespository = group: "s3:s3.us-west-004.backblazeb2.com/D22TgIt0-main-backup/${group}";
findmnt = "${pkgs.util-linux}/bin/findmnt";
mount = "${pkgs.util-linux}/bin/mount";
umount = "${pkgs.util-linux}/bin/umount";
btrfs = "${pkgs.btrfs-progs}/bin/btrfs";
zfs = "/run/current-system/sw/bin/zfs";
# Creates snapshots and bind mounts them over original paths within the
# service's mount namespace, so restic sees correct paths but reads frozen data
snapshotHelperFn = ''
snapshot_for_path() {
local group="$1" path="$2" action="$3"
local pathhash fstype
pathhash=$(echo -n "$path" | sha256sum | cut -c1-8)
fstype=$(${findmnt} -n -o FSTYPE -T "$path" 2>/dev/null || echo "unknown")
case "$fstype" in
zfs)
local dataset mount subpath snapname snappath
dataset=$(${findmnt} -n -o SOURCE -T "$path")
mount=$(${findmnt} -n -o TARGET -T "$path")
subpath=''${path#"$mount"}
[[ "$subpath" != /* ]] && subpath="/$subpath"
snapname="''${dataset}@restic-''${group}-''${pathhash}"
snappath="''${mount}/.zfs/snapshot/restic-''${group}-''${pathhash}''${subpath}"
case "$action" in
create)
${zfs} destroy "$snapname" 2>/dev/null || true
${zfs} snapshot "$snapname"
${mount} --bind "$snappath" "$path"
echo "$path"
;;
destroy)
${umount} "$path" 2>/dev/null || true
${zfs} destroy "$snapname" 2>/dev/null || true
;;
esac
;;
btrfs)
local mount subpath snapdir snappath
mount=$(${findmnt} -n -o TARGET -T "$path")
subpath=''${path#"$mount"}
[[ "$subpath" != /* ]] && subpath="/$subpath"
snapdir="/.restic-snapshots/''${group}-''${pathhash}"
snappath="''${snapdir}''${subpath}"
case "$action" in
create)
${btrfs} subvolume delete "$snapdir" 2>/dev/null || true
mkdir -p /.restic-snapshots
${btrfs} subvolume snapshot -r "$mount" "$snapdir" >&2
${mount} --bind "$snappath" "$path"
echo "$path"
;;
destroy)
${umount} "$path" 2>/dev/null || true
${btrfs} subvolume delete "$snapdir" 2>/dev/null || true
;;
esac
;;
*)
echo "No snapshot support for $fstype ($path), using original" >&2
[ "$action" = "create" ] && echo "$path"
;;
esac
}
'';
mkBackup = group: paths: {
repository = mkRespository group;
dynamicFilesFrom = "cat /run/restic-backup-${group}/paths";
backupPrepareCommand = ''
mkdir -p /run/restic-backup-${group}
: > /run/restic-backup-${group}/paths
${snapshotHelperFn}
for path in ${lib.escapeShellArgs paths}; do
snapshot_for_path ${lib.escapeShellArg group} "$path" create >> /run/restic-backup-${group}/paths
done
'';
backupCleanupCommand = ''
${snapshotHelperFn}
for path in ${lib.escapeShellArgs paths}; do
snapshot_for_path ${lib.escapeShellArg group} "$path" destroy
done
rm -rf /run/restic-backup-${group}
'';
inherit paths;
initialize = true;
@@ -109,11 +21,11 @@ let
''--exclude-if-present ".nobackup"''
];
# Keeps backups from up to 6 months ago
pruneOpts = [
"--keep-daily 7" # one backup for each of the last n days
"--keep-weekly 5" # one backup for each of the last n weeks
"--keep-monthly 6" # one backup for each of the last n months
"--keep-monthly 12" # one backup for each of the last n months
"--keep-yearly 75" # one backup for each of the last n years
];
environmentFile = "/run/agenix/backblaze-s3-backups";
@@ -152,25 +64,12 @@ in
};
config = lib.mkIf (cfg.group != null) {
assertions = lib.mapAttrsToList (group: _: {
assertion = config.systemd.services."restic-backups-${group}".enable;
message = "Expected systemd service 'restic-backups-${group}' not found. The nixpkgs restic module may have changed its naming convention.";
}) cfg.group;
services.restic.backups = lib.concatMapAttrs
(group: groupCfg: {
${group} = mkBackup group groupCfg.paths;
})
cfg.group;
# Mount namespace lets us bind mount snapshots over original paths,
# so restic backs up from frozen snapshots while recording correct paths
systemd.services = lib.concatMapAttrs
(group: _: {
"restic-backups-${group}".serviceConfig.PrivateMounts = true;
})
cfg.group;
age.secrets.backblaze-s3-backups.file = ../secrets/backblaze-s3-backups.age;
age.secrets.restic-password.file = ../secrets/restic-password.age;

View File

@@ -1,29 +0,0 @@
{ config, ... }:
{
nix = {
settings = {
substituters = [
"https://cache.nixos.org/"
"https://nix-community.cachix.org"
"http://s0.koi-bebop.ts.net:28338/nixos"
];
trusted-public-keys = [
"nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs="
"nixos:SnTTQutdOJbAmxo6AQ3cbRt5w9f4byMXQODCieBH3PQ="
];
# Allow substituters to be offline
# This isn't exactly ideal; it would be better to make it an error when a
# derivation isn't available from any substituter, and to use this flag as
# intended: deciding whether missing derivations should be built locally.
# See https://github.com/NixOS/nix/issues/6901
fallback = true;
# Authenticate to private nixos cache
netrc-file = config.age.secrets.attic-netrc.path;
};
};
age.secrets.attic-netrc.file = ../secrets/attic-netrc.age;
}

View File

@@ -10,20 +10,18 @@ in
device = mkOption {
type = types.str;
};
configurationLimit = mkOption {
default = 20;
type = types.int;
};
};
config = mkIf cfg.enable {
# Use GRUB 2 for BIOS
boot.loader = {
timeout = 2;
grub = {
enable = true;
device = cfg.device;
version = 2;
useOSProber = true;
configurationLimit = cfg.configurationLimit;
configurationLimit = 20;
theme = pkgs.nixos-grub2-theme;
};
};

View File

@@ -1,10 +1,12 @@
{ ... }:
{ lib, config, pkgs, ... }:
{
imports = [
./firmware.nix
./efi.nix
./bios.nix
./kexec-luks.nix
./luks.nix
./remote-luks-unlock.nix
];
}

View File

@@ -7,23 +7,21 @@ in
{
options.efi = {
enable = mkEnableOption "enable efi boot";
configurationLimit = mkOption {
default = 20;
type = types.int;
};
};
config = mkIf cfg.enable {
# Use GRUB2 for EFI
boot.loader = {
efi.canTouchEfiVariables = true;
timeout = 2;
grub = {
enable = true;
device = "nodev";
version = 2;
efiSupport = true;
useOSProber = true;
# memtest86.enable = true;
configurationLimit = cfg.configurationLimit;
configurationLimit = 20;
theme = pkgs.nixos-grub2-theme;
};
};

View File

@@ -1,4 +1,4 @@
{ lib, config, ... }:
{ lib, config, pkgs, ... }:
with lib;
let

121
common/boot/kexec-luks.nix Normal file
View File

@@ -0,0 +1,121 @@
# Allows kexec'ing as an alternative to rebooting for machines that
# have luks encrypted partitions that need to be mounted at boot.
# These luks partitions will be automatically unlocked, no password,
# or any interaction needed whatsoever.
# This is accomplished by fetching the luks key(s) while the system is running,
# then building a temporary initrd that contains the luks key(s), and kexec'ing.
{ config, lib, pkgs, ... }:
{
options.luks = {
enableKexec = lib.mkEnableOption "Enable support for transparent passwordless kexec while using luks";
};
config = lib.mkIf config.luks.enableKexec {
luks.fallbackToPassword = true;
luks.disableKeyring = true;
boot.initrd.luks.devices = lib.listToAttrs
(builtins.map
(item:
{
name = item;
value = {
masterKeyFile = "/etc/${item}.key";
};
})
config.luks.deviceNames);
systemd.services.prepare-luks-kexec-image = {
description = "Prepare kexec automatic LUKS unlock on kexec reboot without a password";
wantedBy = [ "kexec.target" ];
unitConfig.DefaultDependencies = false;
serviceConfig.Type = "oneshot";
path = with pkgs; [ file kexec-tools coreutils-full cpio findutils gzip xz zstd lvm2 xxd gawk ];
# based on https://github.com/flowztul/keyexec
script = ''
system=/nix/var/nix/profiles/system
old_initrd=$(readlink -f "$system/initrd")
umask 0077
CRYPTROOT_TMPDIR="$(mktemp -d --tmpdir=/dev/shm)"
cleanup() {
shred -fu "$CRYPTROOT_TMPDIR/initrd_contents/etc/"*.key || true
shred -fu "$CRYPTROOT_TMPDIR/new_initrd" || true
shred -fu "$CRYPTROOT_TMPDIR/secret/"* || true
rm -rf "$CRYPTROOT_TMPDIR"
}
# trap cleanup INT TERM EXIT
mkdir -p "$CRYPTROOT_TMPDIR"
cd "$CRYPTROOT_TMPDIR"
# Determine the compression type of the initrd image
compression=$(file -b --mime-type "$old_initrd" | awk -F'/' '{print $2}')
# Decompress the initrd image based on its compression type
case "$compression" in
gzip)
gunzip -c "$old_initrd" > initrd.cpio
;;
xz)
unxz -c "$old_initrd" > initrd.cpio
;;
zstd)
zstd -d -c "$old_initrd" > initrd.cpio
;;
*)
echo "Unsupported compression type: $compression"
exit 1
;;
esac
# Extract the contents of the cpio archive
mkdir -p initrd_contents
cd initrd_contents
cpio -idv < ../initrd.cpio
# Generate keys and add them to the extracted initrd filesystem
luksDeviceNames=(${builtins.concatStringsSep " " config.luks.deviceNames})
for item in "''${luksDeviceNames[@]}"; do
dmsetup --showkeys table "$item" | cut -d ' ' -f5 | xxd -ps -g1 -r > "./etc/$item.key"
done
# Add normal initrd secrets too
${lib.concatStringsSep "\n" (lib.mapAttrsToList (dest: source:
let source' = if source == null then dest else builtins.toString source; in
''
mkdir -p $(dirname "./${dest}")
cp -a ${source'} "./${dest}"
''
) config.boot.initrd.secrets)
}
# Create a new cpio archive with the modified contents
find . | cpio -o -H newc -v > ../new_initrd.cpio
# Compress the new cpio archive using the original compression type
cd ..
case "$compression" in
gzip)
gzip -c new_initrd.cpio > new_initrd
;;
xz)
xz -c new_initrd.cpio > new_initrd
;;
zstd)
zstd -c new_initrd.cpio > new_initrd
;;
esac
kexec --load "$system/kernel" --append "init=$system/init ${builtins.concatStringsSep " " config.boot.kernelParams}" --initrd "$CRYPTROOT_TMPDIR/new_initrd"
'';
};
};
}

74
common/boot/luks.nix Normal file
View File

@@ -0,0 +1,74 @@
# Makes it a little easier to configure luks partitions for boot
# Additionally, this solves a circular dependency between kexec luks
# and NixOS's luks module.
{ config, lib, ... }:
let
cfg = config.luks;
deviceCount = builtins.length cfg.devices;
deviceMap = lib.imap
(i: item: {
device = item;
name =
if deviceCount == 1 then "enc-pv"
else "enc-pv${builtins.toString (i + 1)}";
})
cfg.devices;
in
{
options.luks = {
devices = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
};
allowDiscards = lib.mkOption {
type = lib.types.bool;
default = true;
};
fallbackToPassword = lib.mkEnableOption
"Fallback to interactive passphrase prompt if the cannot be found.";
disableKeyring = lib.mkEnableOption
"When opening LUKS2 devices, don't use the kernel keyring";
# set automatically, don't touch
deviceNames = lib.mkOption {
type = lib.types.listOf lib.types.str;
};
};
config = lib.mkMerge [
{
assertions = [
{
assertion = deviceCount == builtins.length (builtins.attrNames config.boot.initrd.luks.devices);
message = ''
All luks devices must be specified using `luks.devices` not `boot.initrd.luks.devices`.
'';
}
];
}
(lib.mkIf (deviceCount != 0) {
luks.deviceNames = builtins.map (device: device.name) deviceMap;
boot.initrd.luks.devices = lib.listToAttrs (
builtins.map
(item:
{
name = item.name;
value = {
device = item.device;
allowDiscards = cfg.allowDiscards;
fallbackToPassword = cfg.fallbackToPassword;
disableKeyring = cfg.disableKeyring;
};
})
deviceMap);
})
];
}

View File

@@ -1,7 +1,5 @@
{ config, pkgs, lib, ... }:
# TODO: use tailscale instead of tor https://gist.github.com/antifuchs/e30d58a64988907f282c82231dde2cbc
let
cfg = config.remoteLuksUnlock;
in

View File

@@ -1,9 +1,8 @@
{ config, pkgs, lib, ... }:
{ config, pkgs, ... }:
{
imports = [
./backups.nix
./binary-cache.nix
./flakes.nix
./auto-update.nix
./shell.nix
@@ -12,28 +11,20 @@
./server
./pc
./machine-info
./nix-builder.nix
./ssh.nix
./sandboxed-workspace
];
nix.flakes.enable = true;
system.stateVersion = "23.11";
system.stateVersion = "21.11";
networking.useDHCP = lib.mkDefault true;
networking.useDHCP = false;
networking.firewall.enable = true;
networking.firewall.allowPing = true;
time.timeZone = "America/Los_Angeles";
i18n = {
defaultLocale = "en_US.UTF-8";
extraLocaleSettings = {
LANGUAGE = "en_US.UTF-8";
LC_ALL = "en_US.UTF-8";
};
};
time.timeZone = "America/Denver";
i18n.defaultLocale = "en_US.UTF-8";
services.openssh = {
enable = true;
@@ -56,13 +47,12 @@
pciutils
usbutils
killall
screen
micro
helix
lm_sensors
picocom
lf
gnumake
tree
];
nixpkgs.config.allowUnfree = true;
@@ -98,7 +88,4 @@
security.acme.acceptTerms = true;
security.acme.defaults.email = "zuckerberg@neet.dev";
# Enable Desktop Environment if this is a PC (machine role is "personal")
de.enable = lib.mkDefault (config.thisMachine.hasRole."personal");
}

View File

@@ -1,4 +1,4 @@
{ lib, config, ... }:
{ lib, pkgs, config, ... }:
with lib;
let
cfg = config.nix.flakes;
@@ -10,9 +10,16 @@ in
config = mkIf cfg.enable {
nix = {
package = pkgs.nixFlakes;
extraOptions = ''
experimental-features = nix-command flakes
'';
# pin nixpkgs for system commands such as "nix shell"
registry.nixpkgs.flake = config.inputs.nixpkgs;
# pin system nixpkgs to the same version as the flake input
nixPath = [ "nixpkgs=${config.inputs.nixpkgs}" ];
};
};
}

View File

@@ -1,94 +1,10 @@
# Gathers info about each machine to construct the overall configuration
# Ex: each machine automatically trusts every other machine's SSH fingerprint
{ config, lib, ... }:
{ config, lib, pkgs, ... }:
let
machines = config.machines.hosts;
hostOptionsSubmoduleType = lib.types.submodule {
options = {
hostNames = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = ''
List of hostnames for this machine. The first one is the default so it is the target of deployments.
Used for automatically trusting hosts for ssh connections.
'';
};
arch = lib.mkOption {
type = lib.types.enum [ "x86_64-linux" "aarch64-linux" ];
description = ''
The architecture of this machine.
'';
};
systemRoles = lib.mkOption {
type = lib.types.listOf lib.types.str; # TODO: maybe use an enum?
description = ''
The set of roles this machine holds. Affects secrets available. (TODO add service config as well using this info)
'';
};
hostKey = lib.mkOption {
type = lib.types.str;
description = ''
The system ssh host key of this machine. Used for automatically trusting hosts for ssh connections
and for decrypting secrets with agenix.
'';
};
remoteUnlock = lib.mkOption {
default = null;
type = lib.types.nullOr (lib.types.submodule {
options = {
hostKey = lib.mkOption {
type = lib.types.str;
description = ''
The system ssh host key of this machine used for luks boot unlocking only.
'';
};
clearnetHost = lib.mkOption {
default = null;
type = lib.types.nullOr lib.types.str;
description = ''
The hostname resolvable over clearnet used to luks boot unlock this machine
'';
};
onionHost = lib.mkOption {
default = null;
type = lib.types.nullOr lib.types.str;
description = ''
The hostname resolvable over tor used to luks boot unlock this machine
'';
};
};
});
};
userKeys = lib.mkOption {
default = [ ];
type = lib.types.listOf lib.types.str;
description = ''
The list of user keys. Each key here can be used to log into all other systems as `googlebot`.
TODO: consider auto populating other programs that use ssh keys such as gitea
'';
};
deployKeys = lib.mkOption {
default = [ ];
type = lib.types.listOf lib.types.str;
description = ''
The list of deployment keys. Each key here can be used to log into all other systems as `root`.
'';
};
configurationPath = lib.mkOption {
type = lib.types.path;
description = ''
The path to this machine's configuration directory.
'';
};
};
};
in
{
imports = [
@@ -97,14 +13,102 @@ in
];
options.machines = {
hosts = lib.mkOption {
type = lib.types.attrsOf hostOptionsSubmoduleType;
};
};
options.thisMachine.config = lib.mkOption {
# For ease of use, a direct copy of the host config from machines.hosts.${hostName}
type = hostOptionsSubmoduleType;
hosts = lib.mkOption {
type = lib.types.attrsOf
(lib.types.submodule {
options = {
hostNames = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = ''
List of hostnames for this machine. The first one is the default so it is the target of deployments.
Used for automatically trusting hosts for ssh connections.
'';
};
arch = lib.mkOption {
type = lib.types.enum [ "x86_64-linux" "aarch64-linux" ];
description = ''
The architecture of this machine.
'';
};
systemRoles = lib.mkOption {
type = lib.types.listOf lib.types.str; # TODO: maybe use an enum?
description = ''
The set of roles this machine holds. Affects secrets available. (TODO add service config as well using this info)
'';
};
hostKey = lib.mkOption {
type = lib.types.str;
description = ''
The system ssh host key of this machine. Used for automatically trusting hosts for ssh connections
and for decrypting secrets with agenix.
'';
};
remoteUnlock = lib.mkOption {
default = null;
type = lib.types.nullOr (lib.types.submodule {
options = {
hostKey = lib.mkOption {
type = lib.types.str;
description = ''
The system ssh host key of this machine used for luks boot unlocking only.
'';
};
clearnetHost = lib.mkOption {
default = null;
type = lib.types.nullOr lib.types.str;
description = ''
The hostname resolvable over clearnet used to luks boot unlock this machine
'';
};
onionHost = lib.mkOption {
default = null;
type = lib.types.nullOr lib.types.str;
description = ''
The hostname resolvable over tor used to luks boot unlock this machine
'';
};
};
});
};
userKeys = lib.mkOption {
default = [ ];
type = lib.types.listOf lib.types.str;
description = ''
The list of user keys. Each key here can be used to log into all other systems as `googlebot`.
TODO: consider auto populating other programs that use ssh keys such as gitea
'';
};
deployKeys = lib.mkOption {
default = [ ];
type = lib.types.listOf lib.types.str;
description = ''
The list of deployment keys. Each key here can be used to log into all other systems as `root`.
'';
};
configurationPath = lib.mkOption {
type = lib.types.path;
description = ''
The path to this machine's configuration directory.
'';
};
};
});
};
};
config = {
@@ -192,16 +196,5 @@ in
builtins.map (p: { "${dirName p}" = p; }) propFiles;
in
properties ../../machines;
# Don't try to evaluate "thisMachine" when reflecting using moduleless.nix.
# When evaluated by moduleless.nix this will fail due to networking.hostName not
# existing. This is because moduleless.nix is not intended for reflection from the
# perspective of a particular machine but is instead intended for reflecting on
# the properties of all machines as a whole system.
thisMachine.config = config.machines.hosts.${config.networking.hostName};
# Add ssh keys from KeepassXC
machines.ssh.userKeys = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILACiZO7QnB4bcmziVaUkUE0ZPMR0M/yJbbHYsHIZz9g" ];
machines.ssh.deployKeys = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID58MvKGs3GDMMcN8Iyi9S59SciSrVM97wKtOvUAl3li" ];
};
}

View File

@@ -1,55 +1,19 @@
{ config, lib, ... }:
# Maps roles to their hosts.
# machines.withRole = {
# personal = [
# "machine1" "machine3"
# ];
# cache = [
# "machine2"
# ];
# };
#
# A list of all possible roles
# machines.allRoles = [
# "personal"
# "cache"
# ];
#
# For each role, true or false depending on whether the current machine has that role
# thisMachine.hasRole = {
# personal = true;
# cache = false;
# };
# Maps roles to their hosts
{
options.machines.withRole = lib.mkOption {
options.machines.roles = lib.mkOption {
type = lib.types.attrsOf (lib.types.listOf lib.types.str);
};
options.machines.allRoles = lib.mkOption {
type = lib.types.listOf lib.types.str;
};
options.thisMachine.hasRole = lib.mkOption {
type = lib.types.attrsOf lib.types.bool;
};
config = {
machines.withRole = lib.zipAttrs
machines.roles = lib.zipAttrs
(lib.mapAttrsToList
(host: cfg:
lib.foldl (lib.mergeAttrs) { }
(builtins.map (role: { ${role} = host; })
cfg.systemRoles))
config.machines.hosts);
machines.allRoles = lib.attrNames config.machines.withRole;
thisMachine.hasRole = lib.mapAttrs
(role: cfg:
builtins.elem config.networking.hostName config.machines.withRole.${role}
)
config.machines.withRole;
};
}

View File

@@ -39,6 +39,6 @@ in
builtins.map
(host: machines.hosts.${host}.hostKey)
hosts)
machines.withRole;
machines.roles;
};
}

View File

@@ -9,9 +9,9 @@ in
imports = [
./pia-openvpn.nix
./pia-wireguard.nix
./ping.nix
./tailscale.nix
./vpn.nix
./sandbox.nix
];
options.networking.ip_forward = mkEnableOption "Enable ip forwarding";

View File

@@ -11,7 +11,6 @@
# TODO implement this module such that the wireguard VPN doesn't have to live in a container
# TODO don't add forward rules if the PIA port is the same as cfg.forwardedPort
# TODO verify signatures of PIA responses
# TODO `RuntimeMaxSec = "30d";` for pia-vpn-wireguard-init isn't allowed per the systemd logs. Find alternative.
with builtins;
with lib;
@@ -144,14 +143,14 @@ in
systemd.services.pia-vpn-wireguard-init = {
description = "Creates PIA VPN Wireguard Interface";
wants = [ "network-online.target" ];
requires = [ "network-online.target" ];
after = [ "network.target" "network-online.target" ];
before = [ containerServiceName ];
requiredBy = [ containerServiceName ];
partOf = [ containerServiceName ];
wantedBy = [ "multi-user.target" ];
path = with pkgs; [ wireguard-tools jq curl iproute2 iputils ];
path = with pkgs; [ wireguard-tools jq curl iproute ];
serviceConfig = {
Type = "oneshot";
@@ -163,11 +162,6 @@ in
};
script = ''
echo Waiting for internet...
while ! ping -c 1 -W 1 1.1.1.1; do
sleep 1
done
# Prepare to connect by generating wg secrets and auth'ing with PIA since the container
# cannot do without internet to start with. NAT'ing the host's internet would address this
# issue but is not ideal because then leaking network outside of the VPN is more likely.
@@ -220,11 +214,11 @@ in
vpn-container.config.systemd.services.pia-vpn-wireguard = {
description = "Initializes the PIA VPN WireGuard Tunnel";
wants = [ "network-online.target" ];
requires = [ "network-online.target" ];
after = [ "network.target" "network-online.target" ];
wantedBy = [ "multi-user.target" ];
path = with pkgs; [ wireguard-tools iproute2 curl jq iptables ];
path = with pkgs; [ wireguard-tools iproute curl jq iptables ];
serviceConfig = {
Type = "oneshot";

59
common/network/ping.nix Normal file
View File

@@ -0,0 +1,59 @@
{ config, pkgs, lib, ... }:
# keeps peer to peer connections alive with a periodic ping
with lib;
with builtins;
# todo auto restart
let
cfg = config.keepalive-ping;
serviceTemplate = host:
{
"keepalive-ping@${host}" = {
description = "Periodic ping keep alive for ${host} connection";
requires = [ "network-online.target" ];
after = [ "network.target" "network-online.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig.Restart = "always";
path = with pkgs; [ iputils ];
script = ''
ping -i ${cfg.delay} ${host} &>/dev/null
'';
};
};
combineAttrs = foldl recursiveUpdate { };
serviceList = map serviceTemplate cfg.hosts;
services = combineAttrs serviceList;
in
{
options.keepalive-ping = {
enable = mkEnableOption "Enable keep alive ping task";
hosts = mkOption {
type = types.listOf types.str;
default = [ ];
description = ''
Hosts to ping periodically
'';
};
delay = mkOption {
type = types.str;
default = "60";
description = ''
Interval in seconds between pings, for each host being pinged
'';
};
};
config = mkIf cfg.enable {
systemd.services = services;
};
}

View File

@@ -1,126 +0,0 @@
{ config, lib, ... }:
# Network configuration for sandboxed workspaces (VMs and containers)
# Creates a bridge network with NAT for isolated environments
with lib;
let
cfg = config.networking.sandbox;
in
{
options.networking.sandbox = {
enable = mkEnableOption "sandboxed workspace network bridge";
bridgeName = mkOption {
type = types.str;
default = "sandbox-br";
description = "Name of the bridge interface for sandboxed workspaces";
};
subnet = mkOption {
type = types.str;
default = "192.168.83.0/24";
description = "Subnet for sandboxed workspace network";
};
hostAddress = mkOption {
type = types.str;
default = "192.168.83.1";
description = "Host address on the sandbox bridge";
};
upstreamInterface = mkOption {
type = types.str;
description = "Upstream network interface for NAT";
};
};
config = mkIf cfg.enable {
networking.ip_forward = true;
# Create the bridge interface
systemd.network.netdevs."10-${cfg.bridgeName}" = {
netdevConfig = {
Kind = "bridge";
Name = cfg.bridgeName;
};
};
systemd.network.networks."10-${cfg.bridgeName}" = {
matchConfig.Name = cfg.bridgeName;
networkConfig = {
Address = "${cfg.hostAddress}/24";
DHCPServer = false;
IPv4Forwarding = true;
IPv6Forwarding = false;
IPMasquerade = "ipv4";
};
linkConfig.RequiredForOnline = "no";
};
# Automatically attach VM tap interfaces to the bridge
systemd.network.networks."11-vm" = {
matchConfig.Name = "vm-*";
networkConfig.Bridge = cfg.bridgeName;
linkConfig.RequiredForOnline = "no";
};
# Automatically attach container veth interfaces to the bridge
systemd.network.networks."11-container" = {
matchConfig.Name = "ve-*";
networkConfig.Bridge = cfg.bridgeName;
linkConfig.RequiredForOnline = "no";
};
# NAT configuration for sandboxed workspaces
networking.nat = {
enable = true;
internalInterfaces = [ cfg.bridgeName ];
externalInterface = cfg.upstreamInterface;
};
# Enable systemd-networkd (required for bridge setup)
systemd.network.enable = true;
# When NetworkManager handles primary networking, disable systemd-networkd-wait-online.
# The bridge is the only interface managed by systemd-networkd and it never reaches
# "online" state without connected workspaces. NetworkManager-wait-online.service already
# gates network-online.target for the primary interface.
# On pure systemd-networkd systems (no NM), we just ignore the bridge.
systemd.network.wait-online.enable =
!config.networking.networkmanager.enable;
systemd.network.wait-online.ignoredInterfaces =
lib.mkIf (!config.networking.networkmanager.enable) [ cfg.bridgeName ];
# If NetworkManager is enabled, tell it to ignore sandbox interfaces
# This allows systemd-networkd and NetworkManager to coexist
networking.networkmanager.unmanaged = [
"interface-name:${cfg.bridgeName}"
"interface-name:vm-*"
"interface-name:ve-*"
"interface-name:veth*"
];
# Make systemd-resolved listen on the bridge for workspace DNS queries.
# By default resolved only listens on 127.0.0.53 (localhost).
# DNSStubListenerExtra adds the bridge address so workspaces can use the host as DNS.
services.resolved.settings.Resolve.DNSStubListenerExtra = cfg.hostAddress;
# Allow DNS traffic from workspaces to the host
networking.firewall.interfaces.${cfg.bridgeName} = {
allowedTCPPorts = [ 53 ];
allowedUDPPorts = [ 53 ];
};
# Block sandboxes from reaching the local network (private RFC1918 ranges)
# while still allowing public internet access via NAT.
# The sandbox subnet itself is allowed so workspaces can reach the host gateway.
networking.firewall.extraForwardRules = ''
iifname ${cfg.bridgeName} ip daddr ${cfg.hostAddress} accept
iifname ${cfg.bridgeName} ip daddr 10.0.0.0/8 drop
iifname ${cfg.bridgeName} ip daddr 172.16.0.0/12 drop
iifname ${cfg.bridgeName} ip daddr 192.168.0.0/16 drop
'';
};
}

View File

@@ -10,10 +10,6 @@ in
config.services.tailscale.enable = mkDefault (!config.boot.isContainer);
# Trust Tailscale interface - access control is handled by Tailscale ACLs.
# Required because nftables (used by Incus) breaks Tailscale's automatic iptables rules.
config.networking.firewall.trustedInterfaces = mkIf cfg.enable [ "tailscale0" ];
# MagicDNS
config.networking.nameservers = mkIf cfg.enable [ "1.1.1.1" "8.8.8.8" ];
config.networking.search = mkIf cfg.enable [ "koi-bebop.ts.net" ];

View File

@@ -1,4 +1,4 @@
{ config, lib, allModules, ... }:
{ config, pkgs, lib, allModules, ... }:
with lib;
@@ -72,6 +72,9 @@ in
config = {
imports = allModules ++ [ cfg.config ];
# speeds up evaluation
nixpkgs.pkgs = pkgs;
# networking.firewall.enable = mkForce false;
networking.firewall.trustedInterfaces = [
# completely trust internal interface to host

View File

@@ -1,56 +0,0 @@
{ config, lib, ... }:
let
builderUserName = "nix-builder";
builderRole = "nix-builder";
builders = config.machines.withRole.${builderRole};
thisMachineIsABuilder = config.thisMachine.hasRole.${builderRole};
# builders don't include themselves as a remote builder
otherBuilders = lib.filter (hostname: hostname != config.networking.hostName) builders;
in
lib.mkMerge [
# configure builder
(lib.mkIf thisMachineIsABuilder {
users.users.${builderUserName} = {
description = "Distributed Nix Build User";
group = builderUserName;
isSystemUser = true;
createHome = true;
home = "/var/lib/nix-builder";
useDefaultShell = true;
openssh.authorizedKeys.keys = builtins.map
(builderCfg: builderCfg.hostKey)
(builtins.attrValues config.machines.hosts);
};
users.groups.${builderUserName} = { };
nix.settings.trusted-users = [
builderUserName
];
})
# use each builder
{
nix.distributedBuilds = true;
nix.buildMachines = builtins.map
(builderHostname: {
hostName = builderHostname;
system = config.machines.hosts.${builderHostname}.arch;
protocol = "ssh-ng";
sshUser = builderUserName;
sshKey = "/etc/ssh/ssh_host_ed25519_key";
maxJobs = 3;
speedFactor = 10;
supportedFeatures = [ "nixos-test" "benchmark" "big-parallel" "kvm" ];
})
otherBuilders;
# It is very likely that the builder's internet is faster or just as fast
nix.extraOptions = ''
builders-use-substitutes = true
'';
}
]

View File

@@ -1,4 +1,4 @@
{ lib, config, ... }:
{ lib, config, pkgs, ... }:
let
cfg = config.de;
@@ -19,15 +19,6 @@ in
jack.enable = true;
};
services.pipewire.extraConfig.pipewire."92-fix-wine-audio" = {
context.properties = {
default.clock.rate = 48000;
default.clock.quantum = 256;
default.clock.min-quantum = 256;
default.clock.max-quantum = 2048;
};
};
users.users.googlebot.extraGroups = [ "audio" ];
# bt headset support

View File

@@ -17,6 +17,38 @@ let
"PREFIX=$(out)"
];
};
nvidia-vaapi-driver = pkgs.stdenv.mkDerivation rec {
pname = "nvidia-vaapi-driver";
version = "0.0.5";
src = pkgs.fetchFromGitHub {
owner = "elFarto";
repo = pname;
rev = "v${version}";
sha256 = "2bycqKolVoaHK64XYcReteuaON9TjzrFhaG5kty28YY=";
};
patches = [
./use-meson-v57.patch
];
nativeBuildInputs = with pkgs; [
meson
cmake
ninja
pkg-config
];
buildInputs = with pkgs; [
nv-codec-headers-11-1-5-1
libva
gst_all_1.gstreamer
gst_all_1.gst-plugins-bad
libglvnd
];
};
in
{
config = lib.mkIf cfg.enable {
@@ -41,28 +73,32 @@ in
"SpellcheckLanguage" = [ "en-US" ];
};
defaultSearchProviderSuggestURL = null;
defaultSearchProviderSearchURL = "https://duckduckgo.com/?q={searchTerms}&kp=-1&kl=us-en";
defaultSearchProviderSearchURL = " https://duckduckgo.com/?q={searchTerms}&kp=-1&kl=us-en";
};
# hardware accelerated video playback (on intel)
nixpkgs.config.packageOverrides = pkgs: {
vaapiIntel = pkgs.vaapiIntel.override { enableHybridCodec = true; };
chromium = pkgs.chromium.override {
enableWideVine = true;
# ungoogled = true;
# --enable-native-gpu-memory-buffers # fails on AMD APU
# --enable-webrtc-vp9-support
commandLineArgs = "--use-vulkan";
commandLineArgs = "--use-vulkan --use-gl=desktop --enable-zero-copy --enable-hardware-overlays --enable-features=VaapiVideoDecoder,CanvasOopRasterization --ignore-gpu-blocklist --enable-accelerated-mjpeg-decode --enable-accelerated-video --enable-gpu-rasterization";
};
};
# todo vulkan in chrome
# todo video encoding in chrome
hardware.graphics = {
hardware.opengl = {
enable = true;
extraPackages = with pkgs; [
intel-media-driver # LIBVA_DRIVER_NAME=iHD
vaapiIntel # LIBVA_DRIVER_NAME=i965 (older but works better for Firefox/Chromium)
# vaapiVdpau
libvdpau-va-gl
nvidia-vaapi-driver
];
extraPackages32 = with pkgs.pkgsi686Linux; [ vaapiIntel ];
};
};
}

View File

@@ -6,17 +6,19 @@ in
{
imports = [
./kde.nix
./xfce.nix
./yubikey.nix
./chromium.nix
./firefox.nix
# ./firefox.nix
./audio.nix
# ./torbrowser.nix
./pithos.nix
./spotify.nix
./vscodium.nix
./discord.nix
./steam.nix
./touchpad.nix
./mount-samba.nix
./udev.nix
./virtualisation.nix
];
options.de = {
@@ -24,10 +26,9 @@ in
};
config = lib.mkIf cfg.enable {
environment.systemPackages = with pkgs; [
# https://github.com/NixOS/nixpkgs/pull/328086#issuecomment-2235384618
gparted
];
# vulkan
hardware.opengl.driSupport = true;
hardware.opengl.driSupport32Bit = true;
# Applications
users.users.googlebot.packages = with pkgs; [
@@ -36,81 +37,45 @@ in
mumble
tigervnc
bluez-tools
vscodium
element-desktop
mpv
nextcloud-client
signal-desktop
minecraft
gparted
libreoffice-fresh
thunderbird
spotify
spotifyd
spotify-qt
arduino
yt-dlp
jellyfin-media-player
joplin-desktop
config.inputs.deploy-rs.packages.${config.currentSystem}.deploy-rs
lxqt.pavucontrol-qt
deskflow
file-roller
android-tools
logseq
# For Nix IDE
nixpkgs-fmt
nixd
nil
godot-mono
rnix-lsp
];
# Networking
networking.networkmanager.enable = true;
users.users.googlebot.extraGroups = [ "networkmanager" ];
# Printing
services.printing.enable = true;
services.printing.drivers = with pkgs; [
gutenprint
];
# Scanning
hardware.sane.enable = true;
hardware.sane.extraBackends = with pkgs; [
# Enable support for "driverless" scanners
# Check for support here: https://mfi.apple.com/account/airprint-search
sane-airscan
];
# Printer/Scanner discovery
# Printer discovery
services.avahi.enable = true;
services.avahi.nssmdns4 = true;
services.avahi.nssmdns = true;
programs.file-roller.enable = true;
# Security
services.gnome.gnome-keyring.enable = true;
security.pam.services.googlebot.enableGnomeKeyring = true;
# Mount personal SMB stores
services.mount-samba.enable = true;
# allow building ARM derivations
boot.binfmt.emulatedSystems = [ "aarch64-linux" ];
# for luks onlock over tor
services.tor.enable = true;
services.tor.client.enable = true;
# Enable wayland support in various chromium based applications
environment.sessionVariables.NIXOS_OZONE_WL = "1";
fonts.packages = with pkgs; [ nerd-fonts.symbols-only ];
# SSH Ask pass
programs.ssh.enableAskPassword = true;
programs.ssh.askPassword = "${pkgs.kdePackages.ksshaskpass}/bin/ksshaskpass";
users.users.googlebot.extraGroups = [
# Networking
"networkmanager"
# Scanning
"scanner"
"lp"
];
};
}

View File

@@ -20,6 +20,31 @@ let
};
firefox = pkgs.wrapFirefox somewhatPrivateFF {
desktopName = "Sneed Browser";
nixExtensions = [
(pkgs.fetchFirefoxAddon {
name = "ublock-origin";
url = "https://addons.mozilla.org/firefox/downloads/file/3719054/ublock_origin-1.33.2-an+fx.xpi";
sha256 = "XDpe9vW1R1iVBTI4AmNgAg1nk7BVQdIAMuqd0cnK5FE=";
})
(pkgs.fetchFirefoxAddon {
name = "sponsorblock";
url = "https://addons.mozilla.org/firefox/downloads/file/3720594/sponsorblock_skip_sponsorships_on_youtube-2.0.12.3-an+fx.xpi";
sha256 = "HRtnmZWyXN3MKo4AvSYgNJGkBEsa2RaMamFbkz+YzQg=";
})
(pkgs.fetchFirefoxAddon {
name = "KeePassXC-Browser";
url = "https://addons.mozilla.org/firefox/downloads/file/3720664/keepassxc_browser-1.7.6-fx.xpi";
sha256 = "3K404/eq3amHhIT0WhzQtC892he5I0kp2SvbzE9dbZg=";
})
(pkgs.fetchFirefoxAddon {
name = "https-everywhere";
url = "https://addons.mozilla.org/firefox/downloads/file/3716461/https_everywhere-2021.1.27-an+fx.xpi";
sha256 = "2gSXSLunKCwPjAq4Wsj0lOeV551r3G+fcm1oeqjMKh8=";
})
];
extraPolicies = {
CaptivePortal = false;
DisableFirefoxStudies = true;
@@ -49,6 +74,12 @@ let
ExtensionRecommendations = false;
SkipOnboarding = true;
};
WebsiteFilter = {
Block = [
"http://paradigminteractive.io/"
"https://paradigminteractive.io/"
];
};
};
extraPrefs = ''

View File

@@ -5,18 +5,20 @@ let
in
{
config = lib.mkIf cfg.enable {
services.displayManager.sddm.enable = true;
services.displayManager.sddm.wayland.enable = true;
services.desktopManager.plasma6.enable = true;
# kde plasma
services.xserver = {
enable = true;
desktopManager.plasma5.enable = true;
displayManager.sddm.enable = true;
};
# kde apps
nixpkgs.config.firefox.enablePlasmaBrowserIntegration = true;
users.users.googlebot.packages = with pkgs; [
# akonadi
# kmail
# plasma5Packages.kmail-account-wizard
kdePackages.kate
kdePackages.kdeconnect-kde
kdePackages.skanpage
kate
];
};
}

View File

@@ -13,8 +13,6 @@ let
auth_opts = "sec=ntlmv2i,credentials=/run/agenix/smb-secrets";
version_opts = "vers=3.1.1";
public_user_opts = "gid=${toString config.users.groups.users.gid}";
opts = "${systemd_opts},${network_opts},${user_opts},${version_opts},${auth_opts}";
in
{
@@ -26,7 +24,7 @@ in
fileSystems."/mnt/public" = {
device = "//s0.koi-bebop.ts.net/public";
fsType = "cifs";
options = [ "${opts},${public_user_opts}" ];
options = [ opts ];
};
fileSystems."/mnt/private" = {

86
common/pc/spotify.nix Normal file
View File

@@ -0,0 +1,86 @@
{ lib, config, pkgs, ... }:
with lib;
let
cfg = config.services.spotifyd;
toml = pkgs.formats.toml { };
spotifydConf = toml.generate "spotify.conf" cfg.settings;
in
{
disabledModules = [
"services/audio/spotifyd.nix"
];
options = {
services.spotifyd = {
enable = mkEnableOption "spotifyd, a Spotify playing daemon";
settings = mkOption {
default = { };
type = toml.type;
example = { global.bitrate = 320; };
description = ''
Configuration for Spotifyd. For syntax and directives, see
<link xlink:href="https://github.com/Spotifyd/spotifyd#Configuration"/>.
'';
};
users = mkOption {
type = with types; listOf str;
default = [ ];
description = ''
Usernames to be added to the "spotifyd" group, so that they
can start and interact with the userspace daemon.
'';
};
};
};
config = mkIf cfg.enable {
# username specific stuff because i'm lazy...
services.spotifyd.users = [ "googlebot" ];
users.users.googlebot.packages = with pkgs; [
spotify
spotify-tui
];
users.groups.spotifyd = {
members = cfg.users;
};
age.secrets.spotifyd = {
file = ../../secrets/spotifyd.age;
group = "spotifyd";
mode = "0440"; # group can read
};
# spotifyd to read secrets and run as user service
services.spotifyd = {
settings.global = {
username_cmd = "sed '1q;d' /run/agenix/spotifyd";
password_cmd = "sed '2q;d' /run/agenix/spotifyd";
bitrate = 320;
backend = "pulseaudio";
device_name = config.networking.hostName;
device_type = "computer";
# on_song_change_hook = "command_to_run_on_playback_events"
autoplay = true;
};
};
systemd.user.services.spotifyd-daemon = {
enable = true;
wantedBy = [ "graphical-session.target" ];
partOf = [ "graphical-session.target" ];
description = "spotifyd, a Spotify playing daemon";
environment.SHELL = "/bin/sh";
serviceConfig = {
ExecStart = "${pkgs.spotifyd}/bin/spotifyd --no-daemon --config-path ${spotifydConf}";
Restart = "always";
CacheDirectory = "spotifyd";
};
};
};
}

25
common/pc/torbrowser.nix Normal file
View File

@@ -0,0 +1,25 @@
{ lib, config, pkgs, ... }:
let
cfg = config.de;
in
{
config = lib.mkIf cfg.enable {
nixpkgs.overlays = [
(self: super: {
tor-browser-bundle-bin = super.tor-browser-bundle-bin.overrideAttrs (old: rec {
version = "10.0.10";
lang = "en-US";
src = pkgs.fetchurl {
url = "https://dist.torproject.org/torbrowser/${version}/tor-browser-linux64-${version}_${lang}.tar.xz";
sha256 = "vYWZ+NsGN8YH5O61+zrUjlFv3rieaBqjBQ+a18sQcZg=";
};
});
})
];
users.users.googlebot.packages = with pkgs; [
tor-browser-bundle-bin
];
};
}

View File

@@ -1,11 +1,15 @@
{ lib, config, ... }:
{ lib, config, pkgs, ... }:
let
cfg = config.de;
cfg = config.de.touchpad;
in
{
options.de.touchpad = {
enable = lib.mkEnableOption "enable touchpad";
};
config = lib.mkIf cfg.enable {
services.libinput.enable = true;
services.libinput.touchpad.naturalScrolling = true;
services.xserver.libinput.enable = true;
services.xserver.libinput.touchpad.naturalScrolling = true;
};
}

View File

@@ -1,25 +0,0 @@
{ config, lib, pkgs, ... }:
let
cfg = config.de;
in
{
config = lib.mkIf cfg.enable {
services.udev.extraRules = ''
# depthai
SUBSYSTEM=="usb", ATTRS{idVendor}=="03e7", MODE="0666"
# Moonlander
# Rules for Oryx web flashing and live training
KERNEL=="hidraw*", ATTRS{idVendor}=="16c0", MODE="0664", GROUP="plugdev"
KERNEL=="hidraw*", ATTRS{idVendor}=="3297", MODE="0664", GROUP="plugdev"
# Wally Flashing rules for the Moonlander and Planck EZ
SUBSYSTEMS=="usb", ATTRS{idVendor}=="0483", ATTRS{idProduct}=="df11", MODE:="0666", SYMLINK+="stm32_dfu"
'';
services.udev.packages = [ pkgs.platformio ];
users.groups.plugdev = {
members = [ "googlebot" ];
};
};
}

View File

@@ -0,0 +1,22 @@
diff --git a/meson.build b/meson.build
index dace367..8c0e290 100644
--- a/meson.build
+++ b/meson.build
@@ -8,7 +8,7 @@ project(
'warning_level=0',
],
license: 'MIT',
- meson_version: '>= 0.58.0',
+ meson_version: '>= 0.57.0',
)
cc = meson.get_compiler('c')
@@ -47,8 +47,3 @@ shared_library(
gnu_symbol_visibility: 'hidden',
)
-meson.add_devenv(environment({
- 'NVD_LOG': '1',
- 'LIBVA_DRIVER_NAME': 'nvidia',
- 'LIBVA_DRIVERS_PATH': meson.project_build_root(),
-}))

View File

@@ -1,23 +0,0 @@
{ config, lib, pkgs, ... }:
let
cfg = config.de;
in
{
config = lib.mkIf cfg.enable {
# AppVMs
virtualisation.appvm.enable = true;
virtualisation.appvm.user = "googlebot";
# Use podman instead of docker
virtualisation.podman.enable = true;
virtualisation.podman.dockerCompat = true;
# virt-manager
virtualisation.libvirtd.enable = true;
programs.dconf.enable = true;
virtualisation.spiceUSBRedirection.enable = true;
environment.systemPackages = with pkgs; [ virt-manager ];
users.users.googlebot.extraGroups = [ "libvirtd" "adbusers" ];
};
}

22
common/pc/vscodium.nix Normal file
View File

@@ -0,0 +1,22 @@
{ lib, config, pkgs, ... }:
let
cfg = config.de;
extensions = with pkgs.vscode-extensions; [
# bbenoist.Nix # nix syntax support
# arrterian.nix-env-selector # nix dev envs
];
vscodium-with-extensions = pkgs.vscode-with-extensions.override {
vscode = pkgs.vscodium;
vscodeExtensions = extensions;
};
in
{
config = lib.mkIf cfg.enable {
users.users.googlebot.packages = [
vscodium-with-extensions
];
};
}

23
common/pc/xfce.nix Normal file
View File

@@ -0,0 +1,23 @@
{ lib, config, pkgs, ... }:
let
cfg = config.de;
in
{
config = lib.mkIf cfg.enable {
services.xserver = {
enable = true;
desktopManager = {
xterm.enable = false;
xfce.enable = true;
};
displayManager.sddm.enable = true;
};
# xfce apps
# TODO for some reason whiskermenu needs to be global for it to work
environment.systemPackages = with pkgs; [
xfce.xfce4-whiskermenu-plugin
];
};
}

View File

@@ -1,147 +0,0 @@
{ hostConfig, workspaceName, ip, networkInterface }:
# Base configuration shared by all sandboxed workspaces (VMs and containers)
# This provides common settings for networking, SSH, users, and packages
#
# Parameters:
# hostConfig - The host's NixOS config (for inputs, ssh keys, etc.)
# workspaceName - Name of the workspace (used as hostname)
# ip - Static IP address for the workspace
# networkInterface - Match config for systemd-networkd (e.g., { Type = "ether"; } or { Name = "host0"; })
{ config, lib, pkgs, ... }:
let
claudeConfigFile = pkgs.writeText "claude-config.json" (builtins.toJSON {
hasCompletedOnboarding = true;
theme = "dark";
projects = {
"/home/googlebot/workspace" = {
hasTrustDialogAccepted = true;
};
};
});
in
{
imports = [
../shell.nix
hostConfig.inputs.home-manager.nixosModules.home-manager
hostConfig.inputs.nix-index-database.nixosModules.default
];
nixpkgs.overlays = [
hostConfig.inputs.claude-code-nix.overlays.default
];
# Basic system configuration
system.stateVersion = "25.11";
# Set hostname to match the workspace name
networking.hostName = workspaceName;
# Networking with systemd-networkd
networking.useNetworkd = true;
systemd.network.enable = true;
# Enable resolved to populate /etc/resolv.conf from networkd's DNS settings
services.resolved.enable = true;
# Basic networking configuration
networking.useDHCP = false;
# Static IP configuration
# Uses the host as DNS server (host forwards to upstream DNS)
systemd.network.networks."20-workspace" = {
matchConfig = networkInterface;
networkConfig = {
Address = "${ip}/24";
Gateway = hostConfig.networking.sandbox.hostAddress;
DNS = [ hostConfig.networking.sandbox.hostAddress ];
};
};
# Disable firewall inside workspaces (we're behind NAT)
networking.firewall.enable = false;
# Enable SSH for access
services.openssh = {
enable = true;
settings = {
PasswordAuthentication = false;
PermitRootLogin = "prohibit-password";
};
};
# Use persistent SSH host keys from shared directory
services.openssh.hostKeys = lib.mkForce [
{
path = "/etc/ssh-host-keys/ssh_host_ed25519_key";
type = "ed25519";
}
];
# Basic system packages
environment.systemPackages = with pkgs; [
claude-code
kakoune
vim
git
htop
wget
curl
tmux
dnsutils
];
# User configuration
users.mutableUsers = false;
users.users.googlebot = {
isNormalUser = true;
extraGroups = [ "wheel" ];
shell = pkgs.fish;
openssh.authorizedKeys.keys = hostConfig.machines.ssh.userKeys;
};
security.doas.enable = true;
security.sudo.enable = false;
security.doas.extraRules = [
{ groups = [ "wheel" ]; noPass = true; }
];
# Minimal locale settings
i18n.defaultLocale = "en_US.UTF-8";
time.timeZone = "America/Los_Angeles";
# Enable flakes
nix.settings.experimental-features = [ "nix-command" "flakes" ];
nix.settings.trusted-users = [ "googlebot" ];
# Make nixpkgs available in NIX_PATH and registry (like the NixOS ISO)
# This allows `nix-shell -p`, `nix repl '<nixpkgs>'`, etc. to work
nix.nixPath = [ "nixpkgs=${hostConfig.inputs.nixpkgs}" ];
nix.registry.nixpkgs.flake = hostConfig.inputs.nixpkgs;
# Enable fish shell
programs.fish.enable = true;
# Initialize Claude Code config on first boot (skips onboarding, trusts workspace)
systemd.services.claude-config-init = {
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
User = "googlebot";
Group = "users";
};
script = ''
if [ ! -f /home/googlebot/claude-config/.claude.json ]; then
cp ${claudeConfigFile} /home/googlebot/claude-config/.claude.json
fi
'';
};
# Home Manager configuration
home-manager.useGlobalPkgs = true;
home-manager.useUserPackages = true;
home-manager.users.googlebot = import ./home.nix;
}

View File

@@ -1,74 +0,0 @@
{ config, lib, pkgs, ... }:
# Container-specific configuration for sandboxed workspaces using systemd-nspawn
# This module is imported by default.nix for workspaces with type = "container"
with lib;
let
cfg = config.sandboxed-workspace;
hostConfig = config;
# Filter for container-type workspaces only
containerWorkspaces = filterAttrs (n: ws: ws.type == "container") cfg.workspaces;
in
{
config = mkIf (cfg.enable && containerWorkspaces != { }) {
# NixOS container module only sets restartIfChanged when autoStart=true
# Work around this by setting it directly on the systemd service
systemd.services = mapAttrs'
(name: ws: nameValuePair "container@${name}" {
restartIfChanged = lib.mkForce true;
restartTriggers = [
config.containers.${name}.path
config.environment.etc."nixos-containers/${name}.conf".source
];
})
containerWorkspaces;
# Convert container workspace configs to NixOS containers format
containers = mapAttrs
(name: ws: {
autoStart = ws.autoStart;
privateNetwork = true;
ephemeral = true;
restartIfChanged = true;
# Attach container's veth to the sandbox bridge
# This creates the veth pair and attaches host side to the bridge
hostBridge = config.networking.sandbox.bridgeName;
bindMounts = {
"/home/googlebot/workspace" = {
hostPath = "/home/googlebot/sandboxed/${name}/workspace";
isReadOnly = false;
};
"/etc/ssh-host-keys" = {
hostPath = "/home/googlebot/sandboxed/${name}/ssh-host-keys";
isReadOnly = false;
};
"/home/googlebot/claude-config" = {
hostPath = "/home/googlebot/sandboxed/${name}/claude-config";
isReadOnly = false;
};
};
config = { config, lib, pkgs, ... }: {
imports = [
(import ./base.nix {
inherit hostConfig;
workspaceName = name;
ip = ws.ip;
networkInterface = { Name = "eth0"; };
})
(import ws.config)
];
networking.useHostResolvConf = false;
nixpkgs.config.allowUnfree = true;
};
})
containerWorkspaces;
};
}

View File

@@ -1,164 +0,0 @@
{ config, lib, pkgs, ... }:
# Unified sandboxed workspace module supporting both VMs and containers
# This module provides isolated development environments with shared configuration
with lib;
let
cfg = config.sandboxed-workspace;
in
{
imports = [
./vm.nix
./container.nix
./incus.nix
];
options.sandboxed-workspace = {
enable = mkEnableOption "sandboxed workspace management";
workspaces = mkOption {
type = types.attrsOf (types.submodule {
options = {
type = mkOption {
type = types.enum [ "vm" "container" "incus" ];
description = ''
Backend type for this workspace:
- "vm": microVM with cloud-hypervisor (more isolation, uses virtiofs)
- "container": systemd-nspawn via NixOS containers (less overhead, uses bind mounts)
- "incus": Incus/LXD container (unprivileged, better security than NixOS containers)
'';
};
config = mkOption {
type = types.path;
description = "Path to the workspace configuration file";
};
ip = mkOption {
type = types.str;
example = "192.168.83.10";
description = ''
Static IP address for this workspace on the microvm bridge network.
Configures the workspace's network interface and adds an entry to /etc/hosts
on the host so the workspace can be accessed by name (e.g., ssh workspace-example).
Must be in the 192.168.83.0/24 subnet (or whatever networking.sandbox.subnet is).
'';
};
hostKey = mkOption {
type = types.nullOr types.str;
default = null;
example = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAA...";
description = ''
SSH host public key for this workspace. If set, adds to programs.ssh.knownHosts
so the host automatically trusts the workspace without prompting.
Get the key from: ~/sandboxed/<name>/ssh-host-keys/ssh_host_ed25519_key.pub
'';
};
autoStart = mkOption {
type = types.bool;
default = false;
description = "Whether to automatically start this workspace on boot";
};
cid = mkOption {
type = types.nullOr types.int;
default = null;
description = ''
vsock Context Identifier for this workspace (VM-only, ignored for containers).
If null, auto-generated from workspace name.
Must be unique per host. Valid range: 3 to 4294967294.
See: https://man7.org/linux/man-pages/man7/vsock.7.html
'';
};
};
});
default = { };
description = "Sandboxed workspace configurations";
};
};
config = mkIf cfg.enable {
# Automatically enable sandbox networking when workspaces are defined
networking.sandbox.enable = mkIf (cfg.workspaces != { }) true;
# Add workspace hostnames to /etc/hosts so they can be accessed by name
networking.hosts = lib.mkMerge (lib.mapAttrsToList
(name: ws: {
${ws.ip} = [ "workspace-${name}" ];
})
cfg.workspaces);
# Add workspace SSH host keys to known_hosts so host trusts workspaces without prompting
programs.ssh.knownHosts = lib.mkMerge (lib.mapAttrsToList
(name: ws:
lib.optionalAttrs (ws.hostKey != null) {
"workspace-${name}" = {
publicKey = ws.hostKey;
extraHostNames = [ ws.ip ];
};
})
cfg.workspaces);
# Shell aliases for workspace management
environment.shellAliases = lib.mkMerge (lib.mapAttrsToList
(name: ws:
let
serviceName =
if ws.type == "vm" then "microvm@${name}"
else if ws.type == "incus" then "incus-workspace-${name}"
else "container@${name}";
in
{
"workspace_${name}" = "ssh googlebot@workspace-${name}";
"workspace_${name}_start" = "doas systemctl start ${serviceName}";
"workspace_${name}_stop" = "doas systemctl stop ${serviceName}";
"workspace_${name}_restart" = "doas systemctl restart ${serviceName}";
"workspace_${name}_status" = "doas systemctl status ${serviceName}";
})
cfg.workspaces);
# Automatically generate SSH host keys and directories for all workspaces
systemd.services = lib.mapAttrs'
(name: ws:
let
serviceName =
if ws.type == "vm" then "microvm@${name}"
else if ws.type == "incus" then "incus-workspace-${name}"
else "container@${name}";
in
lib.nameValuePair "workspace-${name}-setup" {
description = "Setup directories and SSH keys for workspace ${name}";
wantedBy = [ "multi-user.target" ];
before = [ "${serviceName}.service" ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
};
script = ''
# Create directories if they don't exist
mkdir -p /home/googlebot/sandboxed/${name}/workspace
mkdir -p /home/googlebot/sandboxed/${name}/ssh-host-keys
mkdir -p /home/googlebot/sandboxed/${name}/claude-config
# Fix ownership
chown -R googlebot:users /home/googlebot/sandboxed/${name}
# Generate SSH host key if it doesn't exist
if [ ! -f /home/googlebot/sandboxed/${name}/ssh-host-keys/ssh_host_ed25519_key ]; then
${pkgs.openssh}/bin/ssh-keygen -t ed25519 -N "" \
-f /home/googlebot/sandboxed/${name}/ssh-host-keys/ssh_host_ed25519_key
chown googlebot:users /home/googlebot/sandboxed/${name}/ssh-host-keys/ssh_host_ed25519_key*
echo "Generated SSH host key for workspace ${name}"
fi
'';
}
)
cfg.workspaces;
};
}
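
For reference, a host that imports this module could declare its workspaces roughly like the sketch below (workspace names, paths, and IPs are hypothetical); the module then generates the `workspace_<name>` shell aliases and per-workspace setup services described above:

```nix
{
  sandboxed-workspace = {
    enable = true;
    workspaces.example-vm = {
      type = "vm";
      config = ./workspaces/example-vm.nix;
      ip = "192.168.83.10";
      autoStart = true;
    };
    workspaces.example-container = {
      type = "container";
      config = ./workspaces/example-container.nix;
      ip = "192.168.83.50";
      # hostKey can be filled in after the first boot, once the key exists:
      # hostKey = "ssh-ed25519 AAAA... (hypothetical)";
    };
  };
}
```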

View File

@@ -1,50 +0,0 @@
{ lib, pkgs, ... }:
# Home Manager configuration for sandboxed workspace user environment
# This sets up the shell and tools inside VMs and containers
{
home.username = "googlebot";
home.homeDirectory = "/home/googlebot";
home.stateVersion = "24.11";
programs.home-manager.enable = true;
# Shell configuration
programs.fish.enable = true;
programs.starship.enable = true;
programs.starship.enableFishIntegration = true;
programs.starship.settings.container.disabled = true;
# Basic command-line tools
programs.btop.enable = true;
programs.ripgrep.enable = true;
programs.eza.enable = true;
# Git configuration
programs.git = {
enable = true;
settings = {
user.name = lib.mkDefault "googlebot";
user.email = lib.mkDefault "zuckerberg@neet.dev";
};
};
# Shell aliases
home.shellAliases = {
ls = "eza";
la = "eza -la";
ll = "eza -l";
};
# Environment variables for Claude Code
home.sessionVariables = {
# Isolate Claude config to a specific directory on the host
CLAUDE_CONFIG_DIR = "/home/googlebot/claude-config";
};
# Additional packages for development
home.packages = with pkgs; [
# Add packages as needed per workspace
];
}

View File

@@ -1,182 +0,0 @@
{ config, lib, pkgs, ... }:
# Incus-specific configuration for sandboxed workspaces
# Creates fully declarative Incus containers from NixOS configurations
with lib;
let
cfg = config.sandboxed-workspace;
hostConfig = config;
incusWorkspaces = filterAttrs (n: ws: ws.type == "incus") cfg.workspaces;
# Build a NixOS LXC image for a workspace
mkContainerImage = name: ws:
let
nixpkgs = hostConfig.inputs.nixpkgs;
containerSystem = nixpkgs.lib.nixosSystem {
modules = [
(import ./base.nix {
inherit hostConfig;
workspaceName = name;
ip = ws.ip;
networkInterface = { Name = "eth0"; };
})
(import ws.config)
({ config, lib, pkgs, ... }: {
nixpkgs.hostPlatform = hostConfig.currentSystem;
boot.isContainer = true;
networking.useHostResolvConf = false;
nixpkgs.config.allowUnfree = true;
# Incus containers don't support the kernel features nix sandbox requires
nix.settings.sandbox = false;
environment.systemPackages = [
(lib.hiPrio (pkgs.writeShellScriptBin "claude" ''
exec ${pkgs.claude-code}/bin/claude --dangerously-skip-permissions "$@"
''))
];
})
];
};
in
{
rootfs = containerSystem.config.system.build.images.lxc;
metadata = containerSystem.config.system.build.images.lxc-metadata;
toplevel = containerSystem.config.system.build.toplevel;
};
mkIncusService = name: ws:
let
images = mkContainerImage name ws;
hash = builtins.substring 0 12 (builtins.hashString "sha256" "${images.rootfs}");
imageName = "nixos-workspace-${name}-${hash}";
containerName = "workspace-${name}";
bridgeName = config.networking.sandbox.bridgeName;
mac = lib.mkMac "incus-${name}";
addDevices = ''
incus config device add ${containerName} eth0 nic nictype=bridged parent=${bridgeName} hwaddr=${mac}
incus config device add ${containerName} workspace disk source=/home/googlebot/sandboxed/${name}/workspace path=/home/googlebot/workspace shift=true
incus config device add ${containerName} ssh-keys disk source=/home/googlebot/sandboxed/${name}/ssh-host-keys path=/etc/ssh-host-keys shift=true
incus config device add ${containerName} claude-config disk source=/home/googlebot/sandboxed/${name}/claude-config path=/home/googlebot/claude-config shift=true
'';
in
{
description = "Incus workspace ${name}";
after = [ "incus.service" "incus-preseed.service" "workspace-${name}-setup.service" ];
requires = [ "incus.service" ];
wants = [ "workspace-${name}-setup.service" ];
wantedBy = optional ws.autoStart "multi-user.target";
path = [ config.virtualisation.incus.package pkgs.gnutar pkgs.xz pkgs.util-linux ];
restartTriggers = [ images.rootfs images.metadata ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
};
script = ''
set -euo pipefail
# Serialize incus operations - concurrent container creation causes race conditions
exec 9>/run/incus-workspace.lock
flock -x 9
# Import image if not present
if ! incus image list --format csv | grep -q "${imageName}"; then
metadata_tarball=$(echo ${images.metadata}/tarball/*.tar.xz)
rootfs_tarball=$(echo ${images.rootfs}/tarball/*.tar.xz)
incus image import "$metadata_tarball" "$rootfs_tarball" --alias ${imageName}
# Clean up old images for this workspace
incus image list --format csv | grep "nixos-workspace-${name}-" | grep -v "${imageName}" | cut -d, -f2 | while read old_image; do
incus image delete "$old_image" || true
done || true
fi
# Always recreate container for ephemeral behavior
incus stop ${containerName} --force 2>/dev/null || true
incus delete ${containerName} --force 2>/dev/null || true
incus init ${imageName} ${containerName}
${addDevices}
incus start ${containerName}
# Wait for container to start
for i in $(seq 1 30); do
if incus list --format csv | grep -q "^${containerName},RUNNING"; then
exit 0
fi
sleep 1
done
exit 1
'';
preStop = ''
exec 9>/run/incus-workspace.lock
flock -x 9
incus stop ${containerName} --force 2>/dev/null || true
incus delete ${containerName} --force 2>/dev/null || true
# Clean up all images for this workspace
incus image list --format csv 2>/dev/null | grep "nixos-workspace-${name}-" | cut -d, -f2 | while read img; do
incus image delete "$img" 2>/dev/null || true
done
'';
};
in
{
config = mkIf (cfg.enable && incusWorkspaces != { }) {
virtualisation.incus.enable = true;
networking.nftables.enable = true;
virtualisation.incus.preseed = {
storage_pools = [{
name = "default";
driver = "dir";
config = {
source = "/var/lib/incus/storage-pools/default";
};
}];
profiles = [{
name = "default";
config = {
"security.privileged" = "false";
"security.idmap.isolated" = "true";
};
devices = {
root = {
path = "/";
pool = "default";
type = "disk";
};
};
}];
};
systemd.services = mapAttrs'
(name: ws: nameValuePair "incus-workspace-${name}" (mkIncusService name ws))
incusWorkspaces;
# Extra alias for incus shell access (ssh is also available via default.nix aliases)
environment.shellAliases = mkMerge (mapAttrsToList
(name: ws: {
"workspace_${name}_shell" = "doas incus exec workspace-${name} -- su -l googlebot";
})
incusWorkspaces);
};
}

View File

@@ -1,140 +0,0 @@
{ config, lib, pkgs, ... }:
# VM-specific configuration for sandboxed workspaces using microvm.nix
# This module is imported by default.nix for workspaces with type = "vm"
with lib;
let
cfg = config.sandboxed-workspace;
hostConfig = config;
# Generate a deterministic vsock CID from workspace name.
#
# vsock (virtual sockets) enables host-VM communication without networking.
# cloud-hypervisor uses vsock for systemd-notify integration: when a VM finishes
# booting, systemd sends READY=1 to the host via vsock, allowing the host's
# microvm@ service to accurately track VM boot status instead of guessing.
#
# Each VM needs a unique CID (Context Identifier). Reserved CIDs per vsock(7):
# - VMADDR_CID_HYPERVISOR (0): reserved for hypervisor
# - VMADDR_CID_LOCAL (1): loopback address
# - VMADDR_CID_HOST (2): host address
# See: https://man7.org/linux/man-pages/man7/vsock.7.html
# https://docs.kernel.org/virt/kvm/vsock.html
#
# We auto-generate from SHA256 hash to ensure uniqueness without manual assignment.
# Range: 100 - 16777315 (offset avoids reserved CIDs and leaves 3-99 for manual use)
nameToCid = name:
let
hash = builtins.hashString "sha256" name;
hexPart = builtins.substring 0 6 hash;
in
100 + (builtins.foldl'
(acc: c: acc * 16 + (
if c == "a" then 10
else if c == "b" then 11
else if c == "c" then 12
else if c == "d" then 13
else if c == "e" then 14
else if c == "f" then 15
else lib.strings.toInt c
)) 0
(lib.stringToCharacters hexPart));
# Filter for VM-type workspaces only
vmWorkspaces = filterAttrs (n: ws: ws.type == "vm") cfg.workspaces;
# Generate VM configuration for a workspace
mkVmConfig = name: ws: {
inherit pkgs; # Use host's pkgs (includes allowUnfree)
config = import ws.config;
specialArgs = { inputs = hostConfig.inputs; };
extraModules = [
(import ./base.nix {
inherit hostConfig;
workspaceName = name;
ip = ws.ip;
networkInterface = { Type = "ether"; };
})
{
environment.systemPackages = [
(lib.hiPrio (pkgs.writeShellScriptBin "claude" ''
exec ${pkgs.claude-code}/bin/claude --dangerously-skip-permissions "$@"
''))
];
# MicroVM specific configuration
microvm = {
# Use cloud-hypervisor for better performance
hypervisor = lib.mkDefault "cloud-hypervisor";
# Resource allocation
vcpu = 8;
mem = 4096; # 4GB RAM
# Disk for writable overlay
volumes = [{
image = "overlay.img";
mountPoint = "/nix/.rw-store";
size = 8192; # 8GB
}];
# Shared directories with host using virtiofs
shares = [
{
# Share the host's /nix/store for accessing packages
proto = "virtiofs";
tag = "ro-store";
source = "/nix/store";
mountPoint = "/nix/.ro-store";
}
{
proto = "virtiofs";
tag = "workspace";
source = "/home/googlebot/sandboxed/${name}/workspace";
mountPoint = "/home/googlebot/workspace";
}
{
proto = "virtiofs";
tag = "ssh-host-keys";
source = "/home/googlebot/sandboxed/${name}/ssh-host-keys";
mountPoint = "/etc/ssh-host-keys";
}
{
proto = "virtiofs";
tag = "claude-config";
source = "/home/googlebot/sandboxed/${name}/claude-config";
mountPoint = "/home/googlebot/claude-config";
}
];
# Writeable overlay for /nix/store
writableStoreOverlay = "/nix/.rw-store";
# TAP interface for bridged networking
# The interface name "vm-*" matches the pattern in common/network/microvm.nix
# which automatically attaches it to the microbr bridge
interfaces = [{
type = "tap";
id = "vm-${name}";
mac = lib.mkMac "vm-${name}";
}];
# Enable vsock for systemd-notify integration
vsock.cid =
if ws.cid != null
then ws.cid
else nameToCid name;
};
}
];
autostart = ws.autoStart;
};
in
{
config = mkIf (cfg.enable && vmWorkspaces != { }) {
# Convert VM workspace configs to microvm.nix format
microvm.vms = mapAttrs mkVmConfig vmWorkspaces;
};
}
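
As a sketch of the CID behavior described above: leaving `cid` unset derives a CID from the workspace name via `nameToCid`, while a workspace can pin one explicitly when a stable, human-chosen value is preferred (the number below is hypothetical):

```nix
{
  sandboxed-workspace.workspaces.example-vm = {
    type = "vm";
    config = ./workspaces/example-vm.nix;
    ip = "192.168.83.11";
    cid = 4242; # hypothetical; must be unique per host, valid range 3 to 4294967294
  };
}
```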

View File

@@ -1,16 +0,0 @@
{ config, lib, ... }:
let
cfg = config.services.actual;
in
{
config = lib.mkIf cfg.enable {
services.actual.settings = {
port = 25448;
};
backup.group."actual-budget".paths = [
"/var/lib/actual"
];
};
}

View File

@@ -1,31 +0,0 @@
{ config, lib, ... }:
{
config = lib.mkIf (config.thisMachine.hasRole."binary-cache") {
services.atticd = {
enable = true;
environmentFile = config.age.secrets.atticd-credentials.path;
settings = {
listen = "[::]:28338";
chunking = {
nar-size-threshold = 64 * 1024; # 64 KiB
# The preferred minimum size of a chunk, in bytes
min-size = 16 * 1024; # 16 KiB
# The preferred average size of a chunk, in bytes
avg-size = 64 * 1024; # 64 KiB
# The preferred maximum size of a chunk, in bytes
max-size = 256 * 1024; # 256 KiB
};
compression.type = "zstd";
garbage-collection.default-retention-period = "6 months";
};
};
age.secrets.atticd-credentials.file = ../../secrets/atticd-credentials.age;
};
}

43
common/server/ceph.nix Normal file
View File

@@ -0,0 +1,43 @@
{ config, lib, ... }:
with lib;
let
cfg = config.ceph;
in
{
options.ceph = { };
config = mkIf cfg.enable {
# ceph.enable = true;
## S3 Object gateway
#ceph.rgw.enable = true;
#ceph.rgw.daemons = [
#];
# https://docs.ceph.com/en/latest/start/intro/
# object storage daemon (OSD)
ceph.osd.enable = true;
ceph.osd.daemons = [
];
# monitors ceph state
ceph.mon.enable = true;
ceph.mon.daemons = [
];
# manage ceph
ceph.mgr.enable = true;
ceph.mgr.daemons = [
];
# metadata server
ceph.mds.enable = true;
ceph.mds.daemons = [
];
ceph.global.fsid = "925773DC-D95F-476C-BBCD-08E01BF0865F";
};
}

View File

@@ -1,20 +1,23 @@
{ ... }:
{ config, pkgs, ... }:
{
imports = [
./nginx.nix
./thelounge.nix
./mumble.nix
./icecast.nix
./nginx-stream.nix
./matrix.nix
./zerobin.nix
./gitea.nix
./gitea-runner.nix
./privatebin/privatebin.nix
./radio.nix
./samba.nix
./owncast.nix
./mailserver.nix
./nextcloud.nix
./gitea-actions-runner.nix
./atticd.nix
./librechat.nix
./actualbudget.nix
./unifi.nix
./iodine.nix
./searx.nix
];
}

View File

@@ -1,79 +0,0 @@
{ config, lib, ... }:
# Gitea Actions Runner inside a NixOS container.
# The container shares the host's /nix/store (read-only) and nix-daemon socket,
# so builds go through the host daemon and outputs land in the host store.
# Warning: NixOS containers are not fully secure — do not run untrusted code.
# To enable, assign a machine the 'gitea-actions-runner' system role.
let
thisMachineIsARunner = config.thisMachine.hasRole."gitea-actions-runner";
containerName = "gitea-runner";
giteaRunnerUid = 991;
giteaRunnerGid = 989;
in
{
config = lib.mkIf (thisMachineIsARunner && !config.boot.isContainer) {
containers.${containerName} = {
autoStart = true;
ephemeral = true;
bindMounts = {
"/run/agenix/gitea-actions-runner-token" = {
hostPath = "/run/agenix/gitea-actions-runner-token";
isReadOnly = true;
};
"/var/lib/gitea-runner" = {
hostPath = "/var/lib/gitea-runner";
isReadOnly = false;
};
};
config = { config, lib, pkgs, ... }: {
system.stateVersion = "25.11";
services.gitea-actions-runner.instances.inst = {
enable = true;
name = containerName;
url = "https://git.neet.dev/";
tokenFile = "/run/agenix/gitea-actions-runner-token";
labels = [ "nixos:host" ];
};
# Disable dynamic user so runner state persists via bind mount
systemd.services.gitea-runner-inst.serviceConfig.DynamicUser = lib.mkForce false;
users.users.gitea-runner = {
uid = giteaRunnerUid;
home = "/var/lib/gitea-runner";
group = "gitea-runner";
isSystemUser = true;
createHome = true;
};
users.groups.gitea-runner.gid = giteaRunnerGid;
nix.settings.experimental-features = [ "nix-command" "flakes" ];
environment.systemPackages = with pkgs; [
git
nodejs
jq
attic-client
];
};
};
# Matching user on host — the container's gitea-runner UID must be
# recognized by the host's nix-daemon as trusted (shared UID namespace)
users.users.gitea-runner = {
uid = giteaRunnerUid;
home = "/var/lib/gitea-runner";
group = "gitea-runner";
isSystemUser = true;
createHome = true;
};
users.groups.gitea-runner.gid = giteaRunnerGid;
age.secrets.gitea-actions-runner-token.file = ../../secrets/gitea-actions-runner-token.age;
};
}

View File

@@ -0,0 +1,98 @@
{ config, pkgs, lib, ... }:
let
cfg = config.services.gitea-runner;
in
{
options.services.gitea-runner = {
enable = lib.mkEnableOption "Enables gitea runner";
dataDir = lib.mkOption {
default = "/var/lib/gitea-runner";
type = lib.types.str;
description = lib.mdDoc "gitea runner data directory.";
};
instanceUrl = lib.mkOption {
type = lib.types.str;
};
registrationTokenFile = lib.mkOption {
type = lib.types.path;
};
};
config = lib.mkIf cfg.enable {
virtualisation.docker.enable = true;
users.users.gitea-runner = {
description = "Gitea Runner Service";
home = cfg.dataDir;
useDefaultShell = true;
group = "gitea-runner";
isSystemUser = true;
createHome = true;
extraGroups = [
"docker" # allow creating docker containers
];
};
users.groups.gitea-runner = { };
# registration token
services.gitea-runner.registrationTokenFile = "/run/agenix/gitea-runner-registration-token";
age.secrets.gitea-runner-registration-token = {
file = ../../secrets/gitea-runner-registration-token.age;
owner = "gitea-runner";
};
systemd.services.gitea-runner = {
description = "Gitea Runner";
serviceConfig = {
WorkingDirectory = cfg.dataDir;
User = "gitea-runner";
Group = "gitea-runner";
};
requires = [ "network-online.target" ];
after = [ "network.target" "network-online.target" ];
wantedBy = [ "multi-user.target" ];
path = with pkgs; [ gitea-actions-runner ];
# based on https://gitea.com/gitea/act_runner/src/branch/main/run.sh
script = ''
. ${cfg.registrationTokenFile}
if [[ ! -s .runner ]]; then
try=0
success=0
LOGFILE="$(mktemp)"
# The point of this loop is to make it simple, when running both act_runner and gitea in docker,
# for the act_runner to wait a moment for gitea to become available before erroring out. Within
# the context of a single docker-compose, something similar could be done via healthchecks, but
# this is more flexible.
while [[ $success -eq 0 ]] && [[ $try -lt ''${10:-10} ]]; do
act_runner register \
--instance "${cfg.instanceUrl}" \
--token "$GITEA_RUNNER_REGISTRATION_TOKEN" \
--name "${config.networking.hostName}" \
--no-interactive > $LOGFILE 2>&1
cat $LOGFILE
cat $LOGFILE | grep 'Runner registered successfully' > /dev/null
if [[ $? -eq 0 ]]; then
echo "SUCCESS"
success=1
else
echo "Waiting to retry ..."
sleep 5
fi
done
fi
exec act_runner daemon
'';
};
};
}
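
A minimal sketch of enabling this runner module on a host (the instance URL is hypothetical); the registration token file is already pointed at the agenix secret by the module's own config section:

```nix
{
  services.gitea-runner = {
    enable = true;
    instanceUrl = "https://git.example.com/";
  };
}
```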

View File

@@ -1,4 +1,4 @@
{ lib, config, ... }:
{ lib, pkgs, config, ... }:
let
cfg = config.services.gitea;
@@ -12,28 +12,23 @@ in
};
config = lib.mkIf cfg.enable {
services.gitea = {
domain = cfg.hostname;
rootUrl = "https://${cfg.hostname}/";
appName = cfg.hostname;
lfs.enable = true;
# lfs.enable = true;
# dump.enable = true;
settings = {
server = {
ROOT_URL = "https://${cfg.hostname}/";
DOMAIN = cfg.hostname;
};
other = {
SHOW_FOOTER_VERSION = false;
};
ui = {
DEFAULT_THEME = "gitea-dark";
DEFAULT_THEME = "arc-green";
};
service = {
DISABLE_REGISTRATION = true;
};
session = {
COOKIE_SECURE = true;
PROVIDER = "db";
SESSION_LIFE_TIME = 259200; # 3 days
GC_INTERVAL_TIME = 259200; # 3 days
};
mailer = {
ENABLED = true;
@@ -47,9 +42,6 @@ in
actions = {
ENABLED = true;
};
indexer = {
REPO_INDEXER_ENABLED = true;
};
};
mailerPasswordFile = "/run/agenix/robots-email-pw";
};
@@ -68,7 +60,7 @@ in
enableACME = true;
forceSSL = true;
locations."/" = {
proxyPass = "http://localhost:${toString cfg.settings.server.HTTP_PORT}";
proxyPass = "http://localhost:${toString cfg.httpPort}";
};
};
};

42
common/server/gitlab.nix Normal file
View File

@@ -0,0 +1,42 @@
{ config, pkgs, ... }:
{
services.gitlab = {
enable = true;
databasePasswordFile = "/var/keys/gitlab/db_password";
initialRootPasswordFile = "/var/keys/gitlab/root_password";
https = true;
host = "git.neet.dev";
port = 443;
user = "git";
group = "git";
databaseUsername = "git";
smtp = {
enable = true;
address = "localhost";
port = 25;
};
secrets = {
dbFile = "/var/keys/gitlab/db";
secretFile = "/var/keys/gitlab/secret";
otpFile = "/var/keys/gitlab/otp";
jwsFile = "/var/keys/gitlab/jws";
};
extraConfig = {
gitlab = {
email_from = "gitlab-no-reply@neet.dev";
email_display_name = "neet.dev GitLab";
email_reply_to = "gitlab-no-reply@neet.dev";
};
};
pagesExtraArgs = [ "-listen-proxy" "127.0.0.1:8090" ];
};
services.nginx.virtualHosts = {
"git.neet.dev" = {
enableACME = true;
forceSSL = true;
locations."/".proxyPass = "http://unix:/run/gitlab/gitlab-workhorse.socket";
};
};
}

25
common/server/hydra.nix Normal file
View File

@@ -0,0 +1,25 @@
{ config, pkgs, ... }:
let
domain = "hydra.neet.dev";
port = 3000;
notifyEmail = "hydra@neet.dev";
in
{
services.nginx.virtualHosts."${domain}" = {
enableACME = true;
forceSSL = true;
locations."/" = {
proxyPass = "http://localhost:${toString port}";
};
};
services.hydra = {
enable = true;
inherit port;
hydraURL = "https://${domain}";
useSubstitutes = true;
notificationSender = notifyEmail;
buildMachinesFiles = [ ];
};
}

65
common/server/icecast.nix Normal file
View File

@@ -0,0 +1,65 @@
{ lib, config, ... }:
# Configures icecast to only accept a source from localhost
# for an audio-optimized stream at services.icecast.mount,
# made available via nginx for HTTP access at
# https://host/mount
let
cfg = config.services.icecast;
in
{
options.services.icecast = {
mount = lib.mkOption {
type = lib.types.str;
example = "stream.mp3";
};
fallback = lib.mkOption {
type = lib.types.str;
example = "fallback.mp3";
};
nginx = lib.mkEnableOption "enable nginx";
};
config = lib.mkIf cfg.enable {
services.icecast = {
listen.address = "0.0.0.0";
listen.port = 8001;
admin.password = "hackme";
extraConf = ''
<authentication>
<source-password>hackme</source-password>
</authentication>
<http-headers>
<header type="cors" name="Access-Control-Allow-Origin" />
</http-headers>
<mount type="normal">
<mount-name>/${cfg.mount}</mount-name>
<max-listeners>30</max-listeners>
<bitrate>64000</bitrate>
<hidden>false</hidden>
<public>false</public>
<fallback-mount>/${cfg.fallback}</fallback-mount>
<fallback-override>1</fallback-override>
</mount>
<mount type="normal">
<mount-name>/${cfg.fallback}</mount-name>
<max-listeners>30</max-listeners>
<bitrate>64000</bitrate>
<hidden>false</hidden>
<public>false</public>
</mount>
'';
};
services.nginx.virtualHosts.${cfg.hostname} = lib.mkIf cfg.nginx {
enableACME = true;
forceSSL = true;
locations."/${cfg.mount}" = {
proxyPass = "http://localhost:${toString cfg.listen.port}/${cfg.mount}";
extraConfig = ''
add_header Access-Control-Allow-Origin *;
'';
};
};
};
}
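
Putting those options together, a host could expose an icecast stream behind nginx roughly like this (the hostname is hypothetical; radio.nix below wires it up the same way):

```nix
{
  services.icecast = {
    enable = true;
    hostname = "radio.example.com";
    mount = "stream.mp3";
    fallback = "fallback.mp3";
    nginx = true;
  };
}
```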

21
common/server/iodine.nix Normal file
View File

@@ -0,0 +1,21 @@
{ config, pkgs, lib, ... }:
let
cfg = config.services.iodine.server;
in
{
config = lib.mkIf cfg.enable {
# iodine DNS-based vpn
services.iodine.server = {
ip = "192.168.99.1";
domain = "tun.neet.dev";
passwordFile = "/run/agenix/iodine";
};
age.secrets.iodine.file = ../../secrets/iodine.age;
networking.firewall.allowedUDPPorts = [ 53 ];
networking.nat.internalInterfaces = [
"dns0" # iodine
];
};
}

View File

@@ -1,69 +0,0 @@
{ config, lib, ... }:
with lib;
let
cfg = config.services.librechat-container;
in
{
options.services.librechat-container = {
enable = mkEnableOption "librechat";
port = mkOption {
type = types.int;
default = 3080;
};
host = lib.mkOption {
type = lib.types.str;
example = "example.com";
};
};
config = mkIf cfg.enable {
virtualisation.oci-containers.containers = {
librechat = {
image = "ghcr.io/danny-avila/librechat:v0.8.1";
environment = {
HOST = "0.0.0.0";
MONGO_URI = "mongodb://host.containers.internal:27017/LibreChat";
ENDPOINTS = "openAI,google,bingAI,gptPlugins";
OPENAI_MODELS = lib.concatStringsSep "," [
"gpt-4o-mini"
"o3-mini"
"gpt-4o"
"o1"
];
REFRESH_TOKEN_EXPIRY = toString (1000 * 60 * 60 * 24 * 30); # 30 days
};
environmentFiles = [
"/run/agenix/librechat-env-file"
];
ports = [
"${toString cfg.port}:3080"
];
};
};
age.secrets.librechat-env-file.file = ../../secrets/librechat-env-file.age;
services.mongodb.enable = true;
services.mongodb.bind_ip = "0.0.0.0";
# easier podman maintenance
virtualisation.oci-containers.backend = "podman";
virtualisation.podman.dockerSocket.enable = true;
virtualisation.podman.dockerCompat = true;
# For mongodb access
networking.firewall.trustedInterfaces = [
"podman0" # for librechat
];
services.nginx.virtualHosts.${cfg.host} = {
enableACME = true;
forceSSL = true;
locations."/" = {
proxyPass = "http://localhost:${toString cfg.port}";
proxyWebsockets = true;
};
};
};
}

View File

@@ -28,6 +28,7 @@ in
indexDir = "/var/lib/mailindex";
enableManageSieve = true;
fullTextSearch.enable = true;
fullTextSearch.indexAttachments = true;
fullTextSearch.memoryLimit = 500;
inherit domains;
loginAccounts = {
@@ -36,10 +37,6 @@ in
# catchall for all domains
aliases = map (domain: "@${domain}") domains;
};
"cris@runyan.org" = {
hashedPasswordFile = "/run/agenix/cris-hashed-email-pw";
aliases = [ "chris@runyan.org" ];
};
"robot@runyan.org" = {
aliases = [
"no-reply@neet.dev"
@@ -54,37 +51,18 @@ in
"joslyn@runyan.org"
"damon@runyan.org"
"jonas@runyan.org"
"simon@neet.dev"
"ellen@runyan.org"
];
forwards = {
"amazon@runyan.org" = [
"jeremy@runyan.org"
"cris@runyan.org"
];
};
x509.useACMEHost = config.mailserver.fqdn; # use let's encrypt for certs
stateVersion = 3;
certificateScheme = 3; # use let's encrypt for certs
};
age.secrets.hashed-email-pw.file = ../../secrets/hashed-email-pw.age;
age.secrets.cris-hashed-email-pw.file = ../../secrets/cris-hashed-email-pw.age;
age.secrets.hashed-robots-email-pw.file = ../../secrets/hashed-robots-email-pw.age;
# Get let's encrypt cert
services.nginx = {
enable = true;
virtualHosts."${config.mailserver.fqdn}" = {
forceSSL = true;
enableACME = true;
};
};
# sendmail to use xxx@domain instead of xxx@mail.domain
services.postfix.settings.main.myorigin = "$mydomain";
services.postfix.origin = "$mydomain";
# relay sent mail through mailgun
# https://www.howtoforge.com/community/threads/different-smtp-relays-for-different-domains-in-postfix.82711/#post-392620
services.postfix.settings.main = {
services.postfix.config = {
smtp_sasl_auth_enable = "yes";
smtp_sasl_security_options = "noanonymous";
smtp_sasl_password_maps = "hash:/var/lib/postfix/conf/sasl_relay_passwd";
@@ -102,6 +80,7 @@ in
age.secrets.sasl_relay_passwd.file = ../../secrets/sasl_relay_passwd.age;
# webmail
services.nginx.enable = true;
services.roundcube = {
enable = true;
hostName = config.mailserver.fqdn;

View File

@@ -3,44 +3,18 @@
let
cfg = config.services.nextcloud;
nextcloudHostname = "runyan.org";
collaboraOnlineHostname = "collabora.runyan.org";
whiteboardHostname = "whiteboard.runyan.org";
whiteboardPort = 3002; # Seems impossible to change
# Hardcoded public ip of ponyo... I wish I didn't need this...
public_ip_address = "147.135.114.130";
in
{
config = lib.mkIf cfg.enable {
services.nextcloud = {
https = true;
package = pkgs.nextcloud32;
hostName = nextcloudHostname;
package = pkgs.nextcloud25;
hostName = "neet.cloud";
config.dbtype = "sqlite";
config.adminuser = "jeremy";
config.adminpassFile = "/run/agenix/nextcloud-pw";
# Apps
autoUpdateApps.enable = true;
extraAppsEnable = true;
extraApps = with config.services.nextcloud.package.packages.apps; {
# Want
inherit end_to_end_encryption mail spreed;
# For file and document editing (collabora online and excalidraw)
inherit richdocuments whiteboard;
# Might use
inherit calendar qownnotesapi;
# Try out
# inherit bookmarks cookbook deck memories maps music news notes phonetrack polls forms;
};
# Allows installing Apps from the UI (might remove later)
appstoreEnable = true;
enableBrokenCiphersForSSE = false;
};
age.secrets.nextcloud-pw = {
file = ../../secrets/nextcloud-pw.age;
@@ -56,100 +30,5 @@ in
enableACME = true;
forceSSL = true;
};
# collabora-online
# https://diogotc.com/blog/collabora-nextcloud-nixos/
services.collabora-online = {
enable = true;
port = 15972;
settings = {
# Rely on reverse proxy for SSL
ssl = {
enable = false;
termination = true;
};
# Listen on loopback interface only
net = {
listen = "loopback";
post_allow.host = [ "localhost" ];
};
# Restrict loading documents from WOPI Host
storage.wopi = {
"@allow" = true;
host = [ config.services.nextcloud.hostName ];
};
server_name = collaboraOnlineHostname;
};
};
services.nginx.virtualHosts.${config.services.collabora-online.settings.server_name} = {
enableACME = true;
forceSSL = true;
locations."/" = {
proxyPass = "http://localhost:${toString config.services.collabora-online.port}";
proxyWebsockets = true;
};
};
systemd.services.nextcloud-config-collabora =
let
wopi_url = "http://localhost:${toString config.services.collabora-online.port}";
public_wopi_url = "https://${collaboraOnlineHostname}";
wopi_allowlist = lib.concatStringsSep "," [
"127.0.0.1"
"::1"
public_ip_address
];
in
{
wantedBy = [ "multi-user.target" ];
after = [ "nextcloud-setup.service" "coolwsd.service" ];
requires = [ "coolwsd.service" ];
path = [
config.services.nextcloud.occ
];
script = ''
nextcloud-occ config:app:set richdocuments wopi_url --value ${lib.escapeShellArg wopi_url}
nextcloud-occ config:app:set richdocuments public_wopi_url --value ${lib.escapeShellArg public_wopi_url}
nextcloud-occ config:app:set richdocuments wopi_allowlist --value ${lib.escapeShellArg wopi_allowlist}
nextcloud-occ richdocuments:setup
'';
serviceConfig = {
Type = "oneshot";
};
};
# Whiteboard
services.nextcloud-whiteboard-server = {
enable = true;
settings.NEXTCLOUD_URL = "https://${nextcloudHostname}";
secrets = [ "/run/agenix/whiteboard-server-jwt-secret" ];
};
systemd.services.nextcloud-config-whiteboard = {
wantedBy = [ "multi-user.target" ];
after = [ "nextcloud-setup.service" ];
requires = [ "coolwsd.service" ];
path = [
config.services.nextcloud.occ
];
script = ''
nextcloud-occ config:app:set whiteboard collabBackendUrl --value="https://${whiteboardHostname}"
nextcloud-occ config:app:set whiteboard jwt_secret_key --value="$JWT_SECRET_KEY"
'';
serviceConfig = {
Type = "oneshot";
EnvironmentFile = [ "/run/agenix/whiteboard-server-jwt-secret" ];
};
};
age.secrets.whiteboard-server-jwt-secret.file = ../../secrets/whiteboard-server-jwt-secret.age;
services.nginx.virtualHosts.${whiteboardHostname} = {
enableACME = true;
forceSSL = true;
locations."/" = {
proxyPass = "http://localhost:${toString whiteboardPort}";
proxyWebsockets = true;
};
};
};
}

View File

@@ -0,0 +1,76 @@
{ lib, config, pkgs, ... }:
let
cfg = config.services.nginx.stream;
nginxWithRTMP = pkgs.nginx.override {
modules = [ pkgs.nginxModules.rtmp ];
};
in
{
options.services.nginx.stream = {
enable = lib.mkEnableOption "enable nginx rtmp/hls/dash video streaming";
port = lib.mkOption {
type = lib.types.int;
default = 1935;
description = "rtmp injest/serve port";
};
rtmpName = lib.mkOption {
type = lib.types.str;
default = "live";
description = "the name of the rtmp application";
};
hostname = lib.mkOption {
type = lib.types.str;
description = "the http host to serve hls";
};
httpLocation = lib.mkOption {
type = lib.types.str;
default = "/tmp";
description = "the path of the tmp http files";
};
};
config = lib.mkIf cfg.enable {
services.nginx = {
enable = true;
package = nginxWithRTMP;
virtualHosts.${cfg.hostname} = {
enableACME = true;
forceSSL = true;
locations = {
"/stream/hls".root = "${cfg.httpLocation}/hls";
"/stream/dash".root = "${cfg.httpLocation}/dash";
};
extraConfig = ''
location /stat {
rtmp_stat all;
}
'';
};
appendConfig = ''
rtmp {
server {
listen ${toString cfg.port};
chunk_size 4096;
application ${cfg.rtmpName} {
allow publish all;
live on;
record off;
hls on;
hls_path ${cfg.httpLocation}/hls;
dash on;
dash_path ${cfg.httpLocation}/dash;
}
}
}
'';
};
networking.firewall.allowedTCPPorts = [
cfg.port
];
};
}
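
For illustration, enabling the RTMP/HLS/DASH streaming described above might look like this (hostname hypothetical); sources then publish to the RTMP application on the configured port, and viewers fetch HLS/DASH under /stream/ on the virtual host:

```nix
{
  services.nginx.stream = {
    enable = true;
    hostname = "live.example.com";
    # port defaults to 1935 and rtmpName to "live"
  };
}
```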

View File

@@ -1,13 +1,9 @@
{ lib, config, ... }:
{ lib, config, pkgs, ... }:
let
cfg = config.services.nginx;
in
{
options.services.nginx = {
openFirewall = lib.mkEnableOption "Open firewall ports 80 and 443";
};
config = lib.mkIf cfg.enable {
services.nginx = {
recommendedGzipSettings = true;
@@ -16,8 +12,6 @@ in
recommendedTlsSettings = true;
};
services.nginx.openFirewall = lib.mkDefault true;
networking.firewall.allowedTCPPorts = lib.mkIf cfg.openFirewall [ 80 443 ];
networking.firewall.allowedTCPPorts = [ 80 443 ];
};
}

View File

@@ -0,0 +1,42 @@
;<?php http_response_code(403); /*
[main]
name = "Kode Paste"
discussion = false
opendiscussion = false
password = true
fileupload = false
burnafterreadingselected = false
defaultformatter = "plaintext"
sizelimit = 10485760
template = "bootstrap"
languageselection = false
[expire]
default = "1week"
[expire_options]
5min = 300
10min = 600
1hour = 3600
1day = 86400
1week = 604800
[formatter_options]
plaintext = "Plain Text"
syntaxhighlighting = "Source Code"
markdown = "Markdown"
[traffic]
limit = 10
dir = "/var/lib/privatebin"
[purge]
limit = 300
batchsize = 10
dir = "/var/lib/privatebin"
[model]
class = Filesystem
[model_options]
dir = "/var/lib/privatebin"

View File

@@ -0,0 +1,74 @@
{ config, pkgs, lib, ... }:
let
cfg = config.services.privatebin;
privateBinSrc = pkgs.stdenv.mkDerivation {
name = "privatebin";
src = pkgs.fetchFromGitHub {
owner = "privatebin";
repo = "privatebin";
rev = "d65bf02d7819a530c3c2a88f6f9947651fe5258d";
sha256 = "7ttAvEDL1ab0cUZcqZzXFkXwB2rF2t4eNpPxt48ap94=";
};
installPhase = ''
cp -ar $src $out
'';
};
in
{
options.services.privatebin = {
enable = lib.mkEnableOption "enable privatebin";
host = lib.mkOption {
type = lib.types.str;
example = "example.com";
};
};
config = lib.mkIf cfg.enable {
users.users.privatebin = {
description = "privatebin service user";
group = "privatebin";
isSystemUser = true;
};
users.groups.privatebin = { };
services.nginx.enable = true;
services.nginx.virtualHosts.${cfg.host} = {
enableACME = true;
forceSSL = true;
locations."/" = {
root = privateBinSrc;
index = "index.php";
};
locations."~ \.php$" = {
root = privateBinSrc;
extraConfig = ''
fastcgi_pass unix:${config.services.phpfpm.pools.privatebin.socket};
fastcgi_index index.php;
'';
};
};
systemd.tmpfiles.rules = [
"d '/var/lib/privatebin' 0750 privatebin privatebin - -"
];
services.phpfpm.pools.privatebin = {
user = "privatebin";
group = "privatebin";
phpEnv = {
CONFIG_PATH = "${./conf.php}";
};
settings = {
pm = "dynamic";
"listen.owner" = config.services.nginx.user;
"pm.max_children" = 5;
"pm.start_servers" = 2;
"pm.min_spare_servers" = 1;
"pm.max_spare_servers" = 3;
"pm.max_requests" = 500;
};
};
};
}

75
common/server/radio.nix Normal file
View File

@@ -0,0 +1,75 @@
{ config, pkgs, lib, ... }:
let
cfg = config.services.radio;
radioPackage = config.inputs.radio.packages.${config.currentSystem}.radio;
in
{
options.services.radio = {
enable = lib.mkEnableOption "enable radio";
user = lib.mkOption {
type = lib.types.str;
default = "radio";
description = ''
The user radio should run as
'';
};
group = lib.mkOption {
type = lib.types.str;
default = "radio";
description = ''
The group radio should run as
'';
};
dataDir = lib.mkOption {
type = lib.types.str;
default = "/var/lib/radio";
description = ''
Path to the radio data directory
'';
};
host = lib.mkOption {
type = lib.types.str;
description = ''
Domain radio is hosted on
'';
};
nginx = lib.mkEnableOption "enable nginx";
};
config = lib.mkIf cfg.enable {
services.icecast = {
enable = true;
hostname = cfg.host;
mount = "stream.mp3";
fallback = "fallback.mp3";
};
services.nginx.virtualHosts.${cfg.host} = lib.mkIf cfg.nginx {
enableACME = true;
forceSSL = true;
locations."/".root = config.inputs.radio-web;
};
users.users.${cfg.user} = {
isSystemUser = true;
group = cfg.group;
home = cfg.dataDir;
createHome = true;
};
users.groups.${cfg.group} = { };
systemd.services.radio = {
enable = true;
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig.ExecStart = "${radioPackage}/bin/radio ${config.services.icecast.listen.address}:${toString config.services.icecast.listen.port} ${config.services.icecast.mount} 5500";
serviceConfig.User = cfg.user;
serviceConfig.Group = cfg.group;
serviceConfig.WorkingDirectory = cfg.dataDir;
preStart = ''
mkdir -p ${cfg.dataDir}
chown ${cfg.user} ${cfg.dataDir}
'';
};
};
}
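
A sketch of enabling the radio service with its nginx front end (domain hypothetical); note that this also configures icecast as the ingest, as wired above:

```nix
{
  services.radio = {
    enable = true;
    host = "radio.example.com";
    nginx = true;
  };
}
```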

View File

@@ -5,28 +5,30 @@
services.samba = {
openFirewall = true;
package = pkgs.sambaFull; # printer sharing
securityType = "user";
# should this be on?
nsswins = true;
settings = {
global = {
security = "user";
workgroup = "HOME";
"server string" = "smbnix";
"netbios name" = "smbnix";
"use sendfile" = "yes";
"min protocol" = "smb2";
"guest account" = "nobody";
"map to guest" = "bad user";
extraConfig = ''
workgroup = HOME
server string = smbnix
netbios name = smbnix
security = user
use sendfile = yes
min protocol = smb2
guest account = nobody
map to guest = bad user
# printing
"load printers" = "yes";
printing = "cups";
"printcap name" = "cups";
# printing
load printers = yes
printing = cups
printcap name = cups
"hide files" = "/.nobackup/.DS_Store/._.DS_Store/";
};
hide files = /.nobackup/.DS_Store/._.DS_Store/
'';
shares = {
public = {
path = "/data/samba/Public";
browseable = "yes";
@@ -75,9 +77,9 @@
# backups
backup.group."samba".paths = [
config.services.samba.settings.googlebot.path
config.services.samba.settings.cris.path
config.services.samba.settings.public.path
config.services.samba.shares.googlebot.path
config.services.samba.shares.cris.path
config.services.samba.shares.public.path
];
# Windows discovery of samba server
@@ -95,7 +97,7 @@
# Printer discovery
# (is this needed?)
services.avahi.enable = true;
services.avahi.nssmdns4 = true;
services.avahi.nssmdns = true;
# printer sharing
systemd.tmpfiles.rules = [

30
common/server/searx.nix Normal file
View File

@@ -0,0 +1,30 @@
{ config, pkgs, lib, ... }:
let
cfg = config.services.searx;
in
{
config = lib.mkIf cfg.enable {
services.searx = {
environmentFile = "/run/agenix/searx";
settings = {
server.port = 43254;
server.secret_key = "@SEARX_SECRET_KEY@";
engines = [{
name = "wolframalpha";
shortcut = "wa";
api_key = "@WOLFRAM_API_KEY@";
engine = "wolframalpha_api";
}];
};
};
services.nginx.virtualHosts."search.neet.space" = {
enableACME = true;
forceSSL = true;
locations."/" = {
proxyPass = "http://localhost:${toString config.services.searx.settings.server.port}";
};
};
age.secrets.searx.file = ../../secrets/searx.age;
};
}

View File

@@ -1,26 +0,0 @@
{ config, lib, pkgs, ... }:
let
cfg = config.services.unifi;
in
{
options.services.unifi = {
# Open select Unifi ports instead of using openFirewall to avoid opening access to unifi's control panel
openMinimalFirewall = lib.mkEnableOption "Open bare minimum firewall ports";
};
config = lib.mkIf cfg.enable {
services.unifi.unifiPackage = pkgs.unifi;
services.unifi.mongodbPackage = pkgs.mongodb-7_0;
networking.firewall = lib.mkIf cfg.openMinimalFirewall {
allowedUDPPorts = [
3478 # STUN
10001 # used for device discovery.
];
allowedTCPPorts = [
8080 # Used for device and application communication.
];
};
};
}

View File

@@ -0,0 +1,97 @@
{ config, pkgs, ... }:
let
# external
rtp-port = 8083;
webrtc-peer-lower-port = 20000;
webrtc-peer-upper-port = 20100;
domain = "live.neet.space";
# internal
ingest-port = 8084;
web-port = 8085;
webrtc-port = 8086;
toStr = builtins.toString;
in
{
networking.firewall.allowedUDPPorts = [ rtp-port ];
networking.firewall.allowedTCPPortRanges = [{
from = webrtc-peer-lower-port;
to = webrtc-peer-upper-port;
}];
networking.firewall.allowedUDPPortRanges = [{
from = webrtc-peer-lower-port;
to = webrtc-peer-upper-port;
}];
virtualisation.docker.enable = true;
services.nginx.virtualHosts.${domain} = {
enableACME = true;
forceSSL = true;
locations = {
"/" = {
proxyPass = "http://localhost:${toStr web-port}";
};
"websocket" = {
proxyPass = "http://localhost:${toStr webrtc-port}/websocket";
proxyWebsockets = true;
};
};
};
virtualisation.oci-containers = {
backend = "docker";
containers = {
"lightspeed-ingest" = {
workdir = "/var/lib/lightspeed-ingest";
image = "projectlightspeed/ingest";
ports = [
"${toStr ingest-port}:8084"
];
# imageFile = pkgs.dockerTools.pullImage {
# imageName = "projectlightspeed/ingest";
# finalImageTag = "version-0.1.4";
# imageDigest = "sha256:9fc51833b7c27a76d26e40f092b9cec1ac1c4bfebe452e94ad3269f1f73ff2fc";
# sha256 = "19kxl02x0a3i6hlnsfcm49hl6qxnq2f3hfmyv1v8qdaz58f35kd5";
# };
};
"lightspeed-react" = {
workdir = "/var/lib/lightspeed-react";
image = "projectlightspeed/react";
ports = [
"${toStr web-port}:80"
];
# imageFile = pkgs.dockerTools.pullImage {
# imageName = "projectlightspeed/react";
# finalImageTag = "version-0.1.3";
# imageDigest = "sha256:b7c58425f1593f7b4304726b57aa399b6e216e55af9c0962c5c19333fae638b6";
# sha256 = "0d2jh7mr20h7dxgsp7ml7cw2qd4m8ja9rj75dpy59zyb6v0bn7js";
# };
};
"lightspeed-webrtc" = {
workdir = "/var/lib/lightspeed-webrtc";
image = "projectlightspeed/webrtc";
ports = [
"${toStr webrtc-port}:8080"
"${toStr rtp-port}:65535/udp"
"${toStr webrtc-peer-lower-port}-${toStr webrtc-peer-upper-port}:${toStr webrtc-peer-lower-port}-${toStr webrtc-peer-upper-port}/tcp"
"${toStr webrtc-peer-lower-port}-${toStr webrtc-peer-upper-port}:${toStr webrtc-peer-lower-port}-${toStr webrtc-peer-upper-port}/udp"
];
cmd = [
"lightspeed-webrtc"
"--addr=0.0.0.0"
"--ip=${domain}"
"--ports=${toStr webrtc-peer-lower-port}-${toStr webrtc-peer-upper-port}"
"run"
];
# imageFile = pkgs.dockerTools.pullImage {
# imageName = "projectlightspeed/webrtc";
# finalImageTag = "version-0.1.2";
# imageDigest = "sha256:ddf8b3dd294485529ec11d1234a3fc38e365a53c4738998c6bc2c6930be45ecf";
# sha256 = "1bdy4ak99fjdphj5bsk8rp13xxmbqdhfyfab14drbyffivg9ad2i";
# };
};
};
};
}

View File

@@ -0,0 +1,7 @@
Copyright 2020 Matthijs Steen
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -0,0 +1,37 @@
# Visual Studio Code Server support in NixOS
Experimental support for VS Code Server in NixOS. The Node.js binary supplied by VS Code cannot be used within NixOS because it relies on hardcoded paths that do not exist there, so it is automatically replaced by a symlink to a compatible version of Node.js that does work under NixOS.
## Installation
```nix
{
imports = [
(fetchTarball "https://github.com/msteen/nixos-vscode-server/tarball/master")
];
services.vscode-server.enable = true;
}
```
Then enable the service for the relevant users:
```
systemctl --user enable auto-fix-vscode-server.service
```
### Home Manager
```nix
{
imports = [
"${fetchTarball "https://github.com/msteen/nixos-vscode-server/tarball/master"}/modules/vscode-server/home.nix"
];
services.vscode-server.enable = true;
}
```
## Usage
When the service is enabled and running, it should simply work; there is nothing more for you to do.

View File

@@ -0,0 +1 @@
import ./modules/vscode-server

View File

@@ -0,0 +1,8 @@
import ./module.nix ({ name, description, serviceConfig }:
{
systemd.user.services.${name} = {
inherit description serviceConfig;
wantedBy = [ "default.target" ];
};
})

View File

@@ -0,0 +1,15 @@
import ./module.nix ({ name, description, serviceConfig }:
{
systemd.user.services.${name} = {
Unit = {
Description = description;
};
Service = serviceConfig;
Install = {
WantedBy = [ "default.target" ];
};
};
})

View File

@@ -0,0 +1,42 @@
moduleConfig:
{ lib, pkgs, ... }:
with lib;
{
options.services.vscode-server.enable = with types; mkEnableOption "VS Code Server";
config = moduleConfig rec {
name = "auto-fix-vscode-server";
description = "Automatically fix the VS Code server used by the remote SSH extension";
serviceConfig = {
# When a monitored directory is deleted, it will stop being monitored.
# Even if it is later recreated it will not restart monitoring it.
# Unfortunately the monitor does not kill itself when it stops monitoring,
# so rather than creating our own restart mechanism, we leverage systemd to do this for us.
Restart = "always";
RestartSec = 0;
ExecStart = pkgs.writeShellScript "${name}.sh" ''
set -euo pipefail
PATH=${makeBinPath (with pkgs; [ coreutils inotify-tools ])}
bin_dir=~/.vscode-server/bin
[[ -e $bin_dir ]] &&
find "$bin_dir" -mindepth 2 -maxdepth 2 -name node -type f -exec ln -sfT ${pkgs.nodejs-12_x}/bin/node {} \; ||
mkdir -p "$bin_dir"
while IFS=: read -r bin_dir event; do
# A new version of the VS Code Server is being created.
if [[ $event == 'CREATE,ISDIR' ]]; then
# Create a trigger to know when their node is being created and replace it for our symlink.
touch "$bin_dir/node"
inotifywait -qq -e DELETE_SELF "$bin_dir/node"
ln -sfT ${pkgs.nodejs-12_x}/bin/node "$bin_dir/node"
# The monitored directory is deleted, e.g. when "Uninstall VS Code Server from Host" has been run.
elif [[ $event == DELETE_SELF ]]; then
# See the comments above Restart in the service config.
exit 0
fi
done < <(inotifywait -q -m -e CREATE,ISDIR -e DELETE_SELF --format '%w%f:%e' "$bin_dir")
'';
};
};
}

34
common/server/zerobin.nix Normal file
View File

@@ -0,0 +1,34 @@
{ config, pkgs, lib, ... }:
let
cfg = config.services.zerobin;
in
{
options.services.zerobin = {
host = lib.mkOption {
type = lib.types.str;
example = "example.com";
};
port = lib.mkOption {
type = lib.types.int;
default = 33422;
};
};
config = lib.mkIf cfg.enable {
services.zerobin.listenPort = cfg.port;
services.zerobin.listenAddress = "localhost";
services.nginx.virtualHosts.${cfg.host} = {
enableACME = true;
forceSSL = true;
locations."/" = {
proxyPass = "http://localhost:${toString cfg.port}";
proxyWebsockets = true;
};
};
# zerobin service is broken in nixpkgs currently
systemd.services.zerobin.serviceConfig.ExecStart = lib.mkForce
"${pkgs.zerobin}/bin/zerobin --host=${cfg.listenAddress} --port=${toString cfg.listenPort} --data-dir=${cfg.dataDir}";
};
}

View File

@@ -1,4 +1,4 @@
{ pkgs, ... }:
{ config, lib, pkgs, ... }:
# Improvements to the default shell
# - use nix-index for command-not-found
@@ -6,11 +6,14 @@
# - add some handy shell commands
{
environment.systemPackages = with pkgs; [
comma
];
# nix-index
programs.nix-index.enable = true;
programs.nix-index.enableFishIntegration = true;
programs.command-not-found.enable = false;
programs.nix-index-database.comma.enable = true;
programs.fish = {
enable = true;
@@ -18,6 +21,8 @@
shellInit = ''
# disable annoying fish shell greeting
set fish_greeting
alias sudo="doas"
'';
};
@@ -29,7 +34,19 @@
io_seq_write = "${pkgs.fio}/bin/fio --name TEST --eta-newline=5s --filename=temp.file --rw=write --size=2g --io_size=10g --blocksize=1024k --ioengine=libaio --fsync=10000 --iodepth=32 --direct=1 --numjobs=1 --runtime=60 --group_reporting; rm temp.file";
io_rand_read = "${pkgs.fio}/bin/fio --name TEST --eta-newline=5s --filename=temp.file --rw=randread --size=2g --io_size=10g --blocksize=4k --ioengine=libaio --fsync=1 --iodepth=1 --direct=1 --numjobs=32 --runtime=60 --group_reporting; rm temp.file";
io_rand_write = "${pkgs.fio}/bin/fio --name TEST --eta-newline=5s --filename=temp.file --rw=randrw --size=2g --io_size=10g --blocksize=4k --ioengine=libaio --fsync=1 --iodepth=1 --direct=1 --numjobs=1 --runtime=60 --group_reporting; rm temp.file";
llsblk = "lsblk -o +uuid,fsType";
};
nixpkgs.overlays = [
(final: prev: {
# comma uses the "nix-index" package from nixpkgs by default.
# That package doesn't use the prebuilt nix-index database, so it is replaced here.
comma = prev.comma.overrideAttrs (old: {
postInstall = ''
wrapProgram $out/bin/comma \
--prefix PATH : ${lib.makeBinPath [ prev.fzy config.programs.nix-index.package ]}
ln -s $out/bin/comma $out/bin/,
'';
});
})
];
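# With this overlay, `comma` (and its `,` alias) runs programs straight from
# nixpkgs using the prebuilt nix-index database, e.g. `, cowsay hello`
# runs cowsay without permanently installing it.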
}

View File

@@ -1,4 +1,4 @@
{ config, lib, ... }:
{ config, lib, pkgs, ... }:
{
programs.ssh.knownHosts = lib.filterAttrs (n: v: v != null) (lib.concatMapAttrs
@@ -31,6 +31,8 @@
# TODO: Old ssh keys I will remove some day...
machines.ssh.userKeys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMVR/R3ZOsv7TZbICGBCHdjh1NDT8SnswUyINeJOC7QG"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE0dcqL/FhHmv+a1iz3f9LJ48xubO7MZHy35rW9SZOYM"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHSkKiRUUmnErOKGx81nyge/9KqjkPh8BfDk0D3oP586" # nat
];
}

364
flake.lock generated
View File

@@ -3,22 +3,16 @@
"agenix": {
"inputs": {
"darwin": "darwin",
"home-manager": [
"home-manager"
],
"nixpkgs": [
"nixpkgs"
],
"systems": [
"systems"
]
},
"locked": {
"lastModified": 1762618334,
"narHash": "sha256-wyT7Pl6tMFbFrs8Lk/TlEs81N6L+VSybPfiIgzU8lbQ=",
"lastModified": 1682101079,
"narHash": "sha256-MdAhtjrLKnk2uiqun1FWABbKpLH090oeqCSiWemtuck=",
"owner": "ryantm",
"repo": "agenix",
"rev": "fcdea223397448d35d9b31f798479227e80183f6",
"rev": "2994d002dcff5353ca1ac48ec584c7f6589fe447",
"type": "github"
},
"original": {
@@ -27,6 +21,29 @@
"type": "github"
}
},
"archivebox": {
"inputs": {
"flake-utils": [
"flake-utils"
],
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1648612759,
"narHash": "sha256-SJwlpD2Wz3zFoX2mIYCQfwIOYHaOdeiWGFeDXsLGM84=",
"ref": "refs/heads/master",
"rev": "39d338b9b24159d8ef3309eecc0d32a2a9f102b5",
"revCount": 2,
"type": "git",
"url": "https://git.neet.dev/zuckerberg/archivebox.git"
},
"original": {
"type": "git",
"url": "https://git.neet.dev/zuckerberg/archivebox.git"
}
},
"blobs": {
"flake": false,
"locked": {
@@ -43,7 +60,7 @@
"type": "gitlab"
}
},
"claude-code-nix": {
"dailybuild_modules": {
"inputs": {
"flake-utils": [
"flake-utils"
@@ -53,40 +70,17 @@
]
},
"locked": {
"lastModified": 1770491193,
"narHash": "sha256-zdnWeXmPZT8BpBo52s4oansT1Rq0SNzksXKpEcMc5lE=",
"owner": "sadjow",
"repo": "claude-code-nix",
"rev": "f68a2683e812d1e4f9a022ff3e0206d46347d019",
"type": "github"
},
"original": {
"owner": "sadjow",
"repo": "claude-code-nix",
"type": "github"
}
},
"dailybot": {
"inputs": {
"flake-utils": [
"flake-utils"
],
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1739947126,
"narHash": "sha256-JoiddH5H9up8jC/VKU8M7wDlk/bstKoJ3rHj+TkW4Zo=",
"lastModified": 1651719222,
"narHash": "sha256-p/GY5vOP+HUlxNL4OtEhmBNEVQsedOHXEmjfCGONVmE=",
"ref": "refs/heads/master",
"rev": "ea1ad60f1c6662103ef4a3705d8e15aa01219529",
"revCount": 20,
"rev": "1290ddd9a2ff2bf2d0f702750768312b80efcd34",
"revCount": 19,
"type": "git",
"url": "https://git.neet.dev/zuckerberg/dailybot.git"
"url": "https://git.neet.dev/zuckerberg/dailybuild_modules.git"
},
"original": {
"type": "git",
"url": "https://git.neet.dev/zuckerberg/dailybot.git"
"url": "https://git.neet.dev/zuckerberg/dailybuild_modules.git"
}
},
"darwin": {
@@ -97,11 +91,11 @@
]
},
"locked": {
"lastModified": 1744478979,
"narHash": "sha256-dyN+teG9G82G+m+PX/aSAagkC+vUv0SgUw3XkPhQodQ=",
"lastModified": 1673295039,
"narHash": "sha256-AsdYgE8/GPwcelGgrntlijMg4t3hLFJFCRF3tL5WVjA=",
"owner": "lnl7",
"repo": "nix-darwin",
"rev": "43975d782b418ebf4969e9ccba82466728c2851b",
"rev": "87b9d090ad39b25b2400029c64825fc2a8868943",
"type": "github"
},
"original": {
@@ -113,22 +107,21 @@
},
"deploy-rs": {
"inputs": {
"flake-compat": [
"flake-compat"
],
"flake-compat": "flake-compat",
"nixpkgs": [
"nixpkgs"
],
"utils": [
"flake-utils"
"simple-nixos-mailserver",
"utils"
]
},
"locked": {
"lastModified": 1766051518,
"narHash": "sha256-znKOwPXQnt3o7lDb3hdf19oDo0BLP4MfBOYiWkEHoik=",
"lastModified": 1682063650,
"narHash": "sha256-VaDHh2z6xlnTHaONlNVHP7qEMcK5rZ8Js3sT6mKb2XY=",
"owner": "serokell",
"repo": "deploy-rs",
"rev": "d5eff7f948535b9c723d60cd8239f8f11ddc90fa",
"rev": "c2ea4e642dc50fd44b537e9860ec95867af30d39",
"type": "github"
},
"original": {
@@ -140,11 +133,11 @@
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1767039857,
"narHash": "sha256-vNpUSpF5Nuw8xvDLj2KCwwksIbjua2LZCqhV1LNRDns=",
"lastModified": 1668681692,
"narHash": "sha256-Ht91NGdewz8IQLtWZ9LCeNXMSXHUss+9COoqu6JLmXU=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "5edf11c44bc78a0d334f6334cdaf7d60d732daab",
"rev": "009399224d5e398d03b22badca40a37ac85412a1",
"type": "github"
},
"original": {
@@ -155,16 +148,14 @@
},
"flake-utils": {
"inputs": {
"systems": [
"systems"
]
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"lastModified": 1681202837,
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"rev": "cfacdce06f30d2b68473a46042957675eebb3401",
"type": "github"
},
"original": {
@@ -173,96 +164,6 @@
"type": "github"
}
},
"git-hooks": {
"inputs": {
"flake-compat": [
"simple-nixos-mailserver",
"flake-compat"
],
"gitignore": "gitignore",
"nixpkgs": [
"simple-nixos-mailserver",
"nixpkgs"
]
},
"locked": {
"lastModified": 1763988335,
"narHash": "sha256-QlcnByMc8KBjpU37rbq5iP7Cp97HvjRP0ucfdh+M4Qc=",
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "50b9238891e388c9fdc6a5c49e49c42533a1b5ce",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "git-hooks.nix",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"simple-nixos-mailserver",
"git-hooks",
"nixpkgs"
]
},
"locked": {
"lastModified": 1709087332,
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"home-manager": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1768068402,
"narHash": "sha256-bAXnnJZKJiF7Xr6eNW6+PhBf1lg2P1aFUO9+xgWkXfA=",
"owner": "nix-community",
"repo": "home-manager",
"rev": "8bc5473b6bc2b6e1529a9c4040411e1199c43b4c",
"type": "github"
},
"original": {
"owner": "nix-community",
"ref": "master",
"repo": "home-manager",
"type": "github"
}
},
"microvm": {
"inputs": {
"nixpkgs": [
"nixpkgs"
],
"spectrum": "spectrum"
},
"locked": {
"lastModified": 1770310890,
"narHash": "sha256-lyWAs4XKg3kLYaf4gm5qc5WJrDkYy3/qeV5G733fJww=",
"owner": "astro",
"repo": "microvm.nix",
"rev": "68c9f9c6ca91841f04f726a298c385411b7bfcd5",
"type": "github"
},
"original": {
"owner": "astro",
"repo": "microvm.nix",
"type": "github"
}
},
"nix-index-database": {
"inputs": {
"nixpkgs": [
@@ -270,11 +171,11 @@
]
},
"locked": {
"lastModified": 1765267181,
"narHash": "sha256-d3NBA9zEtBu2JFMnTBqWj7Tmi7R5OikoU2ycrdhQEws=",
"lastModified": 1681591833,
"narHash": "sha256-lW+xOELafAs29yw56FG4MzNOFkh8VHC/X/tRs1wsGn8=",
"owner": "Mic92",
"repo": "nix-index-database",
"rev": "82befcf7dc77c909b0f2a09f5da910ec95c5b78f",
"rev": "68ec961c51f48768f72d2bbdb396ce65a316677e",
"type": "github"
},
"original": {
@@ -283,29 +184,13 @@
"type": "github"
}
},
"nixos-hardware": {
"locked": {
"lastModified": 1767185284,
"narHash": "sha256-ljDBUDpD1Cg5n3mJI81Hz5qeZAwCGxon4kQW3Ho3+6Q=",
"owner": "NixOS",
"repo": "nixos-hardware",
"rev": "40b1a28dce561bea34858287fbb23052c3ee63fe",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "master",
"repo": "nixos-hardware",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1768250893,
"narHash": "sha256-fWNJYFx0QvnlGlcw54EoOYs/wv2icINHUz0FVdh9RIo=",
"lastModified": 1682133240,
"narHash": "sha256-s6yRsI/7V+k/+rckp0+/2cs/UXnea3SEfMpy95QiGcc=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "3971af1a8fc3646b1d554cb1269b26c84539c22e",
"rev": "8dafae7c03d6aa8c2ae0a0612fbcb47e994e3fb8",
"type": "github"
},
"original": {
@@ -315,63 +200,111 @@
"type": "github"
}
},
"root": {
"inputs": {
"agenix": "agenix",
"claude-code-nix": "claude-code-nix",
"dailybot": "dailybot",
"deploy-rs": "deploy-rs",
"flake-compat": "flake-compat",
"flake-utils": "flake-utils",
"home-manager": "home-manager",
"microvm": "microvm",
"nix-index-database": "nix-index-database",
"nixos-hardware": "nixos-hardware",
"nixpkgs": "nixpkgs",
"simple-nixos-mailserver": "simple-nixos-mailserver",
"systems": "systems"
"nixpkgs-22_05": {
"locked": {
"lastModified": 1654936503,
"narHash": "sha256-soKzdhI4jTHv/rSbh89RdlcJmrPgH8oMb/PLqiqIYVQ=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "dab6df51387c3878cdea09f43589a15729cae9f4",
"type": "github"
},
"original": {
"id": "nixpkgs",
"ref": "nixos-22.05",
"type": "indirect"
}
},
"simple-nixos-mailserver": {
"nixpkgs-hostapd-pr": {
"flake": false,
"locked": {
"narHash": "sha256-1rGQKcB1jeRPc1n021ulyOVkA6L6xmNYKmeqQ94+iRc=",
"type": "file",
"url": "https://github.com/NixOS/nixpkgs/pull/222536.patch"
},
"original": {
"type": "file",
"url": "https://github.com/NixOS/nixpkgs/pull/222536.patch"
}
},
"radio": {
"inputs": {
"blobs": "blobs",
"flake-compat": [
"flake-compat"
"flake-utils": [
"flake-utils"
],
"git-hooks": "git-hooks",
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1766321686,
"narHash": "sha256-icOWbnD977HXhveirqA10zoqvErczVs3NKx8Bj+ikHY=",
"owner": "simple-nixos-mailserver",
"repo": "nixos-mailserver",
"rev": "7d433bf89882f61621f95082e90a4ab91eb0bdd3",
"type": "gitlab"
"lastModified": 1631585589,
"narHash": "sha256-q4o/4/2pEuJyaKZwNQC5KHnzG1obClzFB7zWk9XSDfY=",
"ref": "main",
"rev": "5bf607fed977d41a269942a7d1e92f3e6d4f2473",
"revCount": 38,
"type": "git",
"url": "https://git.neet.dev/zuckerberg/radio.git"
},
"original": {
"owner": "simple-nixos-mailserver",
"ref": "master",
"repo": "nixos-mailserver",
"type": "gitlab"
"ref": "main",
"rev": "5bf607fed977d41a269942a7d1e92f3e6d4f2473",
"type": "git",
"url": "https://git.neet.dev/zuckerberg/radio.git"
}
},
"spectrum": {
"radio-web": {
"flake": false,
"locked": {
"lastModified": 1759482047,
"narHash": "sha256-H1wiXRQHxxPyMMlP39ce3ROKCwI5/tUn36P8x6dFiiQ=",
"ref": "refs/heads/main",
"rev": "c5d5786d3dc938af0b279c542d1e43bce381b4b9",
"revCount": 996,
"lastModified": 1652121792,
"narHash": "sha256-j1Y9MAjUVNgyFSeGzPoqibAnEysJDjZSXukVfQ7+bsQ=",
"ref": "refs/heads/master",
"rev": "72e7a9e80b780c84ed8d4a6374bfbb242701f900",
"revCount": 5,
"type": "git",
"url": "https://spectrum-os.org/git/spectrum"
"url": "https://git.neet.dev/zuckerberg/radio-web.git"
},
"original": {
"type": "git",
"url": "https://spectrum-os.org/git/spectrum"
"url": "https://git.neet.dev/zuckerberg/radio-web.git"
}
},
"root": {
"inputs": {
"agenix": "agenix",
"archivebox": "archivebox",
"dailybuild_modules": "dailybuild_modules",
"deploy-rs": "deploy-rs",
"flake-utils": "flake-utils",
"nix-index-database": "nix-index-database",
"nixpkgs": "nixpkgs",
"nixpkgs-hostapd-pr": "nixpkgs-hostapd-pr",
"radio": "radio",
"radio-web": "radio-web",
"simple-nixos-mailserver": "simple-nixos-mailserver"
}
},
"simple-nixos-mailserver": {
"inputs": {
"blobs": "blobs",
"nixpkgs": [
"nixpkgs"
],
"nixpkgs-22_05": "nixpkgs-22_05",
"utils": "utils"
},
"locked": {
"lastModified": 1655930346,
"narHash": "sha256-ht56HHOzEhjeIgAv5ZNFjSVX/in1YlUs0HG9c1EUXTM=",
"owner": "simple-nixos-mailserver",
"repo": "nixos-mailserver",
"rev": "f535d8123c4761b2ed8138f3d202ea710a334a1d",
"type": "gitlab"
},
"original": {
"owner": "simple-nixos-mailserver",
"ref": "nixos-22.05",
"repo": "nixos-mailserver",
"type": "gitlab"
}
},
"systems": {
@@ -388,6 +321,21 @@
"repo": "default",
"type": "github"
}
},
"utils": {
"locked": {
"lastModified": 1605370193,
"narHash": "sha256-YyMTf3URDL/otKdKgtoMChu4vfVL3vCMkRqpGifhUn0=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "5021eac20303a61fafe17224c087f5519baed54d",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
}
},
"root": "root",

167
flake.nix
View File

@@ -1,91 +1,52 @@
{
inputs = {
# nixpkgs
nixpkgs.url = "github:NixOS/nixpkgs/master";
# nixpkgs-patch-howdy.url = "https://github.com/NixOS/nixpkgs/pull/216245.diff";
# nixpkgs-patch-howdy.flake = false;
# Common Utils Among flake inputs
systems.url = "github:nix-systems/default";
flake-utils = {
url = "github:numtide/flake-utils";
inputs.systems.follows = "systems";
};
flake-compat = {
url = "github:edolstra/flake-compat";
flake = false;
};
flake-utils.url = "github:numtide/flake-utils";
# NixOS hardware
nixos-hardware.url = "github:NixOS/nixos-hardware/master";
# mail server
simple-nixos-mailserver.url = "gitlab:simple-nixos-mailserver/nixos-mailserver/nixos-22.05";
simple-nixos-mailserver.inputs.nixpkgs.follows = "nixpkgs";
# Home Manager
home-manager = {
url = "github:nix-community/home-manager/master";
inputs.nixpkgs.follows = "nixpkgs";
};
# agenix
agenix.url = "github:ryantm/agenix";
agenix.inputs.nixpkgs.follows = "nixpkgs";
# Mail Server
simple-nixos-mailserver = {
url = "gitlab:simple-nixos-mailserver/nixos-mailserver/master";
inputs = {
nixpkgs.follows = "nixpkgs";
flake-compat.follows = "flake-compat";
};
};
# radio
radio.url = "git+https://git.neet.dev/zuckerberg/radio.git?ref=main&rev=5bf607fed977d41a269942a7d1e92f3e6d4f2473";
radio.inputs.nixpkgs.follows = "nixpkgs";
radio.inputs.flake-utils.follows = "flake-utils";
radio-web.url = "git+https://git.neet.dev/zuckerberg/radio-web.git";
radio-web.flake = false;
# Agenix
agenix = {
url = "github:ryantm/agenix";
inputs = {
nixpkgs.follows = "nixpkgs";
systems.follows = "systems";
home-manager.follows = "home-manager";
};
};
# drastikbot
dailybuild_modules.url = "git+https://git.neet.dev/zuckerberg/dailybuild_modules.git";
dailybuild_modules.inputs.nixpkgs.follows = "nixpkgs";
dailybuild_modules.inputs.flake-utils.follows = "flake-utils";
# Dailybot
dailybot = {
url = "git+https://git.neet.dev/zuckerberg/dailybot.git";
inputs = {
nixpkgs.follows = "nixpkgs";
flake-utils.follows = "flake-utils";
};
};
# archivebox
archivebox.url = "git+https://git.neet.dev/zuckerberg/archivebox.git";
archivebox.inputs.nixpkgs.follows = "nixpkgs";
archivebox.inputs.flake-utils.follows = "flake-utils";
# NixOS deployment
deploy-rs = {
url = "github:serokell/deploy-rs";
inputs = {
nixpkgs.follows = "nixpkgs";
flake-compat.follows = "flake-compat";
utils.follows = "flake-utils";
};
};
# nixos config deployment
deploy-rs.url = "github:serokell/deploy-rs";
deploy-rs.inputs.nixpkgs.follows = "nixpkgs";
deploy-rs.inputs.utils.follows = "simple-nixos-mailserver/utils";
# Prebuilt nix-index database
nix-index-database = {
url = "github:Mic92/nix-index-database";
inputs.nixpkgs.follows = "nixpkgs";
};
# prebuilt nix-index database
nix-index-database.url = "github:Mic92/nix-index-database";
nix-index-database.inputs.nixpkgs.follows = "nixpkgs";
# MicroVM support
microvm = {
url = "github:astro/microvm.nix";
inputs.nixpkgs.follows = "nixpkgs";
};
# Up to date claude-code
claude-code-nix = {
url = "github:sadjow/claude-code-nix";
inputs = {
nixpkgs.follows = "nixpkgs";
flake-utils.follows = "flake-utils";
};
};
nixpkgs-hostapd-pr.url = "https://github.com/NixOS/nixpkgs/pull/222536.patch";
nixpkgs-hostapd-pr.flake = false;
};
outputs = { self, nixpkgs, ... }@inputs:
let
machineHosts = (import ./common/machine-info/moduleless.nix
machines = (import ./common/machine-info/moduleless.nix
{
inherit nixpkgs;
assertionsModule = "${nixpkgs}/nixos/modules/misc/assertions.nix";
@@ -98,27 +59,16 @@
./common
simple-nixos-mailserver.nixosModule
agenix.nixosModules.default
dailybot.nixosModule
nix-index-database.nixosModules.default
home-manager.nixosModules.home-manager
microvm.nixosModules.host
self.nixosModules.kernel-modules
dailybuild_modules.nixosModule
archivebox.nixosModule
nix-index-database.nixosModules.nix-index
({ lib, ... }: {
config = {
nixpkgs.overlays = [
self.overlays.default
inputs.claude-code-nix.overlays.default
];
environment.systemPackages = [
agenix.packages.${system}.agenix
];
networking.hostName = hostname;
home-manager.useGlobalPkgs = true;
home-manager.useUserPackages = true;
home-manager.users.googlebot = import ./home/googlebot.nix;
};
# because nixos specialArgs doesn't work for containers... need to pass in inputs a different way
@@ -136,7 +86,8 @@
name = "nixpkgs-patched";
src = nixpkgs;
patches = [
./patches/dont-break-nix-serve.patch
inputs.nixpkgs-hostapd-pr
./patches/kexec-luks.patch
];
};
patchedNixpkgs = nixpkgs.lib.fix (self: (import "${patchedNixpkgsSrc}/flake.nix").outputs { self = nixpkgs; });
@@ -148,42 +99,34 @@
specialArgs = {
inherit allModules;
lib = self.lib;
nixos-hardware = inputs.nixos-hardware;
};
};
in
nixpkgs.lib.mapAttrs
(hostname: cfg:
mkSystem cfg.arch nixpkgs cfg.configurationPath hostname)
machineHosts;
machines;
# kexec produces a tarball; for a self-extracting bundle see:
# https://github.com/nix-community/nixos-generators/blob/master/formats/kexec.nix#L60
packages =
let
mkEphemeral = system: nixpkgs.lib.nixosSystem {
inherit system;
modules = [
./machines/ephemeral/minimal.nix
inputs.nix-index-database.nixosModules.default
];
};
mkKexec = system:
(nixpkgs.lib.nixosSystem {
inherit system;
modules = [ ./machines/ephemeral/kexec.nix ];
}).config.system.build.kexec_tarball;
mkIso = system:
(nixpkgs.lib.nixosSystem {
inherit system;
modules = [ ./machines/ephemeral/iso.nix ];
}).config.system.build.isoImage;
in
{
"x86_64-linux" = {
kexec = (mkEphemeral "x86_64-linux").config.system.build.images.kexec;
iso = (mkEphemeral "x86_64-linux").config.system.build.images.iso;
};
"aarch64-linux" = {
kexec = (mkEphemeral "aarch64-linux").config.system.build.images.kexec;
iso = (mkEphemeral "aarch64-linux").config.system.build.images.iso;
};
"x86_64-linux"."kexec" = mkKexec "x86_64-linux";
"x86_64-linux"."iso" = mkIso "x86_64-linux";
"aarch64-linux"."kexec" = mkKexec "aarch64-linux";
"aarch64-linux"."iso" = mkIso "aarch64-linux";
};
overlays.default = import ./overlays { inherit inputs; };
nixosModules.kernel-modules = import ./overlays/kernel-modules;
deploy.nodes =
let
mkDeploy = configName: arch: hostname: {
@@ -196,10 +139,8 @@
nixpkgs.lib.mapAttrs
(hostname: cfg:
mkDeploy hostname cfg.arch (builtins.head cfg.hostNames))
machineHosts;
machines;
checks = builtins.mapAttrs (system: deployLib: deployLib.deployChecks self.deploy) inputs.deploy-rs.lib;
lib = nixpkgs.lib.extend (final: prev: import ./lib { lib = nixpkgs.lib; });
};
}

View File

@@ -1,119 +0,0 @@
{ lib, pkgs, osConfig, ... }:
# https://home-manager-options.extranix.com/
# https://nix-community.github.io/home-manager/options.xhtml
let
# Check if the current machine has the role "personal"
thisMachineIsPersonal = osConfig.thisMachine.hasRole."personal";
in
{
home.username = "googlebot";
home.homeDirectory = "/home/googlebot";
home.stateVersion = "24.11";
programs.home-manager.enable = true;
services.ssh-agent.enable = true;
# System Monitoring
programs.btop.enable = true;
programs.bottom.enable = true;
# Modern "ls" replacement
programs.pls.enable = true;
programs.pls.enableFishIntegration = false;
programs.eza.enable = true;
# Graphical terminal
programs.ghostty.enable = thisMachineIsPersonal;
programs.ghostty.settings = {
theme = "Snazzy";
font-size = 10;
};
# Advanced terminal file explorer
programs.broot.enable = true;
# Shell prompt theming
programs.fish.enable = true;
programs.starship.enable = true;
programs.starship.enableFishIntegration = true;
programs.starship.enableInteractive = true;
# programs.oh-my-posh.enable = true;
# programs.oh-my-posh.enableFishIntegration = true;
# Advanced search
programs.ripgrep.enable = true;
# tldr: Simplified, example-based and community-driven man pages.
programs.tealdeer.enable = true;
home.shellAliases = {
sudo = "doas";
ls2 = "eza";
explorer = "broot";
};
programs.zed-editor = {
enable = thisMachineIsPersonal;
};
programs.vscode = {
enable = thisMachineIsPersonal;
# Must use the FHS version for vscode-lldb
package = pkgs.vscodium-fhs;
profiles.default = {
userSettings = {
editor.formatOnSave = true;
nix = {
enableLanguageServer = true;
serverPath = "${pkgs.nil}/bin/nil";
serverSettings.nil = {
formatting.command = [ "${pkgs.nixpkgs-fmt}/bin/nixpkgs-fmt" ];
nix.flake.autoArchive = true;
};
};
dotnetAcquisitionExtension.sharedExistingDotnetPath = "${pkgs.dotnet-sdk_9}/bin";
godotTools = {
lsp.serverPort = 6005; # port needs to match Godot configuration
editorPath.godot4 = "godot-mono";
};
rust-analyzer = {
restartServerOnConfigChange = true;
testExplorer = true;
server.path = "rust-analyzer"; # Use the rust-analyzer from PATH (which is set by nixEnvSelector from the project's flake)
};
nixEnvSelector = {
useFlakes = true; # This hasn't ever worked for me and I have to use shell.nix... but maybe someday
suggestion = false; # Stop the really annoying nagging
};
};
extensions = with pkgs.vscode-extensions; [
bbenoist.nix # nix syntax support
arrterian.nix-env-selector # nix dev envs
dart-code.dart-code
dart-code.flutter
golang.go
jnoortheen.nix-ide
ms-vscode.cpptools
rust-lang.rust-analyzer
vadimcn.vscode-lldb
tauri-apps.tauri-vscode
platformio.platformio-vscode-ide
vue.volar
wgsl-analyzer.wgsl-analyzer
# Godot
geequlim.godot-tools # For Godot GDScript support
ms-dotnettools.csharp
ms-dotnettools.vscode-dotnet-runtime
];
};
};
home.packages = lib.mkIf thisMachineIsPersonal [
pkgs.claude-code
pkgs.dotnetCorePackages.dotnet_9.sdk # For Godot-Mono VSCode-Extension CSharp
];
}

View File

@@ -1,65 +0,0 @@
{ lib, ... }:
with lib;
{
# Passthrough trace for debugging
pTrace = v: traceSeq v v;
# find the total sum of an int list
sum = foldr (x: y: x + y) 0;
# splits a two-element list into two arguments and passes them to a function
splitPair = f: pair: f (head pair) (last pair);
# Finds the max value in a list
maxList = foldr max 0;
# Sorts an int list, greatest value first
sortList = sort (x: y: x > y);
# Cuts a list in half and returns the two parts in a list
cutInHalf = l: [ (take (length l / 2) l) (drop (length l / 2) l) ];
# Splits a list into a list of lists, each of length cnt
chunksOf = cnt: l:
if length l > 0 then
[ (take cnt l) ] ++ chunksOf cnt (drop cnt l)
else [ ];
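# e.g. chunksOf 2 [ 1 2 3 4 5 ] => [ [ 1 2 ] [ 3 4 ] [ 5 ] ]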
# same as intersectLists but takes a list of lists to intersect instead of just two
intersectManyLists = ll: foldr intersectLists (head ll) ll;
# converts a boolean to an int (C style)
boolToInt = b: if b then 1 else 0;
# drops the last element of a list
dropLast = l: take (length l - 1) l;
# transposes a matrix
transpose = ll:
let
outerSize = length ll;
innerSize = length (elemAt ll 0);
in
genList (i: genList (j: elemAt (elemAt ll j) i) outerSize) innerSize;
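# e.g. transpose [ [ 1 2 ] [ 3 4 ] ] => [ [ 1 3 ] [ 2 4 ] ]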
# attrset recursiveUpdate but for a list of attrsets
combineAttrs = foldl recursiveUpdate { };
# visits every attrset element of an attrset recursively
# and accumulates the result of every visit in a flat list
recurisveVisitAttrs = f: set:
let
visitor = n: v:
if isAttrs v then [ (f n v) ] ++ recurisveVisitAttrs f v
else [ (f n v) ];
in
concatLists (map (name: visitor name set.${name}) (attrNames set));
# zips two lists of the same size (like map, but with both lists as inputs per iteration)
mergeLists = f: a: imap0 (i: f (elemAt a i));
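# e.g. mergeLists (a: b: a + b) [ 1 2 ] [ 10 20 ] => [ 11 22 ]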
map2D = f: ll:
let
outerSize = length ll;
innerSize = length (elemAt ll 0);
getElem = x: y: elemAt (elemAt ll y) x;
in
genList (y: genList (x: f x y (getElem x y)) innerSize) outerSize;
# Generate a deterministic MAC address from a name
# Uses locally administered unicast range (02:xx:xx:xx:xx:xx)
mkMac = name:
let
hash = builtins.hashString "sha256" name;
octets = map (i: builtins.substring i 2 hash) [ 0 2 4 6 8 ];
in
"02:${builtins.concatStringsSep ":" octets}";
}

4
lockfile-update.sh Executable file
View File

@@ -0,0 +1,4 @@
#! /usr/bin/env nix-shell
#! nix-shell -i bash -p bash
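# Update every flake input and commit the resulting flake.lock in one step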
nix flake update --commit-lock-file

View File

@@ -0,0 +1,12 @@
{ modulesPath, ... }:
{
imports = [
(modulesPath + "/installer/cd-dvd/iso-image.nix")
./minimal.nix
];
isoImage.makeUsbBootable = true;
networking.hostName = "iso";
}

View File

@@ -0,0 +1,48 @@
# From https://mdleom.com/blog/2021/03/09/nixos-oracle/#Build-a-kexec-tarball
# Builds a kexec img
{ config, pkgs, modulesPath, ... }:
{
imports = [
(modulesPath + "/installer/netboot/netboot.nix")
(modulesPath + "/profiles/qemu-guest.nix")
./minimal.nix
];
networking.hostName = "kexec";
# stripped down version of https://github.com/cleverca22/nix-tests/tree/master/kexec
system.build = rec {
image = pkgs.runCommand "image" { buildInputs = [ pkgs.nukeReferences ]; } ''
mkdir $out
if [ -f ${config.system.build.kernel}/bzImage ]; then
cp ${config.system.build.kernel}/bzImage $out/kernel
else
cp ${config.system.build.kernel}/Image $out/kernel
fi
cp ${config.system.build.netbootRamdisk}/initrd $out/initrd
nuke-refs $out/kernel
'';
kexec_script = pkgs.writeTextFile {
executable = true;
name = "kexec-nixos";
text = ''
#!${pkgs.stdenv.shell}
set -e
${pkgs.kexectools}/bin/kexec -l ${image}/kernel --initrd=${image}/initrd --append="init=${builtins.unsafeDiscardStringContext config.system.build.toplevel}/init ${toString config.boot.kernelParams}"
sync
echo "executing kernel, filesystems will be improperly umounted"
${pkgs.kexectools}/bin/kexec -e
'';
};
kexec_tarball = pkgs.callPackage (modulesPath + "/../lib/make-system-tarball.nix") {
storeContents = [
{
object = config.system.build.kexec_script;
symlink = "/kexec_nixos";
}
];
contents = [ ];
};
};
}
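# Rough usage sketch (an assumption, not documented here): build
# config.system.build.kexec_tarball, copy and unpack it on the target host,
# then run /kexec_nixos to kexec into this configuration without touching the disk.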

View File

@@ -5,26 +5,18 @@
(modulesPath + "/installer/cd-dvd/channel.nix")
../../common/machine-info
../../common/ssh.nix
../../common/flakes.nix
../../common/shell.nix
];
boot.initrd.availableKernelModules = [
"ata_piix"
"uhci_hcd"
"e1000"
"e1000e"
"virtio_pci"
"r8169"
"sdhci"
"sdhci_pci"
"mmc_core"
"mmc_block"
];
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "e1000" "e1000e" "virtio_pci" "r8169" ];
boot.kernelParams = [
"panic=30"
"boot.panic_on_fail" # reboot the machine upon fatal boot issues
"console=ttyS0,115200" # enable serial console
"console=tty1"
];
# boot.kernelPackages = pkgs.linuxPackages_latest;
boot.kernel.sysctl."vm.overcommit_memory" = "1";
boot.kernelPackages = pkgs.linuxPackages_latest;
system.stateVersion = "21.11";
@@ -38,7 +30,6 @@
git-lfs
wget
htop
btop
dnsutils
pciutils
usbutils
@@ -47,7 +38,7 @@
environment.variables.GC_INITIAL_HEAP_SIZE = "1M";
# networking.useDHCP = true;
networking.useDHCP = true;
services.openssh = {
enable = true;
@@ -57,7 +48,6 @@
};
};
nix.flakes.enable = true;
services.getty.autologinUser = "root";
users.users.root.openssh.authorizedKeys.keys = config.machines.ssh.userKeys;
}

View File

@@ -1,85 +0,0 @@
{ config, pkgs, lib, ... }:
{
imports = [
./hardware-configuration.nix
];
# don't use remote builders
nix.distributedBuilds = lib.mkForce false;
nix.gc.automatic = lib.mkForce false;
# Upstream interface for sandbox networking (NAT)
networking.sandbox.upstreamInterface = lib.mkDefault "enp191s0";
# Enable sandboxed workspace
sandboxed-workspace = {
enable = true;
workspaces.test-incus = {
type = "incus";
autoStart = true;
config = ./workspaces/test-container.nix;
ip = "192.168.83.90";
hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIL0SNSy/MdW38NqKzLr1SG8WKrs8XkrqibacaJtJPzgW";
};
};
environment.systemPackages = with pkgs; [
system76-keyboard-configurator
];
services.ollama = {
enable = true;
package = pkgs.ollama-vulkan;
host = "127.0.0.1";
};
services.open-webui = {
enable = true;
host = "127.0.0.1"; # nginx proxy
port = 12831;
environment = {
ANONYMIZED_TELEMETRY = "False";
DO_NOT_TRACK = "True";
SCARF_NO_ANALYTICS = "True";
OLLAMA_API_BASE_URL = "http://localhost:${toString config.services.ollama.port}";
};
};
# nginx
services.nginx = {
enable = true;
openFirewall = false; # All nginx services are internal
virtualHosts =
let
mkHost = external: config:
{
${external} = {
useACMEHost = "fry.neet.dev"; # Use wildcard cert
forceSSL = true;
locations."/" = config;
};
};
mkVirtualHost = external: internal:
mkHost external {
proxyPass = internal;
proxyWebsockets = true;
};
in
lib.mkMerge [
(mkVirtualHost "chat.fry.neet.dev" "http://localhost:${toString config.services.open-webui.port}")
];
};
# Get wildcard cert
security.acme.certs."fry.neet.dev" = {
dnsProvider = "digitalocean";
credentialsFile = "/run/agenix/digitalocean-dns-credentials";
extraDomainNames = [ "*.fry.neet.dev" ];
group = "nginx";
dnsResolver = "1.1.1.1:53";
dnsPropagationCheck = false; # sadly this erroneously fails
};
age.secrets.digitalocean-dns-credentials.file = ../../secrets/digitalocean-dns-credentials.age;
}

View File

@@ -1,50 +0,0 @@
{ config, lib, pkgs, modulesPath, nixos-hardware, ... }:
{
imports = [
(modulesPath + "/installer/scan/not-detected.nix")
nixos-hardware.nixosModules.framework-amd-ai-300-series
];
boot.kernelPackages = pkgs.linuxPackages_latest;
services.fwupd.enable = true;
# boot
boot.loader.systemd-boot.enable = true;
boot.initrd.availableKernelModules = [ "nvme" "xhci_pci" "thunderbolt" "usb_storage" "sd_mod" "r8169" ];
boot.initrd.kernelModules = [ "dm-snapshot" ];
boot.kernelModules = [ "kvm-amd" ];
boot.extraModulePackages = [ ];
# thunderbolt
services.hardware.bolt.enable = true;
# firmware
firmware.x86_64.enable = true;
# disks
remoteLuksUnlock.enable = true;
boot.initrd.luks.devices."enc-pv" = {
device = "/dev/disk/by-uuid/d4f2f25a-5108-4285-968f-b24fb516d4f3";
allowDiscards = true;
};
fileSystems."/" =
{ device = "/dev/disk/by-uuid/a8901bc1-8642-442a-940a-ddd3f428cd0f";
fsType = "btrfs";
};
fileSystems."/boot" =
{ device = "/dev/disk/by-uuid/13E5-C9D4";
fsType = "vfat";
options = [ "fmask=0022" "dmask=0022" ];
};
swapDevices =
[ { device = "/dev/disk/by-uuid/03356a74-33f0-4a2e-b57a-ec9dfc9d85c5"; }
];
# Ensures that DHCP is active during initrd (NetworkManager is used post-boot)
boot.initrd.network.udhcpc.enable = true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
}

View File

@@ -1,24 +0,0 @@
{
hostNames = [
"fry"
];
arch = "x86_64-linux";
systemRoles = [
"personal"
"dns-challenge"
];
hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID/Df5lG07Il7fizEgZR/T9bMlR0joESRJ7cqM9BkOyP";
userKeys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM5/h6YySqNemA4+e+xslhspBp34ulXKembe3RoeZ5av"
];
remoteUnlock = {
hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIL1RC1lhP4TSL2THvKAQAH7Y/eSGQPo/MjhTsZD6CEES";
clearnetHost = "192.168.1.3";
onionHost = "z7smmigsfrabqfnxqogfogmsu36jhpsyscncmd332w5ioheblw6i4lid.onion";
};
}

View File

@@ -1,20 +0,0 @@
{ pkgs, ... }:
# Test container workspace configuration
#
# Add to sandboxed-workspace.workspaces in machines/fry/default.nix:
# sandboxed-workspace.workspaces.test-container = {
# type = "container" OR "incus";
# config = ./workspaces/test-container.nix;
# ip = "192.168.83.50";
# };
#
# The workspace name ("test-container") becomes the hostname automatically.
# The IP is configured in default.nix, not here.
{
# Install packages as needed
environment.systemPackages = with pkgs; [
# Add packages here
];
}

View File

@@ -1,12 +0,0 @@
{ lib, ... }:
{
imports = [
./hardware-configuration.nix
];
# don't use remote builders
nix.distributedBuilds = lib.mkForce false;
nix.gc.automatic = lib.mkForce false;
}

View File

@@ -1,58 +0,0 @@
{ config, lib, pkgs, modulesPath, nixos-hardware, ... }:
{
imports = [
(modulesPath + "/installer/scan/not-detected.nix")
nixos-hardware.nixosModules.framework-13-7040-amd
];
boot.kernelPackages = pkgs.linuxPackages_latest;
hardware.framework.amd-7040.preventWakeOnAC = true;
services.fwupd.enable = true;
# The fingerprint reader has so far been more of a nuisance than a help:
# it makes SDDM login fail most of the time and take several minutes to finish.
services.fprintd.enable = false;
# boot
boot.loader.systemd-boot.enable = true;
boot.initrd.availableKernelModules = [ "nvme" "xhci_pci" "thunderbolt" "usb_storage" "sd_mod" ];
boot.initrd.kernelModules = [ "dm-snapshot" ];
boot.kernelModules = [ "kvm-amd" ];
boot.extraModulePackages = [ ];
# thunderbolt
services.hardware.bolt.enable = true;
# firmware
firmware.x86_64.enable = true;
# disks
remoteLuksUnlock.enable = true;
boot.initrd.luks.devices."enc-pv" = {
device = "/dev/disk/by-uuid/2e4a6960-a6b1-40ee-9c2c-2766eb718d52";
allowDiscards = true;
};
fileSystems."/" =
{
device = "/dev/disk/by-uuid/1f62386c-3243-49f5-b72f-df8fc8f39db8";
fsType = "btrfs";
};
fileSystems."/boot" =
{
device = "/dev/disk/by-uuid/F4D9-C5E8";
fsType = "vfat";
options = [ "fmask=0022" "dmask=0022" ];
};
swapDevices =
[{ device = "/dev/disk/by-uuid/5f65cb11-2649-48fe-9c78-3e325b857c53"; }];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true;
# networking.interfaces.wlp1s0.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
}

View File

@@ -1,22 +0,0 @@
{
hostNames = [
"howl"
];
arch = "x86_64-linux";
systemRoles = [
"personal"
];
hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEQi3q8jU6vRruExAL60J7GFO1gS8HsmXVJuKRT4ljrG";
userKeys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKPnLt84bKhUgFxjQf10+Htro9Lo1Pabqm8mGalBUniv"
];
remoteUnlock = {
hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIN0N80r0Sl2WlJaUqfxZPkOtYyGumFazkIqq7eq3Gd2o";
onionHost = "ll6yjnkh4psmfwmtkmqoutl4gq4elqzbmjxv4s6gpgoavyi3kwhjvnqd.onion";
};
}

View File

@@ -1,4 +1,4 @@
{ ... }:
{ config, pkgs, fetchurl, lib, ... }:
{
imports = [
@@ -9,4 +9,7 @@
networking.hostName = "nat";
networking.interfaces.ens160.useDHCP = true;
de.enable = true;
de.touchpad.enable = true;
}

View File

@@ -1,7 +1,7 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ ... }:
{ config, lib, pkgs, modulesPath, ... }:
{
imports = [ ];

View File

@@ -1,9 +1,14 @@
{ lib, ... }:
{ config, pkgs, lib, ... }:
{
imports = [
./hardware-configuration.nix
];
networking.hostName = "phil";
services.gitea-runner = {
enable = true;
instanceUrl = "https://git.neet.dev";
};
system.autoUpgrade.enable = true;
}

View File

@@ -1,7 +1,7 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ lib, modulesPath, ... }:
{ config, lib, pkgs, modulesPath, ... }:
{
imports =
@@ -20,10 +20,7 @@
boot.kernelModules = [ ];
boot.extraModulePackages = [ ];
boot.initrd.luks.devices."enc-pv" = {
device = "/dev/disk/by-uuid/d26c1820-4c39-4615-98c2-51442504e194";
allowDiscards = true;
};
luks.devices = [ "/dev/disk/by-uuid/d26c1820-4c39-4615-98c2-51442504e194" ];
fileSystems."/" =
{

View File

@@ -8,7 +8,7 @@
systemRoles = [
"server"
"nix-builder"
"gitea-runner"
];
hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBlgRPpuUkZqe8/lHugRPm/m2vcN9psYhh5tENHZt9I2";

View File

@@ -5,13 +5,14 @@
./hardware-configuration.nix
];
# system.autoUpgrade.enable = true;
system.autoUpgrade.enable = true;
# I want to manually trigger kexec updates for now on ponyo
system.autoUpgrade.allowKexec = false;
luks.enableKexec = true;
# p2p mesh network
services.tailscale.exitNode = true;
services.iperf3.enable = true;
# email server
mailserver.enable = true;
@@ -56,6 +57,29 @@
config.services.drastikbot.dataDir
];
# music radio
vpn-container.enable = true;
vpn-container.config = {
services.radio = {
enable = true;
host = "radio.runyan.org";
};
};
pia.wireguard.badPortForwardPorts = [ ];
services.nginx.virtualHosts."radio.runyan.org" = {
enableACME = true;
forceSSL = true;
locations = {
"/stream.mp3" = {
proxyPass = "http://vpn.containers:8001/stream.mp3";
extraConfig = ''
add_header Access-Control-Allow-Origin *;
'';
};
"/".root = config.inputs.radio-web;
};
};
# matrix home server
services.matrix = {
enable = true;
@@ -66,7 +90,7 @@
host = "chat.neet.space";
};
jitsi-meet = {
enable = false; # disabled until vulnerable libolm dependency is removed/fixed
enable = true;
host = "meet.neet.space";
};
turn = {
@@ -75,10 +99,21 @@
};
};
# pin postgresql for matrix (will need to migrate eventually)
services.postgresql.package = pkgs.postgresql_15;
services.postgresql.package = pkgs.postgresql_11;
# iodine DNS-based vpn
services.iodine.server.enable = true;
# proxied web services
services.nginx.enable = true;
services.nginx.virtualHosts."jellyfin.neet.cloud" = {
enableACME = true;
forceSSL = true;
locations."/" = {
proxyPass = "http://s0.koi-bebop.ts.net";
proxyWebsockets = true;
};
};
services.nginx.virtualHosts."navidrome.neet.cloud" = {
enableACME = true;
forceSSL = true;
@@ -92,20 +127,16 @@
root = "/var/www/tmp";
};
# redirect neet.cloud to nextcloud instance on runyan.org
services.nginx.virtualHosts."neet.cloud" = {
# redirect runyan.org to github
services.nginx.virtualHosts."runyan.org" = {
enableACME = true;
forceSSL = true;
extraConfig = ''
return 302 https://runyan.org$request_uri;
rewrite ^/(.*)$ https://github.com/GoogleBot42 redirect;
'';
};
# owncast live streaming
services.owncast.enable = true;
services.owncast.hostname = "live.neet.dev";
# librechat
services.librechat-container.enable = true;
services.librechat-container.host = "chat.neet.dev";
}

Some files were not shown because too many files have changed in this diff.