Compare commits

...

3 Commits

1c9fa418b3  Make s0 easier to unlock
            Check Flake / check-flake (push): successful in 1m25s
            2025-03-29 22:52:00 -07:00

8c4dc9cb74  Improve usage of roles. It should be much easier to read and use now.
            2025-03-29 22:48:14 -07:00

1f9fbd87ac  Use upstream pykms and Actual Budget. Move Actual to s0. Add automated backups for Actual.
            Check Flake / check-flake (push): successful in 1m37s
            2025-03-29 18:36:13 -07:00
17 changed files with 165 additions and 9360 deletions

View File

@@ -100,7 +100,5 @@
security.acme.defaults.email = "zuckerberg@neet.dev";
# Enable Desktop Environment if this is a PC (machine role is "personal")
de.enable = (
builtins.elem config.networking.hostName config.machines.roles.personal
);
de.enable = lib.mkDefault (config.thisMachine.hasRole."personal");
}
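
Because the new expression is wrapped in lib.mkDefault, the role-derived value only acts as a default: any host can still override it with a plain assignment, no lib.mkForce needed. A minimal sketch of such an override (hypothetical host module, not part of this change):

{ ... }:
{
  # A plain assignment (priority 100) wins over the mkDefault value (priority 1000).
  de.enable = false;
}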

View File

@@ -5,6 +5,90 @@
let
machines = config.machines.hosts;
hostOptionsSubmoduleType = lib.types.submodule {
options = {
hostNames = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = ''
List of hostnames for this machine. The first one is the default so it is the target of deployments.
Used for automatically trusting hosts for ssh connections.
'';
};
arch = lib.mkOption {
type = lib.types.enum [ "x86_64-linux" "aarch64-linux" ];
description = ''
The architecture of this machine.
'';
};
systemRoles = lib.mkOption {
type = lib.types.listOf lib.types.str; # TODO: maybe use an enum?
description = ''
The set of roles this machine holds. Affects secrets available. (TODO add service config as well using this info)
'';
};
hostKey = lib.mkOption {
type = lib.types.str;
description = ''
The system ssh host key of this machine. Used for automatically trusting hosts for ssh connections
and for decrypting secrets with agenix.
'';
};
remoteUnlock = lib.mkOption {
default = null;
type = lib.types.nullOr (lib.types.submodule {
options = {
hostKey = lib.mkOption {
type = lib.types.str;
description = ''
The system ssh host key of this machine used for luks boot unlocking only.
'';
};
clearnetHost = lib.mkOption {
default = null;
type = lib.types.nullOr lib.types.str;
description = ''
The hostname resolvable over clearnet used to luks boot unlock this machine
'';
};
onionHost = lib.mkOption {
default = null;
type = lib.types.nullOr lib.types.str;
description = ''
The hostname resolvable over tor used to luks boot unlock this machine
'';
};
};
});
};
userKeys = lib.mkOption {
default = [ ];
type = lib.types.listOf lib.types.str;
description = ''
The list of user keys. Each key here can be used to log into all other systems as `googlebot`.
TODO: consider auto populating other programs that use ssh keys such as gitea
'';
};
deployKeys = lib.mkOption {
default = [ ];
type = lib.types.listOf lib.types.str;
description = ''
The list of deployment keys. Each key here can be used to log into all other systems as `root`.
'';
};
configurationPath = lib.mkOption {
type = lib.types.path;
description = ''
The path to this machine's configuration directory.
'';
};
};
};
in
{
imports = [
@@ -13,104 +97,16 @@ in
];
options.machines = {
hosts = lib.mkOption {
type = lib.types.attrsOf
(lib.types.submodule {
options = {
hostNames = lib.mkOption {
type = lib.types.listOf lib.types.str;
description = ''
List of hostnames for this machine. The first one is the default so it is the target of deployments.
Used for automatically trusting hosts for ssh connections.
'';
};
arch = lib.mkOption {
type = lib.types.enum [ "x86_64-linux" "aarch64-linux" ];
description = ''
The architecture of this machine.
'';
};
systemRoles = lib.mkOption {
type = lib.types.listOf lib.types.str; # TODO: maybe use an enum?
description = ''
The set of roles this machine holds. Affects secrets available. (TODO add service config as well using this info)
'';
};
hostKey = lib.mkOption {
type = lib.types.str;
description = ''
The system ssh host key of this machine. Used for automatically trusting hosts for ssh connections
and for decrypting secrets with agenix.
'';
};
remoteUnlock = lib.mkOption {
default = null;
type = lib.types.nullOr (lib.types.submodule {
options = {
hostKey = lib.mkOption {
type = lib.types.str;
description = ''
The system ssh host key of this machine used for luks boot unlocking only.
'';
};
clearnetHost = lib.mkOption {
default = null;
type = lib.types.nullOr lib.types.str;
description = ''
The hostname resolvable over clearnet used to luks boot unlock this machine
'';
};
onionHost = lib.mkOption {
default = null;
type = lib.types.nullOr lib.types.str;
description = ''
The hostname resolvable over tor used to luks boot unlock this machine
'';
};
};
});
};
userKeys = lib.mkOption {
default = [ ];
type = lib.types.listOf lib.types.str;
description = ''
The list of user keys. Each key here can be used to log into all other systems as `googlebot`.
TODO: consider auto populating other programs that use ssh keys such as gitea
'';
};
deployKeys = lib.mkOption {
default = [ ];
type = lib.types.listOf lib.types.str;
description = ''
The list of deployment keys. Each key here can be used to log into all other systems as `root`.
'';
};
configurationPath = lib.mkOption {
type = lib.types.path;
description = ''
The path to this machine's configuration directory.
'';
};
};
});
type = lib.types.attrsOf hostOptionsSubmoduleType;
};
};
options.thisMachine.config = lib.mkOption {
# For ease of use, a direct copy of the host config from machines.hosts.${hostName}
type = hostOptionsSubmoduleType;
};
config = {
assertions = (lib.concatLists (lib.mapAttrsToList
(
@@ -196,5 +192,12 @@ in
builtins.map (p: { "${dirName p}" = p; }) propFiles;
in
properties ../../machines;
# Don't try to evaluate "thisMachine" when reflecting using moduleless.nix.
# When evaluated by moduleless.nix this will fail due to networking.hostName not
# existing. This is because moduleless.nix is not intended for reflection from the
# perspective of a particular machine but is instead intended for reflecting on
# the properties of all machines as a whole system.
thisMachine.config = config.machines.hosts.${config.networking.hostName};
};
}
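
To illustrate the schema these options define, here is a hypothetical machines.hosts entry (every value below is made up; real entries come from the per-machine properties files collected by this module):

machines.hosts.example = {
  hostNames = [ "example" "example.neet.dev" ]; # first name is the deployment target
  arch = "x86_64-linux";
  systemRoles = [ "personal" ];
  hostKey = "ssh-ed25519 AAAA...";              # real entries use the machine's actual host key
  # remoteUnlock, userKeys and deployKeys are optional and default to null / [ ]
  configurationPath = ./machines/example;       # directory holding this machine's configuration
};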

View File

@@ -1,19 +1,55 @@
{ config, lib, ... }:
# Maps roles to their hosts
# Maps roles to their hosts.
# machines.withRole = {
# personal = [
# "machine1" "machine3"
# ];
# cache = [
# "machine2"
# ];
# };
#
# A list of all possible roles
# machines.allRoles = [
# "personal"
# "cache"
# ];
#
# For each role, whether the current machine holds that role
# thisMachine.hasRole = {
# personal = true;
# cache = false;
# };
{
options.machines.roles = lib.mkOption {
options.machines.withRole = lib.mkOption {
type = lib.types.attrsOf (lib.types.listOf lib.types.str);
};
options.machines.allRoles = lib.mkOption {
type = lib.types.listOf lib.types.str;
};
options.thisMachine.hasRole = lib.mkOption {
type = lib.types.attrsOf lib.types.bool;
};
config = {
machines.roles = lib.zipAttrs
machines.withRole = lib.zipAttrs
(lib.mapAttrsToList
(host: cfg:
lib.foldl (lib.mergeAttrs) { }
(builtins.map (role: { ${role} = host; })
cfg.systemRoles))
config.machines.hosts);
machines.allRoles = lib.attrNames config.machines.withRole;
thisMachine.hasRole = lib.mapAttrs
(role: cfg:
builtins.elem config.networking.hostName config.machines.withRole.${role}
)
config.machines.withRole;
};
}
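
A hypothetical consumer of these options, showing the intended usage pattern (the "cache" role and the service are made up for illustration; the de.enable and nix-builder changes elsewhere in this diff follow the same pattern):

{ config, lib, ... }:
{
  # Only machines that hold the "cache" role run the cache service;
  # config.machines.withRole.cache would list exactly those hosts.
  config = lib.mkIf config.thisMachine.hasRole."cache" {
    services.nix-serve.enable = true;
  };
}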

View File

@@ -39,6 +39,6 @@ in
builtins.map
(host: machines.hosts.${host}.hostKey)
hosts)
machines.roles;
machines.withRole;
};
}

View File

@@ -1,18 +1,14 @@
{ config, lib, ... }:
let
builderRole = "nix-builder";
builderUserName = "nix-builder";
machinesByRole = role: lib.filterAttrs (hostname: cfg: builtins.elem role cfg.systemRoles) config.machines.hosts;
otherMachinesByRole = role: lib.filterAttrs (hostname: cfg: hostname != config.networking.hostName) (machinesByRole role);
thisMachineHasRole = role: builtins.hasAttr config.networking.hostName (machinesByRole role);
builders = machinesByRole builderRole;
thisMachineIsABuilder = thisMachineHasRole builderRole;
builderRole = "nix-builder";
builders = config.machines.withRole.${builderRole};
thisMachineIsABuilder = config.thisMachine.hasRole.${builderRole};
# builders don't include themselves as a remote builder
otherBuilders = lib.filterAttrs (hostname: cfg: hostname != config.networking.hostName) builders;
otherBuilders = lib.filter (hostname: hostname != config.networking.hostName) builders;
in
lib.mkMerge [
# configure builder
@@ -40,9 +36,9 @@ lib.mkMerge [
nix.distributedBuilds = true;
nix.buildMachines = builtins.map
(builderCfg: {
hostName = builtins.elemAt builderCfg.hostNames 0;
system = builderCfg.arch;
(builderHostname: {
hostName = builderHostname;
system = config.machines.hosts.${builderHostname}.arch;
protocol = "ssh-ng";
sshUser = builderUserName;
sshKey = "/etc/ssh/ssh_host_ed25519_key";
@@ -50,7 +46,7 @@ lib.mkMerge [
speedFactor = 10;
supportedFeatures = [ "nixos-test" "benchmark" "big-parallel" "kvm" ];
})
(builtins.attrValues otherBuilders);
otherBuilders;
# It is very likely that the builder's internet is faster or just as fast
nix.extraOptions = ''

View File

@@ -1,87 +1,16 @@
# Starting point:
# https://github.com/aldoborrero/mynixpkgs/commit/c501c1e32dba8f4462dcecb57eee4b9e52038e27
{ config, pkgs, lib, ... }:
let
cfg = config.services.actual-server;
stateDir = "/var/lib/${cfg.stateDirName}";
cfg = config.services.actual;
in
{
options.services.actual-server = {
enable = lib.mkEnableOption "Actual Server";
hostname = lib.mkOption {
type = lib.types.str;
default = "localhost";
description = "Hostname for the Actual Server.";
};
port = lib.mkOption {
type = lib.types.int;
default = 25448;
description = "Port on which the Actual Server should listen.";
};
stateDirName = lib.mkOption {
type = lib.types.str;
default = "actual-server";
description = "Name of the directory under /var/lib holding the server's data.";
};
upload = {
fileSizeSyncLimitMB = lib.mkOption {
type = lib.types.nullOr lib.types.int;
default = null;
description = "File size limit in MB for synchronized files.";
};
syncEncryptedFileSizeLimitMB = lib.mkOption {
type = lib.types.nullOr lib.types.int;
default = null;
description = "File size limit in MB for synchronized encrypted files.";
};
fileSizeLimitMB = lib.mkOption {
type = lib.types.nullOr lib.types.int;
default = null;
description = "File size limit in MB for file uploads.";
};
};
};
config = lib.mkIf cfg.enable {
systemd.services.actual-server = {
description = "Actual Server";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
ExecStart = "${pkgs.actual-server}/bin/actual-server";
Restart = "always";
StateDirectory = cfg.stateDirName;
WorkingDirectory = stateDir;
DynamicUser = true;
UMask = "0007";
};
environment = {
NODE_ENV = "production";
ACTUAL_PORT = toString cfg.port;
# Actual is actually very bad at configuring it's own paths despite that information being readily available
ACTUAL_USER_FILES = "${stateDir}/user-files";
ACTUAL_SERVER_FILES = "${stateDir}/server-files";
ACTUAL_DATA_DIR = stateDir;
ACTUAL_UPLOAD_FILE_SYNC_SIZE_LIMIT_MB = toString (cfg.upload.fileSizeSyncLimitMB or "");
ACTUAL_UPLOAD_SYNC_ENCRYPTED_FILE_SIZE_LIMIT_MB = toString (cfg.upload.syncEncryptedFileSizeLimitMB or "");
ACTUAL_UPLOAD_FILE_SIZE_LIMIT_MB = toString (cfg.upload.fileSizeLimitMB or "");
};
services.actual.settings = {
port = 25448;
};
services.nginx.virtualHosts.${cfg.hostname} = {
enableACME = true;
forceSSL = true;
locations."/".proxyPass = "http://localhost:${toString cfg.port}";
};
backup.group."actual-budget".paths = [
"/var/lib/actual"
];
};
}
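
Taken together with the s0 changes further down in this diff, the service ends up wired roughly like this on the host that runs it (a sketch only; the virtual host name is hypothetical, and services.actual is the upstream NixOS module this commit switches to):

{ config, ... }:
{
  services.actual = {
    enable = true;            # enabled on s0 later in this diff
    settings.port = 25448;    # port pinned by the module above
  };
  # Reverse proxy, comparable to the budget.s0.neet.dev entry added on s0.
  services.nginx.virtualHosts."budget.example.org" = {
    enableACME = true;
    forceSSL = true;
    locations."/".proxyPass =
      "http://localhost:${toString config.services.actual.settings.port}";
  };
  # Automated backups of Actual's state directory (repo-specific backup option).
  backup.group."actual-budget".paths = [ "/var/lib/actual" ];
}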

View File

@@ -9,10 +9,7 @@
# TODO: skipping running inside of nixos container for now because of issues getting docker/podman running
let
runnerRole = "gitea-actions-runner";
runners = config.machines.roles.${runnerRole};
thisMachineIsARunner = builtins.elem config.networking.hostName runners;
thisMachineIsARunner = config.thisMachine.hasRole."gitea-actions-runner";
containerName = "gitea-runner";
in
{

View File

@@ -84,13 +84,11 @@
outputs = { self, nixpkgs, ... }@inputs:
let
machines = (import ./common/machine-info/moduleless.nix
machineHosts = (import ./common/machine-info/moduleless.nix
{
inherit nixpkgs;
assertionsModule = "${nixpkgs}/nixos/modules/misc/assertions.nix";
}).machines;
machineHosts = machines.hosts;
machineRoles = machines.roles;
}).machines.hosts;
in
{
nixosConfigurations =
@@ -115,10 +113,7 @@
home-manager.useGlobalPkgs = true;
home-manager.useUserPackages = true;
home-manager.users.googlebot = import ./home/googlebot.nix {
inherit hostname;
inherit machineRoles;
};
home-manager.users.googlebot = import ./home/googlebot.nix;
};
# because nixos specialArgs doesn't work for containers... need to pass in inputs a different way

View File

@@ -1,9 +1,8 @@
{ hostname, machineRoles }:
{ config, lib, pkgs, ... }:
{ config, lib, pkgs, osConfig, ... }:
let
# Check if the current machine has the role "personal"
thisMachineIsPersonal = builtins.elem hostname machineRoles.personal;
thisMachineIsPersonal = osConfig.thisMachine.hasRole."personal";
in
{
home.username = "googlebot";

View File

@@ -149,7 +149,4 @@
# librechat
services.librechat.enable = true;
services.librechat.host = "chat.neet.dev";
services.actual-server.enable = true;
services.actual-server.hostname = "actual.runyan.org";
}

View File

@@ -222,6 +222,7 @@
(mkVirtualHost "vacuum.s0.neet.dev" "http://192.168.1.125") # valetudo
(mkVirtualHost "sandman.s0.neet.dev" "http://192.168.9.14:3000") # es
(mkVirtualHost "todo.s0.neet.dev" "http://localhost:${toString config.services.vikunja.port}")
(mkVirtualHost "budget.s0.neet.dev" "http://localhost:${toString config.services.actual.settings.port}") # actual budget
];
tailscaleAuth = {
@@ -270,7 +271,6 @@
openMinimalFirewall = true;
};
# TODO: setup backup
services.vikunja = {
enable = true;
port = 61473;
@@ -284,5 +284,7 @@
"/var/lib/vikunja"
];
services.actual.enable = true;
boot.binfmt.emulatedSystems = [ "aarch64-linux" "armv7l-linux" ];
}

View File

@@ -1,6 +1,7 @@
{
hostNames = [
"s0"
"s0.neet.dev"
];
arch = "x86_64-linux";
@@ -19,6 +20,8 @@
remoteUnlock = {
hostKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFNiceeFMos5ZXcYem4yFxh8PiZNNnuvhlyLbQLrgIZH";
clearnetHost = "192.168.1.2";
onionHost = "r3zvf7f2ppaeithzswigma46pajt3hqytmkg3rshgknbl3jbni455fqd.onion";
};
}

View File

@@ -1,39 +0,0 @@
{ lib
, buildNpmPackage
, fetchFromGitHub
, python3
, nodejs
, runtimeShell
}:
buildNpmPackage rec {
pname = "actual-server";
version = "24.10.1";
src = fetchFromGitHub {
owner = "actualbudget";
repo = pname;
rev = "refs/tags/v${version}";
hash = "sha256-VJAD+lNamwuYmiPJLXkum6piGi5zLOHBp8cUeZagb4s=";
};
npmDepsHash = "sha256-Z2e4+JMhI/keLerT0F4WYdLnXHRQCqL7NjNyA9SFEF8=";
patches = [
./migrations-should-use-pkg-path.patch
];
postPatch = ''
cp ${./package-lock.json} package-lock.json
'';
dontNpmBuild = true;
postInstall = ''
mkdir -p $out/bin
cat <<EOF > $out/bin/actual-server
#!${runtimeShell}
exec ${nodejs}/bin/node $out/lib/node_modules/actual-sync/app.js "\$@"
EOF
chmod +x $out/bin/actual-server
'';
}

View File

@@ -1,48 +0,0 @@
diff --git a/src/load-config.js b/src/load-config.js
index d99ce42..42d1351 100644
--- a/src/load-config.js
+++ b/src/load-config.js
@@ -3,7 +3,8 @@ import path from 'node:path';
import { fileURLToPath } from 'node:url';
import createDebug from 'debug';
-const debug = createDebug('actual:config');
+// const debug = createDebug('actual:config');
+const debug = console.log;
const debugSensitive = createDebug('actual-sensitive:config');
const projectRoot = path.dirname(path.dirname(fileURLToPath(import.meta.url)));
@@ -108,6 +109,7 @@ const finalConfig = {
serverFiles: process.env.ACTUAL_SERVER_FILES || config.serverFiles,
userFiles: process.env.ACTUAL_USER_FILES || config.userFiles,
webRoot: process.env.ACTUAL_WEB_ROOT || config.webRoot,
+ dataDir: process.env.ACTUAL_DATA_DIR || config.dataDir,
https:
process.env.ACTUAL_HTTPS_KEY && process.env.ACTUAL_HTTPS_CERT
? {
diff --git a/src/migrations.js b/src/migrations.js
index cba7db0..9983471 100644
--- a/src/migrations.js
+++ b/src/migrations.js
@@ -1,6 +1,12 @@
import migrate from 'migrate';
import path from 'node:path';
import config from './load-config.js';
+import { fileURLToPath } from 'url';
+
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = path.dirname(__filename);
+const appRoot = path.dirname(__dirname);
+const migrationsDirectory = path.join(appRoot, "migrations");
export default function run(direction = 'up') {
console.log(
@@ -13,7 +19,7 @@ export default function run(direction = 'up') {
stateStore: `${path.join(config.dataDir, '.migrate')}${
config.mode === 'test' ? '-test' : ''
}`,
- migrationsDirectory: `${path.join(config.projectRoot, 'migrations')}`,
+ migrationsDirectory
},
(err, set) => {
if (err) {

File diff suppressed because it is too large

View File

@@ -4,10 +4,4 @@ final: prev:
let
system = prev.system;
in
{
actual-server = prev.callPackage ./actualbudget { };
# Copied entire package from nixpkgs to downgrade to python 3.11 since 3.12 is broken.
# See: https://github.com/Py-KMS-Organization/py-kms/issues/117
pykms = prev.callPackage ./pykms.nix { };
}
{ }

View File

@@ -1,103 +0,0 @@
{ lib
, fetchFromGitHub
, python311
, writeText
, writeShellScript
, sqlite
, nixosTests
}:
let
pypkgs = python311.pkgs;
dbSql = writeText "create_pykms_db.sql" ''
CREATE TABLE clients(
clientMachineId TEXT,
machineName TEXT,
applicationId TEXT,
skuId TEXT,
licenseStatus TEXT,
lastRequestTime INTEGER,
kmsEpid TEXT,
requestCount INTEGER
);
'';
dbScript = writeShellScript "create_pykms_db.sh" ''
set -eEuo pipefail
db=''${1:-/var/lib/pykms/clients.db}
if [ ! -e $db ] ; then
${lib.getBin sqlite}/bin/sqlite3 $db < ${dbSql}
fi
'';
in
pypkgs.buildPythonApplication rec {
pname = "pykms";
version = "unstable-2024-05-28";
src = fetchFromGitHub {
owner = "Py-KMS-Organization";
repo = "py-kms";
rev = "646f4766f4195dbea0695700a7ddaac70a3294f9";
hash = "sha256-YCqPo7WkCfXyuTjL4IYapdcUN/Vj465Jz6XhQessyz0=";
};
sourceRoot = "${src.name}/py-kms";
propagatedBuildInputs = with pypkgs; [
systemd
pytz
tzlocal
dnspython
];
postPatch = ''
siteDir=$out/${python311.sitePackages}
substituteInPlace pykms_DB2Dict.py \
--replace "'KmsDataBase.xml'" "'$siteDir/KmsDataBase.xml'"
'';
format = "other";
# there are no tests
doCheck = false;
installPhase = ''
runHook preInstall
mkdir -p $siteDir
PYTHONPATH="$PYTHONPATH:$siteDir"
mv * $siteDir
for b in Client Server ; do
makeWrapper ${python311.interpreter} $out/bin/''${b,,} \
--argv0 pykms-''${b,,} \
--add-flags $siteDir/pykms_$b.py \
--set PYTHONPATH $PYTHONPATH
done
install -Dm755 ${dbScript} $out/libexec/create_pykms_db.sh
install -Dm644 ../README.md -t $out/share/doc/pykms
${python311.interpreter} -m compileall $siteDir
runHook postInstall
'';
passthru.tests = { inherit (nixosTests) pykms; };
meta = with lib; {
description = "Windows KMS (Key Management Service) server written in Python";
homepage = "https://github.com/Py-KMS-Organization/py-kms";
license = licenses.unlicense;
maintainers = with maintainers; [
peterhoeg
zopieux
];
};
}