Add sandboxed-workspace module for isolated dev environments

Provides isolated development environments using either VMs (microvm.nix)
or containers (systemd-nspawn) with a unified configuration interface.

Features:
- Unified options with required type field ("vm" or "container")
- Shared base configuration for networking, SSH, users, packages
- Automatic SSH host key generation and persistence
- Shell aliases for workspace management (start/stop/status/ssh)
- Automatic /etc/hosts entries for workspace hostnames
- restartIfChanged support for both VMs and containers
- Passwordless doas in workspaces

Container backend:
- Uses hostBridge for proper bridge networking with /24 subnet
- systemd-networkd for IP configuration
- systemd-resolved for DNS

VM backend:
- TAP interface with deterministic MAC addresses
- virtiofs shares for workspace directories
- vsock CID generation
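
A workspace is declared once and the backend is chosen via the type field.
A minimal sketch (workspace name and IP are illustrative):

```nix
sandboxed-workspace = {
  enable = true;
  workspaces.example = {
    type = "vm";                        # or "container"
    config = ./workspaces/example.nix;  # per-workspace NixOS module
    ip = "192.168.83.10";               # inside networking.sandbox.subnet
  };
};
```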
commit 87db330e5b (parent 70f0064d7b), 2026-02-07 22:41:34 -08:00
14 changed files with 978 additions and 4 deletions

common/default.nix

@@ -14,6 +14,7 @@
./machine-info
./nix-builder.nix
./ssh.nix
./sandboxed-workspace
];
nix.flakes.enable = true;

common/network/default.nix

@@ -12,6 +12,7 @@ in
./ping.nix
./tailscale.nix
./vpn.nix
./sandbox.nix
];
options.networking.ip_forward = mkEnableOption "Enable ip forwarding";

common/network/sandbox.nix (new file)

@@ -0,0 +1,115 @@
{ config, lib, ... }:
# Network configuration for sandboxed workspaces (VMs and containers)
# Creates a bridge network with NAT for isolated environments
with lib;
let
cfg = config.networking.sandbox;
in
{
options.networking.sandbox = {
enable = mkEnableOption "sandboxed workspace network bridge";
bridgeName = mkOption {
type = types.str;
default = "sandbox-br";
description = "Name of the bridge interface for sandboxed workspaces";
};
subnet = mkOption {
type = types.str;
default = "192.168.83.0/24";
description = "Subnet for sandboxed workspace network (prefix length is currently fixed at /24)";
};
hostAddress = mkOption {
type = types.str;
default = "192.168.83.1";
description = "Host address on the sandbox bridge";
};
upstreamInterface = mkOption {
type = types.str;
description = "Upstream network interface for NAT";
};
};
config = mkIf cfg.enable {
networking.ip_forward = true;
# Create the bridge interface
systemd.network.netdevs."10-${cfg.bridgeName}" = {
netdevConfig = {
Kind = "bridge";
Name = cfg.bridgeName;
};
};
systemd.network.networks."10-${cfg.bridgeName}" = {
matchConfig.Name = cfg.bridgeName;
networkConfig = {
Address = "${cfg.hostAddress}/24";
DHCPServer = false;
IPv4Forwarding = true;
IPv6Forwarding = false;
IPMasquerade = "ipv4";
};
linkConfig.RequiredForOnline = "no";
};
# Automatically attach VM tap interfaces to the bridge
systemd.network.networks."11-vm" = {
matchConfig.Name = "vm-*";
networkConfig.Bridge = cfg.bridgeName;
linkConfig.RequiredForOnline = "no";
};
# Automatically attach container veth interfaces to the bridge
systemd.network.networks."11-container" = {
matchConfig.Name = "ve-*";
networkConfig.Bridge = cfg.bridgeName;
linkConfig.RequiredForOnline = "no";
};
# NAT configuration for sandboxed workspaces
networking.nat = {
enable = true;
internalInterfaces = [ cfg.bridgeName ];
externalInterface = cfg.upstreamInterface;
};
# Enable systemd-networkd (required for bridge setup)
systemd.network.enable = true;
# When NetworkManager handles primary networking, disable systemd-networkd-wait-online.
# The bridge is the only interface managed by systemd-networkd and it never reaches
# "online" state without connected workspaces. NetworkManager-wait-online.service already
# gates network-online.target for the primary interface.
# On pure systemd-networkd systems (no NM), we just ignore the bridge.
systemd.network.wait-online.enable =
!config.networking.networkmanager.enable;
systemd.network.wait-online.ignoredInterfaces =
lib.mkIf (!config.networking.networkmanager.enable) [ cfg.bridgeName ];
# If NetworkManager is enabled, tell it to ignore sandbox interfaces
# This allows systemd-networkd and NetworkManager to coexist
networking.networkmanager.unmanaged = [
"interface-name:${cfg.bridgeName}"
"interface-name:vm-*"
"interface-name:ve-*"
];
# Make systemd-resolved listen on the bridge for workspace DNS queries.
# By default resolved only listens on 127.0.0.53 (localhost).
# DNSStubListenerExtra adds the bridge address so workspaces can use the host as DNS.
services.resolved.settings.Resolve.DNSStubListenerExtra = cfg.hostAddress;
# Allow DNS traffic from workspaces to the host
networking.firewall.interfaces.${cfg.bridgeName} = {
allowedTCPPorts = [ 53 ];
allowedUDPPorts = [ 53 ];
};
};
}

common/sandboxed-workspace/base.nix (new file)

@@ -0,0 +1,114 @@
{ hostConfig, workspaceName, ip, networkInterface }:
# Base configuration shared by all sandboxed workspaces (VMs and containers)
# This provides common settings for networking, SSH, users, and packages
#
# Parameters:
# hostConfig - The host's NixOS config (for inputs, ssh keys, etc.)
# workspaceName - Name of the workspace (used as hostname)
# ip - Static IP address for the workspace
# networkInterface - Match config for systemd-networkd (e.g., { Type = "ether"; } for VMs or { Name = "eth0"; } for containers)
{ config, lib, pkgs, ... }:
{
imports = [
../shell.nix
hostConfig.inputs.home-manager.nixosModules.home-manager
hostConfig.inputs.nix-index-database.nixosModules.default
];
# Basic system configuration
system.stateVersion = "25.11";
# Set hostname to match the workspace name
networking.hostName = workspaceName;
# Networking with systemd-networkd
networking.useNetworkd = true;
systemd.network.enable = true;
# Enable resolved to populate /etc/resolv.conf from networkd's DNS settings
services.resolved.enable = true;
# Basic networking configuration
networking.useDHCP = false;
# Static IP configuration
# Uses the host as DNS server (host forwards to upstream DNS)
systemd.network.networks."20-workspace" = {
matchConfig = networkInterface;
networkConfig = {
Address = "${ip}/24";
Gateway = hostConfig.networking.sandbox.hostAddress;
DNS = [ hostConfig.networking.sandbox.hostAddress ];
};
};
# Disable firewall inside workspaces (we're behind NAT)
networking.firewall.enable = false;
# Enable SSH for access
services.openssh = {
enable = true;
settings = {
PasswordAuthentication = false;
PermitRootLogin = "prohibit-password";
};
};
# Use persistent SSH host keys from shared directory
services.openssh.hostKeys = lib.mkForce [
{
path = "/etc/ssh-host-keys/ssh_host_ed25519_key";
type = "ed25519";
}
];
# Basic system packages
environment.systemPackages = with pkgs; [
kakoune
vim
git
htop
wget
curl
tmux
dnsutils
];
# User configuration
users.mutableUsers = false;
users.users.googlebot = {
isNormalUser = true;
extraGroups = [ "wheel" ];
shell = pkgs.fish;
openssh.authorizedKeys.keys = hostConfig.machines.ssh.userKeys;
};
security.doas.enable = true;
security.sudo.enable = false;
security.doas.extraRules = [
{ groups = [ "wheel" ]; noPass = true; }
];
# Minimal locale settings
i18n.defaultLocale = "en_US.UTF-8";
time.timeZone = "America/Los_Angeles";
# Enable flakes
nix.settings.experimental-features = [ "nix-command" "flakes" ];
# Make nixpkgs available in NIX_PATH and registry (like the NixOS ISO)
# This allows `nix-shell -p`, `nix repl '<nixpkgs>'`, etc. to work
nix.nixPath = [ "nixpkgs=${hostConfig.inputs.nixpkgs}" ];
nix.registry.nixpkgs.flake = hostConfig.inputs.nixpkgs;
# Enable fish shell
programs.fish.enable = true;
# Home Manager configuration
home-manager.useGlobalPkgs = true;
home-manager.useUserPackages = true;
home-manager.users.googlebot = import ./home.nix;
}

common/sandboxed-workspace/container.nix (new file)

@@ -0,0 +1,72 @@
{ config, lib, ... }:
# Container-specific configuration for sandboxed workspaces using systemd-nspawn
# This module is imported by default.nix for workspaces with type = "container"
with lib;
let
cfg = config.sandboxed-workspace;
hostConfig = config;
# Filter for container-type workspaces only
containerWorkspaces = filterAttrs (n: ws: ws.type == "container") cfg.workspaces;
in
{
config = mkIf (cfg.enable && containerWorkspaces != { }) {
# NixOS container module only sets restartIfChanged when autoStart=true
# Work around this by setting it directly on the systemd service
systemd.services = mapAttrs'
(name: ws: nameValuePair "container@${name}" {
restartIfChanged = lib.mkForce true;
restartTriggers = [
config.containers.${name}.path
config.environment.etc."nixos-containers/${name}.conf".source
];
})
containerWorkspaces;
# Convert container workspace configs to NixOS containers format
containers = mapAttrs
(name: ws: {
autoStart = ws.autoStart;
privateNetwork = true;
ephemeral = true;
restartIfChanged = true;
# Attach container's veth to the sandbox bridge
# This creates the veth pair and attaches host side to the bridge
hostBridge = config.networking.sandbox.bridgeName;
bindMounts = {
"/home/googlebot/workspace" = {
hostPath = "/home/googlebot/sandboxed/${name}/workspace";
isReadOnly = false;
};
"/etc/ssh-host-keys" = {
hostPath = "/home/googlebot/sandboxed/${name}/ssh-host-keys";
isReadOnly = false;
};
"/home/googlebot/claude-config" = {
hostPath = "/home/googlebot/sandboxed/${name}/claude-config";
isReadOnly = false;
};
};
config = { config, lib, pkgs, ... }: {
imports = [
(import ./base.nix {
inherit hostConfig;
workspaceName = name;
ip = ws.ip;
networkInterface = { Name = "eth0"; };
})
(import ws.config)
];
networking.useHostResolvConf = false;
};
})
containerWorkspaces;
};
}

common/sandboxed-workspace/default.nix (new file)

@@ -0,0 +1,157 @@
{ config, lib, pkgs, ... }:
# Unified sandboxed workspace module supporting both VMs and containers
# This module provides isolated development environments with shared configuration
with lib;
let
cfg = config.sandboxed-workspace;
in
{
imports = [
./vm.nix
./container.nix
];
options.sandboxed-workspace = {
enable = mkEnableOption "sandboxed workspace management";
workspaces = mkOption {
type = types.attrsOf (types.submodule {
options = {
type = mkOption {
type = types.enum [ "vm" "container" ];
description = ''
Backend type for this workspace:
- "vm": microVM with cloud-hypervisor (more isolation, uses virtiofs)
- "container": systemd-nspawn container (less overhead, uses bind mounts)
'';
};
config = mkOption {
type = types.path;
description = "Path to the workspace configuration file";
};
ip = mkOption {
type = types.str;
example = "192.168.83.10";
description = ''
Static IP address for this workspace on the sandbox bridge network.
Configures the workspace's network interface and adds an entry to /etc/hosts
on the host so the workspace can be accessed by name (e.g., ssh workspace-example).
Must be inside networking.sandbox.subnet (192.168.83.0/24 by default).
'';
};
hostKey = mkOption {
type = types.nullOr types.str;
default = null;
example = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAA...";
description = ''
SSH host public key for this workspace. If set, adds to programs.ssh.knownHosts
so the host automatically trusts the workspace without prompting.
Get the key from: ~/sandboxed/<name>/ssh-host-keys/ssh_host_ed25519_key.pub
'';
};
autoStart = mkOption {
type = types.bool;
default = false;
description = "Whether to automatically start this workspace on boot";
};
cid = mkOption {
type = types.nullOr types.int;
default = null;
description = ''
vsock Context Identifier for this workspace (VM-only, ignored for containers).
If null, auto-generated from workspace name.
Must be unique per host. Valid range: 3 to 4294967294.
See: https://man7.org/linux/man-pages/man7/vsock.7.html
'';
};
};
});
default = { };
description = "Sandboxed workspace configurations";
};
};
config = mkIf cfg.enable {
# Automatically enable sandbox networking when workspaces are defined
networking.sandbox.enable = mkIf (cfg.workspaces != { }) true;
# Add workspace hostnames to /etc/hosts so they can be accessed by name
networking.hosts = lib.mkMerge (lib.mapAttrsToList
(name: ws: {
${ws.ip} = [ "workspace-${name}" ];
})
cfg.workspaces);
# Add workspace SSH host keys to known_hosts so host trusts workspaces without prompting
programs.ssh.knownHosts = lib.mkMerge (lib.mapAttrsToList
(name: ws:
lib.optionalAttrs (ws.hostKey != null) {
"workspace-${name}" = {
publicKey = ws.hostKey;
extraHostNames = [ ws.ip ];
};
})
cfg.workspaces);
# Shell aliases for workspace management
# Service names differ by type: microvm@<name> for VMs, container@<name> for containers
environment.shellAliases = lib.mkMerge (lib.mapAttrsToList
(name: ws:
let
serviceName = if ws.type == "vm" then "microvm@${name}" else "container@${name}";
in
{
"workspace_${name}" = "ssh googlebot@workspace-${name}";
"workspace_${name}_start" = "doas systemctl start ${serviceName}";
"workspace_${name}_stop" = "doas systemctl stop ${serviceName}";
"workspace_${name}_restart" = "doas systemctl restart ${serviceName}";
"workspace_${name}_status" = "doas systemctl status ${serviceName}";
})
cfg.workspaces);
# Automatically generate SSH host keys and directories for all workspaces
systemd.services = lib.mapAttrs'
(name: ws:
let
serviceName = if ws.type == "vm" then "microvm@${name}" else "container@${name}";
in
lib.nameValuePair "workspace-${name}-setup" {
description = "Setup directories and SSH keys for workspace ${name}";
wantedBy = [ "multi-user.target" ];
before = [ "${serviceName}.service" ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
};
script = ''
# Create directories if they don't exist
mkdir -p /home/googlebot/sandboxed/${name}/workspace
mkdir -p /home/googlebot/sandboxed/${name}/ssh-host-keys
mkdir -p /home/googlebot/sandboxed/${name}/claude-config
# Fix ownership
chown -R googlebot:users /home/googlebot/sandboxed/${name}
# Generate SSH host key if it doesn't exist
if [ ! -f /home/googlebot/sandboxed/${name}/ssh-host-keys/ssh_host_ed25519_key ]; then
${pkgs.openssh}/bin/ssh-keygen -t ed25519 -N "" \
-f /home/googlebot/sandboxed/${name}/ssh-host-keys/ssh_host_ed25519_key
chown googlebot:users /home/googlebot/sandboxed/${name}/ssh-host-keys/ssh_host_ed25519_key*
echo "Generated SSH host key for workspace ${name}"
fi
'';
}
)
cfg.workspaces;
};
}

common/sandboxed-workspace/home.nix (new file)

@@ -0,0 +1,50 @@
{ config, lib, pkgs, ... }:
# Home Manager configuration for sandboxed workspace user environment
# This sets up the shell and tools inside VMs and containers
{
home.username = "googlebot";
home.homeDirectory = "/home/googlebot";
home.stateVersion = "24.11";
programs.home-manager.enable = true;
# Shell configuration
programs.fish.enable = true;
programs.starship.enable = true;
programs.starship.enableFishIntegration = true;
programs.starship.settings.container.disabled = true;
# Basic command-line tools
programs.btop.enable = true;
programs.ripgrep.enable = true;
programs.eza.enable = true;
# Git configuration
programs.git = {
enable = true;
settings = {
user.name = lib.mkDefault "googlebot";
user.email = lib.mkDefault "zuckerberg@neet.dev";
};
};
# Shell aliases
home.shellAliases = {
ls = "eza";
la = "eza -la";
ll = "eza -l";
};
# Environment variables for Claude Code
home.sessionVariables = {
# Isolate Claude config to a specific directory on the host
CLAUDE_CONFIG_DIR = "/home/googlebot/claude-config";
};
# Additional packages for development
home.packages = with pkgs; [
# Add packages as needed per workspace
];
}

common/sandboxed-workspace/vm.nix (new file)

@@ -0,0 +1,138 @@
{ config, lib, ... }:
# VM-specific configuration for sandboxed workspaces using microvm.nix
# This module is imported by default.nix for workspaces with type = "vm"
with lib;
let
cfg = config.sandboxed-workspace;
hostConfig = config;
# Generate a deterministic vsock CID from workspace name.
#
# vsock (virtual sockets) enables host-VM communication without networking.
# cloud-hypervisor uses vsock for systemd-notify integration: when a VM finishes
# booting, systemd sends READY=1 to the host via vsock, allowing the host's
# microvm@ service to accurately track VM boot status instead of guessing.
#
# Each VM needs a unique CID (Context Identifier). Reserved CIDs per vsock(7):
# - VMADDR_CID_HYPERVISOR (0): reserved for hypervisor
# - VMADDR_CID_LOCAL (1): loopback address
# - VMADDR_CID_HOST (2): host address
# See: https://man7.org/linux/man-pages/man7/vsock.7.html
# https://docs.kernel.org/virt/kvm/vsock.html
#
# We auto-generate from a SHA256 hash so CIDs rarely need manual assignment.
# Collisions in the 24-bit space are unlikely but possible; set `cid` explicitly if two names collide.
# Range: 100 - 16777315 (offset avoids reserved CIDs and leaves 3-99 for manual use)
nameToCid = name:
let
hash = builtins.hashString "sha256" name;
hexPart = builtins.substring 0 6 hash;
in
100 + (builtins.foldl'
(acc: c: acc * 16 + (
if c == "a" then 10
else if c == "b" then 11
else if c == "c" then 12
else if c == "d" then 13
else if c == "e" then 14
else if c == "f" then 15
else lib.strings.toInt c
)) 0
(lib.stringToCharacters hexPart));
# Filter for VM-type workspaces only
vmWorkspaces = filterAttrs (n: ws: ws.type == "vm") cfg.workspaces;
# Generate VM configuration for a workspace
mkVmConfig = name: ws: {
config = import ws.config;
specialArgs = { inputs = hostConfig.inputs; };
extraModules = [
(import ./base.nix {
inherit hostConfig;
workspaceName = name;
ip = ws.ip;
networkInterface = { Type = "ether"; };
})
{
# MicroVM specific configuration
microvm = {
# Use cloud-hypervisor for better performance
hypervisor = lib.mkDefault "cloud-hypervisor";
# Resource allocation
vcpu = 8;
mem = 4096; # 4GB RAM
# Disk for writable overlay
volumes = [{
image = "overlay.img";
mountPoint = "/nix/.rw-store";
size = 8192; # 8GB
}];
# Shared directories with host using virtiofs
shares = [
{
# Share the host's /nix/store for accessing packages
proto = "virtiofs";
tag = "ro-store";
source = "/nix/store";
mountPoint = "/nix/.ro-store";
}
{
proto = "virtiofs";
tag = "workspace";
source = "/home/googlebot/sandboxed/${name}/workspace";
mountPoint = "/home/googlebot/workspace";
}
{
proto = "virtiofs";
tag = "ssh-host-keys";
source = "/home/googlebot/sandboxed/${name}/ssh-host-keys";
mountPoint = "/etc/ssh-host-keys";
}
{
proto = "virtiofs";
tag = "claude-config";
source = "/home/googlebot/sandboxed/${name}/claude-config";
mountPoint = "/home/googlebot/claude-config";
}
];
# Writeable overlay for /nix/store
writableStoreOverlay = "/nix/.rw-store";
# TAP interface for bridged networking
# The interface name "vm-*" matches the pattern in common/network/sandbox.nix
# which automatically attaches it to the sandbox bridge
interfaces = [{
type = "tap";
id = "vm-${name}";
# Generate a deterministic MAC from workspace name (02: prefix = locally administered)
mac =
let
hash = builtins.hashString "sha256" name;
in
"02:${builtins.substring 0 2 hash}:${builtins.substring 2 2 hash}:${builtins.substring 4 2 hash}:${builtins.substring 6 2 hash}:${builtins.substring 8 2 hash}";
}];
# Enable vsock for systemd-notify integration
vsock.cid =
if ws.cid != null
then ws.cid
else nameToCid name;
};
}
];
autostart = ws.autoStart;
};
in
{
config = mkIf (cfg.enable && vmWorkspaces != { }) {
# Convert VM workspace configs to microvm.nix format
microvm.vms = mapAttrs mkVmConfig vmWorkspaces;
};
}

flake.lock (generated)

@@ -43,7 +43,7 @@
"type": "gitlab"
}
},
"dailybuild_modules": {
"dailybot": {
"inputs": {
"flake-utils": [
"flake-utils"
@@ -219,6 +219,27 @@
"type": "github"
}
},
"microvm": {
"inputs": {
"nixpkgs": [
"nixpkgs"
],
"spectrum": "spectrum"
},
"locked": {
"lastModified": 1770310890,
"narHash": "sha256-lyWAs4XKg3kLYaf4gm5qc5WJrDkYy3/qeV5G733fJww=",
"owner": "astro",
"repo": "microvm.nix",
"rev": "68c9f9c6ca91841f04f726a298c385411b7bfcd5",
"type": "github"
},
"original": {
"owner": "astro",
"repo": "microvm.nix",
"type": "github"
}
},
"nix-index-database": {
"inputs": {
"nixpkgs": [
@@ -310,11 +331,12 @@
"root": {
"inputs": {
"agenix": "agenix",
"dailybuild_modules": "dailybuild_modules",
"dailybot": "dailybot",
"deploy-rs": "deploy-rs",
"flake-compat": "flake-compat",
"flake-utils": "flake-utils",
"home-manager": "home-manager",
"microvm": "microvm",
"nix-index-database": "nix-index-database",
"nixos-generators": "nixos-generators",
"nixos-hardware": "nixos-hardware",
@@ -349,6 +371,22 @@
"type": "gitlab"
}
},
"spectrum": {
"flake": false,
"locked": {
"lastModified": 1759482047,
"narHash": "sha256-H1wiXRQHxxPyMMlP39ce3ROKCwI5/tUn36P8x6dFiiQ=",
"ref": "refs/heads/main",
"rev": "c5d5786d3dc938af0b279c542d1e43bce381b4b9",
"revCount": 996,
"type": "git",
"url": "https://spectrum-os.org/git/spectrum"
},
"original": {
"type": "git",
"url": "https://spectrum-os.org/git/spectrum"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,

flake.nix

@@ -48,7 +48,7 @@
};
# Dailybot
dailybuild_modules = {
dailybot = {
url = "git+https://git.neet.dev/zuckerberg/dailybot.git";
inputs = {
nixpkgs.follows = "nixpkgs";
@@ -71,6 +71,12 @@
url = "github:Mic92/nix-index-database";
inputs.nixpkgs.follows = "nixpkgs";
};
# MicroVM support
microvm = {
url = "github:astro/microvm.nix";
inputs.nixpkgs.follows = "nixpkgs";
};
};
outputs = { self, nixpkgs, ... }@inputs:
@@ -88,9 +94,10 @@
./common
simple-nixos-mailserver.nixosModule
agenix.nixosModules.default
dailybuild_modules.nixosModule
dailybot.nixosModule
nix-index-database.nixosModules.default
home-manager.nixosModules.home-manager
microvm.nixosModules.host
self.nixosModules.kernel-modules
({ lib, ... }: {
config = {

machines/fry/default.nix

@@ -10,6 +10,9 @@
nix.gc.automatic = lib.mkForce false;
# Upstream interface for sandbox networking (NAT)
networking.sandbox.upstreamInterface = lib.mkDefault "enp191s0";
environment.systemPackages = with pkgs; [
system76-keyboard-configurator
];

machines/fry/workspaces/test-container.nix (new file)

@@ -0,0 +1,20 @@
{ config, lib, pkgs, ... }:
# Test container workspace configuration
#
# Add to sandboxed-workspace.workspaces in machines/fry/default.nix:
# sandboxed-workspace.workspaces.test-container = {
# type = "container";
# config = ./workspaces/test-container.nix;
# ip = "192.168.83.50";
# };
#
# The workspace name ("test-container") becomes the hostname automatically.
# The IP is configured in default.nix, not here.
{
# Install packages as needed
environment.systemPackages = with pkgs; [
# Add packages here
];
}

machines/fry/workspaces/example.nix (new file)

@@ -0,0 +1,23 @@
{ config, lib, pkgs, ... }:
# Example VM workspace configuration
#
# Add to sandboxed-workspace.workspaces in machines/fry/default.nix:
# sandboxed-workspace.workspaces.example = {
# type = "vm";
# config = ./workspaces/example.nix;
# ip = "192.168.83.10";
# };
#
# The workspace name ("example") becomes the hostname automatically.
# The IP is configured in default.nix, not here.
{
# Install packages as needed
environment.systemPackages = with pkgs; [
# Add packages here
];
# Additional shares beyond the standard ones (workspace, ssh-host-keys, claude-config):
# microvm.shares = [ ... ];
}


@@ -0,0 +1,235 @@
# Create Workspace Skill
This skill enables you to create new ephemeral sandboxed workspaces for isolated development environments. Workspaces can be either VMs (using microvm.nix) or containers (using systemd-nspawn).
## When to use this skill
Use this skill when you are:
- Creating a new isolated development environment
- Setting up a workspace for a specific project
- Running AI coding agents safely in a clean environment
- Testing something without affecting the host system
## Choosing between VM and Container
| Feature | VM (`type = "vm"`) | Container (`type = "container"`) |
|---------|-------------------|----------------------------------|
| Isolation | Full kernel isolation | Shared kernel with namespaces |
| Overhead | Higher (separate kernel) | Lower (process-level) |
| Startup time | Slower | Faster |
| Storage | virtiofs shares | bind mounts |
| Use case | Untrusted code, kernel testing | General development |
**Recommendation**: Use containers for most development work. Use VMs when you need stronger isolation or are testing potentially dangerous code.
## How to create a workspace
Follow these steps to create a new workspace:
### 1. Choose workspace name, type, and IP address
- Workspace name should be descriptive (e.g., "myproject", "testing", "nixpkgs-contrib")
- Type should be "vm" or "container"
- IP address should be in the 192.168.83.x range (192.168.83.10-254)
- Check existing workspaces in `machines/fry/default.nix` to avoid IP conflicts
### 2. Create workspace configuration file
Create `machines/fry/workspaces/<name>.nix`:
```nix
{ config, lib, pkgs, ... }:
# The workspace name becomes the hostname automatically.
# The IP is configured in default.nix, not here.
{
# Install packages as needed
environment.systemPackages = with pkgs; [
# Add packages here
];
# Additional configuration as needed
}
```
The module automatically configures:
- **Hostname**: Set to the workspace name from `sandboxed-workspace.workspaces.<name>`
- **Static IP**: From the `ip` option
- **DNS**: Uses the host as DNS server
- **Network**: TAP interface (VM) or veth pair (container) on the bridge
- **Standard shares**: workspace, ssh-host-keys, claude-config
### 3. Register workspace in machines/fry/default.nix
Add the workspace to the `sandboxed-workspace.workspaces` attribute set:
```nix
sandboxed-workspace = {
enable = true;
workspaces.<name> = {
type = "vm"; # or "container"
config = ./workspaces/<name>.nix;
ip = "192.168.83.XX"; # Choose unique IP
autoStart = false; # optional, defaults to false
};
};
```
### 4. Optional: Pre-create workspace with project
If you want to clone a repository before deployment:
```bash
mkdir -p ~/sandboxed/<name>/workspace
cd ~/sandboxed/<name>/workspace
git clone <repository-url>
```
Note: Directories and SSH keys are auto-created on first deployment if they don't exist.
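Once the key exists, you can optionally pin it via the module's `hostKey` option so the host trusts the workspace without prompting. A sketch (paste the actual public key):
```nix
sandboxed-workspace.workspaces.<name>.hostKey =
  # contents of ~/sandboxed/<name>/ssh-host-keys/ssh_host_ed25519_key.pub
  "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAA...";
```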
### 5. Verify configuration builds
```bash
nix build .#nixosConfigurations.fry.config.system.build.toplevel --dry-run
```
### 6. Deploy the configuration
```bash
doas nixos-rebuild switch --flake .#fry
```
### 7. Start the workspace
```bash
# Using the shell alias:
workspace_<name>_start
# Or manually:
doas systemctl start microvm@<name> # for VMs
doas systemctl start container@<name> # for containers
```
### 8. Access the workspace
SSH into the workspace by name (added to /etc/hosts automatically):
```bash
# Using the shell alias:
workspace_<name>
# Or manually:
ssh googlebot@workspace-<name>
```
Or by IP:
```bash
ssh googlebot@192.168.83.XX
```
## Managing workspaces
### Shell aliases
For each workspace, these aliases are automatically created:
- `workspace_<name>` - SSH into the workspace
- `workspace_<name>_start` - Start the workspace
- `workspace_<name>_stop` - Stop the workspace
- `workspace_<name>_restart` - Restart the workspace
- `workspace_<name>_status` - Show workspace status
### Check workspace status
```bash
workspace_<name>_status
```
### Stop workspace
```bash
workspace_<name>_stop
```
### View workspace logs
```bash
doas journalctl -u microvm@<name> # for VMs
doas journalctl -u container@<name> # for containers
```
### List running workspaces
```bash
doas systemctl list-units 'microvm@*' 'container@*'
```
## Example workflow
Creating a VM workspace named "nixpkgs-dev":
```bash
# 1. Create machines/fry/workspaces/nixpkgs-dev.nix (minimal, just packages if needed)
# 2. Update machines/fry/default.nix:
# sandboxed-workspace.workspaces.nixpkgs-dev = {
# type = "vm";
# config = ./workspaces/nixpkgs-dev.nix;
# ip = "192.168.83.20";
# };
# 3. Build and deploy (auto-creates directories and SSH keys)
doas nixos-rebuild switch --flake .#fry
# 4. Optional: Clone repository into workspace
mkdir -p ~/sandboxed/nixpkgs-dev/workspace
cd ~/sandboxed/nixpkgs-dev/workspace
git clone https://github.com/NixOS/nixpkgs.git
# 5. Start the workspace
workspace_nixpkgs-dev_start
# 6. SSH into the workspace
workspace_nixpkgs-dev
```
Creating a container workspace named "quick-test":
```bash
# 1. Create machines/fry/workspaces/quick-test.nix
# 2. Update machines/fry/default.nix:
# sandboxed-workspace.workspaces.quick-test = {
# type = "container";
# config = ./workspaces/quick-test.nix;
# ip = "192.168.83.30";
# };
# 3. Build and deploy
doas nixos-rebuild switch --flake .#fry
# 4. Start and access
workspace_quick-test_start
workspace_quick-test
```
## Directory structure
Workspaces store persistent data in `~/sandboxed/<name>/`:
```
~/sandboxed/<name>/
├── workspace/ # Shared workspace directory
├── ssh-host-keys/ # Persistent SSH host keys
└── claude-config/ # Claude Code configuration
```
## Notes
- Workspaces are ephemeral: only data in shared directories persists
- VMs have isolated nix store via overlay
- Containers share the host's nix store (read-only)
- SSH host keys persist across workspace rebuilds
- Claude config directory is isolated per workspace
- Workspaces can access the internet via NAT through the host
- DNS queries go through the host (uses host's DNS)
- Default VM resources: 8 vCPUs, 4GB RAM, 8GB disk overlay (see the override sketch below)
- Containers have no resource limits by default
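The default VM resources are set in vm.nix without `mkDefault`, so a per-workspace override needs `lib.mkForce`. A sketch (hypothetical workspace file):
```nix
# machines/fry/workspaces/bigbuild.nix (hypothetical)
{ lib, ... }:
{
  microvm.vcpu = lib.mkForce 16;
  microvm.mem = lib.mkForce 8192; # MiB
}
```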