2 Commits

SHA1 Message Date
a3f59d1e0e wip 2022-11-28 00:52:18 -05:00
c66f8ef8d4 wip 2022-11-27 22:28:50 -05:00
32 changed files with 9398 additions and 499 deletions

View File

@@ -60,6 +60,7 @@ in {
 "oboonakemofpalcgghocfoadofidjkkk" # keepassxc plugin
 "cimiefiiaegbelhefglklhhakcgmhkai" # plasma integration
 "hkgfoiooedgoejojocmhlaklaeopbecg" # picture in picture
+"fihnjjcciajhdojfnbdddfaoknhalnja" # I don't care about cookies
 "mnjggcdmjocbbbhaepdhchncahnbgone" # SponsorBlock
 "dhdgffkkebhmkfjojejmpbldmpobfkfo" # Tampermonkey
 # "ehpdicggenhgapiikfpnmppdonadlnmp" # Disable Scroll Jacking
@@ -79,7 +80,6 @@ in {
 nixpkgs.config.packageOverrides = pkgs: {
 vaapiIntel = pkgs.vaapiIntel.override { enableHybridCodec = true; };
 chromium = pkgs.chromium.override {
-enableWideVine = true;
 # ungoogled = true;
 # --enable-native-gpu-memory-buffers # fails on AMD APU
 # --enable-webrtc-vp9-support
@@ -89,7 +89,7 @@ in {
 # todo vulkan in chrome
 # todo video encoding in chrome
 hardware.opengl = {
-enable = true;
+enable = cfg.enableAcceleration;
 extraPackages = with pkgs; [
 intel-media-driver # LIBVA_DRIVER_NAME=iHD
 vaapiIntel # LIBVA_DRIVER_NAME=i965 (older but works better for Firefox/Chromium)

View File

@@ -14,20 +14,25 @@ in {
 ./pithos.nix
 ./spotify.nix
 ./vscodium.nix
-./discord.nix # FIXME make optional
-./steam.nix
+# ./discord.nix
+# ./steam.nix
 ./touchpad.nix
 ./mount-samba.nix
 ];
 options.de = {
 enable = lib.mkEnableOption "enable desktop environment";
+enableAcceleration = lib.mkOption {
+type = lib.types.bool;
+default = true;
+};
 };
 config = lib.mkIf cfg.enable {
 # vulkan
-hardware.opengl.driSupport = true;
-hardware.opengl.driSupport32Bit = true;
+hardware.opengl.driSupport = cfg.enableAcceleration;
+hardware.opengl.driSupport32Bit = cfg.enableAcceleration;
 # Applications
 users.users.googlebot.packages = with pkgs; [
@@ -40,8 +45,7 @@ in {
 element-desktop
 mpv
 nextcloud-client
-signal-desktop
-minecraft
+# signal-desktop # FIXME
 gparted
 libreoffice-fresh
 thunderbird

View File

@@ -1,76 +0,0 @@
{ lib, config, pkgs, ... }:
with lib;
let
cfg = config.services.pia;
in {
imports = [
./pia.nix
];
options.services.pia = {
enable = lib.mkEnableOption "Enable PIA Client";
dataDir = lib.mkOption {
type = lib.types.str;
default = "/var/lib/pia";
description = ''
Path to the pia data directory
'';
};
user = lib.mkOption {
type = lib.types.str;
default = "root";
description = ''
The user pia should run as
'';
};
group = lib.mkOption {
type = lib.types.str;
default = "piagrp";
description = ''
The group pia should run as
'';
};
users = mkOption {
type = with types; listOf str;
default = [];
description = ''
Usernames to be added to the "spotifyd" group, so that they
can start and interact with the userspace daemon.
'';
};
};
config = mkIf cfg.enable {
# users.users.${cfg.user} =
# if cfg.user == "pia" then {
# isSystemUser = true;
# group = cfg.group;
# home = cfg.dataDir;
# createHome = true;
# }
# else {};
users.groups.${cfg.group}.members = cfg.users;
systemd.services.pia-daemon = {
enable = true;
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig.ExecStart = "${pkgs.pia-daemon}/bin/pia-daemon";
serviceConfig.PrivateTmp="yes";
serviceConfig.User = cfg.user;
serviceConfig.Group = cfg.group;
preStart = ''
mkdir -p ${cfg.dataDir}
chown ${cfg.user}:${cfg.group} ${cfg.dataDir}
'';
};
};
}

View File

@@ -1,147 +0,0 @@
diff --git a/Rakefile b/Rakefile
index fa6d771..bcd6fb1 100644
--- a/Rakefile
+++ b/Rakefile
@@ -151,41 +151,6 @@ end
# Install LICENSE.txt
stage.install('LICENSE.txt', :res)
-# Download server lists to ship preloaded copies with the app. These tasks
-# depend on version.txt so they're refreshed periodically (whenver a new commit
-# is made), but not for every build.
-#
-# SERVER_DATA_DIR can be set to use existing files instead of downloading them;
-# this is primarily intended for reproducing a build.
-#
-# Create a probe for SERVER_DATA_DIR so these are updated if it changes.
-serverDataProbe = Probe.new('serverdata')
-serverDataProbe.file('serverdata.txt', "#{ENV['SERVER_DATA_DIR']}")
-# JSON resource build directory
-jsonFetched = Build.new('json-fetched')
-# These are the assets we need to fetch and the URIs we get them from
-{
- 'modern_shadowsocks.json': 'https://serverlist.piaservers.net/shadow_socks',
- 'modern_servers.json': 'https://serverlist.piaservers.net/vpninfo/servers/v6',
- 'modern_region_meta.json': 'https://serverlist.piaservers.net/vpninfo/regions/v2'
-}.each do |k, v|
- fetchedFile = jsonFetched.artifact(k.to_s)
- serverDataDir = ENV['SERVER_DATA_DIR']
- file fetchedFile => [version.artifact('version.txt'),
- serverDataProbe.artifact('serverdata.txt'),
- jsonFetched.componentDir] do |t|
- if(serverDataDir)
- # Use the copy provided instead of fetching (for reproducing a build)
- File.copy(File.join(serverDataDir, k), fetchedFile)
- else
- # Fetch from the web API (write with "binary" mode so LF is not
- # converted to CRLF on Windows)
- File.binwrite(t.name, Net::HTTP.get(URI(v)))
- end
- end
- stage.install(fetchedFile, :res)
-end
-
# Install version/brand/arch info in case an upgrade needs to know what is
# currently installed
stage.install(version.artifact('version.txt'), :res)
diff --git a/common/src/posix/unixsignalhandler.cpp b/common/src/posix/unixsignalhandler.cpp
index f820a6d..e1b6c33 100644
--- a/common/src/posix/unixsignalhandler.cpp
+++ b/common/src/posix/unixsignalhandler.cpp
@@ -132,7 +132,7 @@ void UnixSignalHandler::_signalHandler(int, siginfo_t *info, void *)
// we checked it, we can't even log because the logger is not reentrant.
auto pThis = instance();
if(pThis)
- ::write(pThis->_sigFd[0], info, sizeof(siginfo_t));
+ auto _ = ::write(pThis->_sigFd[0], info, sizeof(siginfo_t));
}
template<int Signal>
void UnixSignalHandler::setAbortAction()
diff --git a/daemon/src/linux/linux_nl.cpp b/daemon/src/linux/linux_nl.cpp
index fd3aced..2367a5e 100644
--- a/daemon/src/linux/linux_nl.cpp
+++ b/daemon/src/linux/linux_nl.cpp
@@ -642,6 +642,6 @@ LinuxNl::~LinuxNl()
unsigned char term = 0;
PosixFd killSocket = _workerKillSocket.get();
if(killSocket)
- ::write(killSocket.get(), &term, sizeof(term));
+ auto _ = ::write(killSocket.get(), &term, sizeof(term));
_workerThread.join();
}
diff --git a/extras/support-tool/launcher/linux-launcher.cpp b/extras/support-tool/launcher/linux-launcher.cpp
index 3f63ac2..420d54d 100644
--- a/extras/support-tool/launcher/linux-launcher.cpp
+++ b/extras/support-tool/launcher/linux-launcher.cpp
@@ -48,7 +48,7 @@ int fork_execv(gid_t gid, char *filename, char *const argv[])
if(forkResult == 0)
{
// Apply gid as both real and effective
- setregid(gid, gid);
+ auto _ = setregid(gid, gid);
int execErr = execv(filename, argv);
std::cerr << "exec err: " << execErr << " / " << errno << " - "
diff --git a/rake/model/qt.rb b/rake/model/qt.rb
index c8cd362..a6abe59 100644
--- a/rake/model/qt.rb
+++ b/rake/model/qt.rb
@@ -171,12 +171,7 @@ class Qt
end
def getQtRoot(qtVersion, arch)
- qtToolchainPtns = getQtToolchainPatterns(arch)
- qtRoots = FileList[*Util.joinPaths([[qtVersion], qtToolchainPtns])]
- # Explicitly filter for existing paths - if the pattern has wildcards
- # we only get existing directories, but if the patterns are just
- # alternates with no wildcards, we can get directories that don't exist
- qtRoots.find_all { |r| File.exist?(r) }.max
+ ENV['QTROOT']
end
def getQtVersionScore(minor, patch)
@@ -192,12 +187,7 @@ class Qt
end
def getQtPathVersion(path)
- verMatch = path.match('^.*/Qt[^/]*/5\.(\d+)\.?(\d*)$')
- if(verMatch == nil)
- nil
- else
- [verMatch[1].to_i, verMatch[2].to_i]
- end
+ [ENV['QT_MAJOR'].to_i, ENV['QT_MINOR'].to_i]
end
# Build a component definition with the defaults. The "Core" component will
diff --git a/rake/product/linux.rb b/rake/product/linux.rb
index f43fb3e..83505af 100644
--- a/rake/product/linux.rb
+++ b/rake/product/linux.rb
@@ -18,8 +18,7 @@ module PiaLinux
QT_BINARIES = %w(pia-client pia-daemon piactl pia-support-tool)
# Version of libicu (needed to determine lib*.so.## file names in deployment)
- ICU_VERSION = FileList[File.join(Executable::Qt.targetQtRoot, 'lib', 'libicudata.so.*')]
- .first.match(/libicudata\.so\.(\d+)(\..*|)/)[1]
+ ICU_VERSION = ENV['ICU_MAJOR'].to_i;
# Copy a directory recursively, excluding *.debug files (debugging symbols)
def self.copyWithoutDebug(sourceDir, destDir)
@@ -220,16 +219,5 @@ module PiaLinux
# Since these are just development workflow tools, they can be skipped if
# specific dependencies are not available.
def self.defineTools(toolsStage)
- # Test if we have libthai-dev, for the Thai word breaking utility
- if(Executable::Tc.sysHeaderAvailable?('thai/thwbrk.h'))
- Executable.new('thaibreak')
- .source('tools/thaibreak')
- .lib('thai')
- .install(toolsStage, :bin)
- toolsStage.install('tools/thaibreak/thai_ts.sh', :bin)
- toolsStage.install('tools/onesky_import/import_translations.sh', :bin)
- else
- puts "skipping thaibreak utility, install libthai-dev to build thaibreak"
- end
end
end

View File

@@ -1,139 +0,0 @@
{ pkgs, lib, config, ... }:
{
nixpkgs.overlays = [
(self: super:
with self;
let
# arch = builtins.elemAt (lib.strings.splitString "-" builtins.currentSystem) 0;
arch = "x86_64";
pia-desktop = clangStdenv.mkDerivation rec {
pname = "pia-desktop";
version = "3.3.0";
src = fetchgit {
url = "https://github.com/pia-foss/desktop";
rev = version;
fetchLFS = true;
sha256 = "D9txL5MUWyRYTnsnhlQdYT4dGVpj8PFsVa5hkrb36cw=";
};
patches = [
./fix-pia.patch
];
nativeBuildInputs = [
cmake
rake
];
prePatch = ''
sed -i 's|/usr/include/libnl3|${libnl.dev}/include/libnl3|' Rakefile
'';
installPhase = ''
mkdir -p $out/bin $out/lib $out/share
cp -r ../out/pia_release_${arch}/stage/bin $out
cp -r ../out/pia_release_${arch}/stage/lib $out
cp -r ../out/pia_release_${arch}/stage/share $out
'';
cmakeFlags = [
"-DCMAKE_BUILD_TYPE=Release"
];
QTROOT = "${qt5.full}";
QT_MAJOR = lib.versions.minor (lib.strings.parseDrvName qt5.full.name).version;
QT_MINOR = lib.versions.patch (lib.strings.parseDrvName qt5.full.name).version;
ICU_MAJOR = lib.versions.major (lib.strings.parseDrvName icu.name).version;
buildInputs = [
mesa
libsForQt5.qt5.qtquickcontrols
libsForQt5.qt5.qtquickcontrols2
icu
libnl
];
dontWrapQtApps = true;
};
in rec {
openvpn-updown = buildFHSUserEnv {
name = "openvpn-updown";
targetPkgs = pkgs: (with pkgs; [ pia-desktop ]);
runScript = "openvpn-updown.sh";
};
pia-client = buildFHSUserEnv {
name = "pia-client";
targetPkgs = pkgs: (with pkgs; [
pia-desktop
xorg.libXau
xorg.libXdmcp
]);
runScript = "pia-client";
};
piactl = buildFHSUserEnv {
name = "piactl";
targetPkgs = pkgs: (with pkgs; [ pia-desktop ]);
runScript = "piactl";
};
pia-daemon = buildFHSUserEnv {
name = "pia-daemon";
targetPkgs = pkgs: (with pkgs; [ pia-desktop ]);
runScript = "pia-daemon";
};
pia-hnsd = buildFHSUserEnv {
name = "pia-hnsd";
targetPkgs = pkgs: (with pkgs; [ pia-desktop ]);
runScript = "pia-hnsd";
};
pia-openvpn = buildFHSUserEnv {
name = "pia-openvpn";
targetPkgs = pkgs: (with pkgs; [ pia-desktop ]);
runScript = "pia-openvpn";
};
pia-ss-local = buildFHSUserEnv {
name = "pia-ss-local";
targetPkgs = pkgs: (with pkgs; [ pia-desktop ]);
runScript = "pia-ss-local";
};
pia-support-tool = buildFHSUserEnv {
name = "pia-support-tool";
targetPkgs = pkgs: (with pkgs; [
pia-desktop
xorg.libXau
xorg.libXdmcp
]);
runScript = "pia-support-tool";
};
pia-unbound = buildFHSUserEnv {
name = "pia-unbound";
targetPkgs = pkgs: (with pkgs; [ pia-desktop ]);
runScript = "pia-unbound";
};
pia-wireguard-go = buildFHSUserEnv {
name = "pia-wireguard-go";
targetPkgs = pkgs: (with pkgs; [ pia-desktop ]);
runScript = "pia-wireguard-go";
};
support-tool-launcher = buildFHSUserEnv {
name = "support-tool-launcher";
targetPkgs = pkgs: (with pkgs; [ pia-desktop ]);
runScript = "support-tool-launcher";
};
})
];
}

View File

@@ -0,0 +1,58 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.cloudflared;
settingsFormat = pkgs.formats.yaml { };
in
{
meta.maintainers = with maintainers; [ pmc ];
options = {
services.cloudflared = {
enable = mkEnableOption "cloudflared";
package = mkOption {
type = types.package;
default = pkgs.cloudflared;
description = "The cloudflared package to use";
example = literalExpression ''pkgs.cloudflared'';
};
config = mkOption {
type = settingsFormat.type;
description = "Contents of the config.yaml as an attrset; see https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/configuration/configuration-file for documentation on the contents";
example = literalExpression ''
{
url = "http://localhost:3000";
tunnel = "505c8dd1-e4fb-4ea4-b909-26b8f61ceaaf";
credentials-file = "/var/lib/cloudflared/505c8dd1-e4fb-4ea4-b909-26b8f61ceaaf.json";
}
'';
};
configFile = mkOption {
type = types.path;
description = "Path to cloudflared config.yaml.";
example = literalExpression ''"/etc/cloudflared/config.yaml"'';
};
};
};
config = mkIf cfg.enable ({
# Prefer the config file over settings if both are set.
services.cloudflared.configFile = mkDefault (settingsFormat.generate "cloudflared.yaml" cfg.config);
systemd.services.cloudflared = {
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
description = "Cloudflare Argo Tunnel";
serviceConfig = {
TimeoutStartSec = 0;
Type = "notify";
ExecStart = "${cfg.package}/bin/cloudflared --config ${cfg.configFile} --no-autoupdate tunnel run";
Restart = "on-failure";
RestartSec = "5s";
};
};
});
}

View File

@@ -13,6 +13,7 @@
 ./privatebin/privatebin.nix
 ./radio.nix
 ./samba.nix
+./cloudflared.nix
 ./owncast.nix
 ];
 }

View File

@@ -6,7 +6,7 @@
 # - add some handy shell commands
 let
-nix-locate = config.inputs.nix-locate.packages.${config.currentSystem}.default;
+nix-locate = config.inputs.nix-locate.defaultPackage.${config.currentSystem};
 in {
 programs.command-not-found.enable = false;

View File

@@ -5,6 +5,7 @@ rec {
 "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO0VFnn3+Mh0nWeN92jov81qNE9fpzTAHYBphNoY7HUx" # reg
 "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHSkKiRUUmnErOKGx81nyge/9KqjkPh8BfDk0D3oP586" # nat
 "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFeTK1iARlNIKP/DS8/ObBm9yUM/3L1Ub4XI5A2r9OzP" # ray
+"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKXc9PX3uTYVrgvKdztk+LBh5WMNBUzbXlAo50SCAeNw" # nat 2
 ];
 system = {
 liza = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDY/pNyWedEfU7Tq9ikGbriRuF1ZWkHhegGS17L0Vcdl";
@@ -12,6 +13,7 @@ rec {
 ponyo-unlock = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIC9LQuuImgWlkjDhEEIbM1wOd+HqRv1RxvYZuLXPSdRi";
 ray = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDQM8hwKRgl8cZj7UVYATSLYu4LhG7I0WFJ9m2iWowiB";
 s0 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAwiXcUFtAvZCayhu4+AIcF+Ktrdgv9ee/mXSIhJbp4q";
+nat = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGVgZc5Z2Oh426z7lEftcFUwCFcrZy8bvqS09Tj49GWE";
 n1 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPWlhd1Oid5Xf2zdcBrcdrR0TlhObutwcJ8piobRTpRt";
 n2 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJ7bRiRutnI7Bmyt/I238E3Fp5DqiClIXiVibsccipOr";
 n3 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB+rJEaRrFDGirQC2UoWQkmpzLg4qgTjGJgVqiipWiU5";
@@ -26,6 +28,7 @@ rec {
 liza
 ponyo
 ray
+nat
 s0
 n1
 n2
@@ -37,6 +40,7 @@ rec {
 ];
 personal = with system; [
 ray
+nat
 ];
 servers = with system; [
 liza

flake.lock generated
View File

@@ -2,17 +2,16 @@
 "nodes": {
 "agenix": {
 "inputs": {
-"darwin": "darwin",
 "nixpkgs": [
 "nixpkgs"
 ]
 },
 "locked": {
-"lastModified": 1675176355,
-"narHash": "sha256-Qjxh5cmN56siY97mzmBLI1+cdjXSPqmfPVsKxBvHmwI=",
+"lastModified": 1665870395,
+"narHash": "sha256-Tsbqb27LDNxOoPLh0gw2hIb6L/6Ow/6lIBvqcHzEKBI=",
 "owner": "ryantm",
 "repo": "agenix",
-"rev": "b7ffcfe77f817d9ee992640ba1f270718d197f28",
+"rev": "a630400067c6d03c9b3e0455347dc8559db14288",
 "type": "github"
 },
 "original": {
@@ -33,7 +32,7 @@
 "locked": {
 "lastModified": 1648612759,
 "narHash": "sha256-SJwlpD2Wz3zFoX2mIYCQfwIOYHaOdeiWGFeDXsLGM84=",
-"ref": "refs/heads/master",
+"ref": "master",
 "rev": "39d338b9b24159d8ef3309eecc0d32a2a9f102b5",
 "revCount": 2,
 "type": "git",
@@ -72,7 +71,7 @@
 "locked": {
 "lastModified": 1651719222,
 "narHash": "sha256-p/GY5vOP+HUlxNL4OtEhmBNEVQsedOHXEmjfCGONVmE=",
-"ref": "refs/heads/master",
+"ref": "master",
 "rev": "1290ddd9a2ff2bf2d0f702750768312b80efcd34",
 "revCount": 19,
 "type": "git",
@@ -83,36 +82,14 @@
 "url": "https://git.neet.dev/zuckerberg/dailybuild_modules.git"
 }
 },
-"darwin": {
-"inputs": {
-"nixpkgs": [
-"agenix",
-"nixpkgs"
-]
-},
-"locked": {
-"lastModified": 1673295039,
-"narHash": "sha256-AsdYgE8/GPwcelGgrntlijMg4t3hLFJFCRF3tL5WVjA=",
-"owner": "lnl7",
-"repo": "nix-darwin",
-"rev": "87b9d090ad39b25b2400029c64825fc2a8868943",
-"type": "github"
-},
-"original": {
-"owner": "lnl7",
-"ref": "master",
-"repo": "nix-darwin",
-"type": "github"
-}
-},
 "flake-compat": {
 "flake": false,
 "locked": {
-"lastModified": 1668681692,
-"narHash": "sha256-Ht91NGdewz8IQLtWZ9LCeNXMSXHUss+9COoqu6JLmXU=",
+"lastModified": 1641205782,
+"narHash": "sha256-4jY7RCWUoZ9cKD8co0/4tFARpWB+57+r1bLLvXNJliY=",
 "owner": "edolstra",
 "repo": "flake-compat",
-"rev": "009399224d5e398d03b22badca40a37ac85412a1",
+"rev": "b7547d3eed6f32d06102ead8991ec52ab0a4f1a7",
 "type": "github"
 },
 "original": {
@@ -144,32 +121,32 @@
 ]
 },
 "locked": {
-"lastModified": 1673969751,
-"narHash": "sha256-U6aYz3lqZ4NVEGEWiti1i0FyqEo4bUjnTAnA73DPnNU=",
-"owner": "bennofs",
+"lastModified": 1652819416,
+"narHash": "sha256-OzYSb66kQUVP1FM0E7Z0ij13mm14DkJi79FAMprAavo=",
+"owner": "googlebot42",
 "repo": "nix-index",
-"rev": "5f98881b1ed27ab6656e6d71b534f88430f6823a",
+"rev": "a28bb3175d370c6cb9569e6d4b5570e9ca016a3e",
 "type": "github"
 },
 "original": {
-"owner": "bennofs",
+"owner": "googlebot42",
 "repo": "nix-index",
 "type": "github"
 }
 },
 "nixpkgs": {
 "locked": {
-"lastModified": 1672580127,
-"narHash": "sha256-3lW3xZslREhJogoOkjeZtlBtvFMyxHku7I/9IVehhT8=",
+"lastModified": 1668994630,
+"narHash": "sha256-1lqx6HLyw6fMNX/hXrrETG1vMvZRGm2XVC9O/Jt0T6c=",
 "owner": "NixOS",
 "repo": "nixpkgs",
-"rev": "0874168639713f547c05947c76124f78441ea46c",
+"rev": "af50806f7c6ab40df3e6b239099e8f8385f6c78b",
 "type": "github"
 },
 "original": {
 "owner": "NixOS",
-"ref": "nixos-22.05",
 "repo": "nixpkgs",
+"rev": "af50806f7c6ab40df3e6b239099e8f8385f6c78b",
 "type": "github"
 }
 },
@@ -190,16 +167,16 @@
 },
 "nixpkgs-unstable": {
 "locked": {
-"lastModified": 1675835843,
-"narHash": "sha256-y1dSCQPcof4CWzRYRqDj4qZzbBl+raVPAko5Prdil28=",
+"lastModified": 1669411043,
+"narHash": "sha256-LfPd3+EY+jaIHTRIEOUtHXuanxm59YKgUacmSzaqMLc=",
 "owner": "NixOS",
 "repo": "nixpkgs",
-"rev": "32f914af34f126f54b45e482fb2da4ae78f3095f",
+"rev": "5dc7114b7b256d217fe7752f1614be2514e61bb8",
 "type": "github"
 },
 "original": {
 "owner": "NixOS",
-"ref": "master",
+"ref": "nixos-unstable",
 "repo": "nixpkgs",
 "type": "github"
 }
@@ -234,7 +211,7 @@
 "locked": {
 "lastModified": 1652121792,
 "narHash": "sha256-j1Y9MAjUVNgyFSeGzPoqibAnEysJDjZSXukVfQ7+bsQ=",
-"ref": "refs/heads/master",
+"ref": "master",
 "rev": "72e7a9e80b780c84ed8d4a6374bfbb242701f900",
 "revCount": 5,
 "type": "git",

View File

@@ -1,16 +1,17 @@
 {
 inputs = {
-nixpkgs.url = "github:NixOS/nixpkgs/nixos-22.05";
-nixpkgs-unstable.url = "github:NixOS/nixpkgs/master";
+nixpkgs.url = "github:NixOS/nixpkgs/af50806f7c6ab40df3e6b239099e8f8385f6c78b";
+nixpkgs-unstable.url = "github:NixOS/nixpkgs/nixos-unstable";
 flake-utils.url = "github:numtide/flake-utils";
-nix-locate.url = "github:bennofs/nix-index";
+nix-locate.url = "github:googlebot42/nix-index";
 nix-locate.inputs.nixpkgs.follows = "nixpkgs";
 # mail server
 simple-nixos-mailserver.url = "gitlab:simple-nixos-mailserver/nixos-mailserver/nixos-22.05";
 simple-nixos-mailserver.inputs.nixpkgs.follows = "nixpkgs";
+simple-nixos-mailserver.inputs.nixpkgs-21_11.follows = "nixpkgs";
 # agenix
 agenix.url = "github:ryantm/agenix";
@@ -41,12 +42,12 @@
 modules = system: [
 ./common
 inputs.simple-nixos-mailserver.nixosModule
-inputs.agenix.nixosModules.default
+inputs.agenix.nixosModule
 inputs.dailybuild_modules.nixosModule
 inputs.archivebox.nixosModule
 ({ lib, ... }: {
 config.environment.systemPackages = [
-inputs.agenix.packages.${system}.agenix
+inputs.agenix.defaultPackage.${system}
 ];
 # because nixos specialArgs doesn't work for containers... need to pass in inputs a different way
@@ -69,7 +70,7 @@
 in
 {
 "reg" = mkSystem "x86_64-linux" nixpkgs ./machines/reg/configuration.nix;
-"ray" = mkSystem "x86_64-linux" nixpkgs-unstable ./machines/ray/configuration.nix;
+"ray" = mkSystem "x86_64-linux" nixpkgs ./machines/ray/configuration.nix;
 "nat" = mkSystem "aarch64-linux" nixpkgs ./machines/nat/configuration.nix;
 "liza" = mkSystem "x86_64-linux" nixpkgs ./machines/liza/configuration.nix;
 "ponyo" = mkSystem "x86_64-linux" nixpkgs ./machines/ponyo/configuration.nix;
@@ -87,18 +88,11 @@
 mkKexec = system:
 (nixpkgs.lib.nixosSystem {
 inherit system;
-modules = [ ./machines/ephemeral/kexec.nix ];
+modules = [ ./machines/kexec.nix ];
 }).config.system.build.kexec_tarball;
-mkIso = system:
-(nixpkgs.lib.nixosSystem {
-inherit system;
-modules = [ ./machines/ephemeral/iso.nix ];
-}).config.system.build.isoImage;
 in {
 "x86_64-linux"."kexec" = mkKexec "x86_64-linux";
-"x86_64-linux"."iso" = mkIso "x86_64-linux";
 "aarch64-linux"."kexec" = mkKexec "aarch64-linux";
-"aarch64-linux"."iso" = mkIso "aarch64-linux";
 };
 };
 }

View File

@@ -1,12 +0,0 @@
{ modulesPath, ... }:
{
imports = [
(modulesPath + "/installer/cd-dvd/iso-image.nix")
./minimal.nix
];
isoImage.makeUsbBootable = true;
networking.hostName = "iso";
}

View File

@@ -1,28 +0,0 @@
{ pkgs, ... }:
{
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "e1000" "e1000e" "virtio_pci" "r8169" ];
boot.kernelParams = [
"panic=30" "boot.panic_on_fail" # reboot the machine upon fatal boot issues
"console=ttyS0" # enable serial console
"console=tty1"
];
boot.kernel.sysctl."vm.overcommit_memory" = "1";
environment.systemPackages = with pkgs; [
cryptsetup
btrfs-progs
];
environment.variables.GC_INITIAL_HEAP_SIZE = "1M";
networking.useDHCP = true;
services.openssh = {
enable = true;
challengeResponseAuthentication = false;
passwordAuthentication = false;
};
services.getty.autologinUser = "root";
users.users.root.openssh.authorizedKeys.keys = (import ../common/ssh.nix).users;
}

View File

@@ -6,11 +6,8 @@
 imports = [
 (modulesPath + "/installer/netboot/netboot.nix")
 (modulesPath + "/profiles/qemu-guest.nix")
-./minimal.nix
 ];
-networking.hostName = "kexec";
 # stripped down version of https://github.com/cleverca22/nix-tests/tree/master/kexec
 system.build = rec {
 image = pkgs.runCommand "image" { buildInputs = [ pkgs.nukeReferences ]; } ''
@@ -45,4 +42,31 @@
 contents = [ ];
 };
 };
+boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "e1000" "e1000e" "virtio_pci" "r8169" ];
+boot.kernelParams = [
+"panic=30" "boot.panic_on_fail" # reboot the machine upon fatal boot issues
+"console=ttyS0" # enable serial console
+"console=tty1"
+];
+boot.kernel.sysctl."vm.overcommit_memory" = "1";
+environment.systemPackages = with pkgs; [
+cryptsetup
+btrfs-progs
+];
+environment.variables.GC_INITIAL_HEAP_SIZE = "1M";
+networking.useDHCP = true;
+networking.hostName = "kexec";
+services.openssh = {
+enable = true;
+challengeResponseAuthentication = false;
+passwordAuthentication = false;
+};
+services.getty.autologinUser = "root";
+users.users.root.openssh.authorizedKeys.keys = (import ../common/ssh.nix).users;
 }

View File

@@ -1,17 +1,52 @@
-{ config, pkgs, fetchurl, lib, ... }:
+{ config, lib, pkgs, ... }:
 {
 imports = [
 ./hardware-configuration.nix
+./m1-support
 ];
-efi.enable = true;
 networking.hostName = "nat";
-networking.interfaces.ens160.useDHCP = true;
-services.zerotierone.enable = true;
 de.enable = true;
+de.enableAcceleration = false;
 de.touchpad.enable = true;
+# nixpkgs.overlays = [
+# (final: prev: {
+# signal-desktop = prev.signal-desktop.overrideAttrs (old: {
+# version = "5.50.1";
+# src = final.fetchurl {
+# url = "https://github.com/0mniteck/Signal-Desktop-Builder/raw/2610eaded94b3c717a63fdff3cb872dbbaf16383/builds/release/signal-desktop_5.50.1_arm64.deb";
+# sha256 = "sha256-++xG3fCMvU+nwlkBwjZ0d0wfWiNDSUhyCfzTirsY2xs=";
+# };
+# #buildInputs = old.buildInputs ++ [ final.openssl_3_0 ];
+# preFixup = ''
+# gappsWrapperArgs+=(
+# --prefix LD_LIBRARY_PATH : "${lib.makeLibraryPath [ final.stdenv.cc.cc ] }"
+# --add-flags "\''${NIXOS_OZONE_WL:+\''${WAYLAND_DISPLAY:+--enable-features=UseOzonePlatform --ozone-platform=wayland}}"
+# --suffix PATH : ${lib.makeBinPath [ final.xdg-utils ]}
+# )
+# # Fix the desktop link
+# substituteInPlace $out/share/applications/signal-desktop.desktop \
+# --replace /opt/Signal/signal-desktop $out/bin/signal-desktop
+# autoPatchelf --no-recurse -- $out/lib/Signal/
+# patchelf --add-needed ${final.libpulseaudio}/lib/libpulse.so $out/lib/Signal/resources/app.asar.unpacked/node_modules/ringrtc/build/linux/libringrtc-arm64.node
+# patchelf --add-needed ${final.openssl_3_0}/lib/libcrypto.so.3 $out/lib/Signal/resources/app.asar.unpacked/node_modules/ringrtc/build/linux/libringrtc-arm64.node
+# '';
+# meta.platforms = [ "aarch64-linux" ];
+# });
+# })
+# ];
+nixpkgs.overlays = [
+(final: prev: {
+jellyfin-media-player = prev.jellyfin-media-player.overrideAttrs (old: {
+meta.platforms = [ "aarch64-linux" ];
+});
+})
+];
 }

View File

@@ -4,22 +4,66 @@
 { config, lib, pkgs, modulesPath, ... }:
 {
-imports = [ ];
+imports =
+[ (modulesPath + "/installer/scan/not-detected.nix")
+];
-boot.initrd.availableKernelModules = [ "uhci_hcd" "ahci" "nvme" "usbhid" ];
-boot.initrd.kernelModules = [ ];
+efi.enable = true;
+# 4k kernel for m1
+hardware.asahi.use4KPages = false;
+boot.initrd.availableKernelModules = [ "usb_storage" ];
+boot.initrd.kernelModules = [ "dm-snapshot" ];
 boot.kernelModules = [ ];
 boot.extraModulePackages = [ ];
+# fix keys
+boot.extraModprobeConfig = ''
+options hid-apple swap_fn_leftctrl=1 swap_opt_cmd=1
+'';
+boot.initrd.luks.devices."enc-pv" = {
+device = "/dev/nvme0n1p5";
+allowDiscards = true;
+};
 fileSystems."/" =
-{ device = "/dev/disk/by-uuid/02a8c0c7-fd4e-4443-a83c-2d0b63848779";
+{ device = "/dev/disk/by-uuid/f3021c34-2034-4bf0-bf3f-64d6d02c0eff";
 fsType = "btrfs";
+options = [ "subvol=root" ];
+};
+fileSystems."/home" =
+{ device = "/dev/disk/by-uuid/f3021c34-2034-4bf0-bf3f-64d6d02c0eff";
+fsType = "btrfs";
+options = [ "subvol=home" ];
+};
+fileSystems."/nix" =
+{ device = "/dev/disk/by-uuid/f3021c34-2034-4bf0-bf3f-64d6d02c0eff";
+fsType = "btrfs";
+options = [ "subvol=nix" ];
 };
 fileSystems."/boot" =
-{ device = "/dev/disk/by-uuid/0C95-1290";
+{ device = "/dev/disk/by-uuid/D33C-18EE";
 fsType = "vfat";
 };
-swapDevices = [ ];
+swapDevices =
+[ { device = "/dev/disk/by-uuid/98e875e4-4c34-42e9-8c71-404dfe137ba7"; }
+];
+# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
+# (the default) this is the recommended approach. When using systemd-networkd it's
+# still possible to use this option, but it's recommended to use it in conjunction
+# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
+networking.useDHCP = lib.mkDefault true;
+# networking.interfaces.wlp1s0f0.useDHCP = lib.mkDefault true;
+#nixpkgs.hostPlatform = lib.mkDefault "aarch64-linux";
+powerManagement.cpuFreqGovernor = lib.mkDefault "ondemand";
+# high-resolution display
+hardware.video.hidpi.enable = lib.mkDefault true;
 }

View File

@@ -0,0 +1,29 @@
{ lib
, python3
, python3Packages
, fetchFromGitHub
, gzip
, gnutar
, lzfse
}:
python3Packages.buildPythonApplication rec {
pname = "asahi-fwextract";
version = "0.5pre10";
# tracking version: https://github.com/AsahiLinux/PKGBUILDs/blob/main/asahi-fwextract/PKGBUILD
src = fetchFromGitHub {
owner = "AsahiLinux";
repo = "asahi-installer";
rev = "v${version}";
hash = "sha256-93dTRrNNo7yilSGpSNjXir+DhQe29DeoZHXusrk9PN8=";
};
postPatch = ''
substituteInPlace asahi_firmware/img4.py \
--replace 'liblzfse.so' '${lzfse}/lib/liblzfse.so'
substituteInPlace asahi_firmware/update.py \
--replace '"tar"' '"${gnutar}/bin/tar"' \
--replace '"xf"' '"-x", "-I", "${gzip}/bin/gzip", "-f"'
'';
}

View File

@@ -0,0 +1,53 @@
{ config, pkgs, lib, ... }:
let
bootM1n1 = config.hardware.asahi.pkgs.callPackage ../m1n1 {
isRelease = true;
withTools = false;
customLogo = config.boot.m1n1CustomLogo;
};
bootUBoot = config.hardware.asahi.pkgs.callPackage ../u-boot {
m1n1 = bootM1n1;
};
bootFiles = {
"m1n1/boot.bin" = pkgs.runCommand "boot.bin" {} ''
cat ${bootM1n1}/build/m1n1.bin > $out
cat ${config.boot.kernelPackages.kernel}/dtbs/apple/*.dtb >> $out
cat ${bootUBoot}/u-boot-nodtb.bin.gz >> $out
if [ -n "${config.boot.m1n1ExtraOptions}" ]; then
echo '${config.boot.m1n1ExtraOptions}' >> $out
fi
'';
};
in {
config = {
# install m1n1 with the boot loader
boot.loader.grub.extraFiles = bootFiles;
boot.loader.systemd-boot.extraFiles = bootFiles;
# ensure the installer has m1n1 in the image
system.extraDependencies = lib.mkForce [ bootM1n1 bootUBoot ];
system.build.m1n1 = bootFiles."m1n1/boot.bin";
};
options.boot = {
m1n1ExtraOptions = lib.mkOption {
type = lib.types.str;
default = "";
description = ''
Append extra options to the m1n1 boot binary. Might be useful for fixing
display problems on Mac minis.
https://github.com/AsahiLinux/m1n1/issues/159
'';
};
m1n1CustomLogo = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
description = ''
Custom logo to build into m1n1. The path must point to a 256x256 PNG.
'';
};
};
}

View File

@@ -0,0 +1,40 @@
{ config, pkgs, lib, ... }:
{
imports = [
./kernel
./peripheral-firmware
./boot-m1n1
];
config = {
hardware.asahi.pkgs = if config.hardware.asahi.pkgsSystem != "aarch64-linux"
then import (pkgs.path) {
system = config.hardware.asahi.pkgsSystem;
crossSystem.system = "aarch64-linux";
}
else pkgs;
};
options.hardware.asahi = {
pkgsSystem = lib.mkOption {
type = lib.types.str;
default = "aarch64-linux";
description = ''
System architecture that should be used to build the major Asahi
packages, if not the default aarch64-linux. This allows installing from
a cross-built ISO without rebuilding them during installation.
'';
};
pkgs = lib.mkOption {
type = lib.types.raw;
description = ''
Package set used to build the major Asahi packages. Defaults to the
ambient set if not cross-built, otherwise re-imports the ambient set
with the system defined by `hardware.asahi.pkgsSystem`.
'';
};
};
}

View File

@@ -0,0 +1,28 @@
From 1c60b7662b82d7d5d54aca1cd24f9517a8c4595f Mon Sep 17 00:00:00 2001
From: Thomas Watson <twatson52@icloud.com>
Date: Thu, 24 Nov 2022 11:00:43 -0600
Subject: [PATCH] drivers/usb/dwc3: remove apple dr_mode check
This check prevents the driver from probing with old device trees.
Allegedly this check is incorrect anyway as the dr_mode should default
to OTG but this is not true at the time of the check.
---
drivers/usb/dwc3/core.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 1d88119cdbb8..b92401c1ee0c 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1882,8 +1882,7 @@ static int dwc3_probe(struct platform_device *pdev)
}
if (of_device_is_compatible(dev->of_node, "apple,dwc3")) {
- if (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) ||
- dwc->dr_mode != USB_DR_MODE_OTG) {
+ if (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH)) {
dev_err(dev,
"Apple DWC3 requires role switch support.\n"
);
--
2.17.1

File diff suppressed because it is too large

View File

@@ -0,0 +1,13 @@
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a1eb6572ecd2..b94fbd9b3d70 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1036,7 +1036,7 @@ endmenu
choice
prompt "Page size"
- default ARM64_4K_PAGES
+ default ARM64_16K_PAGES
help
Page size (translation granule) configuration.

View File

@@ -0,0 +1,98 @@
# the Asahi Linux kernel and options that must go along with it
{ config, pkgs, lib, ... }:
{
config = {
boot.kernelPackages = config.hardware.asahi.pkgs.callPackage ./package.nix {
_4KBuild = config.hardware.asahi.use4KPages;
};
# we definitely want to use CONFIG_ENERGY_MODEL, and
# schedutil is a prerequisite for using it
# source: https://www.kernel.org/doc/html/latest/scheduler/sched-energy.html
powerManagement.cpuFreqGovernor = lib.mkOverride 800 "schedutil";
boot.initrd.includeDefaultModules = false;
boot.initrd.availableKernelModules = [
# list of initrd modules stolen from
# https://github.com/AsahiLinux/asahi-scripts/blob/e4d6151a7dcb63ae5e3779c3cf57362eb37d908a/initcpio/install/asahi
"apple-mailbox"
"nvme_apple"
"pinctrl-apple-gpio"
"macsmc"
"macsmc-rtkit"
"i2c-apple"
"tps6598x"
"apple-dart"
"dwc3"
"dwc3-of-simple"
"xhci-pci"
"pcie-apple"
"gpio_macsmc"
"spi-apple"
"spi-hid-apple"
"spi-hid-apple-of"
"rtc-macsmc"
"simple-mfd-spmi"
"spmi-apple-controller"
"nvmem_spmi_mfd"
"apple-dockchannel"
"dockchannel-hid"
"apple-rtkit-helper"
"dm_crypt"
# additional stuff necessary to boot off USB for the installer
# and if the initrd (i.e. stage 1) goes wrong
"usb-storage"
"xhci-plat-hcd"
"usbhid"
"hid_generic"
];
boot.kernelParams = [
"earlycon"
"console=ttySAC0,1500000"
"console=tty0"
"boot.shell_on_fail"
# Apple's SSDs are slow (~dozens of ms) at processing flush requests which
# slows down programs that make a lot of fsync calls. This parameter sets
# a delay in ms before actually flushing so that such requests can be
# coalesced. Be warned that increasing this parameter above zero (default
# is 1000) has the potential, though admittedly unlikely, risk of
# UNBOUNDED data corruption in case of power loss!!!! Don't even think
# about it on desktops!!
"nvme_apple.flush_interval=1000"
];
# U-Boot does not support EFI variables
boot.loader.efi.canTouchEfiVariables = lib.mkForce false;
# GRUB has to be installed as removable if the user chooses to use it
boot.loader.grub = lib.mkDefault {
version = 2;
efiSupport = true;
efiInstallAsRemovable = true;
device = "nodev";
};
};
imports = [
(lib.mkRemovedOptionModule [ "boot" "kernelBuildIsCross" ] ''
If it should still be true (which is unlikely), replace it
with 'hardware.asahi.pkgsSystem = "x86_64-linux"'. Otherwise, delete it.
'')
(lib.mkRemovedOptionModule [ "boot" "kernelBuildIs16K" ] ''
Replaced with 'hardware.asahi.use4KPages' which defaults to false.
'')
];
options.hardware.asahi.use4KPages = lib.mkOption {
type = lib.types.bool;
default = false;
description = ''
Build the Asahi Linux kernel with 4K pages to improve compatibility in
some cases at the cost of performance in others.
'';
};
}

View File

@@ -0,0 +1,63 @@
{ pkgs, _4KBuild ? false }: let
localPkgs =
# we do this so the config can be read on any system and not affect
# the output hash
if builtins ? currentSystem then import (pkgs.path) { system = builtins.currentSystem; }
else pkgs;
readConfig = configfile: import (localPkgs.runCommand "config.nix" {} ''
echo "{" > "$out"
while IFS='=' read key val; do
[ "x''${key#CONFIG_}" != "x$key" ] || continue
no_firstquote="''${val#\"}";
echo ' "'"$key"'" = "'"''${no_firstquote%\"}"'";' >> "$out"
done < "${configfile}"
echo "}" >> $out
'').outPath;
linux_asahi_pkg = { stdenv, lib, fetchFromGitHub, fetchpatch, linuxKernel, ... } @ args:
linuxKernel.manualConfig rec {
inherit stdenv lib;
version = "6.1.0-rc6-asahi";
modDirVersion = version;
src = fetchFromGitHub {
# tracking: https://github.com/AsahiLinux/PKGBUILDs/blob/stable/linux-asahi/PKGBUILD
owner = "AsahiLinux";
repo = "linux";
rev = "asahi-6.1-rc6-5";
hash = "sha256-HHPfAtNh5wR0TCsEYuMdSbp55p1IVhF07tg4dlfgXk0=";
};
kernelPatches = [
# sven says this is okay since our kernel config supports it, and that
# it will be fixed at some point to not be necessary. but this allows
# new kernels to get USB up with old device trees
{ name = "0001-drivers-usb-dwc3-remove-apple-dr_mode-check";
patch = ./0001-drivers-usb-dwc3-remove-apple-dr_mode-check.patch;
}
] ++ lib.optionals _4KBuild [
# thanks to Sven Peter
# https://lore.kernel.org/linux-iommu/20211019163737.46269-1-sven@svenpeter.dev/
{ name = "sven-iommu-4k";
patch = ./sven-iommu-4k.patch;
}
] ++ lib.optionals (!_4KBuild) [
# patch the kernel to set the default size to 16k instead of modifying
# the config so we don't need to convert our config to the nixos
# infrastructure or patch it and thus introduce a dependency on the host
# system architecture
{ name = "default-pagesize-16k";
patch = ./default-pagesize-16k.patch;
}
];
configfile = ./config;
config = readConfig configfile;
extraMeta.branch = "6.1";
} // (args.argsOverride or {});
linux_asahi = (pkgs.callPackage linux_asahi_pkg { });
in pkgs.recurseIntoAttrs (pkgs.linuxPackagesFor linux_asahi)

View File

@@ -0,0 +1,449 @@
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 4f1a37bdd42d..c8c3ea81d818 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -97,7 +97,6 @@ struct apple_dart_hw {
* @lock: lock for hardware operations involving this dart
* @pgsize: pagesize supported by this DART
* @supports_bypass: indicates if this DART supports bypass mode
- * @force_bypass: force bypass mode due to pagesize mismatch?
* @sid2group: maps stream ids to iommu_groups
* @iommu: iommu core device
*/
@@ -115,7 +114,6 @@ struct apple_dart {
u32 pgsize;
u32 supports_bypass : 1;
- u32 force_bypass : 1;
struct iommu_group *sid2group[DART_MAX_STREAMS];
struct iommu_device iommu;
@@ -499,9 +497,6 @@ static int apple_dart_attach_dev(struct iommu_domain *domain,
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
struct apple_dart_domain *dart_domain = to_dart_domain(domain);
- if (cfg->stream_maps[0].dart->force_bypass &&
- domain->type != IOMMU_DOMAIN_IDENTITY)
- return -EINVAL;
if (!cfg->stream_maps[0].dart->supports_bypass &&
domain->type == IOMMU_DOMAIN_IDENTITY)
return -EINVAL;
@@ -630,8 +625,6 @@ static int apple_dart_of_xlate(struct device *dev, struct of_phandle_args *args)
if (cfg_dart) {
if (cfg_dart->supports_bypass != dart->supports_bypass)
return -EINVAL;
- if (cfg_dart->force_bypass != dart->force_bypass)
- return -EINVAL;
if (cfg_dart->pgsize != dart->pgsize)
return -EINVAL;
}
@@ -736,8 +729,6 @@ static int apple_dart_def_domain_type(struct device *dev)
{
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
- if (cfg->stream_maps[0].dart->force_bypass)
- return IOMMU_DOMAIN_IDENTITY;
if (!cfg->stream_maps[0].dart->supports_bypass)
return IOMMU_DOMAIN_DMA;
@@ -1121,8 +1121,6 @@ static int apple_dart_probe(struct platform_device *pdev)
goto err_clk_disable;
}
- dart->force_bypass = dart->pgsize > PAGE_SIZE;
-
ret = apple_dart_hw_reset(dart);
if (ret)
goto err_clk_disable;
@@ -1149,8 +1147,8 @@ static int apple_dart_probe(struct platform_device *pdev)
dev_info(
&pdev->dev,
- "DART [pagesize %x, %d streams, bypass support: %d, bypass forced: %d] initialized\n",
- dart->pgsize, dart->num_streams, dart->supports_bypass, dart->force_bypass);
+ "DART [pagesize %x, %d streams, bypass support: %d] initialized\n",
+ dart->pgsize, dart->num_streams, dart->supports_bypass);
return 0;
err_sysfs_remove:
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 09f6e1c0f9c0..094592751cfa 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -20,9 +20,11 @@
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
+#include <linux/kernel.h>
#include <linux/list_sort.h>
#include <linux/mm.h>
#include <linux/mutex.h>
+#include <linux/pfn.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
@@ -710,6 +712,9 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
{
struct page **pages;
unsigned int i = 0, nid = dev_to_node(dev);
+ unsigned int j;
+ unsigned long min_order = __fls(order_mask);
+ unsigned int min_order_size = 1U << min_order;
order_mask &= (2U << MAX_ORDER) - 1;
if (!order_mask)
@@ -749,15 +754,37 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
split_page(page, order);
break;
}
- if (!page) {
- __iommu_dma_free_pages(pages, i);
- return NULL;
+
+ /*
+ * If we have no valid page here we might be trying to allocate
+ * the last block consisting of 1<<order pages (to guarantee
+ * alignment) but actually need less pages than that.
+ * In that case we just try to allocate the entire block and
+ * directly free the spillover pages again.
+ */
+ if (!page && !order_mask && count < min_order_size) {
+ page = alloc_pages_node(nid, gfp, min_order);
+ if (!page)
+ goto free_pages;
+ split_page(page, min_order);
+
+ for (j = count; j < min_order_size; ++j)
+ __free_page(page + j);
+
+ order_size = count;
}
+
+ if (!page)
+ goto free_pages;
count -= order_size;
while (order_size--)
pages[i++] = page++;
}
return pages;
+
+free_pages:
+ __iommu_dma_free_pages(pages, i);
+ return NULL;
}
/*
@@ -785,16 +787,28 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
bool coherent = dev_is_dma_coherent(dev);
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
+ struct sg_append_table sgt_append = {};
+ struct scatterlist *last_sg;
struct page **pages;
dma_addr_t iova;
ssize_t ret;
+ phys_addr_t orig_s_phys;
+ size_t orig_s_len, orig_s_off, s_iova_off, iova_size;
if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
iommu_deferred_attach(dev, domain))
return NULL;
min_size = alloc_sizes & -alloc_sizes;
- if (min_size < PAGE_SIZE) {
+ if (iovad->granule > PAGE_SIZE) {
+ if (size < iovad->granule) {
+ /* ensure a single contiguous allocation */
+ min_size = ALIGN(size, PAGE_SIZE*(1U<<get_order(size)));
+ alloc_sizes = min_size;
+ }
+
+ size = PAGE_ALIGN(size);
+ } else if (min_size < PAGE_SIZE) {
min_size = PAGE_SIZE;
alloc_sizes |= PAGE_SIZE;
} else {
@@ -797,13 +836,17 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
if (!pages)
return NULL;
- size = iova_align(iovad, size);
- iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
+ iova_size = iova_align(iovad, size);
+ iova = iommu_dma_alloc_iova(domain, iova_size, dev->coherent_dma_mask, dev);
if (!iova)
goto out_free_pages;
- if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL))
+ /* append_table is only used to get a pointer to the last entry */
+ if (sg_alloc_append_table_from_pages(&sgt_append, pages, count, 0,
+ iova_size, UINT_MAX, 0, GFP_KERNEL))
goto out_free_iova;
+ memcpy(sgt, &sgt_append.sgt, sizeof(*sgt));
+ last_sg = sgt_append.prv;
if (!(ioprot & IOMMU_CACHE)) {
struct scatterlist *sg;
@@ -825,18 +839,59 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
arch_dma_prep_coherent(sg_page(sg), sg->length);
}
+ if (iovad->granule > PAGE_SIZE) {
+ if (size < iovad->granule) {
+ /*
+ * we only have a single sg list entry here that is
+ * likely not aligned to iovad->granule. adjust the
+ * entry to represent the encapsulating IOMMU page
+ * and then later restore everything to its original
+ * values, similar to the impedance matching done in
+ * iommu_dma_map_sg.
+ */
+ orig_s_phys = sg_phys(sgt->sgl);
+ orig_s_len = sgt->sgl->length;
+ orig_s_off = sgt->sgl->offset;
+ s_iova_off = iova_offset(iovad, orig_s_phys);
+
+ sg_set_page(sgt->sgl,
+ pfn_to_page(PHYS_PFN(orig_s_phys - s_iova_off)),
+ iova_align(iovad, orig_s_len + s_iova_off),
+ sgt->sgl->offset & ~s_iova_off);
+ } else {
+ /*
+ * convince iommu_map_sg_atomic to map the last block
+ * even though it may be too small.
+ */
+ orig_s_len = last_sg->length;
+ last_sg->length = iova_align(iovad, last_sg->length);
+ }
+ }
+
ret = iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot);
- if (ret < 0 || ret < size)
+ if (ret < 0 || ret < iova_size)
goto out_free_sg;
+ if (iovad->granule > PAGE_SIZE) {
+ if (size < iovad->granule) {
+ sg_set_page(sgt->sgl,
+ pfn_to_page(PHYS_PFN(orig_s_phys)),
+ orig_s_len, orig_s_off);
+
+ iova += s_iova_off;
+ } else {
+ last_sg->length = orig_s_len;
+ }
+ }
+
sgt->sgl->dma_address = iova;
- sgt->sgl->dma_length = size;
+ sgt->sgl->dma_length = iova_size;
return pages;
out_free_sg:
sg_free_table(sgt);
out_free_iova:
- iommu_dma_free_iova(cookie, iova, size, NULL);
+ iommu_dma_free_iova(cookie, iova, iova_size, NULL);
out_free_pages:
__iommu_dma_free_pages(pages, count);
return NULL;
@@ -1040,8 +1124,9 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
unsigned int s_length = sg_dma_len(s);
unsigned int s_iova_len = s->length;
- s->offset += s_iova_off;
- s->length = s_length;
+ sg_set_page(s,
+ pfn_to_page(PHYS_PFN(sg_phys(s) + s_iova_off)),
+ s_length, s_iova_off & ~PAGE_MASK);
sg_dma_address(s) = DMA_MAPPING_ERROR;
sg_dma_len(s) = 0;
@@ -1082,13 +1167,17 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
struct scatterlist *s;
+ phys_addr_t orig_paddr;
int i;
for_each_sg(sg, s, nents, i) {
- if (sg_dma_address(s) != DMA_MAPPING_ERROR)
- s->offset += sg_dma_address(s);
- if (sg_dma_len(s))
- s->length = sg_dma_len(s);
+ if (sg_dma_len(s)) {
+ orig_paddr = sg_phys(s) + sg_dma_address(s);
+ sg_set_page(s,
+ pfn_to_page(PHYS_PFN(orig_paddr)),
+ sg_dma_len(s),
+ sg_dma_address(s) & ~PAGE_MASK);
+ }
sg_dma_address(s) = DMA_MAPPING_ERROR;
sg_dma_len(s) = 0;
}
@@ -1166,15 +1255,16 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
* stashing the unaligned parts in the as-yet-unused DMA fields.
*/
for_each_sg(sg, s, nents, i) {
- size_t s_iova_off = iova_offset(iovad, s->offset);
+ phys_addr_t s_phys = sg_phys(s);
+ size_t s_iova_off = iova_offset(iovad, s_phys);
size_t s_length = s->length;
size_t pad_len = (mask - iova_len + 1) & mask;
sg_dma_address(s) = s_iova_off;
sg_dma_len(s) = s_length;
- s->offset -= s_iova_off;
s_length = iova_align(iovad, s_length + s_iova_off);
- s->length = s_length;
+ sg_set_page(s, pfn_to_page(PHYS_PFN(s_phys - s_iova_off)),
+ s_length, s->offset & ~s_iova_off);
/*
* Due to the alignment of our single IOVA allocation, we can
@@ -1412,9 +1502,15 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
struct page *page;
int ret;
+ if (iovad->granule > PAGE_SIZE)
+ return -ENXIO;
+
if (is_vmalloc_addr(cpu_addr)) {
struct page **pages = dma_common_find_pages(cpu_addr);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index f2c45b85b9fc..0c370e486d6e 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -80,6 +80,8 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
+static void __iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
@@ -1976,6 +1978,24 @@ void iommu_domain_free(struct iommu_domain *domain)
}
EXPORT_SYMBOL_GPL(iommu_domain_free);
+static int iommu_check_page_size(struct iommu_domain *domain,
+ struct device *dev)
+{
+ bool trusted = !(dev_is_pci(dev) && to_pci_dev(dev)->untrusted);
+
+ if (!iommu_is_paging_domain(domain))
+ return 0;
+ if (iommu_is_large_pages_domain(domain) && trusted)
+ return 0;
+
+ if (!(domain->pgsize_bitmap & (PAGE_SIZE | (PAGE_SIZE - 1)))) {
+ pr_warn("IOMMU pages cannot exactly represent CPU pages.\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
static int __iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
@@ -1985,9 +2005,23 @@ static int __iommu_attach_device(struct iommu_domain *domain,
return -ENODEV;
ret = domain->ops->attach_dev(domain, dev);
- if (!ret)
- trace_attach_device_to_domain(dev);
- return ret;
+ if (ret)
+ return ret;
+
+ /*
+ * Check that CPU pages can be represented by the IOVA granularity.
+ * This has to be done after ops->attach_dev since many IOMMU drivers
+ * only limit domain->pgsize_bitmap after having attached the first
+ * device.
+ */
+ ret = iommu_check_page_size(domain, dev);
+ if (ret) {
+ __iommu_detach_device(domain, dev);
+ return ret;
+ }
+
+ trace_attach_device_to_domain(dev);
+ return 0;
}
int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index db77aa675145..180ce65a6789 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -49,10 +49,11 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
{
/*
* IOVA granularity will normally be equal to the smallest
- * supported IOMMU page size; both *must* be capable of
- * representing individual CPU pages exactly.
+ * supported IOMMU page size; while both usually are capable of
+ * representing individual CPU pages exactly the IOVA allocator
+ * supports any granularities that are an exact power of two.
*/
- BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));
+ BUG_ON(!is_power_of_2(granule));
spin_lock_init(&iovad->iova_rbtree_lock);
iovad->rbroot = RB_ROOT;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 9208eca4b0d1..dec2dd70a876 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -63,6 +63,8 @@ struct iommu_domain_geometry {
implementation */
#define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */
#define __IOMMU_DOMAIN_DMA_FQ (1U << 3) /* DMA-API uses flush queue */
+#define __IOMMU_DOMAIN_LP (1U << 4) /* Support for PAGE_SIZE smaller
+ than IOMMU page size */
/*
* This are the possible domain-types
@@ -82,10 +84,12 @@ struct iommu_domain_geometry {
#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
- __IOMMU_DOMAIN_DMA_API)
+ __IOMMU_DOMAIN_DMA_API | \
+ __IOMMU_DOMAIN_LP)
#define IOMMU_DOMAIN_DMA_FQ (__IOMMU_DOMAIN_PAGING | \
__IOMMU_DOMAIN_DMA_API | \
- __IOMMU_DOMAIN_DMA_FQ)
+ __IOMMU_DOMAIN_DMA_FQ | \
+ __IOMMU_DOMAIN_LP)
struct iommu_domain {
unsigned type;
@@ -102,6 +106,16 @@ static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
return domain->type & __IOMMU_DOMAIN_DMA_API;
}
+static inline bool iommu_is_paging_domain(struct iommu_domain *domain)
+{
+ return domain->type & __IOMMU_DOMAIN_PAGING;
+}
+
+static inline bool iommu_is_large_pages_domain(struct iommu_domain *domain)
+{
+ return domain->type & __IOMMU_DOMAIN_LP;
+}
+
enum iommu_cap {
IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA
transactions */

View File

@@ -0,0 +1,96 @@
{ stdenv
, lib
, fetchFromGitHub
, pkgsCross
, python3
, dtc
, imagemagick
, isRelease ? false
, withTools ? true
, withChainloading ? false
, rust-bin ? null
, customLogo ? null
}:
assert withChainloading -> rust-bin != null;
let
pyenv = python3.withPackages (p: with p; [
construct
pyserial
]);
rustenv = rust-bin.selectLatestNightlyWith (toolchain: toolchain.minimal.override {
targets = [ "aarch64-unknown-none-softfloat" ];
});
in stdenv.mkDerivation rec {
pname = "m1n1";
version = "1.1.8";
src = fetchFromGitHub {
# tracking: https://github.com/AsahiLinux/PKGBUILDs/blob/stable/m1n1/PKGBUILD
owner = "AsahiLinux";
repo = "m1n1";
rev = "v${version}";
hash = "sha256-4Ykh+EzOCRtZQD1upUDJpi5ikMOCnxLwvLWajtMo7LU=";
fetchSubmodules = true;
};
makeFlags = [ "ARCH=aarch64-unknown-linux-gnu-" ]
++ lib.optional isRelease "RELEASE=1"
++ lib.optional withChainloading "CHAINLOADING=1";
nativeBuildInputs = [
dtc
pkgsCross.aarch64-multiplatform.buildPackages.gcc
] ++ lib.optional withChainloading rustenv
++ lib.optional (customLogo != null) imagemagick;
postPatch = ''
substituteInPlace proxyclient/m1n1/asm.py \
--replace 'aarch64-linux-gnu-' 'aarch64-unknown-linux-gnu-' \
--replace 'TOOLCHAIN = ""' 'TOOLCHAIN = "'$out'/toolchain-bin/"'
'';
preConfigure = lib.optionalString (customLogo != null) ''
pushd data &>/dev/null
ln -fs ${customLogo} bootlogo_256.png
if [[ "$(magick identify bootlogo_256.png)" != 'bootlogo_256.png PNG 256x256'* ]]; then
echo "Custom logo is not a 256x256 PNG"
exit 1
fi
rm bootlogo_128.png
convert bootlogo_256.png -resize 128x128 bootlogo_128.png
./makelogo.sh
popd &>/dev/null
'';
installPhase = ''
runHook preInstall
mkdir -p $out/build
cp build/m1n1.macho $out/build
cp build/m1n1.bin $out/build
'' + (lib.optionalString withTools ''
mkdir -p $out/{bin,script,toolchain-bin}
cp -r proxyclient $out/script
cp -r tools $out/script
for toolpath in $out/script/proxyclient/tools/*.py; do
tool=$(basename $toolpath .py)
script=$out/bin/m1n1-$tool
cat > $script <<EOF
#!/bin/sh
${pyenv}/bin/python $toolpath "\$@"
EOF
chmod +x $script
done
GCC=${pkgsCross.aarch64-multiplatform.buildPackages.gcc}
BINUTILS=${pkgsCross.aarch64-multiplatform.buildPackages.binutils-unwrapped}
ln -s $GCC/bin/*-gcc $out/toolchain-bin/
ln -s $GCC/bin/*-ld $out/toolchain-bin/
ln -s $BINUTILS/bin/*-objcopy $out/toolchain-bin/
ln -s $BINUTILS/bin/*-objdump $out/toolchain-bin/
ln -s $GCC/bin/*-nm $out/toolchain-bin/
'') + ''
runHook postInstall
'';
}
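
A minimal sketch of building this m1n1 derivation standalone, assuming it is saved as ./m1n1.nix and that `rust-bin` comes from the oxalica rust-overlay (both assumptions, not taken from this repository):

let
  rust-overlay = import (fetchTarball
    "https://github.com/oxalica/rust-overlay/archive/master.tar.gz");
  pkgs = import <nixpkgs> {
    system = "aarch64-linux";
    overlays = [ rust-overlay ];
  };
in pkgs.callPackage ./m1n1.nix {
  inherit (pkgs) rust-bin;
  withChainloading = true; # requires rust-bin, per the assert above
  isRelease = true;
}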

View File

@@ -0,0 +1,70 @@
{ config, pkgs, lib, ... }:
{
config = {
assertions = lib.mkIf config.hardware.asahi.extractPeripheralFirmware [
{ assertion = config.hardware.asahi.peripheralFirmwareDirectory != null;
message = ''
Asahi peripheral firmware extraction is enabled but the firmware
location appears incorrect.
'';
}
];
hardware.firmware = let
asahi-fwextract = pkgs.callPackage ../asahi-fwextract {};
in lib.mkIf ((config.hardware.asahi.peripheralFirmwareDirectory != null)
&& config.hardware.asahi.extractPeripheralFirmware) [
(pkgs.stdenv.mkDerivation {
name = "asahi-peripheral-firmware";
nativeBuildInputs = [ asahi-fwextract pkgs.cpio ];
buildCommand = ''
mkdir extracted
asahi-fwextract ${/. + config.hardware.asahi.peripheralFirmwareDirectory} extracted
mkdir -p $out/lib/firmware
cat extracted/firmware.cpio | cpio -id --quiet --no-absolute-filenames
mv vendorfw/* $out/lib/firmware
'';
})
];
};
options.hardware.asahi = {
extractPeripheralFirmware = lib.mkOption {
type = lib.types.bool;
default = true;
description = ''
Automatically extract the non-free non-redistributable peripheral
firmware necessary for features like Wi-Fi.
'';
};
peripheralFirmwareDirectory = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = let
paths = [
# path when the system is operating normally
"/boot/asahi"
# path when the system is mounted in the installer
"/mnt/boot/asahi"
./.
];
validPaths = (builtins.filter
(p: builtins.pathExists (p + "/all_firmware.tar.gz"))
paths) ++ [ null ];
in builtins.elemAt validPaths 0;
description = ''
Path to the directory containing the non-free non-redistributable
peripheral firmware necessary for features like Wi-Fi. Ordinarily, this
will automatically point to the appropriate location on the ESP. Flake
users and those interested in maximum purity will want to copy those
files elsewhere and specify this manually.
Currently, this consists of the files `all_firmware.tar.gz` and
`kernelcache*`. The official Asahi Linux installer places these files
in the `asahi` directory of the EFI system partition when creating it.
'';
};
};
}
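
A minimal sketch of consuming these options from a host configuration; the firmware path here is illustrative, not taken from this repository:

{ ... }:
{
  hardware.asahi = {
    extractPeripheralFirmware = true;
    # e.g. the ESP's `asahi` directory copied into the configuration repo
    peripheralFirmwareDirectory = ./firmware/asahi;
  };
}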

View File

@@ -0,0 +1,40 @@
{ lib
, fetchFromGitHub
, pkgs
, pkgsCross
, m1n1
}: let
# u-boot's buildInputs get a different hash and don't build right if we try to
# cross-build for aarch64 on itself for whatever reason
buildPkgs = if pkgs.stdenv.system == "aarch64-linux" then pkgs else pkgsCross.aarch64-multiplatform;
in (buildPkgs.buildUBoot rec {
src = fetchFromGitHub {
# tracking: https://github.com/AsahiLinux/PKGBUILDs/blob/stable/uboot-asahi/PKGBUILD
owner = "AsahiLinux";
repo = "u-boot";
rev = "asahi-v2022.10-1";
hash = "sha256-/dtTJ+GxC2GFlqduAa2WWPGwktLjM7tUKus6/aUyPNQ=";
};
version = "2022.10.asahi1-1";
defconfig = "apple_m1_defconfig";
extraMeta.platforms = [ "aarch64-linux" ];
filesToInstall = [
"u-boot-nodtb.bin.gz"
"m1n1-u-boot.macho"
"m1n1-u-boot.bin"
];
extraConfig = ''
CONFIG_IDENT_STRING=" ${version}"
'';
}).overrideAttrs (o: {
# nixos's downstream patches are not applicable
patches = [ ];
preInstall = ''
# compress so that m1n1 knows U-Boot's size and can find things after it
gzip -n u-boot-nodtb.bin
cat ${m1n1}/build/m1n1.macho arch/arm/dts/t[68]*.dtb u-boot-nodtb.bin.gz > m1n1-u-boot.macho
cat ${m1n1}/build/m1n1.bin arch/arm/dts/t[68]*.dtb u-boot-nodtb.bin.gz > m1n1-u-boot.bin
'';
})
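
A sketch of composing the two derivations above, assuming they are saved as ./m1n1.nix and ./u-boot-asahi.nix (file names are assumptions); the result provides the m1n1-u-boot.bin and m1n1-u-boot.macho images listed in filesToInstall:

{ pkgs }:
let
  # callPackage fills lib, fetchFromGitHub, pkgsCross, etc. from pkgs
  m1n1 = pkgs.callPackage ./m1n1.nix { };
in pkgs.callPackage ./u-boot-asahi.nix { inherit m1n1; }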

View File

@@ -1,8 +1,12 @@
 { config, pkgs, lib, ... }:
 {
+  disabledModules = [
+    "hardware/video/nvidia.nix"
+  ];
   imports = [
     ./hardware-configuration.nix
+    ./nvidia.nix
   ];

   firmware.x86_64.enable = true;
@@ -19,30 +23,26 @@
   hardware.enableAllFirmware = true;

-  # depthai
-  services.udev.extraRules = ''
-    SUBSYSTEM=="usb", ATTRS{idVendor}=="03e7", MODE="0666"
-  '';
+  # newer kernel for wifi
+  boot.kernelPackages = pkgs.linuxPackages_latest;

   # gpu
   services.xserver.videoDrivers = [ "nvidia" ];
+  services.xserver.logFile = "/var/log/Xorg.0.log";
   hardware.nvidia = {
     modesetting.enable = true; # for nvidia-vaapi-driver
     prime = {
-      reverseSync.enable = true;
-      offload.enableOffloadCmd = true;
+      sync.enable = true;
       nvidiaBusId = "PCI:1:0:0";
       amdgpuBusId = "PCI:4:0:0";
     };
+    powerManagement = {
+      # enable = true;
+      # finegrained = true;
+      # coarsegrained = true;
+    };
   };

-  # virt-manager
-  virtualisation.libvirtd.enable = true;
-  programs.dconf.enable = true;
-  virtualisation.spiceUSBRedirection.enable = true;
-  environment.systemPackages = with pkgs; [ virt-manager ];
-  users.users.googlebot.extraGroups = [ "libvirtd" ];
-
   # vpn-container.enable = true;
   # containers.vpn.interfaces = [ "piaw" ];

485
machines/ray/nvidia.nix Normal file
View File

@@ -0,0 +1,485 @@
# This module provides the proprietary NVIDIA X11 / OpenGL drivers.
{ config, lib, pkgs, ... }:
with lib;
let
nvidia_x11 = let
drivers = config.services.xserver.videoDrivers;
isDeprecated = str: (hasPrefix "nvidia" str) && (str != "nvidia");
hasDeprecated = drivers: any isDeprecated drivers;
in if (hasDeprecated drivers) then
throw ''
Selecting an nvidia driver has been modified for NixOS 19.03. The version is now set using `hardware.nvidia.package`.
''
else if (elem "nvidia" drivers) then cfg.package else null;
enabled = nvidia_x11 != null;
cfg = config.hardware.nvidia;
pCfg = cfg.prime;
syncCfg = pCfg.sync;
offloadCfg = pCfg.offload;
reverseSyncCfg = pCfg.reverse_sync;
primeEnabled = syncCfg.enable || reverseSyncCfg.enable || offloadCfg.enable;
nvidiaPersistencedEnabled = cfg.nvidiaPersistenced;
nvidiaSettings = cfg.nvidiaSettings;
in
{
imports =
[
(mkRenamedOptionModule [ "hardware" "nvidia" "optimus_prime" "enable" ] [ "hardware" "nvidia" "prime" "sync" "enable" ])
(mkRenamedOptionModule [ "hardware" "nvidia" "optimus_prime" "allowExternalGpu" ] [ "hardware" "nvidia" "prime" "allowExternalGpu" ])
(mkRenamedOptionModule [ "hardware" "nvidia" "prime" "sync" "allowExternalGpu" ] [ "hardware" "nvidia" "prime" "allowExternalGpu" ])
(mkRenamedOptionModule [ "hardware" "nvidia" "optimus_prime" "nvidiaBusId" ] [ "hardware" "nvidia" "prime" "nvidiaBusId" ])
(mkRenamedOptionModule [ "hardware" "nvidia" "optimus_prime" "intelBusId" ] [ "hardware" "nvidia" "prime" "intelBusId" ])
];
options = {
hardware.nvidia.powerManagement.enable = mkOption {
type = types.bool;
default = false;
description = ''
Experimental power management through systemd. For more information, see
the NVIDIA docs, chapter 21. Configuring Power Management Support.
'';
};
hardware.nvidia.powerManagement.finegrained = mkOption {
type = types.bool;
default = false;
description = ''
Experimental power management of PRIME offload. For more information, see
the NVIDIA docs, chapter 22. PCI-Express runtime power management.
'';
};
hardware.nvidia.powerManagement.coarsegrained = mkOption {
type = types.bool;
default = false;
description = ''
Experimental power management of PRIME offload. For more information, see
the NVIDIA docs, chapter 22. PCI-Express runtime power management.
'';
};
hardware.nvidia.modesetting.enable = mkOption {
type = types.bool;
default = false;
description = ''
Enable kernel modesetting when using the NVIDIA proprietary driver.
Enabling this fixes screen tearing when using Optimus via PRIME (see
<option>hardware.nvidia.prime.sync.enable</option>). This is not enabled
by default because it is not officially supported by NVIDIA and would not
work with SLI.
'';
};
hardware.nvidia.prime.nvidiaBusId = mkOption {
type = types.str;
default = "";
example = "PCI:1:0:0";
description = ''
Bus ID of the NVIDIA GPU. You can find it using lspci; for example if lspci
shows the NVIDIA GPU at "01:00.0", set this option to "PCI:1:0:0".
'';
};
hardware.nvidia.prime.intelBusId = mkOption {
type = types.str;
default = "";
example = "PCI:0:2:0";
description = ''
Bus ID of the Intel GPU. You can find it using lspci; for example if lspci
shows the Intel GPU at "00:02.0", set this option to "PCI:0:2:0".
'';
};
hardware.nvidia.prime.amdgpuBusId = mkOption {
type = types.str;
default = "";
example = "PCI:4:0:0";
description = ''
Bus ID of the AMD APU. You can find it using lspci; for example if lspci
shows the AMD APU at "04:00.0", set this option to "PCI:4:0:0".
'';
};
hardware.nvidia.prime.sync.enable = mkOption {
type = types.bool;
default = false;
description = ''
Enable NVIDIA Optimus support using the NVIDIA proprietary driver via PRIME.
If enabled, the NVIDIA GPU will be always on and used for all rendering,
while enabling output to displays attached only to the integrated Intel/AMD
GPU without a multiplexer.
Note that this option only has any effect if the "nvidia" driver is specified
in <option>services.xserver.videoDrivers</option>, and it should preferably
be the only driver there.
If this is enabled, then the bus IDs of the NVIDIA and Intel/AMD GPUs have to
be specified (<option>hardware.nvidia.prime.nvidiaBusId</option> and
<option>hardware.nvidia.prime.intelBusId</option> or
<option>hardware.nvidia.prime.amdgpuBusId</option>).
If you enable this, you may want to also enable kernel modesetting for the
NVIDIA driver (<option>hardware.nvidia.modesetting.enable</option>) in order
to prevent tearing.
Note that this configuration will only be successful when a display manager
for which the <option>services.xserver.displayManager.setupCommands</option>
option is supported is used.
'';
};
hardware.nvidia.prime.allowExternalGpu = mkOption {
type = types.bool;
default = false;
description = ''
Configure X to allow external NVIDIA GPUs when using Prime [Reverse] Sync.
'';
};
hardware.nvidia.prime.offload.enable = mkOption {
type = types.bool;
default = false;
description = ''
Enable render offload support using the NVIDIA proprietary driver via PRIME.
If this is enabled, then the bus IDs of the NVIDIA and Intel/AMD GPUs have to
be specified (<option>hardware.nvidia.prime.nvidiaBusId</option> and
<option>hardware.nvidia.prime.intelBusId</option> or
<option>hardware.nvidia.prime.amdgpuBusId</option>).
'';
};
hardware.nvidia.prime.offload.enableOffloadCmd = mkOption {
type = types.bool;
default = false;
description = ''
Adds a `nvidia-offload` convenience script to <option>environment.systemPackages</option>
for offloading programs to an NVIDIA device. For this to work, either
<option>hardware.nvidia.prime.offload.enable</option> or <option>hardware.nvidia.prime.reverse_sync.enable</option>
must also be enabled. Example usage: `nvidia-offload sauerbraten_client`
'';
};
hardware.nvidia.prime.reverse_sync.enable = mkOption {
type = types.bool;
default = false;
description = ''
Warning: this feature is relatively new and, depending on your system, it might
work poorly; AMD support especially so.
See: https://forums.developer.nvidia.com/t/the-all-new-outputsink-feature-aka-reverse-prime/129828
Enable NVIDIA Optimus support using the NVIDIA proprietary driver via reverse
PRIME. If enabled, the Intel/AMD GPU will be used for all rendering, while
enabling output to displays attached only to the NVIDIA GPU without a
multiplexer.
Note that this option only has any effect if the "nvidia" driver is specified
in <option>services.xserver.videoDrivers</option>, and it should preferably
be the only driver there.
If this is enabled, then the bus IDs of the NVIDIA and Intel/AMD GPUs have to
be specified (<option>hardware.nvidia.prime.nvidiaBusId</option> and
<option>hardware.nvidia.prime.intelBusId</option> or
<option>hardware.nvidia.prime.amdgpuBusId</option>).
If you enable this, you may want to also enable kernel modesetting for the
NVIDIA driver (<option>hardware.nvidia.modesetting.enable</option>) in order
to prevent tearing.
Note that this configuration will only be successful when a display manager
for which the <option>services.xserver.displayManager.setupCommands</option>
option is supported is used.
'';
};
hardware.nvidia.nvidiaSettings = mkOption {
default = true;
type = types.bool;
description = ''
Whether to add nvidia-settings, NVIDIA's GUI configuration tool, to
systemPackages.
'';
};
hardware.nvidia.nvidiaPersistenced = mkOption {
default = false;
type = types.bool;
description = ''
Enable nvidia-persistenced for NVIDIA GPU headless mode; it ensures all
GPUs stay awake even during headless mode.
'';
};
hardware.nvidia.package = lib.mkOption {
type = lib.types.package;
default = config.boot.kernelPackages.nvidiaPackages.stable;
defaultText = literalExpression "config.boot.kernelPackages.nvidiaPackages.stable";
description = ''
The NVIDIA X11 derivation to use.
'';
example = literalExpression "config.boot.kernelPackages.nvidiaPackages.legacy_340";
};
};
config = let
igpuDriver = if pCfg.intelBusId != "" then "modesetting" else "amdgpu";
igpuBusId = if pCfg.intelBusId != "" then pCfg.intelBusId else pCfg.amdgpuBusId;
in mkIf enabled {
assertions = [
{
assertion = primeEnabled -> pCfg.intelBusId == "" || pCfg.amdgpuBusId == "";
message = ''
You cannot configure both an Intel iGPU and an AMD APU. Pick the one corresponding to your processor.
'';
}
{
assertion = offloadCfg.enableOffloadCmd -> offloadCfg.enable || reverseSyncCfg.enable;
message = ''
Offload command requires offloading or reverse prime sync to be enabled.
'';
}
{
assertion = primeEnabled -> pCfg.nvidiaBusId != "" && (pCfg.intelBusId != "" || pCfg.amdgpuBusId != "");
message = ''
When NVIDIA PRIME is enabled, the GPU bus IDs must be configured.
'';
}
{
assertion = offloadCfg.enable -> versionAtLeast nvidia_x11.version "435.21";
message = "NVIDIA PRIME render offload is currently only supported on versions >= 435.21.";
}
{
assertion = (reverseSyncCfg.enable && pCfg.amdgpuBusId != "") -> versionAtLeast nvidia_x11.version "470.0";
message = "NVIDIA PRIME render offload for AMD APUs is currently only supported on versions >= 470 beta.";
}
{
assertion = !(syncCfg.enable && offloadCfg.enable);
message = "PRIME Sync and Offload cannot be both enabled";
}
{
assertion = !(syncCfg.enable && reverseSyncCfg.enable);
message = "PRIME Sync and PRIME Reverse Sync cannot be both enabled";
}
{
assertion = !(syncCfg.enable && (cfg.powerManagement.finegrained || cfg.powerManagement.coarsegrained));
message = "Sync precludes powering down the NVIDIA GPU.";
}
{
assertion = cfg.powerManagement.finegrained -> offloadCfg.enable;
message = "Fine-grained power management requires offload to be enabled.";
}
{
assertion = cfg.powerManagement.coarsegrained -> offloadCfg.enable;
message = "Coarse-grained power management requires offload to be enabled.";
}
{
assertion = cfg.powerManagement.enable -> (
builtins.pathExists (cfg.package.out + "/bin/nvidia-sleep.sh") &&
builtins.pathExists (cfg.package.out + "/lib/systemd/system-sleep/nvidia")
);
message = "Required files for driver based power management don't exist.";
}
];
# If Optimus/PRIME is enabled, we:
# - Specify the configured NVIDIA GPU bus ID in the Device section for the
# "nvidia" driver.
# - Add the AllowEmptyInitialConfiguration option to the Screen section for the
# "nvidia" driver, in order to allow the X server to start without any outputs.
# - Add a separate Device section for the Intel GPU, using the "modesetting"
# driver and with the configured BusID.
# - OR add a separate Device section for the AMD APU, using the "amdgpu"
# driver and with the configured BusID.
# - Reference that Device section from the ServerLayout section as an inactive
# device.
# - Configure the display manager to run specific `xrandr` commands which will
# configure/enable displays connected to the Intel iGPU / AMD APU.
services.xserver.useGlamor = mkDefault offloadCfg.enable;
# reverse sync implies offloading
hardware.nvidia.prime.offload.enable = mkDefault reverseSyncCfg.enable;
services.xserver.drivers = optional primeEnabled {
name = igpuDriver;
display = !syncCfg.enable;
modules = optional (igpuDriver == "amdgpu") [ pkgs.xorg.xf86videoamdgpu ];
deviceSection = ''
BusID "${igpuBusId}"
${optionalString (syncCfg.enable && igpuDriver != "amdgpu") ''Option "AccelMethod" "none"''}
'';
} ++ singleton {
name = "nvidia";
modules = [ nvidia_x11.bin ];
display = syncCfg.enable;
deviceSection = optionalString primeEnabled ''
BusID "${pCfg.nvidiaBusId}"
${optionalString pCfg.allowExternalGpu "Option \"AllowExternalGpus\""}
'';
};
services.xserver.serverLayoutSection = optionalString syncCfg.enable ''
Inactive "Device-${igpuDriver}[0]"
'' + optionalString reverseSyncCfg.enable ''
Inactive "Device-nvidia[0]"
'' + optionalString offloadCfg.enable ''
Option "AllowNVIDIAGPUScreens"
'';
services.xserver.displayManager.setupCommands = let
gpuProviderName = if igpuDriver == "amdgpu" then
# find the name of the provider if amdgpu
"`${pkgs.xorg.xrandr}/bin/xrandr --listproviders | ${pkgs.gnugrep}/bin/grep -i AMD | ${pkgs.gnused}/bin/sed -n 's/^.*name://p'`"
else
igpuDriver;
providerCmdParams = if syncCfg.enable then "\"${gpuProviderName}\" NVIDIA-0" else "NVIDIA-G0 \"${gpuProviderName}\"";
in optionalString (syncCfg.enable || reverseSyncCfg.enable) ''
# Added by nvidia configuration module for Optimus/PRIME.
${pkgs.xorg.xrandr}/bin/xrandr --setprovideroutputsource ${providerCmdParams}
${pkgs.xorg.xrandr}/bin/xrandr --auto
'';
environment.etc."nvidia/nvidia-application-profiles-rc" = mkIf nvidia_x11.useProfiles {
source = "${nvidia_x11.bin}/share/nvidia/nvidia-application-profiles-rc";
};
# 'nvidia_x11' installs its files to /run/opengl-driver/...
environment.etc."egl/egl_external_platform.d".source =
"/run/opengl-driver/share/egl/egl_external_platform.d/";
hardware.opengl.extraPackages = [
nvidia_x11.out
# pkgs.nvidia-vaapi-driver
];
hardware.opengl.extraPackages32 = [
nvidia_x11.lib32
# pkgs.pkgsi686Linux.nvidia-vaapi-driver
];
environment.systemPackages = [ nvidia_x11.bin ]
++ optionals cfg.nvidiaSettings [ nvidia_x11.settings ]
++ optionals nvidiaPersistencedEnabled [ nvidia_x11.persistenced ]
++ optionals offloadCfg.enableOffloadCmd [
(pkgs.writeShellScriptBin "nvidia-offload" ''
export __NV_PRIME_RENDER_OFFLOAD=1
export __NV_PRIME_RENDER_OFFLOAD_PROVIDER=NVIDIA-G0
export __GLX_VENDOR_LIBRARY_NAME=nvidia
export __VK_LAYER_NV_optimus=NVIDIA_only
exec -a "$0" "$@"
'')
];
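# Usage sketch (shell), assuming offload or reverse sync is enabled and the
# nvidia-offload script above is installed:
#   nvidia-offload glxinfo | grep "OpenGL renderer"
#   nvidia-offload sauerbraten_client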
systemd.packages = optional cfg.powerManagement.enable nvidia_x11.out;
systemd.services = let
baseNvidiaService = state: {
description = "NVIDIA system ${state} actions";
path = with pkgs; [ kbd ];
serviceConfig = {
Type = "oneshot";
ExecStart = "${nvidia_x11.out}/bin/nvidia-sleep.sh '${state}'";
};
};
nvidiaService = sleepState: (baseNvidiaService sleepState) // {
before = [ "systemd-${sleepState}.service" ];
requiredBy = [ "systemd-${sleepState}.service" ];
};
services = (builtins.listToAttrs (map (t: nameValuePair "nvidia-${t}" (nvidiaService t)) ["hibernate" "suspend"]))
// {
nvidia-resume = (baseNvidiaService "resume") // {
after = [ "systemd-suspend.service" "systemd-hibernate.service" ];
requiredBy = [ "systemd-suspend.service" "systemd-hibernate.service" ];
};
};
in optionalAttrs cfg.powerManagement.enable services
// optionalAttrs nvidiaPersistencedEnabled {
"nvidia-persistenced" = mkIf nvidiaPersistencedEnabled {
description = "NVIDIA Persistence Daemon";
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "forking";
Restart = "always";
PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
ExecStart = "${nvidia_x11.persistenced}/bin/nvidia-persistenced --verbose";
ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
};
};
};
systemd.tmpfiles.rules = optional config.virtualisation.docker.enableNvidia
"L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin"
++ optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
"L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
boot.extraModulePackages = [ nvidia_x11.bin ];
# nvidia-uvm is required by CUDA applications.
boot.kernelModules = [ "nvidia-uvm" ] ++
optionals config.services.xserver.enable [ "nvidia" "nvidia_modeset" "nvidia_drm" ];
# If requested enable modesetting via kernel parameter.
boot.kernelParams = optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
++ optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1";
services.udev.extraRules =
''
# Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 255'"
KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 254'"
KERNEL=="card*", SUBSYSTEM=="drm", DRIVERS=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia%n c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) %n'"
KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 0'"
KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 0'"
'' + optionalString (cfg.powerManagement.finegrained || cfg.powerManagement.coarsegrained) ''
# Remove NVIDIA USB xHCI Host Controller devices, if present
ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c0330", ATTR{remove}="1"
# Remove NVIDIA USB Type-C UCSI devices, if present
ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c8000", ATTR{remove}="1"
# Remove NVIDIA Audio devices, if present
ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x040300", ATTR{remove}="1"
# Enable runtime PM for NVIDIA VGA/3D controller devices on driver bind
ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="auto"
ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="auto"
# Disable runtime PM for NVIDIA VGA/3D controller devices on driver unbind
ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="on"
ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="on"
'';
boot.extraModprobeConfig = optionalString cfg.powerManagement.finegrained ''
options nvidia "NVreg_DynamicPowerManagement=0x02"
'' + optionalString cfg.powerManagement.coarsegrained ''
options nvidia "NVreg_DynamicPowerManagement=0x01"
'';
boot.blacklistedKernelModules = [ "nouveau" "nvidiafb" ];
services.acpid.enable = true;
};
}
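
For reference, a hypothetical host snippet wired against this vendored module's option names (`reverse_sync` and `powerManagement.coarsegrained`, which differ from upstream nixpkgs); the bus IDs are illustrative:

{ ... }:
{
  disabledModules = [ "hardware/video/nvidia.nix" ];
  imports = [ ./nvidia.nix ];

  services.xserver.videoDrivers = [ "nvidia" ];
  hardware.nvidia = {
    modesetting.enable = true;
    prime = {
      reverse_sync.enable = true;       # implies offload.enable via mkDefault
      offload.enableOffloadCmd = true;  # installs the nvidia-offload script
      nvidiaBusId = "PCI:1:0:0";
      amdgpuBusId = "PCI:4:0:0";
    };
    powerManagement.coarsegrained = true; # requires offload, per the assertions
  };
}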