Merge master into haskell-updates

Commit 47e7b83fd1 by github-actions[bot], committed via GitHub on 2024-02-29 00:12:06 +00:00
GPG key ID: B5690EEEBB952194 (no known key found for this signature in database)
686 changed files with 32334 additions and 2191 deletions


@ -108,3 +108,7 @@ charset = unset
[lib/tests/*.plist]
indent_style = tab
insert_final_newline = unset
[pkgs/kde/generated/**]
insert_final_newline = unset
end_of_line = unset

.github/CODEOWNERS (vendored), 17 changed lines

@ -185,11 +185,18 @@ pkgs/development/python-modules/buildcatrust/ @ajs124 @lukegb @mweinelt
# Licenses
/lib/licenses.nix @alyssais
# Qt / KDE
/pkgs/applications/kde @ttuegel
/pkgs/desktops/plasma-5 @ttuegel
/pkgs/development/libraries/kde-frameworks @ttuegel
/pkgs/development/libraries/qt-5 @ttuegel
# Qt
/pkgs/development/libraries/qt-5 @NixOS/qt-kde
/pkgs/development/libraries/qt-6 @NixOS/qt-kde
# KDE / Plasma 5
/pkgs/applications/kde @NixOS/qt-kde
/pkgs/desktops/plasma-5 @NixOS/qt-kde
/pkgs/development/libraries/kde-frameworks @NixOS/qt-kde
# KDE / Plasma 6
/pkgs/kde @NixOS/qt-kde
/maintainers/scripts/kde @NixOS/qt-kde
# PostgreSQL and related stuff
/pkgs/servers/sql/postgresql @thoughtpolice @marsam


@ -507,6 +507,16 @@ This allows the function to produce reproducible images.
_Default value:_ `"1970-01-01T00:00:01Z"`.
`uid` (Number; _optional_) []{#dockerTools-buildLayeredImage-arg-uid}
`gid` (Number; _optional_) []{#dockerTools-buildLayeredImage-arg-gid}
`uname` (String; _optional_) []{#dockerTools-buildLayeredImage-arg-uname}
`gname` (String; _optional_) []{#dockerTools-buildLayeredImage-arg-gname}
: Ownership applied to the Nix store inside the image, given as numeric IDs and names.
Can be overridden to e.g. `1000` / `1000` / `"user"` / `"user"` to enable building a container where Nix can be used as an unprivileged user in single-user mode.
_Default value:_ `0` / `0` / `"root"` / `"root"`.
`maxLayers` (Number; _optional_) []{#dockerTools-buildLayeredImage-arg-maxLayers}
: The maximum number of layers that will be used by the generated image.


@ -1,5 +1,12 @@
# darwin.linux-builder {#sec-darwin-builder}
:::{.warning}
By default, `darwin.linux-builder` uses a publicly-known private SSH **host key** (this is different from the SSH key used by the user that connects to the builder).
Given the intended use case for it (a Linux builder that runs **on the same machine**), this shouldn't be an issue.
However, if you plan to deviate from this use case in any way (e.g. by exposing this builder to remote machines), you should understand the security implications of doing so and take any appropriate measures.
:::
`darwin.linux-builder` provides a way to bootstrap a Linux remote builder on a macOS machine.
This requires macOS version 12.4 or later.
@ -97,6 +104,7 @@ $ sudo launchctl kickstart -k system/org.nixos.nix-daemon
{ virtualisation = {
host.pkgs = pkgs;
darwin-builder.workingDirectory = "/var/lib/darwin-builder";
darwin-builder.hostPort = 22;
};
}
];
@ -110,7 +118,9 @@ $ sudo launchctl kickstart -k system/org.nixos.nix-daemon
{
nix.distributedBuilds = true;
nix.buildMachines = [{
hostName = "ssh://builder@localhost";
hostName = "localhost";
sshUser = "builder";
sshKey = "/etc/nix/builder_ed25519";
system = linuxSystem;
maxJobs = 4;
supportedFeatures = [ "kvm" "benchmark" "big-parallel" ];


@ -2,10 +2,10 @@
{ lib }:
let
inherit (builtins) head tail length;
inherit (lib.trivial) id mergeAttrs warn;
inherit (builtins) head length;
inherit (lib.trivial) mergeAttrs warn;
inherit (lib.strings) concatStringsSep concatMapStringsSep escapeNixIdentifier sanitizeDerivationName;
inherit (lib.lists) foldr foldl' concatMap concatLists elemAt all partition groupBy take foldl;
inherit (lib.lists) foldr foldl' concatMap elemAt all partition groupBy take foldl;
in
rec {
@ -369,7 +369,7 @@ rec {
Type:
attrValues :: AttrSet -> [Any]
*/
attrValues = builtins.attrValues or (attrs: attrVals (attrNames attrs) attrs);
attrValues = builtins.attrValues;
/* Given a set of attribute names, return the set of the corresponding
@ -398,8 +398,7 @@ rec {
Type:
catAttrs :: String -> [AttrSet] -> [Any]
*/
catAttrs = builtins.catAttrs or
(attr: l: concatLists (map (s: if s ? ${attr} then [s.${attr}] else []) l));
catAttrs = builtins.catAttrs;
/* Filter an attribute set by removing all attributes for which the
@ -608,9 +607,7 @@ rec {
Type:
mapAttrs :: (String -> Any -> Any) -> AttrSet -> AttrSet
*/
mapAttrs = builtins.mapAttrs or
(f: set:
listToAttrs (map (attr: { name = attr; value = f attr set.${attr}; }) (attrNames set)));
mapAttrs = builtins.mapAttrs;
/* Like `mapAttrs`, but allows the name of each attribute to be


@ -345,13 +345,17 @@ checkFileset() {
#### Error messages #####
# We're using [[:blank:]] here instead of \s, because only the former is POSIX
# (see https://pubs.opengroup.org/onlinepubs/007908799/xbd/re.html#tag_007_003_005).
# And indeed, Darwin's bash only supports the former
# Absolute paths in strings cannot be passed as `root`
expectFailure 'toSource { root = "/nix/store/foobar"; fileset = ./.; }' 'lib.fileset.toSource: `root` \(/nix/store/foobar\) is a string-like value, but it should be a path instead.
\s*Paths in strings are not supported by `lib.fileset`, use `lib.sources` or derivations instead.'
[[:blank:]]*Paths in strings are not supported by `lib.fileset`, use `lib.sources` or derivations instead.'
expectFailure 'toSource { root = cleanSourceWith { src = ./.; }; fileset = ./.; }' 'lib.fileset.toSource: `root` is a `lib.sources`-based value, but it should be a path instead.
\s*To use a `lib.sources`-based value, convert it to a file set using `lib.fileset.fromSource` and pass it as `fileset`.
\s*Note that this only works for sources created from paths.'
[[:blank:]]*To use a `lib.sources`-based value, convert it to a file set using `lib.fileset.fromSource` and pass it as `fileset`.
[[:blank:]]*Note that this only works for sources created from paths.'
# Only paths are accepted as `root`
expectFailure 'toSource { root = 10; fileset = ./.; }' 'lib.fileset.toSource: `root` is of type int, but it should be a path instead.'
@ -361,9 +365,9 @@ mkdir -p {foo,bar}/mock-root
expectFailure 'with ((import <nixpkgs/lib>).extend (import <nixpkgs/lib/fileset/mock-splitRoot.nix>)).fileset;
toSource { root = ./foo/mock-root; fileset = ./bar/mock-root; }
' 'lib.fileset.toSource: Filesystem roots are not the same for `fileset` and `root` \('"$work"'/foo/mock-root\):
\s*`root`: Filesystem root is "'"$work"'/foo/mock-root"
\s*`fileset`: Filesystem root is "'"$work"'/bar/mock-root"
\s*Different filesystem roots are not supported.'
[[:blank:]]*`root`: Filesystem root is "'"$work"'/foo/mock-root"
[[:blank:]]*`fileset`: Filesystem root is "'"$work"'/bar/mock-root"
[[:blank:]]*Different filesystem roots are not supported.'
rm -rf -- *
# `root` needs to exist
@ -372,8 +376,8 @@ expectFailure 'toSource { root = ./a; fileset = ./.; }' 'lib.fileset.toSource: `
# `root` needs to be a file
touch a
expectFailure 'toSource { root = ./a; fileset = ./a; }' 'lib.fileset.toSource: `root` \('"$work"'/a\) is a file, but it should be a directory instead. Potential solutions:
\s*- If you want to import the file into the store _without_ a containing directory, use string interpolation or `builtins.path` instead of this function.
\s*- If you want to import the file into the store _with_ a containing directory, set `root` to the containing directory, such as '"$work"', and set `fileset` to the file path.'
[[:blank:]]*- If you want to import the file into the store _without_ a containing directory, use string interpolation or `builtins.path` instead of this function.
[[:blank:]]*- If you want to import the file into the store _with_ a containing directory, set `root` to the containing directory, such as '"$work"', and set `fileset` to the file path.'
rm -rf -- *
# The fileset argument should be evaluated, even if the directory is empty
@ -382,36 +386,36 @@ expectFailure 'toSource { root = ./.; fileset = abort "This should be evaluated"
# Only paths under `root` should be able to influence the result
mkdir a
expectFailure 'toSource { root = ./a; fileset = ./.; }' 'lib.fileset.toSource: `fileset` could contain files in '"$work"', which is not under the `root` \('"$work"'/a\). Potential solutions:
\s*- Set `root` to '"$work"' or any directory higher up. This changes the layout of the resulting store path.
\s*- Set `fileset` to a file set that cannot contain files outside the `root` \('"$work"'/a\). This could change the files included in the result.'
[[:blank:]]*- Set `root` to '"$work"' or any directory higher up. This changes the layout of the resulting store path.
[[:blank:]]*- Set `fileset` to a file set that cannot contain files outside the `root` \('"$work"'/a\). This could change the files included in the result.'
rm -rf -- *
# non-regular and non-symlink files cannot be added to the Nix store
mkfifo a
expectFailure 'toSource { root = ./.; fileset = ./a; }' 'lib.fileset.toSource: `fileset` contains a file that cannot be added to the store: '"$work"'/a
\s*This file is neither a regular file nor a symlink, the only file types supported by the Nix store.
\s*Therefore the file set cannot be added to the Nix store as is. Make sure to not include that file to avoid this error.'
[[:blank:]]*This file is neither a regular file nor a symlink, the only file types supported by the Nix store.
[[:blank:]]*Therefore the file set cannot be added to the Nix store as is. Make sure to not include that file to avoid this error.'
rm -rf -- *
# Path coercion only works for paths
expectFailure 'toSource { root = ./.; fileset = 10; }' 'lib.fileset.toSource: `fileset` is of type int, but it should be a file set or a path instead.'
expectFailure 'toSource { root = ./.; fileset = "/some/path"; }' 'lib.fileset.toSource: `fileset` \("/some/path"\) is a string-like value, but it should be a file set or a path instead.
\s*Paths represented as strings are not supported by `lib.fileset`, use `lib.sources` or derivations instead.'
[[:blank:]]*Paths represented as strings are not supported by `lib.fileset`, use `lib.sources` or derivations instead.'
expectFailure 'toSource { root = ./.; fileset = cleanSourceWith { src = ./.; }; }' 'lib.fileset.toSource: `fileset` is a `lib.sources`-based value, but it should be a file set or a path instead.
\s*To convert a `lib.sources`-based value to a file set you can use `lib.fileset.fromSource`.
\s*Note that this only works for sources created from paths.'
[[:blank:]]*To convert a `lib.sources`-based value to a file set you can use `lib.fileset.fromSource`.
[[:blank:]]*Note that this only works for sources created from paths.'
# Path coercion errors for non-existent paths
expectFailure 'toSource { root = ./.; fileset = ./a; }' 'lib.fileset.toSource: `fileset` \('"$work"'/a\) is a path that does not exist.
\s*To create a file set from a path that may not exist, use `lib.fileset.maybeMissing`.'
[[:blank:]]*To create a file set from a path that may not exist, use `lib.fileset.maybeMissing`.'
# File sets cannot be evaluated directly
expectFailure 'union ./. ./.' 'lib.fileset: Directly evaluating a file set is not supported.
\s*To turn it into a usable source, use `lib.fileset.toSource`.
\s*To pretty-print the contents, use `lib.fileset.trace` or `lib.fileset.traceVal`.'
[[:blank:]]*To turn it into a usable source, use `lib.fileset.toSource`.
[[:blank:]]*To pretty-print the contents, use `lib.fileset.trace` or `lib.fileset.traceVal`.'
expectFailure '_emptyWithoutBase' 'lib.fileset: Directly evaluating a file set is not supported.
\s*To turn it into a usable source, use `lib.fileset.toSource`.
\s*To pretty-print the contents, use `lib.fileset.trace` or `lib.fileset.traceVal`.'
[[:blank:]]*To turn it into a usable source, use `lib.fileset.toSource`.
[[:blank:]]*To pretty-print the contents, use `lib.fileset.trace` or `lib.fileset.traceVal`.'
# Past versions of the internal representation are supported
expectEqual '_coerce "<tests>: value" { _type = "fileset"; _internalVersion = 0; _internalBase = ./.; }' \
@ -423,9 +427,9 @@ expectEqual '_coerce "<tests>: value" { _type = "fileset"; _internalVersion = 2;
# Future versions of the internal representation are unsupported
expectFailure '_coerce "<tests>: value" { _type = "fileset"; _internalVersion = 4; }' '<tests>: value is a file set created from a future version of the file set library with a different internal representation:
\s*- Internal version of the file set: 4
\s*- Internal version of the library: 3
\s*Make sure to update your Nixpkgs to have a newer version of `lib.fileset`.'
[[:blank:]]*- Internal version of the file set: 4
[[:blank:]]*- Internal version of the library: 3
[[:blank:]]*Make sure to update your Nixpkgs to have a newer version of `lib.fileset`.'
# _create followed by _coerce should give the inputs back without any validation
expectEqual '{
@ -539,16 +543,16 @@ mkdir -p {foo,bar}/mock-root
expectFailure 'with ((import <nixpkgs/lib>).extend (import <nixpkgs/lib/fileset/mock-splitRoot.nix>)).fileset;
toSource { root = ./.; fileset = union ./foo/mock-root ./bar/mock-root; }
' 'lib.fileset.union: Filesystem roots are not the same:
\s*First argument: Filesystem root is "'"$work"'/foo/mock-root"
\s*Second argument: Filesystem root is "'"$work"'/bar/mock-root"
\s*Different filesystem roots are not supported.'
[[:blank:]]*First argument: Filesystem root is "'"$work"'/foo/mock-root"
[[:blank:]]*Second argument: Filesystem root is "'"$work"'/bar/mock-root"
[[:blank:]]*Different filesystem roots are not supported.'
expectFailure 'with ((import <nixpkgs/lib>).extend (import <nixpkgs/lib/fileset/mock-splitRoot.nix>)).fileset;
toSource { root = ./.; fileset = unions [ ./foo/mock-root ./bar/mock-root ]; }
' 'lib.fileset.unions: Filesystem roots are not the same:
\s*Element 0: Filesystem root is "'"$work"'/foo/mock-root"
\s*Element 1: Filesystem root is "'"$work"'/bar/mock-root"
\s*Different filesystem roots are not supported.'
[[:blank:]]*Element 0: Filesystem root is "'"$work"'/foo/mock-root"
[[:blank:]]*Element 1: Filesystem root is "'"$work"'/bar/mock-root"
[[:blank:]]*Different filesystem roots are not supported.'
rm -rf -- *
# Coercion errors show the correct context
@ -652,9 +656,9 @@ mkdir -p {foo,bar}/mock-root
expectFailure 'with ((import <nixpkgs/lib>).extend (import <nixpkgs/lib/fileset/mock-splitRoot.nix>)).fileset;
toSource { root = ./.; fileset = intersection ./foo/mock-root ./bar/mock-root; }
' 'lib.fileset.intersection: Filesystem roots are not the same:
\s*First argument: Filesystem root is "'"$work"'/foo/mock-root"
\s*Second argument: Filesystem root is "'"$work"'/bar/mock-root"
\s*Different filesystem roots are not supported.'
[[:blank:]]*First argument: Filesystem root is "'"$work"'/foo/mock-root"
[[:blank:]]*Second argument: Filesystem root is "'"$work"'/bar/mock-root"
[[:blank:]]*Different filesystem roots are not supported.'
rm -rf -- *
# Coercion errors show the correct context
@ -761,8 +765,8 @@ rm -rf -- *
# Also not the other way around
mkdir a
expectFailure 'toSource { root = ./a; fileset = difference ./. ./a; }' 'lib.fileset.toSource: `fileset` could contain files in '"$work"', which is not under the `root` \('"$work"'/a\). Potential solutions:
\s*- Set `root` to '"$work"' or any directory higher up. This changes the layout of the resulting store path.
\s*- Set `fileset` to a file set that cannot contain files outside the `root` \('"$work"'/a\). This could change the files included in the result.'
[[:blank:]]*- Set `root` to '"$work"' or any directory higher up. This changes the layout of the resulting store path.
[[:blank:]]*- Set `fileset` to a file set that cannot contain files outside the `root` \('"$work"'/a\). This could change the files included in the result.'
rm -rf -- *
# Difference actually works
@ -839,7 +843,7 @@ expectFailure 'fileFilter null (abort "this is not needed")' 'lib.fileset.fileFi
# The second argument needs to be an existing path
expectFailure 'fileFilter (file: abort "this is not needed") _emptyWithoutBase' 'lib.fileset.fileFilter: Second argument is a file set, but it should be a path instead.
\s*If you need to filter files in a file set, use `intersection fileset \(fileFilter pred \./\.\)` instead.'
[[:blank:]]*If you need to filter files in a file set, use `intersection fileset \(fileFilter pred \./\.\)` instead.'
expectFailure 'fileFilter (file: abort "this is not needed") null' 'lib.fileset.fileFilter: Second argument is of type null, but it should be a path instead.'
expectFailure 'fileFilter (file: abort "this is not needed") ./a' 'lib.fileset.fileFilter: Second argument \('"$work"'/a\) is a path that does not exist.'
@ -1103,7 +1107,7 @@ rm -rf -- *
# String-like values are not supported
expectFailure 'fromSource (lib.cleanSource "")' 'lib.fileset.fromSource: The source origin of the argument is a string-like value \(""\), but it should be a path instead.
\s*Sources created from paths in strings cannot be turned into file sets, use `lib.sources` or derivations instead.'
[[:blank:]]*Sources created from paths in strings cannot be turned into file sets, use `lib.sources` or derivations instead.'
# Wrong type
expectFailure 'fromSource null' 'lib.fileset.fromSource: The source origin of the argument is of type null, but it should be a path instead.'
@ -1420,10 +1424,10 @@ expectEqual '(import '"$storePath"' { fs = lib.fileset; }).outPath' \""$storePat
## But it fails if the path is imported with a fetcher that doesn't remove .git (like just using "${./.}")
expectFailure 'import "${./.}" { fs = lib.fileset; }' 'lib.fileset.gitTracked: The argument \(.*\) is a store path within a working tree of a Git repository.
\s*This indicates that a source directory was imported into the store using a method such as `import "\$\{./.\}"` or `path:.`.
\s*This function currently does not support such a use case, since it currently relies on `builtins.fetchGit`.
\s*You could make this work by using a fetcher such as `fetchGit` instead of copying the whole repository.
\s*If you can'\''t avoid copying the repo to the store, see https://github.com/NixOS/nix/issues/9292.'
[[:blank:]]*This indicates that a source directory was imported into the store using a method such as `import "\$\{./.\}"` or `path:.`.
[[:blank:]]*This function currently does not support such a use case, since it currently relies on `builtins.fetchGit`.
[[:blank:]]*You could make this work by using a fetcher such as `fetchGit` instead of copying the whole repository.
[[:blank:]]*If you can'\''t avoid copying the repo to the store, see https://github.com/NixOS/nix/issues/9292.'
## Even with submodules
if [[ -n "$fetchGitSupportsSubmodules" ]]; then
@ -1447,15 +1451,15 @@ if [[ -n "$fetchGitSupportsSubmodules" ]]; then
## But it fails if the path is imported with a fetcher that doesn't remove .git (like just using "${./.}")
expectFailure 'import "${./.}" { fs = lib.fileset; }' 'lib.fileset.gitTrackedWith: The second argument \(.*\) is a store path within a working tree of a Git repository.
\s*This indicates that a source directory was imported into the store using a method such as `import "\$\{./.\}"` or `path:.`.
\s*This function currently does not support such a use case, since it currently relies on `builtins.fetchGit`.
\s*You could make this work by using a fetcher such as `fetchGit` instead of copying the whole repository.
\s*If you can'\''t avoid copying the repo to the store, see https://github.com/NixOS/nix/issues/9292.'
[[:blank:]]*This indicates that a source directory was imported into the store using a method such as `import "\$\{./.\}"` or `path:.`.
[[:blank:]]*This function currently does not support such a use case, since it currently relies on `builtins.fetchGit`.
[[:blank:]]*You could make this work by using a fetcher such as `fetchGit` instead of copying the whole repository.
[[:blank:]]*If you can'\''t avoid copying the repo to the store, see https://github.com/NixOS/nix/issues/9292.'
expectFailure 'import "${./.}/sub" { fs = lib.fileset; }' 'lib.fileset.gitTracked: The argument \(.*/sub\) is a store path within a working tree of a Git repository.
\s*This indicates that a source directory was imported into the store using a method such as `import "\$\{./.\}"` or `path:.`.
\s*This function currently does not support such a use case, since it currently relies on `builtins.fetchGit`.
\s*You could make this work by using a fetcher such as `fetchGit` instead of copying the whole repository.
\s*If you can'\''t avoid copying the repo to the store, see https://github.com/NixOS/nix/issues/9292.'
[[:blank:]]*This indicates that a source directory was imported into the store using a method such as `import "\$\{./.\}"` or `path:.`.
[[:blank:]]*This function currently does not support such a use case, since it currently relies on `builtins.fetchGit`.
[[:blank:]]*You could make this work by using a fetcher such as `fetchGit` instead of copying the whole repository.
[[:blank:]]*If you can'\''t avoid copying the repo to the store, see https://github.com/NixOS/nix/issues/9292.'
fi
rm -rf -- *


@ -4,7 +4,6 @@ let
inherit (lib.strings) toInt;
inherit (lib.trivial) compare min id warn;
inherit (lib.attrsets) mapAttrs;
inherit (lib.lists) sort;
in
rec {
@ -172,7 +171,7 @@ rec {
concatMap (x: [x] ++ ["z"]) ["a" "b"]
=> [ "a" "z" "b" "z" ]
*/
concatMap = builtins.concatMap or (f: list: concatLists (map f list));
concatMap = builtins.concatMap;
/* Flatten the argument into a single list; that is, nested lists are
spliced into the top-level lists.
@ -316,7 +315,7 @@ rec {
any isString [ 1 { } ]
=> false
*/
any = builtins.any or (pred: foldr (x: y: if pred x then true else y) false);
any = builtins.any;
/* Return true if function `pred` returns true for all elements of
`list`.
@ -329,7 +328,7 @@ rec {
all (x: x < 3) [ 1 2 3 ]
=> false
*/
all = builtins.all or (pred: foldr (x: y: if pred x then y else false) true);
all = builtins.all;
/* Count how many elements of `list` match the supplied predicate
function.
@ -428,12 +427,7 @@ rec {
partition (x: x > 2) [ 5 1 2 3 4 ]
=> { right = [ 5 3 4 ]; wrong = [ 1 2 ]; }
*/
partition = builtins.partition or (pred:
foldr (h: t:
if pred h
then { right = [h] ++ t.right; wrong = t.wrong; }
else { right = t.right; wrong = [h] ++ t.wrong; }
) { right = []; wrong = []; });
partition = builtins.partition;
/* Splits the elements of a list into many lists, using the return value of a predicate.
Predicate should return a string which becomes keys of attrset `groupBy` returns.
@ -602,22 +596,7 @@ rec {
Type:
sort :: (a -> a -> Bool) -> [a] -> [a]
*/
sort = builtins.sort or (
strictLess: list:
let
len = length list;
first = head list;
pivot' = n: acc@{ left, right }: let el = elemAt list n; next = pivot' (n + 1); in
if n == len
then acc
else if strictLess first el
then next { inherit left; right = [ el ] ++ right; }
else
next { left = [ el ] ++ left; inherit right; };
pivot = pivot' 1 { left = []; right = []; };
in
if len < 2 then list
else (sort strictLess pivot.left) ++ [ first ] ++ (sort strictLess pivot.right));
sort = builtins.sort;
/*
Sort a list based on the default comparison of a derived property `b`.


@ -95,8 +95,7 @@ rec {
concatStringsSep "/" ["usr" "local" "bin"]
=> "usr/local/bin"
*/
concatStringsSep = builtins.concatStringsSep or (separator: list:
lib.foldl' (x: y: x + y) "" (intersperse separator list));
concatStringsSep = builtins.concatStringsSep;
/* Maps a function over a list of strings and then concatenates the
result with the specified separator interspersed between


@ -95,21 +95,6 @@ in {
/* boolean and */
and = x: y: x && y;
/* bitwise and */
bitAnd = builtins.bitAnd
or (import ./zip-int-bits.nix
(a: b: if a==1 && b==1 then 1 else 0));
/* bitwise or */
bitOr = builtins.bitOr
or (import ./zip-int-bits.nix
(a: b: if a==1 || b==1 then 1 else 0));
/* bitwise xor */
bitXor = builtins.bitXor
or (import ./zip-int-bits.nix
(a: b: if a!=b then 1 else 0));
/* bitwise not */
bitNot = builtins.sub (-1);
@ -165,8 +150,8 @@ in {
inherit (builtins)
pathExists readFile isBool
isInt isFloat add sub lessThan
seq deepSeq genericClosure;
seq deepSeq genericClosure
bitAnd bitOr bitXor;
## nixpkgs version strings
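For reference, the semantics of the bit operators that are now re-exported straight from the builtins are unchanged; a quick sketch (evaluates in `nix repl`, names as exported by `lib`):

```nix
{ lib }: {
  a = lib.bitAnd 6 3;  # 0b110 & 0b011 = 0b010 = 2
  o = lib.bitOr 6 3;   # 0b110 | 0b011 = 0b111 = 7
  x = lib.bitXor 6 3;  # 0b110 ^ 0b011 = 0b101 = 5
}
```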


@ -9,7 +9,7 @@ rec {
splitVersion "1.2.3"
=> ["1" "2" "3"]
*/
splitVersion = builtins.splitVersion or (lib.splitString ".");
splitVersion = builtins.splitVersion;
/* Get the major version string from a string.


@ -1,39 +0,0 @@
/* Helper function to implement a fallback for the bit operators
`bitAnd`, `bitOr` and `bitXor` on older nix version.
See ./trivial.nix
*/
f: x: y:
let
# (intToBits 6) -> [ 0 1 1 ]
intToBits = x:
if x == 0 || x == -1 then
[]
else
let
headbit = if (x / 2) * 2 != x then 1 else 0; # x & 1
tailbits = if x < 0 then ((x + 1) / 2) - 1 else x / 2; # x >> 1
in
[headbit] ++ (intToBits tailbits);
# (bitsToInt [ 0 1 1 ] 0) -> 6
# (bitsToInt [ 0 1 0 ] 1) -> -6
bitsToInt = l: signum:
if l == [] then
(if signum == 0 then 0 else -1)
else
(builtins.head l) + (2 * (bitsToInt (builtins.tail l) signum));
xsignum = if x < 0 then 1 else 0;
ysignum = if y < 0 then 1 else 0;
zipListsWith' = fst: snd:
if fst==[] && snd==[] then
[]
else if fst==[] then
[(f xsignum (builtins.head snd))] ++ (zipListsWith' [] (builtins.tail snd))
else if snd==[] then
[(f (builtins.head fst) ysignum )] ++ (zipListsWith' (builtins.tail fst) [] )
else
[(f (builtins.head fst) (builtins.head snd))] ++ (zipListsWith' (builtins.tail fst) (builtins.tail snd));
in
assert (builtins.isInt x) && (builtins.isInt y);
bitsToInt (zipListsWith' (intToBits x) (intToBits y)) (f xsignum ysignum)


@ -5805,6 +5805,13 @@
githubId = 8706;
name = "Rafael Fernández López";
};
erethon = {
email = "dgrig@erethon.com";
matrix = "@dgrig:erethon.com";
github = "erethon";
githubId = 1254842;
name = "Dionysis Grigoropoulos";
};
ericbmerritt = {
email = "eric@afiniate.com";
github = "ericbmerritt";
@ -16686,6 +16693,12 @@
githubId = 6445619;
name = "Ruben Cano Diaz";
};
RudiOnTheAir = {
name = "Rüdiger Schwoon";
email = "wolf@schwoon.info";
github = "RudiOnTheAir";
githubId = 47517341;
};
rudolfvesely = {
name = "Rudolf Vesely";
email = "i@rudolfvesely.com";
@ -17174,6 +17187,12 @@
fingerprint = "E173 237A C782 296D 98F5 ADAC E13D FD4B 4712 7951";
}];
};
sdht0 = {
email = "nixpkgs@sdht.in";
github = "sdht0";
githubId = 867424;
name = "Siddhartha Sahu";
};
sdier = {
email = "scott@dier.name";
matrix = "@sdier:matrix.org";


@ -39,7 +39,7 @@ target:
```
To validate cross-targets `binfmt` `NixOS` helper can be useful.
For `riscv64-unknown-linux-gnu` the `/etc/nixox/configuraqtion.nix`
For `riscv64-unknown-linux-gnu` the `/etc/nixos/configuration.nix`
entry would be `boot.binfmt.emulatedSystems = [ "riscv64-linux" ]`.
3. Propose the commit as a PR to update bootstrap tarballs, tag people


@ -0,0 +1,31 @@
#!/usr/bin/env nix-shell
#!nix-shell -i bash -p gnutar jq reuse
set -eu
cd "$(dirname "$(readlink -f "$0")")"/../../..
TMPDIR=$(mktemp -d)
trap 'rm -rf $TMPDIR' EXIT
echo "# Prebuilding sources..."
nix-build -A kdePackages.sources --no-link || true
echo "# Evaluating sources..."
declare -A sources
eval "$(nix-instantiate --eval -A kdePackages.sources --json --strict | jq 'to_entries[] | "sources[" + .key + "]=" + .value' -r)"
echo "# Collecting licenses..."
for k in "${!sources[@]}"; do
echo "- Processing $k..."
if [ ! -f "${sources[$k]}" ]; then
echo "Not found!"
continue
fi
mkdir "$TMPDIR/$k"
tar -C "$TMPDIR/$k" -xf "${sources[$k]}"
(cd "$TMPDIR/$k"; reuse lint --json) | jq --arg name "$k" '{$name: .summary.used_licenses | sort}' -c > "$TMPDIR/$k.json"
done
jq -s 'add' -S "$TMPDIR"/*.json > pkgs/kde/generated/licenses.json


@ -0,0 +1,11 @@
#!/usr/bin/env nix-shell
#!nix-shell -i nu -p nushell
cd $"($env.FILE_PWD)/../../.."
mkdir logs
nix-env -qaP -f . -A kdePackages --json --out-path | from json | values | par-each { |it|
echo $"Processing ($it.pname)..."
if "outputs" in $it {
nix-store --read-log $it.outputs.out | save -f $"logs/($it.pname).log"
}
}


@ -0,0 +1,36 @@
#!/usr/bin/env nix-shell
#!nix-shell -i python3 -p "python3.withPackages(ps: [ ps.click ps.pyyaml ])"
import pathlib
import click
import utils
@click.command
@click.argument(
"repo-metadata",
type=click.Path(
exists=True,
file_okay=False,
resolve_path=True,
path_type=pathlib.Path,
),
)
@click.option(
"--nixpkgs",
type=click.Path(
exists=True,
file_okay=False,
resolve_path=True,
writable=True,
path_type=pathlib.Path,
),
default=pathlib.Path(__file__).parent.parent.parent.parent
)
def main(repo_metadata: pathlib.Path, nixpkgs: pathlib.Path):
metadata = utils.KDERepoMetadata.from_repo_metadata_checkout(repo_metadata)
out_dir = nixpkgs / "pkgs/kde/generated"
metadata.write_json(out_dir)
if __name__ == "__main__":
main() # type: ignore


@ -0,0 +1,127 @@
#!/usr/bin/env nix-shell
#!nix-shell -i python3 -p python3
import pathlib
OK_MISSING = {
# we don't use precompiled QML
'Qt6QuickCompiler',
'Qt6QmlCompilerPlusPrivate',
# usually used for version numbers
'Git',
# useless by itself, will warn if something else is not found
'PkgConfig',
# license verification
'ReuseTool',
# dev only
'ClangFormat',
# doesn't exist
'Qt6X11Extras',
}
OK_MISSING_BY_PACKAGE = {
"angelfish": {
"Qt6Feedback", # we don't have it
},
"attica": {
"Python3", # only used for license checks
},
"discover": {
"rpm-ostree-1", # we don't have rpm-ostree (duh)
"Snapd", # we don't have snaps and probably never will
},
"elisa": {
"UPNPQT", # upstream says it's broken
},
"extra-cmake-modules": {
"Sphinx", # only used for docs, bloats closure size
"QCollectionGenerator"
},
"kio-extras-kf5": {
"KDSoapWSDiscoveryClient", # actually vendored on KF5 version
},
"kitinerary": {
"OsmTools", # used for map data updates, we use prebuilt
},
"kosmindoormap": {
"OsmTools", # same
"Protobuf",
},
"kpty": {
"UTEMPTER", # we don't have it and it probably wouldn't work anyway
},
"kpublictransport": {
"OsmTools", # same
"PolyClipping",
"Protobuf",
},
"krfb": {
"Qt6XkbCommonSupport", # not real
},
"kuserfeedback": {
"Qt6Svg", # all used for backend console stuff we don't ship
"QmlLint",
"Qt6Charts",
"FLEX",
"BISON",
"Php",
"PhpUnit",
},
"kwin": {
"display-info", # newer versions identify as libdisplay-info
},
"mlt": {
"Qt5", # intentionally disabled
"SWIG",
},
"plasma-desktop": {
"scim", # upstream is dead, not packaged in Nixpkgs
},
"powerdevil": {
"DDCUtil", # cursed, intentionally disabled
},
"pulseaudio-qt": {
"Qt6Qml", # tests only
"Qt6Quick",
},
"syntax-highlighting": {
"XercesC", # only used for extra validation at build time
}
}
def main():
here = pathlib.Path(__file__).parent.parent.parent.parent
logs = (here / "logs").glob("*.log")
for log in sorted(logs):
pname = log.stem
missing = []
is_in_block = False
with log.open(errors="replace") as fd:
for line in fd:
line = line.strip()
if line.startswith("-- No package '"):
package = line.removeprefix("-- No package '").removesuffix("' found")
missing.append(package)
if line == "-- The following OPTIONAL packages have not been found:" or line == "-- The following RECOMMENDED packages have not been found:":
is_in_block = True
elif line.startswith("--") and is_in_block:
is_in_block = False
elif line.startswith("*") and is_in_block:
package = line.removeprefix("* ")
missing.append(package)
missing = {
package
for package in missing
if not any(package.startswith(i) for i in OK_MISSING | OK_MISSING_BY_PACKAGE.get(pname, set()))
}
if missing:
print(pname + ":")
for line in missing:
print(" -", line)
print()
if __name__ == '__main__':
main()


@ -0,0 +1,113 @@
#!/usr/bin/env nix-shell
#!nix-shell -i python3 -p "python3.withPackages(ps: [ ps.beautifulsoup4 ps.click ps.httpx ps.jinja2 ps.pyyaml ])
import base64
import binascii
import json
import pathlib
from urllib.parse import urlparse
import bs4
import click
import httpx
import jinja2
import utils
LEAF_TEMPLATE = jinja2.Template('''
{mkKdeDerivation}:
mkKdeDerivation {
pname = "{{ pname }}";
}
'''.strip())
ROOT_TEMPLATE = jinja2.Template('''
{callPackage}: {
{%- for p in packages %}
{{ p }} = callPackage ./{{ p }} {};
{%- endfor %}
}
'''.strip());
def to_sri(hash):
raw = binascii.unhexlify(hash)
b64 = base64.b64encode(raw).decode()
return f"sha256-{b64}"
@click.command
@click.argument(
"set",
type=click.Choice(["frameworks", "gear", "plasma"]),
required=True
)
@click.argument(
"version",
type=str,
required=True
)
@click.option(
"--nixpkgs",
type=click.Path(
exists=True,
file_okay=False,
resolve_path=True,
writable=True,
path_type=pathlib.Path,
),
default=pathlib.Path(__file__).parent.parent.parent.parent
)
def main(set: str, version: str, nixpkgs: pathlib.Path):
root_dir = nixpkgs / "pkgs/kde"
set_dir = root_dir / set
generated_dir = root_dir / "generated"
metadata = utils.KDERepoMetadata.from_json(generated_dir)
set_url = {
"frameworks": "kf",
"gear": "releases",
"plasma": "plasma",
}[set]
sources = httpx.get(f"https://kde.org/info/sources/source-{set_url}-{version}.html")
sources.raise_for_status()
bs = bs4.BeautifulSoup(sources.text, features="html.parser")
results = {}
for item in bs.select("tr")[1:]:
link = item.select_one("td:nth-child(1) a")
assert link
hash = item.select_one("td:nth-child(3) tt")
assert hash
project_name, version = link.text.rsplit("-", maxsplit=1)
if project_name not in metadata.projects_by_name:
print(f"Warning: unknown tarball: {project_name}")
results[project_name] = {
"version": version,
"url": "mirror://kde" + urlparse(link.attrs["href"]).path,
"hash": to_sri(hash.text)
}
pkg_dir = set_dir / project_name
pkg_file = pkg_dir / "default.nix"
if not pkg_file.exists():
print(f"Generated new package: {set}/{project_name}")
pkg_dir.mkdir(parents=True, exist_ok=True)
with pkg_file.open("w") as fd:
fd.write(LEAF_TEMPLATE.render(pname=project_name) + "\n")
set_dir.mkdir(parents=True, exist_ok=True)
with (set_dir / "default.nix").open("w") as fd:
fd.write(ROOT_TEMPLATE.render(packages=results.keys()) + "\n")
sources_dir = generated_dir / "sources"
sources_dir.mkdir(parents=True, exist_ok=True)
with (sources_dir / f"{set}.json").open("w") as fd:
json.dump(results, fd, indent=2)
if __name__ == "__main__":
main() # type: ignore

View file

@ -0,0 +1,185 @@
import collections
import dataclasses
import functools
import json
import pathlib
import subprocess
import yaml
class DataclassEncoder(json.JSONEncoder):
def default(self, it):
if dataclasses.is_dataclass(it):
return dataclasses.asdict(it)
return super().default(it)
@dataclasses.dataclass
class Project:
name: str
description: str | None
project_path: str
repo_path: str | None
def __hash__(self) -> int:
return hash(self.name)
@classmethod
def from_yaml(cls, path: pathlib.Path):
data = yaml.safe_load(path.open())
return cls(
name=data["identifier"],
description=data["description"],
project_path=data["projectpath"],
repo_path=data["repopath"]
)
def get_git_commit(path: pathlib.Path):
return subprocess.check_output(["git", "-C", path, "rev-parse", "--short", "HEAD"]).decode().strip()
def validate_unique(projects: list[Project], attr: str):
seen = set()
for item in projects:
attr_value = getattr(item, attr)
if attr_value in seen:
raise Exception(f"Duplicate {attr}: {attr_value}")
seen.add(attr_value)
THIRD_PARTY = {
"third-party/appstream": "appstream-qt",
"third-party/cmark": "cmark",
"third-party/gpgme": "gpgme",
"third-party/kdsoap": "kdsoap",
"third-party/libaccounts-qt": "accounts-qt",
"third-party/libgpg-error": "libgpg-error",
"third-party/libquotient": "libquotient",
"third-party/packagekit-qt": "packagekit-qt",
"third-party/poppler": "poppler",
"third-party/qcoro": "qcoro",
"third-party/qmltermwidget": "qmltermwidget",
"third-party/qtkeychain": "qtkeychain",
"third-party/signond": "signond",
"third-party/taglib": "taglib",
"third-party/wayland-protocols": "wayland-protocols",
"third-party/wayland": "wayland",
"third-party/zxing-cpp": "zxing-cpp",
}
IGNORE = {
"kdesupport/phonon-directshow",
"kdesupport/phonon-mmf",
"kdesupport/phonon-mplayer",
"kdesupport/phonon-quicktime",
"kdesupport/phonon-waveout",
"kdesupport/phonon-xine"
}
WARNED = set()
@dataclasses.dataclass
class KDERepoMetadata:
version: str
projects: list[Project]
dep_graph: dict[Project, set[Project]]
@functools.cached_property
def projects_by_name(self):
return {p.name: p for p in self.projects}
@functools.cached_property
def projects_by_path(self):
return {p.project_path: p for p in self.projects}
def try_lookup_package(self, path):
if path in IGNORE:
return None
project = self.projects_by_path.get(path)
if project is None and path not in WARNED:
WARNED.add(path)
print(f"Warning: unknown project {path}")
return project
@classmethod
def from_repo_metadata_checkout(cls, repo_metadata: pathlib.Path):
projects = [
Project.from_yaml(metadata_file)
for metadata_file in repo_metadata.glob("projects-invent/**/metadata.yaml")
] + [
Project(id, None, project_path, None)
for project_path, id in THIRD_PARTY.items()
]
validate_unique(projects, "name")
validate_unique(projects, "project_path")
self = cls(
version=get_git_commit(repo_metadata),
projects=projects,
dep_graph={},
)
dep_specs = [
"dependency-data-common",
"dependency-data-kf6-qt6"
]
dep_graph = collections.defaultdict(set)
for spec in dep_specs:
spec_path = repo_metadata / "dependencies" / spec
for line in spec_path.open():
line = line.strip()
if line.startswith("#"):
continue
if not line:
continue
dependent, dependency = line.split(": ")
dependent = self.try_lookup_package(dependent)
if dependent is None:
continue
dependency = self.try_lookup_package(dependency)
if dependency is None:
continue
dep_graph[dependent].add(dependency)
self.dep_graph = dep_graph
return self
def write_json(self, root: pathlib.Path):
root.mkdir(parents=True, exist_ok=True)
with (root / "projects.json").open("w") as fd:
json.dump(self.projects_by_name, fd, cls=DataclassEncoder, sort_keys=True, indent=2)
with (root / "dependencies.json").open("w") as fd:
deps = {k.name: sorted(dep.name for dep in v) for k, v in self.dep_graph.items()}
json.dump({"version": self.version, "dependencies": deps}, fd, cls=DataclassEncoder, sort_keys=True, indent=2)
@classmethod
def from_json(cls, root: pathlib.Path):
projects = [
Project(**v) for v in json.load((root / "projects.json").open()).values()
]
deps = json.load((root / "dependencies.json").open())
self = cls(
version=deps["version"],
projects=projects,
dep_graph={},
)
dep_graph = collections.defaultdict(set)
for dependent, dependencies in deps["dependencies"].items():
for dependency in dependencies:
dep_graph[self.projects_by_name[dependent]].add(self.projects_by_name[dependency])
self.dep_graph = dep_graph
return self


@ -52,6 +52,8 @@ In addition to numerous new and upgraded packages, this release has the followin
}
```
- Plasma 6 is now available and can be installed with `services.xserver.desktopManager.plasma6.enable = true;`. Plasma 5 will likely be deprecated in the next release (24.11). Note that Plasma 6 uses Wayland by default, and the X11 session needs to be explicitly selected if necessary.
## New Services {#sec-release-24.05-new-services}
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@ -283,6 +285,10 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
- Cinnamon has been updated to 6.0. Please beware that the [Wayland session](https://blog.linuxmint.com/?p=4591) is still experimental in this release.
- New `boot.loader.systemd-boot.xbootldrMountPoint` allows setting up a separate [XBOOTLDR partition](https://uapi-group.org/specifications/specs/boot_loader_specification/) to store boot files. Useful on systems with a small EFI System partition that cannot be easily repartitioned.
- `boot.loader.systemd-boot` will now verify that `efiSysMountPoint` (and `xbootldrMountPoint` if configured) are mounted partitions.
- `services.postgresql.extraPlugins` changed its type: in addition to a plain list of packages, it now also accepts a function that returns such a list.
For example, a configuration line like ``services.postgresql.extraPlugins = with pkgs.postgresql_11.pkgs; [ postgis ];`` should be changed to ``services.postgresql.extraPlugins = ps: with ps; [ postgis ];``.
@ -384,6 +390,11 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
- The `mpich` package expression now requires `withPm` to be a list, e.g. `"hydra:gforker"` becomes `[ "hydra" "gforker" ]`.
- When merging systemd unit options (of type `unitOption`),
if at least one definition is a list, all non-list definitions are now lifted into lists,
making it possible to accumulate definitions without resorting to `mkForce`,
and thus without discarding definitions that did not anticipate that need.
- YouTrack is bumped to 2023.3. The update is not performed automatically, it requires manual interaction. See the YouTrack section in the manual for details.
- QtMultimedia has changed its default backend to `QT_MEDIA_BACKEND=ffmpeg` (previously `gstreamer` on Linux or `darwin` on macOS).


@ -56,6 +56,14 @@ This partition table type uses GPT and:
- creates a FAT32 ESP partition from 8MiB to the specified `bootSize` parameter (256MiB by default) and sets it bootable;
- creates a primary ext4 partition starting after the boot partition and extending to the full disk image
#### `efixbootldr`
This partition table type uses GPT and:
- creates a FAT32 ESP partition from 8MiB to 100MiB and sets it bootable;
- creates a FAT32 BOOT partition from 100MiB to the specified `bootSize` parameter (256MiB by default) and sets the `bls_boot` flag;
- creates a primary ext4 partition starting after the boot partition and extending to the full disk image
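A hedged sketch of how the new layout might be requested when building an image (only the relevant arguments shown; the import path and remaining arguments are as documented elsewhere in this file):

```nix
# Sketch only: select the new partition table type; other arguments elided.
import ./make-disk-image.nix {
  inherit pkgs lib config;
  partitionTableType = "efixbootldr";
  bootSize = "256M";
  diskSize = "auto";
}
```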
#### `hybrid`
This partition table type uses GPT and:
@ -111,19 +119,7 @@ To solve this, you can run `fdisk -l $image` and generate `dd if=$image of=$imag
# When setting one of `user' or `group', the other needs to be set too.
contents ? []
, # Type of partition table to use; either "legacy", "efi", or "none".
# For "efi" images, the GPT partition table is used and a mandatory ESP
# partition of reasonable size is created in addition to the root partition.
# For "legacy", the msdos partition table is used and a single large root
# partition is created.
# For "legacy+gpt", the GPT partition table is used, a 1MiB no-fs partition for
# use by the bootloader is created, and a single large root partition is
# created.
# For "hybrid", the GPT partition table is used and a mandatory ESP
# partition of reasonable size is created in addition to the root partition.
# Also a legacy MBR will be present.
# For "none", no partition table is created. Enabling `installBootLoader`
# most likely fails as GRUB will probably refuse to install.
, # Type of partition table to use; described in the `Image Partitioning` section above.
partitionTableType ? "legacy"
, # Whether to invoke `switch-to-configuration boot` during image creation
@ -193,11 +189,11 @@ To solve this, you can run `fdisk -l $image` and generate `dd if=$image of=$imag
additionalPaths ? []
}:
assert (lib.assertOneOf "partitionTableType" partitionTableType [ "legacy" "legacy+gpt" "efi" "hybrid" "none" ]);
assert (lib.assertOneOf "partitionTableType" partitionTableType [ "legacy" "legacy+gpt" "efi" "efixbootldr" "hybrid" "none" ]);
assert (lib.assertMsg (fsType == "ext4" && deterministic -> rootFSUID != null) "In deterministic mode with a ext4 partition, rootFSUID must be non-null, by default, it is equal to rootGPUID.");
# We use -E offset=X below, which is only supported by e2fsprogs
assert (lib.assertMsg (partitionTableType != "none" -> fsType == "ext4") "to produce a partition table, we need to use -E offset flag which is support only for fsType = ext4");
assert (lib.assertMsg (touchEFIVars -> partitionTableType == "hybrid" || partitionTableType == "efi" || partitionTableType == "legacy+gpt") "EFI variables can be used only with a partition table of type: hybrid, efi or legacy+gpt.");
assert (lib.assertMsg (touchEFIVars -> partitionTableType == "hybrid" || partitionTableType == "efi" || partitionTableType == "efixbootldr" || partitionTableType == "legacy+gpt") "EFI variables can be used only with a partition table of type: hybrid, efi, efixbootldr, or legacy+gpt.");
# If only Nix store image, then: contents must be empty, configFile must be unset, and we should no install bootloader.
assert (lib.assertMsg (onlyNixStore -> contents == [] && configFile == null && !installBootLoader) "In a only Nix store image, the contents must be empty, no configuration must be provided and no bootloader should be installed.");
# Either both or none of {user,group} need to be set
@ -225,6 +221,7 @@ let format' = format; in let
legacy = "1";
"legacy+gpt" = "2";
efi = "2";
efixbootldr = "3";
hybrid = "3";
}.${partitionTableType};
@ -266,6 +263,23 @@ let format' = format; in let
$diskImage
''}
'';
efixbootldr = ''
parted --script $diskImage -- \
mklabel gpt \
mkpart ESP fat32 8MiB 100MiB \
set 1 boot on \
mkpart BOOT fat32 100MiB ${bootSize} \
set 2 bls_boot on \
mkpart ROOT ext4 ${bootSize} -1
${optionalString deterministic ''
sgdisk \
--disk-guid=97FD5997-D90B-4AA3-8D16-C1723AEA73C \
--partition-guid=1:1C06F03B-704E-4657-B9CD-681A087A2FDC \
--partition-guid=2:970C694F-AFD0-4B99-B750-CDB7A329AB6F \
--partition-guid=3:${rootGPUID} \
$diskImage
''}
'';
hybrid = ''
parted --script $diskImage -- \
mklabel gpt \
@ -436,7 +450,7 @@ let format' = format; in let
diskImage=nixos.raw
${if diskSize == "auto" then ''
${if partitionTableType == "efi" || partitionTableType == "hybrid" then ''
${if partitionTableType == "efi" || partitionTableType == "efixbootldr" || partitionTableType == "hybrid" then ''
# Add the GPT at the end
gptSpace=$(( 512 * 34 * 1 ))
# Normally we'd need to account for alignment and things, if bootSize
@ -570,6 +584,15 @@ let format' = format; in let
${optionalString touchEFIVars "mount -t efivarfs efivarfs /sys/firmware/efi/efivars"}
''}
${optionalString (partitionTableType == "efixbootldr") ''
mkdir -p /mnt/{boot,efi}
mkfs.vfat -n ESP /dev/vda1
mkfs.vfat -n BOOT /dev/vda2
mount /dev/vda1 /mnt/efi
mount /dev/vda2 /mnt/boot
${optionalString touchEFIVars "mount -t efivarfs efivarfs /sys/firmware/efi/efivars"}
''}
# Install a configuration.nix
mkdir -p /mnt/etc/nixos


@ -21,14 +21,8 @@ in rec {
let
defs' = filterOverrides defs;
in
if isList (head defs').value
then concatMap (def:
if builtins.typeOf def.value == "list"
then def.value
else
throw "The definitions for systemd unit options should be either all lists, representing repeatable options, or all non-lists, but for the option ${showOption loc}, the definitions are a mix of list and non-list ${lib.options.showDefs defs'}"
) defs'
if any (def: isList def.value) defs'
then concatMap (def: toList def.value) defs'
else mergeEqualOption loc defs';
};
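In practice this means a list definition and a scalar definition of the same repeatable unit option now merge instead of erroring; a sketch (option and values illustrative):

```nix
# Definition in one module:
systemd.services.example.serviceConfig.ExecStartPre = [ "cmd-a" "cmd-b" ];
# Definition in another module, not written as a list:
systemd.services.example.serviceConfig.ExecStartPre = "cmd-c";
# Merged result: a single list containing all three commands, no mkForce needed
# (ordering follows module definition order).
```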


@ -12,6 +12,8 @@ from test_driver.machine import Machine, NixStartScript, retry
from test_driver.polling_condition import PollingCondition
from test_driver.vlan import VLan
SENTINEL = object()
def get_tmp_dir() -> Path:
"""Returns a temporary directory that is defined by TMPDIR, TEMP, TMP or CWD
@ -187,23 +189,58 @@ class Driver:
# to swallow them and prevent itself from terminating.
os.kill(os.getpid(), signal.SIGTERM)
def create_machine(self, args: Dict[str, Any]) -> Machine:
def create_machine(
self,
start_command: str | dict,
*,
name: Optional[str] = None,
keep_vm_state: bool = False,
) -> Machine:
# Legacy args handling
# FIXME: remove after 24.05
if isinstance(start_command, dict):
if name is not None or keep_vm_state:
raise TypeError(
"Dictionary passed to create_machine must be the only argument"
)
args = start_command
start_command = args.pop("startCommand", SENTINEL)
if start_command is SENTINEL:
raise TypeError(
"Dictionary passed to create_machine must contain startCommand"
)
if not isinstance(start_command, str):
raise TypeError(
f"startCommand must be a string, got: {repr(start_command)}"
)
name = args.pop("name", None)
keep_vm_state = args.pop("keep_vm_state", False)
if args:
raise TypeError(
f"Unsupported arguments passed to create_machine: {args}"
)
rootlog.warning(
"Using create_machine with a single dictionary argument is deprecated, and will be removed in NixOS 24.11"
)
# End legacy args handling
tmp_dir = get_tmp_dir()
if args.get("startCommand"):
start_command: str = args.get("startCommand", "")
cmd = NixStartScript(start_command)
name = args.get("name", cmd.machine_name)
else:
cmd = Machine.create_startcommand(args) # type: ignore
name = args.get("name", "machine")
cmd = NixStartScript(start_command)
name = name or cmd.machine_name
return Machine(
tmp_dir=tmp_dir,
out_dir=self.out_dir,
start_command=cmd,
name=name,
keep_vm_state=args.get("keep_vm_state", False),
keep_vm_state=keep_vm_state,
)
def serial_stdout_on(self) -> None:
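A sketch of the two call styles from a test script, following the signature above (the start-command path and machine name are illustrative):

```nix
{
  testScript = ''
    # New keyword-style API:
    extra = create_machine("/path/to/run-extra-vm", name="extra")

    # Deprecated dictionary form, still accepted but removed in NixOS 24.11:
    # extra = create_machine({"startCommand": "/path/to/run-extra-vm", "name": "extra"})
  '';
}
```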


@ -208,7 +208,6 @@ class StartCommand:
),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
cwd=state_dir,
env=self.build_environment(state_dir, shared_dir),
@ -235,77 +234,6 @@ class NixStartScript(StartCommand):
return name
class LegacyStartCommand(StartCommand):
"""Used in some places to create an ad-hoc machine instead of
using nix test instrumentation + module system for that purpose.
Legacy.
"""
def __init__(
self,
netBackendArgs: Optional[str] = None, # noqa: N803
netFrontendArgs: Optional[str] = None, # noqa: N803
hda: Optional[Tuple[Path, str]] = None,
cdrom: Optional[str] = None,
usb: Optional[str] = None,
bios: Optional[str] = None,
qemuBinary: Optional[str] = None, # noqa: N803
qemuFlags: Optional[str] = None, # noqa: N803
):
if qemuBinary is not None:
self._cmd = qemuBinary
else:
self._cmd = "qemu-kvm"
self._cmd += " -m 384"
# networking
net_backend = "-netdev user,id=net0"
net_frontend = "-device virtio-net-pci,netdev=net0"
if netBackendArgs is not None:
net_backend += "," + netBackendArgs
if netFrontendArgs is not None:
net_frontend += "," + netFrontendArgs
self._cmd += f" {net_backend} {net_frontend}"
# hda
hda_cmd = ""
if hda is not None:
hda_path = hda[0].resolve()
hda_interface = hda[1]
if hda_interface == "scsi":
hda_cmd += (
f" -drive id=hda,file={hda_path},werror=report,if=none"
" -device scsi-hd,drive=hda"
)
else:
hda_cmd += f" -drive file={hda_path},if={hda_interface},werror=report"
self._cmd += hda_cmd
# cdrom
if cdrom is not None:
self._cmd += f" -cdrom {cdrom}"
# usb
usb_cmd = ""
if usb is not None:
# https://github.com/qemu/qemu/blob/master/docs/usb2.txt
usb_cmd += (
" -device usb-ehci"
f" -drive id=usbdisk,file={usb},if=none,readonly"
" -device usb-storage,drive=usbdisk "
)
self._cmd += usb_cmd
# bios
if bios is not None:
self._cmd += f" -bios {bios}"
# qemu flags
if qemuFlags is not None:
self._cmd += f" {qemuFlags}"
class Machine:
"""A handle to the machine with this name, that also knows how to manage
the machine lifecycle with the help of a start script / command."""
@ -377,29 +305,6 @@ class Machine:
self.booted = False
self.connected = False
@staticmethod
def create_startcommand(args: Dict[str, str]) -> StartCommand:
rootlog.warning(
"Using legacy create_startcommand(), "
"please use proper nix test vm instrumentation, instead "
"to generate the appropriate nixos test vm qemu startup script"
)
hda = None
if args.get("hda"):
hda_arg: str = args.get("hda", "")
hda_arg_path: Path = Path(hda_arg)
hda = (hda_arg_path, args.get("hdaInterface", ""))
return LegacyStartCommand(
netBackendArgs=args.get("netBackendArgs"),
netFrontendArgs=args.get("netFrontendArgs"),
hda=hda,
cdrom=args.get("cdrom"),
usb=args.get("usb"),
bios=args.get("bios"),
qemuBinary=args.get("qemuBinary"),
qemuFlags=args.get("qemuFlags"),
)
def is_up(self) -> bool:
return self.booted and self.connected


@ -26,6 +26,17 @@ class PollingConditionProtocol(Protocol):
raise Exception("This is just type information for the Nix test driver")
class CreateMachineProtocol(Protocol):
def __call__(
self,
start_command: str | dict,
*,
name: Optional[str] = None,
keep_vm_state: bool = False,
) -> Machine:
raise Exception("This is just type information for the Nix test driver")
start_all: Callable[[], None]
subtest: Callable[[str], ContextManager[None]]
retry: RetryProtocol
@ -34,7 +45,7 @@ machines: List[Machine]
vlans: List[VLan]
driver: Driver
log: Logger
create_machine: Callable[[Dict[str, Any]], Machine]
create_machine: CreateMachineProtocol
run_tests: Callable[[], None]
join_all: Callable[[], None]
serial_stdout_off: Callable[[], None]


@ -0,0 +1,46 @@
# This module defines a NixOS installation CD that contains Plasma 6.
{ pkgs, ... }:
{
imports = [ ./installation-cd-graphical-calamares.nix ];
isoImage.edition = "plasma6";
services.xserver = {
desktopManager.plasma6.enable = true;
# Automatically login as nixos.
displayManager = {
sddm.enable = true;
autoLogin = {
enable = true;
user = "nixos";
};
};
};
environment.systemPackages = [
# FIXME: using Qt5 builds of Maliit as upstream has not ported to Qt6 yet
pkgs.maliit-framework
pkgs.maliit-keyboard
];
system.activationScripts.installerDesktop = let
# Comes from documentation.nix when xserver and nixos.enable are true.
manualDesktopFile = "/run/current-system/sw/share/applications/nixos-manual.desktop";
homeDir = "/home/nixos/";
desktopDir = homeDir + "Desktop/";
in ''
mkdir -p ${desktopDir}
chown nixos ${homeDir} ${desktopDir}
ln -sfT ${manualDesktopFile} ${desktopDir + "nixos-manual.desktop"}
ln -sfT ${pkgs.gparted}/share/applications/gparted.desktop ${desktopDir + "gparted.desktop"}
ln -sfT ${pkgs.calamares-nixos}/share/applications/io.calamares.calamares.desktop ${desktopDir + "io.calamares.calamares.desktop"}
'';
}


@ -15,6 +15,7 @@ let
defaultPinentryFlavor =
if xserverCfg.desktopManager.lxqt.enable
|| xserverCfg.desktopManager.plasma5.enable
|| xserverCfg.desktopManager.plasma6.enable
|| xserverCfg.desktopManager.deepin.enable then
"qt"
else if xserverCfg.desktopManager.xfce.enable then


@ -96,6 +96,10 @@ let
pamOpts = { config, name, ... }: let cfg = config; in let config = parentConfig; in {
imports = [
(lib.mkRenamedOptionModule [ "enableKwallet" ] [ "kwallet" "enable" ])
];
options = {
name = mkOption {
@ -462,16 +466,23 @@ let
'';
};
enableKwallet = mkOption {
default = false;
type = types.bool;
description = lib.mdDoc ''
If enabled, pam_wallet will attempt to automatically unlock the
user's default KDE wallet upon login. If the user has no wallet named
"kdewallet", or the login password does not match their wallet
password, KDE will prompt separately after login.
'';
kwallet = {
enable = mkOption {
default = false;
type = types.bool;
description = lib.mdDoc ''
If enabled, pam_wallet will attempt to automatically unlock the
user's default KDE wallet upon login. If the user has no wallet named
"kdewallet", or the login password does not match their wallet
password, KDE will prompt separately after login.
'';
};
package = mkPackageOption pkgs.plasma5Packages "kwallet-pam" {
pkgsText = "pkgs.plasma5Packages";
};
};
sssdStrictAccess = mkOption {
default = false;
type = types.bool;
@ -686,7 +697,7 @@ let
(config.security.pam.enableEcryptfs
|| config.security.pam.enableFscrypt
|| cfg.pamMount
|| cfg.enableKwallet
|| cfg.kwallet.enable
|| cfg.enableGnomeKeyring
|| config.services.intune.enable
|| cfg.googleAuthenticator.enable
@ -711,9 +722,7 @@ let
{ name = "mount"; enable = cfg.pamMount; control = "optional"; modulePath = "${pkgs.pam_mount}/lib/security/pam_mount.so"; settings = {
disable_interactive = true;
}; }
{ name = "kwallet5"; enable = cfg.enableKwallet; control = "optional"; modulePath = "${pkgs.plasma5Packages.kwallet-pam}/lib/security/pam_kwallet5.so"; settings = {
kwalletd = "${pkgs.plasma5Packages.kwallet.bin}/bin/kwalletd5";
}; }
{ name = "kwallet"; enable = cfg.kwallet.enable; control = "optional"; modulePath = "${cfg.kwallet.package}/lib/security/pam_kwallet5.so"; }
{ name = "gnome_keyring"; enable = cfg.enableGnomeKeyring; control = "optional"; modulePath = "${pkgs.gnome.gnome-keyring}/lib/security/pam_gnome_keyring.so"; }
{ name = "intune"; enable = config.services.intune.enable; control = "optional"; modulePath = "${pkgs.intune-portal}/lib/security/pam_intune.so"; }
{ name = "gnupg"; enable = cfg.gnupg.enable; control = "optional"; modulePath = "${pkgs.pam_gnupg}/lib/security/pam_gnupg.so"; settings = {
@ -848,9 +857,7 @@ let
order = "user,group,default";
debug = true;
}; }
{ name = "kwallet5"; enable = cfg.enableKwallet; control = "optional"; modulePath = "${pkgs.plasma5Packages.kwallet-pam}/lib/security/pam_kwallet5.so"; settings = {
kwalletd = "${pkgs.plasma5Packages.kwallet.bin}/bin/kwalletd5";
}; }
{ name = "kwallet"; enable = cfg.kwallet.enable; control = "optional"; modulePath = "${cfg.kwallet.package}/lib/security/pam_kwallet5.so"; }
{ name = "gnome_keyring"; enable = cfg.enableGnomeKeyring; control = "optional"; modulePath = "${pkgs.gnome.gnome-keyring}/lib/security/pam_gnome_keyring.so"; settings = {
auto_start = true;
}; }
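For reference, the rename above maps old configurations onto the new nested options. A minimal before/after sketch (the package shown is the module default and only needs overriding in special cases, e.g. Plasma 6):

  # Before (still accepted via mkRenamedOptionModule):
  security.pam.services.login.enableKwallet = true;

  # After:
  security.pam.services.login.kwallet = {
    enable = true;
    package = pkgs.plasma5Packages.kwallet-pam; # default; Plasma 6 setups pass kdePackages.kwallet-pam instead
  };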

View file

@ -293,6 +293,18 @@ in {
assertion = (cfg.alsa.enable || cfg.pulse.enable) -> cfg.audio.enable;
message = "Using PipeWire's ALSA/PulseAudio compatibility layers requires running PipeWire as the sound server. Set `services.pipewire.audio.enable` to true.";
}
{
assertion = builtins.length
(builtins.attrNames
(
lib.filterAttrs
(name: value:
lib.hasPrefix "pipewire/" name || name == "pipewire"
)
config.environment.etc
)) == 1;
message = "Using `environment.etc.\"pipewire<...>\"` directly is no longer supported in 24.05. Use `services.pipewire.extraConfig` or `services.pipewire.configPackages` instead.";
}
];
environment.systemPackages = [ cfg.package ]
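The assertion above steers users away from writing `environment.etc."pipewire/..."` entries by hand. A hedged sketch of the `services.pipewire.extraConfig` replacement (the drop-in name and property are illustrative, not taken from this commit):

  services.pipewire.extraConfig.pipewire."92-low-latency" = {
    "context.properties" = {
      "default.clock.quantum" = 256;
    };
  };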

View file

@ -56,13 +56,13 @@ in
-- PipeWire is not used for audio, so prevent it from grabbing audio devices
alsa_monitor.enable = function() end
'';
systemwideConfigPkg = pkgs.writeTextDir "wireplumber/main.lua.d/80-systemwide.lua" ''
systemwideConfigPkg = pkgs.writeTextDir "share/wireplumber/main.lua.d/80-systemwide.lua" ''
-- When running system-wide, these settings need to be disabled (they
-- use functions that aren't available on the system dbus).
alsa_monitor.properties["alsa.reserve"] = false
default_access.properties["enable-flatpak-portal"] = false
'';
systemwideBluetoothConfigPkg = pkgs.writeTextDir "wireplumber/bluetooth.lua.d/80-systemwide.lua" ''
systemwideBluetoothConfigPkg = pkgs.writeTextDir "share/wireplumber/bluetooth.lua.d/80-systemwide.lua" ''
-- When running system-wide, logind-integration needs to be disabled.
bluez_monitor.properties["with-logind"] = false
'';
@ -98,6 +98,18 @@ in
assertion = !config.hardware.bluetooth.hsphfpd.enable;
message = "Using WirePlumber conflicts with hsphfpd, as it provides the same functionality. `hardware.bluetooth.hsphfpd.enable` needs be set to false";
}
{
assertion = builtins.length
(builtins.attrNames
(
lib.filterAttrs
(name: value:
lib.hasPrefix "wireplumber/" name || name == "wireplumber"
)
config.environment.etc
)) == 1;
message = "Using `environment.etc.\"wireplumber<...>\"` directly is no longer supported in 24.05. Use `services.wireplumber.configPackages` instead.";
}
];
environment.systemPackages = [ cfg.package ];
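User configuration can follow the same `writeTextDir` pattern the module itself uses above, packaged and added via the option named in the assertion message (option path as given there; the Lua snippet is illustrative):

  services.wireplumber.configPackages = [
    (pkgs.writeTextDir "share/wireplumber/main.lua.d/99-no-reserve.lua" ''
      alsa_monitor.properties["alsa.reserve"] = false
    '')
  ];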

View file

@ -1,11 +1,13 @@
{ config, lib, pkgs, ... }:
let
inherit (lib.types) nullOr enum;
inherit (lib) types;
cfg = config.services.ollama;
ollamaPackage = cfg.package.override {
inherit (cfg) acceleration;
linuxPackages.nvidia_x11 = config.hardware.nvidia.package;
linuxPackages = config.boot.kernelPackages // {
nvidia_x11 = config.hardware.nvidia.package;
};
};
in
{
@ -15,14 +17,14 @@ in
lib.mdDoc "Server for local large language models"
);
listenAddress = lib.mkOption {
type = lib.types.str;
type = types.str;
default = "127.0.0.1:11434";
description = lib.mdDoc ''
Specifies the bind address on which the ollama server HTTP interface listens.
'';
};
acceleration = lib.mkOption {
type = nullOr (enum [ "rocm" "cuda" ]);
type = types.nullOr (types.enum [ "rocm" "cuda" ]);
default = null;
example = "rocm";
description = lib.mdDoc ''
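Putting the options from this hunk together, a minimal service configuration might look like this (values illustrative; the defaults are shown above):

  services.ollama = {
    enable = true;
    listenAddress = "127.0.0.1:11434"; # default bind address
    acceleration = "rocm";             # or "cuda"; null (the default) disables GPU acceleration
  };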

View file

@ -331,7 +331,7 @@ let
formatListener = idx: listener:
[
"listener ${toString listener.port} ${toString listener.address}"
"acl_file /etc/mosquitto/mosquitto-acl-${toString idx}.conf"
"acl_file /etc/mosquitto/acl-${toString idx}.conf"
]
++ optional (! listener.omitPasswordAuth) "password_file ${cfg.dataDir}/passwd-${toString idx}"
++ formatFreeform {} listener.settings
@ -690,7 +690,7 @@ in
environment.etc = listToAttrs (
imap0
(idx: listener: {
name = "mosquitto/mosquitto-acl-${toString idx}.conf";
name = "mosquitto/acl-${toString idx}.conf";
value = {
user = config.users.users.mosquitto.name;
group = config.users.users.mosquitto.group;

View file

@ -18,7 +18,7 @@ in
# determines the default: later modules (if enabled) are preferred.
# E.g., if Plasma 5 is enabled, it supersedes xterm.
imports = [
./none.nix ./xterm.nix ./phosh.nix ./xfce.nix ./plasma5.nix ./lumina.nix
./none.nix ./xterm.nix ./phosh.nix ./xfce.nix ./plasma5.nix ./plasma6.nix ./lumina.nix
./lxqt.nix ./enlightenment.nix ./gnome.nix ./retroarch.nix ./kodi.nix
./mate.nix ./pantheon.nix ./surf-display.nix ./cde.nix
./cinnamon.nix ./budgie.nix ./deepin.nix

View file

@ -362,7 +362,7 @@ in
security.pam.services.kde = { allowNullPassword = true; };
security.pam.services.login.enableKwallet = true;
security.pam.services.login.kwallet.enable = true;
systemd.user.services = {
plasma-early-setup = mkIf cfg.runUsingSystemd {

View file

@ -0,0 +1,276 @@
{
config,
lib,
pkgs,
utils,
...
}: let
xcfg = config.services.xserver;
cfg = xcfg.desktopManager.plasma6;
inherit (pkgs) kdePackages;
inherit (lib) literalExpression mkDefault mkIf mkOption mkPackageOptionMD types;
in {
options = {
services.xserver.desktopManager.plasma6 = {
enable = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc "Enable the Plasma 6 (KDE 6) desktop environment.";
};
enableQt5Integration = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc "Enable Qt 5 integration (theming, etc). Disable for a pure Qt 6 system.";
};
notoPackage = mkPackageOptionMD pkgs "Noto fonts - used for UI by default" {
default = ["noto-fonts"];
example = "noto-fonts-lgc-plus";
};
};
environment.plasma6.excludePackages = mkOption {
description = lib.mdDoc "List of default packages to exclude from the configuration";
type = types.listOf types.package;
default = [];
example = literalExpression "[ pkgs.kdePackages.elisa ]";
};
};
config = mkIf cfg.enable {
assertions = [
{
assertion = cfg.enable -> !config.services.xserver.desktopManager.plasma5.enable;
message = "Cannot enable plasma5 and plasma6 at the same time!";
}
];
qt.enable = true;
environment.systemPackages = with kdePackages; let
requiredPackages = [
# Hack? To make everything run on Wayland
qtwayland
# Needed to render SVG icons
qtsvg
# Frameworks with globally loadable bits
frameworkintegration # provides Qt plugin
kauth # provides helper service
kcoreaddons # provides extra mime type info
kded # provides helper service
kfilemetadata # provides Qt plugins
kguiaddons # provides geo URL handlers
kiconthemes # provides Qt plugins
kimageformats # provides Qt plugins
kio # provides helper service + a bunch of other stuff
kpackage # provides kpackagetool tool
kservice # provides kbuildsycoca6 tool
kwallet # provides helper service
kwallet-pam # provides helper service
kwalletmanager # provides KCMs and stuff
plasma-activities # provides plasma-activities-cli tool
solid # provides solid-hardware6 tool
phonon-vlc # provides Phonon plugin
# Core Plasma parts
kwin
pkgs.xwayland
kscreen
libkscreen
kscreenlocker
kactivitymanagerd
kde-cli-tools
kglobalacceld
kwrited # wall message proxy, not to be confused with kwrite
milou
polkit-kde-agent-1
plasma-desktop
plasma-workspace
# Crash handler
drkonqi
# Application integration
libplasma # provides Kirigami platform theme
plasma-integration # provides Qt platform theme
kde-gtk-config
# Artwork + themes
breeze
breeze-icons
breeze-gtk
ocean-sound-theme
plasma-workspace-wallpapers
pkgs.hicolor-icon-theme # fallback icons
qqc2-breeze-style
qqc2-desktop-style
# misc Plasma extras
kdeplasma-addons
pkgs.xdg-user-dirs # recommended upstream
# Plasma utilities
kmenuedit
kinfocenter
plasma-systemmonitor
ksystemstats
libksysguard
spectacle
systemsettings
# Gear
baloo
dolphin
dolphin-plugins
ffmpegthumbs
kdegraphics-thumbnailers
kde-inotify-survey
kio-admin
kio-extras
kio-fuse
];
optionalPackages = [
plasma-browser-integration
konsole
(lib.getBin qttools) # Expose qdbus in PATH
ark
elisa
gwenview
okular
kate
khelpcenter
print-manager
];
in
requiredPackages
++ utils.removePackagesByName optionalPackages config.environment.plasma6.excludePackages
++ lib.optionals config.services.xserver.desktopManager.plasma6.enableQt5Integration [
breeze.qt5
plasma-integration.qt5
pkgs.plasma5Packages.kwayland-integration
kio-extras-kf5
]
# Optional hardware support features
++ lib.optionals config.hardware.bluetooth.enable [bluedevil bluez-qt pkgs.openobex pkgs.obexftp]
++ lib.optional config.networking.networkmanager.enable plasma-nm
++ lib.optional config.hardware.pulseaudio.enable plasma-pa
++ lib.optional config.services.pipewire.pulse.enable plasma-pa
++ lib.optional config.powerManagement.enable powerdevil
++ lib.optional config.services.colord.enable colord-kde
++ lib.optional config.services.hardware.bolt.enable plasma-thunderbolt
++ lib.optionals config.services.samba.enable [kdenetwork-filesharing pkgs.samba]
++ lib.optional config.services.xserver.wacom.enable wacomtablet
++ lib.optional config.services.flatpak.enable flatpak-kcm;
environment.pathsToLink = [
# FIXME: modules should link subdirs of `/share` rather than relying on this
"/share"
"/libexec" # for drkonqi
];
environment.etc."X11/xkb".source = xcfg.xkb.dir;
# Add ~/.config/kdedefaults to XDG_CONFIG_DIRS for shells, since Plasma sets that.
# FIXME: maybe we should append to XDG_CONFIG_DIRS in /etc/set-environment instead?
environment.sessionVariables.XDG_CONFIG_DIRS = ["$HOME/.config/kdedefaults"];
# Needed for things that depend on other store.kde.org packages to install correctly,
# notably Plasma look-and-feel packages (a.k.a. Global Themes)
#
# FIXME: this is annoyingly impure and should really be fixed at source level somehow,
# but kpackage is a library so we can't just wrap the one thing invoking it and be done.
# This also means things won't work for people not on Plasma, but at least this way it
# works for SOME people.
environment.sessionVariables.KPACKAGE_DEP_RESOLVERS_PATH = "${kdePackages.frameworkintegration.out}/libexec/kf6/kpackagehandlers";
# Enable GTK applications to load SVG icons
services.xserver.gdk-pixbuf.modulePackages = [pkgs.librsvg];
fonts.packages = [cfg.notoPackage pkgs.hack-font];
fonts.fontconfig.defaultFonts = {
monospace = ["Hack" "Noto Sans Mono"];
sansSerif = ["Noto Sans"];
serif = ["Noto Serif"];
};
programs.ssh.askPassword = mkDefault "${kdePackages.ksshaskpass.out}/bin/ksshaskpass";
# Enable helpful DBus services.
services.accounts-daemon.enable = true;
# when changing an account picture, accounts-daemon reads a temporary file containing the image, which systemsettings may place under /tmp
systemd.services.accounts-daemon.serviceConfig.PrivateTmp = false;
services.power-profiles-daemon.enable = mkDefault true;
services.system-config-printer.enable = mkIf config.services.printing.enable (mkDefault true);
services.udisks2.enable = true;
services.upower.enable = config.powerManagement.enable;
services.xserver.libinput.enable = mkDefault true;
# Extra UDEV rules used by Solid
services.udev.packages = [
# libmtp has "bin", "dev", "out" outputs. UDEV rules file is in "out".
pkgs.libmtp.out
pkgs.media-player-info
];
# Set up Dr. Konqi as crash handler
systemd.packages = [kdePackages.drkonqi];
systemd.services."drkonqi-coredump-processor@".wantedBy = ["systemd-coredump@.service"];
xdg.portal.enable = true;
xdg.portal.extraPortals = [kdePackages.xdg-desktop-portal-kde];
xdg.portal.configPackages = mkDefault [kdePackages.xdg-desktop-portal-kde];
services.pipewire.enable = mkDefault true;
services.xserver.displayManager = {
sessionPackages = [kdePackages.plasma-workspace];
defaultSession = mkDefault "plasma";
};
services.xserver.displayManager.sddm = {
package = kdePackages.sddm;
theme = mkDefault "breeze";
extraPackages = with kdePackages; [
breeze-icons
kirigami
plasma5support
qtsvg
qtvirtualkeyboard
];
};
security.pam.services = {
login.kwallet = {
enable = true;
package = kdePackages.kwallet-pam;
};
kde.kwallet = {
enable = true;
package = kdePackages.kwallet-pam;
};
kde-fingerprint = lib.mkIf config.services.fprintd.enable { fprintAuth = true; };
kde-smartcard = lib.mkIf config.security.pam.p11.enable { p11Auth = true; };
};
programs.dconf.enable = true;
programs.firefox.nativeMessagingHosts.packages = [kdePackages.plasma-browser-integration];
programs.chromium = {
enablePlasmaBrowserIntegration = true;
plasmaBrowserIntegrationPackage = pkgs.kdePackages.plasma-browser-integration;
};
programs.kdeconnect.package = kdePackages.kdeconnect-kde;
};
}
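A minimal host configuration for the new module, mirroring the plasma6 VM test added later in this commit (illustrative):

  services.xserver.enable = true;
  services.xserver.displayManager.sddm.enable = true;
  services.xserver.desktopManager.plasma6.enable = true;
  # Optionally trim the default package set:
  environment.plasma6.excludePackages = [ pkgs.kdePackages.elisa ];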

View file

@ -7,7 +7,10 @@ let
cfg = dmcfg.sddm;
xEnv = config.systemd.services.display-manager.environment;
sddm = cfg.package;
sddm = cfg.package.override(old: {
withWayland = cfg.wayland.enable;
extraPackages = old.extraPackages or [] ++ cfg.extraPackages;
});
iniFmt = pkgs.formats.ini { };
@ -140,6 +143,15 @@ in
'';
};
extraPackages = mkOption {
type = types.listOf types.package;
default = [];
defaultText = "[]";
description = lib.mdDoc ''
Extra Qt plugins / QML libraries to add to the environment.
'';
};
autoNumlock = mkOption {
type = types.bool;
default = false;
@ -211,7 +223,7 @@ in
keymap_variant = xcfg.xkb.variant;
keymap_options = xcfg.xkb.options;
};
}; in "${pkgs.weston}/bin/weston --shell=fullscreen-shell.so -c ${westonIni}";
}; in "${pkgs.weston}/bin/weston --shell=kiosk -c ${westonIni}";
description = lib.mdDoc "Command used to start the selected compositor";
};
};
@ -235,15 +247,7 @@ in
}
];
services.xserver.displayManager.job = {
environment = {
# Load themes from system environment
QT_PLUGIN_PATH = "/run/current-system/sw/" + pkgs.qt5.qtbase.qtPluginPrefix;
QML2_IMPORT_PATH = "/run/current-system/sw/" + pkgs.qt5.qtbase.qtQmlPrefix;
};
execCmd = "exec /run/current-system/sw/bin/sddm";
};
services.xserver.displayManager.job.execCmd = "exec /run/current-system/sw/bin/sddm";
security.pam.services = {
sddm.text = ''
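The new `extraPackages` option feeds directly into the package override at the top of this hunk (`old.extraPackages or [] ++ cfg.extraPackages`). A hedged usage sketch; the package named here is only an example of a Qt plugin a custom theme might need:

  services.xserver.displayManager.sddm = {
    enable = true;
    extraPackages = [ pkgs.qt6.qtmultimedia ];
  };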

View file

@ -17,6 +17,9 @@ from dataclasses import dataclass
# These values will be replaced with actual values during the package build
EFI_SYS_MOUNT_POINT = "@efiSysMountPoint@"
BOOT_MOUNT_POINT = "@bootMountPoint@"
LOADER_CONF = f"{EFI_SYS_MOUNT_POINT}/loader/loader.conf" # Always stored on the ESP
NIXOS_DIR = "@nixosDir@"
TIMEOUT = "@timeout@"
EDITOR = "@editor@" == "1"
CONSOLE_MODE = "@consoleMode@"
@ -28,6 +31,7 @@ CONFIGURATION_LIMIT = int("@configurationLimit@")
CAN_TOUCH_EFI_VARIABLES = "@canTouchEfiVariables@"
GRACEFUL = "@graceful@"
COPY_EXTRA_FILES = "@copyExtraFiles@"
CHECK_MOUNTPOINTS = "@checkMountpoints@"
@dataclass
class BootSpec:
@ -87,7 +91,7 @@ def generation_conf_filename(profile: str | None, generation: int, specialisatio
def write_loader_conf(profile: str | None, generation: int, specialisation: str | None) -> None:
with open(f"{EFI_SYS_MOUNT_POINT}/loader/loader.conf.tmp", 'w') as f:
with open(f"{LOADER_CONF}.tmp", 'w') as f:
if TIMEOUT != "":
f.write(f"timeout {TIMEOUT}\n")
f.write("default %s\n" % generation_conf_filename(profile, generation, specialisation))
@ -96,7 +100,7 @@ def write_loader_conf(profile: str | None, generation: int, specialisation: str
f.write(f"console-mode {CONSOLE_MODE}\n")
f.flush()
os.fsync(f.fileno())
os.rename(f"{EFI_SYS_MOUNT_POINT}/loader/loader.conf.tmp", f"{EFI_SYS_MOUNT_POINT}/loader/loader.conf")
os.rename(f"{LOADER_CONF}.tmp", LOADER_CONF)
def get_bootspec(profile: str | None, generation: int) -> BootSpec:
@ -126,9 +130,9 @@ def copy_from_file(file: str, dry_run: bool = False) -> str:
store_file_path = os.path.realpath(file)
suffix = os.path.basename(store_file_path)
store_dir = os.path.basename(os.path.dirname(store_file_path))
efi_file_path = "/efi/nixos/%s-%s.efi" % (store_dir, suffix)
efi_file_path = f"{NIXOS_DIR}/{store_dir}-{suffix}.efi"
if not dry_run:
copy_if_not_exists(store_file_path, f"{EFI_SYS_MOUNT_POINT}%s" % (efi_file_path))
copy_if_not_exists(store_file_path, f"{BOOT_MOUNT_POINT}{efi_file_path}")
return efi_file_path
def write_entry(profile: str | None, generation: int, specialisation: str | None,
@ -145,7 +149,7 @@ def write_entry(profile: str | None, generation: int, specialisation: str | None
try:
if bootspec.initrdSecrets is not None:
subprocess.check_call([bootspec.initrdSecrets, f"{EFI_SYS_MOUNT_POINT}%s" % (initrd)])
subprocess.check_call([bootspec.initrdSecrets, f"{BOOT_MOUNT_POINT}%s" % (initrd)])
except subprocess.CalledProcessError:
if current:
print("failed to create initrd secrets!", file=sys.stderr)
@ -155,7 +159,7 @@ def write_entry(profile: str | None, generation: int, specialisation: str | None
f'for "{title} - Configuration {generation}", an older generation', file=sys.stderr)
print("note: this is normal after having removed "
"or renamed a file in `boot.initrd.secrets`", file=sys.stderr)
entry_file = f"{EFI_SYS_MOUNT_POINT}/loader/entries/%s" % (
entry_file = f"{BOOT_MOUNT_POINT}/loader/entries/%s" % (
generation_conf_filename(profile, generation, specialisation))
tmp_path = "%s.tmp" % (entry_file)
kernel_params = "init=%s " % bootspec.init
@ -202,14 +206,14 @@ def get_generations(profile: str | None = None) -> list[SystemIdentifier]:
def remove_old_entries(gens: list[SystemIdentifier]) -> None:
rex_profile = re.compile(r"^" + re.escape(EFI_SYS_MOUNT_POINT) + "/loader/entries/nixos-(.*)-generation-.*\.conf$")
rex_generation = re.compile(r"^" + re.escape(EFI_SYS_MOUNT_POINT) + "/loader/entries/nixos.*-generation-([0-9]+)(-specialisation-.*)?\.conf$")
rex_profile = re.compile(r"^" + re.escape(BOOT_MOUNT_POINT) + "/loader/entries/nixos-(.*)-generation-.*\.conf$")
rex_generation = re.compile(r"^" + re.escape(BOOT_MOUNT_POINT) + "/loader/entries/nixos.*-generation-([0-9]+)(-specialisation-.*)?\.conf$")
known_paths = []
for gen in gens:
bootspec = get_bootspec(gen.profile, gen.generation)
known_paths.append(copy_from_file(bootspec.kernel, True))
known_paths.append(copy_from_file(bootspec.initrd, True))
for path in glob.iglob(f"{EFI_SYS_MOUNT_POINT}/loader/entries/nixos*-generation-[1-9]*.conf"):
for path in glob.iglob(f"{BOOT_MOUNT_POINT}/loader/entries/nixos*-generation-[1-9]*.conf"):
if rex_profile.match(path):
prof = rex_profile.sub(r"\1", path)
else:
@ -220,11 +224,18 @@ def remove_old_entries(gens: list[SystemIdentifier]) -> None:
continue
if not (prof, gen_number, None) in gens:
os.unlink(path)
for path in glob.iglob(f"{EFI_SYS_MOUNT_POINT}/efi/nixos/*"):
for path in glob.iglob(f"{BOOT_MOUNT_POINT}/{NIXOS_DIR}/*"):
if not path in known_paths and not os.path.isdir(path):
os.unlink(path)
def cleanup_esp() -> None:
for path in glob.iglob(f"{EFI_SYS_MOUNT_POINT}/loader/entries/nixos*"):
os.unlink(path)
if os.path.isdir(f"{EFI_SYS_MOUNT_POINT}/{NIXOS_DIR}"):
shutil.rmtree(f"{EFI_SYS_MOUNT_POINT}/{NIXOS_DIR}")
def get_profiles() -> list[str]:
if os.path.isdir("/nix/var/nix/profiles/system-profiles/"):
return [x
@ -255,6 +266,9 @@ def install_bootloader(args: argparse.Namespace) -> None:
# flags to pass to bootctl install/update
bootctl_flags = []
if BOOT_MOUNT_POINT != EFI_SYS_MOUNT_POINT:
bootctl_flags.append(f"--boot-path={BOOT_MOUNT_POINT}")
if CAN_TOUCH_EFI_VARIABLES != "1":
bootctl_flags.append("--no-variables")
@ -263,8 +277,8 @@ def install_bootloader(args: argparse.Namespace) -> None:
if os.getenv("NIXOS_INSTALL_BOOTLOADER") == "1":
# bootctl uses fopen() with modes "wxe" and fails if the file exists.
if os.path.exists(f"{EFI_SYS_MOUNT_POINT}/loader/loader.conf"):
os.unlink(f"{EFI_SYS_MOUNT_POINT}/loader/loader.conf")
if os.path.exists(LOADER_CONF):
os.unlink(LOADER_CONF)
subprocess.check_call([f"{SYSTEMD}/bin/bootctl", f"--esp-path={EFI_SYS_MOUNT_POINT}"] + bootctl_flags + ["install"])
else:
@ -291,13 +305,15 @@ def install_bootloader(args: argparse.Namespace) -> None:
print("updating systemd-boot from %s to %s" % (installed_version, available_version))
subprocess.check_call([f"{SYSTEMD}/bin/bootctl", f"--esp-path={EFI_SYS_MOUNT_POINT}"] + bootctl_flags + ["update"])
os.makedirs(f"{EFI_SYS_MOUNT_POINT}/efi/nixos", exist_ok=True)
os.makedirs(f"{EFI_SYS_MOUNT_POINT}/loader/entries", exist_ok=True)
os.makedirs(f"{BOOT_MOUNT_POINT}/{NIXOS_DIR}", exist_ok=True)
os.makedirs(f"{BOOT_MOUNT_POINT}/loader/entries", exist_ok=True)
gens = get_generations()
for profile in get_profiles():
gens += get_generations(profile)
remove_old_entries(gens)
for gen in gens:
try:
bootspec = get_bootspec(gen.profile, gen.generation)
@ -315,9 +331,15 @@ def install_bootloader(args: argparse.Namespace) -> None:
else:
raise e
for root, _, files in os.walk(f"{EFI_SYS_MOUNT_POINT}/efi/nixos/.extra-files", topdown=False):
relative_root = root.removeprefix(f"{EFI_SYS_MOUNT_POINT}/efi/nixos/.extra-files").removeprefix("/")
actual_root = os.path.join(f"{EFI_SYS_MOUNT_POINT}", relative_root)
if BOOT_MOUNT_POINT != EFI_SYS_MOUNT_POINT:
# Cleanup any entries in ESP if xbootldrMountPoint is set.
# If the user later unsets xbootldrMountPoint, entries in XBOOTLDR will not be cleaned up
# automatically, as we don't have information about the mount point anymore.
cleanup_esp()
for root, _, files in os.walk(f"{BOOT_MOUNT_POINT}/{NIXOS_DIR}/.extra-files", topdown=False):
relative_root = root.removeprefix(f"{BOOT_MOUNT_POINT}/{NIXOS_DIR}/.extra-files").removeprefix("/")
actual_root = os.path.join(f"{BOOT_MOUNT_POINT}", relative_root)
for file in files:
actual_file = os.path.join(actual_root, file)
@ -330,7 +352,7 @@ def install_bootloader(args: argparse.Namespace) -> None:
os.rmdir(actual_root)
os.rmdir(root)
os.makedirs(f"{EFI_SYS_MOUNT_POINT}/efi/nixos/.extra-files", exist_ok=True)
os.makedirs(f"{BOOT_MOUNT_POINT}/{NIXOS_DIR}/.extra-files", exist_ok=True)
subprocess.check_call(COPY_EXTRA_FILES)
@ -340,6 +362,8 @@ def main() -> None:
parser.add_argument('default_config', metavar='DEFAULT-CONFIG', help=f"The default {DISTRO_NAME} config to boot")
args = parser.parse_args()
subprocess.check_call(CHECK_MOUNTPOINTS)
try:
install_bootloader(args)
finally:
@ -347,9 +371,14 @@ def main() -> None:
# it can leave the system in an unbootable state, when a crash/outage
# happens shortly after an update. To decrease the likelihood of this
# event sync the efi filesystem after each update.
rc = libc.syncfs(os.open(f"{EFI_SYS_MOUNT_POINT}", os.O_RDONLY))
rc = libc.syncfs(os.open(f"{BOOT_MOUNT_POINT}", os.O_RDONLY))
if rc != 0:
print(f"could not sync {EFI_SYS_MOUNT_POINT}: {os.strerror(rc)}", file=sys.stderr)
print(f"could not sync {BOOT_MOUNT_POINT}: {os.strerror(rc)}", file=sys.stderr)
if BOOT_MOUNT_POINT != EFI_SYS_MOUNT_POINT:
rc = libc.syncfs(os.open(EFI_SYS_MOUNT_POINT, os.O_RDONLY))
if rc != 0:
print(f"could not sync {EFI_SYS_MOUNT_POINT}: {os.strerror(rc)}", file=sys.stderr)
if __name__ == '__main__':

View file

@ -7,7 +7,7 @@ let
efi = config.boot.loader.efi;
systemdBootBuilder = pkgs.substituteAll {
systemdBootBuilder = pkgs.substituteAll rec {
src = ./systemd-boot-builder.py;
isExecutable = true;
@ -28,23 +28,40 @@ let
inherit (efi) efiSysMountPoint canTouchEfiVariables;
bootMountPoint = if cfg.xbootldrMountPoint != null
then cfg.xbootldrMountPoint
else efi.efiSysMountPoint;
nixosDir = "/EFI/nixos";
inherit (config.system.nixos) distroName;
memtest86 = optionalString cfg.memtest86.enable pkgs.memtest86plus;
netbootxyz = optionalString cfg.netbootxyz.enable pkgs.netbootxyz-efi;
checkMountpoints = pkgs.writeShellScript "check-mountpoints" ''
fail() {
echo "$1 = '$2' is not a mounted partition. Is the path configured correctly?" >&2
exit 1
}
${pkgs.util-linuxMinimal}/bin/findmnt ${efiSysMountPoint} > /dev/null || fail efiSysMountPoint ${efiSysMountPoint}
${lib.optionalString
(cfg.xbootldrMountPoint != null)
"${pkgs.util-linuxMinimal}/bin/findmnt ${cfg.xbootldrMountPoint} > /dev/null || fail xbootldrMountPoint ${cfg.xbootldrMountPoint}"}
'';
copyExtraFiles = pkgs.writeShellScript "copy-extra-files" ''
empty_file=$(${pkgs.coreutils}/bin/mktemp)
${concatStrings (mapAttrsToList (n: v: ''
${pkgs.coreutils}/bin/install -Dp "${v}" "${efi.efiSysMountPoint}/"${escapeShellArg n}
${pkgs.coreutils}/bin/install -D $empty_file "${efi.efiSysMountPoint}/efi/nixos/.extra-files/"${escapeShellArg n}
${pkgs.coreutils}/bin/install -Dp "${v}" "${bootMountPoint}/"${escapeShellArg n}
${pkgs.coreutils}/bin/install -D $empty_file "${bootMountPoint}/${nixosDir}/.extra-files/"${escapeShellArg n}
'') cfg.extraFiles)}
${concatStrings (mapAttrsToList (n: v: ''
${pkgs.coreutils}/bin/install -Dp "${pkgs.writeText n v}" "${efi.efiSysMountPoint}/loader/entries/"${escapeShellArg n}
${pkgs.coreutils}/bin/install -D $empty_file "${efi.efiSysMountPoint}/efi/nixos/.extra-files/loader/entries/"${escapeShellArg n}
${pkgs.coreutils}/bin/install -Dp "${pkgs.writeText n v}" "${bootMountPoint}/loader/entries/"${escapeShellArg n}
${pkgs.coreutils}/bin/install -D $empty_file "${bootMountPoint}/${nixosDir}/.extra-files/loader/entries/"${escapeShellArg n}
'') cfg.extraEntries)}
'';
};
@ -99,6 +116,18 @@ in {
'';
};
xbootldrMountPoint = mkOption {
default = null;
type = types.nullOr types.str;
description = lib.mdDoc ''
Where the XBOOTLDR partition is mounted.
If set, this partition will be used as $BOOT to store boot loader entries and extra files
instead of the EFI partition. As per the bootloader specification, it is recommended that
the EFI and XBOOTLDR partitions be mounted at `/efi` and `/boot`, respectively.
'';
};
configurationLimit = mkOption {
default = null;
example = 120;
@ -108,7 +137,7 @@ in {
Useful to prevent boot partition running out of disk space.
`null` means no limit i.e. all generations
that were not garbage collected yet.
that have not been garbage collected yet.
'';
};
@ -200,7 +229,7 @@ in {
'';
description = lib.mdDoc ''
Any additional entries you want added to the `systemd-boot` menu.
These entries will be copied to {file}`/boot/loader/entries`.
These entries will be copied to {file}`$BOOT/loader/entries`.
Each attribute name denotes the destination file name,
and the corresponding attribute value is the contents of the entry.
@ -217,9 +246,9 @@ in {
{ "efi/memtest86/memtest.efi" = "''${pkgs.memtest86plus}/memtest.efi"; }
'';
description = lib.mdDoc ''
A set of files to be copied to {file}`/boot`.
A set of files to be copied to {file}`$BOOT`.
Each attribute name denotes the destination file name in
{file}`/boot`, while the corresponding
{file}`$BOOT`, while the corresponding
attribute value specifies the source file.
'';
};
@ -243,6 +272,18 @@ in {
config = mkIf cfg.enable {
assertions = [
{
assertion = (hasPrefix "/" efi.efiSysMountPoint);
message = "The ESP mount point '${efi.efiSysMountPoint}' must be an absolute path";
}
{
assertion = cfg.xbootldrMountPoint == null || (hasPrefix "/" cfg.xbootldrMountPoint);
message = "The XBOOTLDR mount point '${cfg.xbootldrMountPoint}' must be an absolute path";
}
{
assertion = cfg.xbootldrMountPoint != efi.efiSysMountPoint;
message = "The XBOOTLDR mount point '${cfg.xbootldrMountPoint}' cannot be the same as the ESP mount point '${efi.efiSysMountPoint}'";
}
{
assertion = (config.boot.kernelPackages.kernel.features or { efiBootStub = true; }) ? efiBootStub;
message = "This kernel does not support the EFI boot stub";

View file

@ -647,9 +647,9 @@ let
"BatmanAdvanced"
])
# Note: For DHCP the values both, none, v4, v6 are deprecated
(assertValueOneOf "DHCP" ["yes" "no" "ipv4" "ipv6"])
(assertValueOneOf "DHCP" (boolValues ++ ["ipv4" "ipv6"]))
(assertValueOneOf "DHCPServer" boolValues)
(assertValueOneOf "LinkLocalAddressing" ["yes" "no" "ipv4" "ipv6" "fallback" "ipv4-fallback"])
(assertValueOneOf "LinkLocalAddressing" (boolValues ++ ["ipv4" "ipv6" "fallback" "ipv4-fallback"]))
(assertValueOneOf "IPv6LinkLocalAddressGenerationMode" ["eui64" "none" "stable-privacy" "random"])
(assertValueOneOf "IPv4LLRoute" boolValues)
(assertValueOneOf "DefaultRouteOnDevice" boolValues)

View file

@ -52,7 +52,7 @@ in {
# See: https://github.com/NixOS/nixpkgs/issues/213408
pkgs.substitute {
src = "${systemd}/example/sysctl.d/50-coredump.conf";
replacements = [
substitutions = [
"--replace"
"${systemd}"
"${pkgs.symlinkJoin { name = "systemd"; paths = [ systemd ]; }}"

View file

@ -10,6 +10,20 @@ let
"repart.d"
format
(lib.mapAttrs (_n: v: { Partition = v; }) cfg.partitions);
partitionAssertions = lib.mapAttrsToList (fileName: definition:
let
maxLabelLength = 36; # GPT_LABEL_MAX defined in systemd's gpt.h
labelLength = builtins.stringLength definition.Label;
in
{
assertion = definition ? Label -> maxLabelLength >= labelLength;
message = ''
The partition label '${definition.Label}' defined for '${fileName}' is ${toString labelLength}
characters long, but the maximum label length supported by systemd is ${toString maxLabelLength}.
'';
}
) cfg.partitions;
in
{
options = {
@ -81,7 +95,7 @@ in
'boot.initrd.systemd.repart.enable' requires 'boot.initrd.systemd.enable' to be enabled.
'';
}
];
] ++ partitionAssertions;
# systemd-repart uses loopback devices for partition creation
boot.initrd.availableKernelModules = lib.optional initrdCfg.enable "loop";
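The new assertion checks the `Label` key of every `repart.d` definition against systemd's 36-character GPT limit. A definition that passes might look like this (attribute path hedged from the module's `cfg.partitions`; keys follow repart.d(5)):

  systemd.repart.partitions."10-root" = {
    Type = "root";
    Label = "nixos-root"; # 10 characters, well within the 36-character limit
  };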

View file

@ -347,24 +347,12 @@ in
removeLinuxDRM = lib.mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
Linux 6.2 dropped some kernel symbols required on aarch64 required by zfs.
Enabling this option will bring them back to allow this kernel version.
Note that in some jurisdictions this may be illegal as it might be considered
removing copyright protection from the code.
See https://www.ifross.org/?q=en/artikel/ongoing-dispute-over-value-exportsymbolgpl-function for further information.
description = ''
Patch the kernel to change symbols needed by ZFS from
EXPORT_SYMBOL_GPL to EXPORT_SYMBOL.
If configure your kernel package with `zfs.latestCompatibleLinuxPackages`, you will need to also pass removeLinuxDRM to that package like this:
```
{ pkgs, ... }: {
boot.kernelPackages = (pkgs.zfs.override {
removeLinuxDRM = pkgs.hostPlatform.isAarch64;
}).latestCompatibleLinuxPackages;
boot.zfs.removeLinuxDRM = true;
}
```
Currently has no effect, but may do so again in the future if a kernel
update breaks ZFS due to symbols being newly changed to GPL.
'';
};
};
@ -588,9 +576,7 @@ in
kernelParams = lib.optionals (!config.boot.zfs.allowHibernation) [ "nohibernate" ];
extraModulePackages = [
(cfgZfs.modulePackage.override
(lib.optionalAttrs (lib.versionOlder cfgZfs.package.version "2.2.3")
{ inherit (cfgZfs) removeLinuxDRM; }))
cfgZfs.modulePackage
];
};
@ -727,21 +713,6 @@ in
services.udev.packages = [ cfgZfs.package ]; # to hook zvol naming, etc.
systemd.packages = [ cfgZfs.package ];
# Export kernel_neon_* symbols again.
# This change is necessary until ZFS figures out a solution
# with upstream or in their build system to fill the gap for
# this symbol.
# In the meantime, we restore what was once a working piece of code
# in the kernel.
boot.kernelPatches = lib.optional (lib.versionOlder cfgZfs.package.version "2.2.3" && cfgZfs.removeLinuxDRM && pkgs.stdenv.hostPlatform.system == "aarch64-linux") {
name = "export-neon-symbols-as-gpl";
patch = pkgs.fetchpatch {
url = "https://github.com/torvalds/linux/commit/aaeca98456431a8d9382ecf48ac4843e252c07b3.patch";
hash = "sha256-L2g4G1tlWPIi/QRckMuHDcdWBcKpObSWSRTvbHRIwIk=";
revert = true;
};
};
systemd.services = let
createImportService' = pool: createImportService {
inherit pool;

View file

@ -177,6 +177,12 @@ in rec {
inherit system;
});
iso_plasma6 = forMatchingSystems supportedSystems (system: makeIso {
module = ./modules/installer/cd-dvd/installation-cd-graphical-calamares-plasma6.nix;
type = "plasma6";
inherit system;
});
iso_gnome = forMatchingSystems supportedSystems (system: makeIso {
module = ./modules/installer/cd-dvd/installation-cd-graphical-calamares-gnome.nix;
type = "gnome";

View file

@ -695,6 +695,7 @@ in {
plantuml-server = handleTest ./plantuml-server.nix {};
plasma-bigscreen = handleTest ./plasma-bigscreen.nix {};
plasma5 = handleTest ./plasma5.nix {};
plasma6 = handleTest ./plasma6.nix {};
plasma5-systemd-start = handleTest ./plasma5-systemd-start.nix {};
plausible = handleTest ./plausible.nix {};
please = handleTest ./please.nix {};

View file

@ -4,10 +4,41 @@
}:
with import ../lib/testing-python.nix { inherit system pkgs; };
with pkgs.lib;
let
qemu-common = import ../lib/qemu-common.nix { inherit (pkgs) lib pkgs; };
lib = pkgs.lib;
qemu-common = import ../lib/qemu-common.nix { inherit lib pkgs; };
mkStartCommand = {
memory ? 2048,
cdrom ? null,
usb ? null,
pxe ? null,
uboot ? false,
uefi ? false,
extraFlags ? [],
}: let
qemu = qemu-common.qemuBinary pkgs.qemu_test;
flags = [
"-m" (toString memory)
"-netdev" ("user,id=net0" + (lib.optionalString (pxe != null) ",tftp=${pxe},bootfile=netboot.ipxe"))
"-device" ("virtio-net-pci,netdev=net0" + (lib.optionalString (pxe != null && uefi) ",romfile=${pkgs.ipxe}/ipxe.efirom"))
] ++ lib.optionals (cdrom != null) [
"-cdrom" cdrom
] ++ lib.optionals (usb != null) [
"-device" "usb-ehci"
"-drive" "id=usbdisk,file=${usb},if=none,readonly"
"-device" "usb-storage,drive=usbdisk"
] ++ lib.optionals (pxe != null) [
"-boot" "order=n"
] ++ lib.optionals uefi [
"-drive" "if=pflash,format=raw,unit=0,readonly=on,file=${pkgs.OVMF.firmware}"
"-drive" "if=pflash,format=raw,unit=1,readonly=on,file=${pkgs.OVMF.variables}"
] ++ extraFlags;
flagsStr = lib.concatStringsSep " " flags;
in "${qemu} ${flagsStr}";
iso =
(import ../lib/eval-config.nix {
@ -28,21 +59,16 @@ let
];
}).config.system.build.sdImage;
pythonDict = params: "\n {\n ${concatStringsSep ",\n " (mapAttrsToList (name: param: "\"${name}\": \"${param}\"") params)},\n }\n";
makeBootTest = name: extraConfig:
makeBootTest = name: config:
let
machineConfig = pythonDict ({
qemuBinary = qemu-common.qemuBinary pkgs.qemu_test;
qemuFlags = "-m 768";
} // extraConfig);
startCommand = mkStartCommand config;
in
makeTest {
name = "boot-" + name;
nodes = { };
testScript =
''
machine = create_machine(${machineConfig})
machine = create_machine("${startCommand}")
machine.start()
machine.wait_for_unit("multi-user.target")
machine.succeed("nix store verify --no-trust -r --option experimental-features nix-command /run/current-system")
@ -73,43 +99,35 @@ let
config.system.build.netbootIpxeScript
];
};
machineConfig = pythonDict ({
qemuBinary = qemu-common.qemuBinary pkgs.qemu_test;
qemuFlags = "-boot order=n -m 2000";
netBackendArgs = "tftp=${ipxeBootDir},bootfile=netboot.ipxe";
startCommand = mkStartCommand ({
pxe = ipxeBootDir;
} // extraConfig);
in
makeTest {
name = "boot-netboot-" + name;
nodes = { };
testScript = ''
machine = create_machine(${machineConfig})
machine = create_machine("${startCommand}")
machine.start()
machine.wait_for_unit("multi-user.target")
machine.shutdown()
'';
};
uefiBinary = {
x86_64-linux = "${pkgs.OVMF.fd}/FV/OVMF.fd";
aarch64-linux = "${pkgs.OVMF.fd}/FV/QEMU_EFI.fd";
}.${pkgs.stdenv.hostPlatform.system};
in {
uefiCdrom = makeBootTest "uefi-cdrom" {
uefi = true;
cdrom = "${iso}/iso/${iso.isoName}";
bios = uefiBinary;
};
uefiUsb = makeBootTest "uefi-usb" {
uefi = true;
usb = "${iso}/iso/${iso.isoName}";
bios = uefiBinary;
};
uefiNetboot = makeNetbootTest "uefi" {
bios = uefiBinary;
# Custom ROM is needed for EFI PXE boot. I failed to understand exactly why, because QEMU should still use iPXE for EFI.
netFrontendArgs = "romfile=${pkgs.ipxe}/ipxe.efirom";
uefi = true;
};
} // optionalAttrs (pkgs.stdenv.hostPlatform.system == "x86_64-linux") {
} // lib.optionalAttrs (pkgs.stdenv.hostPlatform.system == "x86_64-linux") {
biosCdrom = makeBootTest "bios-cdrom" {
cdrom = "${iso}/iso/${iso.isoName}";
};
@ -124,9 +142,12 @@ in {
sdImage = "${sd}/sd-image/${sd.imageName}";
mutableImage = "/tmp/linked-image.qcow2";
machineConfig = pythonDict {
bios = "${pkgs.ubootQemuX86}/u-boot.rom";
qemuFlags = "-m 768 -machine type=pc,accel=tcg -drive file=${mutableImage},if=ide,format=qcow2";
startCommand = mkStartCommand {
extraFlags = [
"-bios" "${pkgs.ubootQemuX86}/u-boot.rom"
"-machine" "type=pc,accel=tcg"
"-drive" "file=${mutableImage},if=virtio"
];
};
in makeTest {
name = "boot-uboot-extlinux";
@ -138,11 +159,14 @@ in {
if os.system("qemu-img create -f qcow2 -F raw -b ${sdImage} ${mutableImage}") != 0:
raise RuntimeError("Could not create mutable linked image")
machine = create_machine(${machineConfig})
machine = create_machine("${startCommand}")
machine.start()
machine.wait_for_unit("multi-user.target")
machine.succeed("nix store verify -r --no-trust --option experimental-features nix-command /run/current-system")
machine.shutdown()
'';
# kernel can't find rootfs after boot - investigate?
meta.broken = true;
};
}

View file

@ -61,7 +61,7 @@ with pkgs.lib;
+ " $QEMU_OPTS"
)
machine = create_machine({"startCommand": start_command})
machine = create_machine(start_command)
try:
'' + indentLines script + ''
finally:

View file

@ -42,6 +42,8 @@ let
];
networking.firewall = firewallSettings;
nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ "consul" ];
services.consul = {
enable = true;
inherit webUi;
@ -65,6 +67,8 @@ let
];
networking.firewall = firewallSettings;
nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ "consul" ];
services.consul =
assert builtins.elem thisConsensusServerHost allConsensusServerHosts;
{

View file

@ -58,6 +58,20 @@ let
'';
config.Cmd = [ "${pkgs.coreutils}/bin/stat" "-c" "%u:%g" "/testfile" ];
};
nonRootTestImage =
pkgs.dockerTools.streamLayeredImage rec {
name = "non-root-test";
tag = "latest";
uid = 1000;
gid = 1000;
uname = "user";
gname = "user";
config = {
User = "user";
Cmd = [ "${pkgs.coreutils}/bin/stat" "-c" "%u:%g" "${pkgs.coreutils}/bin/stat" ];
};
};
in {
name = "docker-tools";
meta = with pkgs.lib.maintainers; {
@ -181,7 +195,7 @@ in {
):
docker.succeed(
"docker load --input='${examples.bashLayeredWithUser}'",
"docker run -u somebody --rm ${examples.bashLayeredWithUser.imageName} ${pkgs.bash}/bin/bash -c 'test 555 == $(stat --format=%a /nix) && test 555 == $(stat --format=%a /nix/store)'",
"docker run -u somebody --rm ${examples.bashLayeredWithUser.imageName} ${pkgs.bash}/bin/bash -c 'test 755 == $(stat --format=%a /nix) && test 755 == $(stat --format=%a /nix/store)'",
"docker rmi ${examples.bashLayeredWithUser.imageName}",
)
@ -604,5 +618,11 @@ in {
"${chownTestImage} | docker load",
"docker run --rm ${chownTestImage.imageName} | diff /dev/stdin <(echo 12345:12345)"
)
with subtest("streamLayeredImage: with non-root user"):
docker.succeed(
"${nonRootTestImage} | docker load",
"docker run --rm ${chownTestImage.imageName} | diff /dev/stdin <(echo 12345:12345)"
)
'';
})

View file

@ -5,6 +5,8 @@ let
configuration = {
# Building documentation makes the test unnecessarily take a longer time:
documentation.enable = lib.mkForce false;
boot.kernel.sysctl."net.ipv4.ip_forward" = "1";
} // extra;
};
@ -40,6 +42,12 @@ in
with machine.nested("Waiting for instance to start and be usable"):
retry(instance_is_up)
def check_sysctl(instance):
with subtest("systemd sysctl settings are applied"):
machine.succeed(f"incus exec {instance} -- systemctl status systemd-sysctl")
sysctl = machine.succeed(f"incus exec {instance} -- sysctl net.ipv4.ip_forward").strip().split(" ")[-1]
assert "1" == sysctl, f"systemd-sysctl configuration not correctly applied, {sysctl} != 1"
machine.wait_for_unit("incus.service")
# no preseed should mean no service
@ -83,6 +91,7 @@ in
with subtest("lxc-container generator configures plain container"):
# reuse the existing container to save some time
machine.succeed("incus exec container test -- -e /run/systemd/system/service.d/zzz-lxc-service.conf")
check_sysctl("container")
with subtest("lxc-container generator configures nested container"):
machine.execute("incus delete --force container")
@ -94,6 +103,8 @@ in
target = machine.succeed("incus exec container readlink -- -f /run/systemd/system/systemd-binfmt.service").strip()
assert target == "/dev/null", "lxc generator did not correctly mask /run/systemd/system/systemd-binfmt.service"
check_sysctl("container")
with subtest("lxc-container generator configures privileged container"):
machine.execute("incus delete --force container")
machine.succeed("incus launch nixos container --config security.privileged=true")
@ -101,5 +112,7 @@ in
retry(instance_is_up)
machine.succeed("incus exec container test -- -e /run/systemd/system/service.d/zzz-lxc-service.conf")
check_sysctl("container")
'';
})

View file

@ -83,46 +83,34 @@ let
, postInstallCommands, preBootCommands, postBootCommands, extraConfig
, testSpecialisationConfig, testFlakeSwitch, clevisTest, clevisFallbackTest
}:
let iface = "virtio";
isEfi = bootLoader == "systemd-boot" || (bootLoader == "grub" && grubUseEfi);
bios = if pkgs.stdenv.isAarch64 then "QEMU_EFI.fd" else "OVMF.fd";
let
qemu-common = import ../lib/qemu-common.nix { inherit (pkgs) lib pkgs; };
isEfi = bootLoader == "systemd-boot" || (bootLoader == "grub" && grubUseEfi);
qemu = qemu-common.qemuBinary pkgs.qemu_test;
in if !isEfi && !pkgs.stdenv.hostPlatform.isx86 then ''
machine.succeed("true")
'' else ''
import subprocess
tpm_folder = os.environ['NIX_BUILD_TOP']
def assemble_qemu_flags():
flags = "-cpu max"
${if (system == "x86_64-linux" || system == "i686-linux")
then ''flags += " -m 1024"''
else ''flags += " -m 768 -enable-kvm -machine virt,gic-version=host"''
}
${optionalString clevisTest ''flags += f" -chardev socket,id=chrtpm,path={tpm_folder}/swtpm-sock -tpmdev emulator,id=tpm0,chardev=chrtpm -device tpm-tis,tpmdev=tpm0"''}
${optionalString clevisTest ''flags += " -device virtio-net-pci,netdev=vlan1,mac=52:54:00:12:11:02 -netdev vde,id=vlan1,sock=\"$QEMU_VDE_SOCKET_1\""''}
return flags
qemu_flags = {"qemuFlags": assemble_qemu_flags()}
import os
import subprocess
tpm_folder = os.environ['NIX_BUILD_TOP']
startcommand = "${qemu} -m 2048"
${optionalString clevisTest ''
startcommand += f" -chardev socket,id=chrtpm,path={tpm_folder}/swtpm-sock -tpmdev emulator,id=tpm0,chardev=chrtpm -device tpm-tis,tpmdev=tpm0"
startcommand += " -device virtio-net-pci,netdev=vlan1,mac=52:54:00:12:11:02 -netdev vde,id=vlan1,sock=\"$QEMU_VDE_SOCKET_1\""
''}
${optionalString isEfi ''
startcommand +=" -drive if=pflash,format=raw,unit=0,readonly=on,file=${pkgs.OVMF.firmware} -drive if=pflash,format=raw,unit=1,readonly=on,file=${pkgs.OVMF.variables}"
''}
image_dir = machine.state_dir
disk_image = os.path.join(image_dir, "machine.qcow2")
hd_flags = {
"hdaInterface": "${iface}",
"hda": disk_image,
}
${optionalString isEfi ''
hd_flags.update(
bios="${pkgs.OVMF.fd}/FV/${bios}"
)''
}
default_flags = {**hd_flags, **qemu_flags}
startcommand += f" -drive file={disk_image},if=virtio,werror=report"
def create_machine_named(name):
return create_machine({**default_flags, "name": name})
return create_machine(startcommand, name=name)
class Tpm:
def __init__(self):
@ -471,7 +459,7 @@ let
# builds stuff in the VM, needs more juice
virtualisation.diskSize = 8 * 1024;
virtualisation.cores = 8;
virtualisation.memorySize = 1536;
virtualisation.memorySize = 2048;
boot.initrd.systemd.enable = systemdStage1;

View file

@ -30,7 +30,6 @@ let
linux_5_10_hardened
linux_5_15_hardened
linux_6_1_hardened
linux_6_5_hardened
linux_6_6_hardened
linux_6_7_hardened
linux_rt_5_4

nixos/tests/plasma6.nix (new file, 64 lines)
View file

@ -0,0 +1,64 @@
import ./make-test-python.nix ({ pkgs, ...} :
{
name = "plasma6";
meta = with pkgs.lib.maintainers; {
maintainers = [ k900 ];
};
nodes.machine = { ... }:
{
imports = [ ./common/user-account.nix ];
services.xserver.enable = true;
services.xserver.displayManager.sddm.enable = true;
# FIXME: this should be testing Wayland
services.xserver.displayManager.defaultSession = "plasmax11";
services.xserver.desktopManager.plasma6.enable = true;
environment.plasma6.excludePackages = [ pkgs.kdePackages.elisa ];
services.xserver.displayManager.autoLogin = {
enable = true;
user = "alice";
};
};
testScript = { nodes, ... }: let
user = nodes.machine.users.users.alice;
xdo = "${pkgs.xdotool}/bin/xdotool";
in ''
with subtest("Wait for login"):
start_all()
machine.wait_for_file("/tmp/xauth_*")
machine.succeed("xauth merge /tmp/xauth_*")
with subtest("Check plasmashell started"):
machine.wait_until_succeeds("pgrep plasmashell")
machine.wait_for_window("^Desktop ")
with subtest("Check that KDED is running"):
machine.succeed("pgrep kded6")
with subtest("Ensure Elisa is not installed"):
machine.fail("which elisa")
machine.succeed("su - ${user.name} -c 'xauth merge /tmp/xauth_*'")
with subtest("Run Dolphin"):
machine.execute("su - ${user.name} -c 'DISPLAY=:0.0 dolphin >&2 &'")
machine.wait_for_window(" Dolphin")
with subtest("Run Konsole"):
machine.execute("su - ${user.name} -c 'DISPLAY=:0.0 konsole >&2 &'")
machine.wait_for_window("Konsole")
with subtest("Run systemsettings"):
machine.execute("su - ${user.name} -c 'DISPLAY=:0.0 systemsettings >&2 &'")
machine.wait_for_window("Settings")
with subtest("Wait to get a screenshot"):
machine.execute(
"${xdo} key Alt+F1 sleep 10"
)
machine.screenshot("screen")
'';
})

View file

@ -14,6 +14,72 @@ let
boot.loader.efi.canTouchEfiVariables = true;
environment.systemPackages = [ pkgs.efibootmgr ];
};
commonXbootldr = { config, lib, pkgs, ... }:
let
diskImage = import ../lib/make-disk-image.nix {
inherit config lib pkgs;
label = "nixos";
format = "qcow2";
partitionTableType = "efixbootldr";
touchEFIVars = true;
installBootLoader = true;
};
in
{
imports = [ common ];
virtualisation.useBootLoader = lib.mkForce false; # Only way to tell qemu-vm not to create the default system image
virtualisation.directBoot.enable = false; # But don't direct boot either because we're testing systemd-boot
system.build.diskImage = diskImage; # Use custom disk image with an XBOOTLDR partition
virtualisation.efi.variables = "${diskImage}/efi-vars.fd";
virtualisation.useDefaultFilesystems = false; # Needs custom setup for `diskImage`
virtualisation.bootPartition = null;
virtualisation.fileSystems = {
"/" = {
device = "/dev/vda3";
fsType = "ext4";
};
"/boot" = {
device = "/dev/vda2";
fsType = "vfat";
noCheck = true;
};
"/efi" = {
device = "/dev/vda1";
fsType = "vfat";
noCheck = true;
};
};
boot.loader.systemd-boot.enable = true;
boot.loader.efi.efiSysMountPoint = "/efi";
boot.loader.systemd-boot.xbootldrMountPoint = "/boot";
};
customDiskImage = nodes: ''
import os
import subprocess
import tempfile
tmp_disk_image = tempfile.NamedTemporaryFile()
subprocess.run([
"${nodes.machine.virtualisation.qemu.package}/bin/qemu-img",
"create",
"-f",
"qcow2",
"-b",
"${nodes.machine.system.build.diskImage}/nixos.qcow2",
"-F",
"qcow2",
tmp_disk_image.name,
])
# Set NIX_DISK_IMAGE so that the qemu script finds the right disk image.
os.environ['NIX_DISK_IMAGE'] = tmp_disk_image.name
'';
in
{
basic = makeTest {
@ -65,6 +131,32 @@ in
'';
};
basicXbootldr = makeTest {
name = "systemd-boot-xbootldr";
meta.maintainers = with pkgs.lib.maintainers; [ sdht0 ];
nodes.machine = commonXbootldr;
testScript = { nodes, ... }: ''
${customDiskImage nodes}
machine.start()
machine.wait_for_unit("multi-user.target")
machine.succeed("test -e /efi/EFI/systemd/systemd-bootx64.efi")
machine.succeed("test -e /boot/loader/entries/nixos-generation-1.conf")
# Ensure we actually booted using systemd-boot
# Magic number is the vendor UUID used by systemd-boot.
machine.succeed(
"test -e /sys/firmware/efi/efivars/LoaderEntrySelected-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f"
)
# "bootctl install" should have created an EFI entry
machine.succeed('efibootmgr | grep "Linux Boot Manager"')
'';
};
# Check that specialisations create corresponding boot entries.
specialisation = makeTest {
name = "systemd-boot-specialisation";
@ -184,6 +276,29 @@ in
'';
};
entryFilenameXbootldr = makeTest {
name = "systemd-boot-entry-filename-xbootldr";
meta.maintainers = with pkgs.lib.maintainers; [ sdht0 ];
nodes.machine = { pkgs, lib, ... }: {
imports = [ commonXbootldr ];
boot.loader.systemd-boot.memtest86.enable = true;
boot.loader.systemd-boot.memtest86.entryFilename = "apple.conf";
};
testScript = { nodes, ... }: ''
${customDiskImage nodes}
machine.start()
machine.wait_for_unit("multi-user.target")
machine.succeed("test -e /efi/EFI/systemd/systemd-bootx64.efi")
machine.fail("test -e /boot/loader/entries/memtest86.conf")
machine.succeed("test -e /boot/loader/entries/apple.conf")
machine.succeed("test -e /boot/EFI/memtest86/memtest.efi")
'';
};
extraEntries = makeTest {
name = "systemd-boot-extra-entries";
meta.maintainers = with pkgs.lib.maintainers; [ Enzime julienmalka ];

View file

@ -45,13 +45,13 @@ stdenv.mkDerivation {
pname = binName;
# versions are specified in `squeezelite.h`
# see https://github.com/ralph-irving/squeezelite/issues/29
version = "2.0.0.1465";
version = "2.0.0.1468";
src = fetchFromGitHub {
owner = "ralph-irving";
repo = "squeezelite";
rev = "6de9e229aa4cc7c3131ff855f3ead39581127090";
hash = "sha256-qSRmiX1+hbsWQsU9cRQ7QRkdXs5Q6aE7n7lxZsx8+Hs=";
rev = "fd89d67b1b9a17a6dd212be0c91d0417b440f60a";
hash = "sha256-wYVRlv+Y1jvdAGlj2zXKUhQBwWX9pGgNX6U71PsfySg=";
};
buildInputs = [ flac libmad libvorbis mpg123 ]

View file

@ -22,11 +22,11 @@ let
in
stdenv.mkDerivation rec {
pname = "clightning";
version = "23.11.2";
version = "24.02";
src = fetchurl {
url = "https://github.com/ElementsProject/lightning/releases/download/v${version}/clightning-v${version}.zip";
sha256 = "sha256-n1+9Q493N/N5sr7sVpzhObtbKpEejsNUUhhbYPukveg=";
sha256 = "sha256-hud6NU2apAJNf2epNk+3nwTUmRy5DfNOYiGp402H4ik=";
};
# when building on darwin we need dawin.cctools to provide the correct libtool

View file

@ -16,12 +16,12 @@
stdenv.mkDerivation rec {
pname = "lightdm-gtk-greeter";
version = "2.0.8";
version = "2.0.9";
src = fetchurl {
# Release tarball differs from source tarball.
url = "https://github.com/Xubuntu/lightdm-gtk-greeter/releases/download/lightdm-gtk-greeter-${version}/lightdm-gtk-greeter-${version}.tar.gz";
sha256 = "vvuzAMezT/IYZf28iBIB9zD8fFYOngHRfomelHcVBhM=";
hash = "sha256-yP3xmKqaP50NrQtI3+I8Ine3kQfo/PxillKQ8QgfZF0=";
};
nativeBuildInputs = [
@ -56,7 +56,7 @@ stdenv.mkDerivation rec {
postInstall = ''
substituteInPlace "$out/share/xgreeters/lightdm-gtk-greeter.desktop" \
--replace "Exec=lightdm-gtk-greeter" "Exec=$out/bin/lightdm-gtk-greeter"
--replace-fail "Exec=lightdm-gtk-greeter" "Exec=$out/bin/lightdm-gtk-greeter"
'';
passthru.xgreeters = linkFarm "lightdm-gtk-greeter-xgreeters" [{

View file

@ -1,82 +1,37 @@
{ stdenv, lib, fetchFromGitHub
, cmake, pkg-config, qttools
, libxcb, libXau, pam, qtbase, wrapQtAppsHook, qtdeclarative
, qtquickcontrols2 ? null, systemd, xkeyboardconfig
{
lib,
callPackage,
runCommand,
qtwayland,
wrapQtAppsHook,
unwrapped ? callPackage ./unwrapped.nix {},
withWayland ? false,
extraPackages ? [],
}:
let
isQt6 = lib.versions.major qtbase.version == "6";
in stdenv.mkDerivation {
pname = "sddm";
version = "0.20.0-unstable-2023-12-29";
runCommand "sddm-wrapped" {
inherit (unwrapped) version;
src = fetchFromGitHub {
owner = "sddm";
repo = "sddm";
rev = "501129294be1487f753482c29949fc1c19ef340e";
hash = "sha256-mLm987Ah0X9s0tBK2a45iERwYoh5JzWb3TFlSoxi8CA=";
buildInputs = unwrapped.buildInputs ++ extraPackages ++ lib.optional withWayland qtwayland;
nativeBuildInputs = [ wrapQtAppsHook ];
passthru = {
inherit unwrapped;
};
patches = [
./sddm-ignore-config-mtime.patch
./sddm-default-session.patch
];
meta = unwrapped.meta;
} ''
mkdir -p $out/bin
postPatch = ''
substituteInPlace src/greeter/waylandkeyboardbackend.cpp \
--replace "/usr/share/X11/xkb/rules/evdev.xml" "${xkeyboardconfig}/share/X11/xkb/rules/evdev.xml"
'';
cd ${unwrapped}
nativeBuildInputs = [ wrapQtAppsHook cmake pkg-config qttools ];
for i in *; do
if [ "$i" == "bin" ]; then
continue
fi
ln -s ${unwrapped}/$i $out/$i
done
buildInputs = [
libxcb
libXau
pam
qtbase
qtdeclarative
qtquickcontrols2
systemd
];
cmakeFlags = [
(lib.cmakeBool "BUILD_WITH_QT6" isQt6)
"-DCONFIG_FILE=/etc/sddm.conf"
"-DCONFIG_DIR=/etc/sddm.conf.d"
# Set UID_MIN and UID_MAX so that the build script won't try
# to read them from /etc/login.defs (fails in chroot).
# The values come from NixOS; they may not be appropriate
# for running SDDM outside NixOS, but that configuration is
# not supported anyway.
"-DUID_MIN=1000"
"-DUID_MAX=29999"
# we still want to run the DM on VT 7 for the time being, as 1-6 are
# occupied by getties by default
"-DSDDM_INITIAL_VT=7"
"-DQT_IMPORTS_DIR=${placeholder "out"}/${qtbase.qtQmlPrefix}"
"-DCMAKE_INSTALL_SYSCONFDIR=${placeholder "out"}/etc"
"-DSYSTEMD_SYSTEM_UNIT_DIR=${placeholder "out"}/lib/systemd/system"
"-DSYSTEMD_SYSUSERS_DIR=${placeholder "out"}/lib/sysusers.d"
"-DSYSTEMD_TMPFILES_DIR=${placeholder "out"}/lib/tmpfiles.d"
"-DDBUS_CONFIG_DIR=${placeholder "out"}/share/dbus-1/system.d"
];
postInstall = ''
# remove empty scripts
rm "$out/share/sddm/scripts/Xsetup" "$out/share/sddm/scripts/Xstop"
for f in $out/share/sddm/themes/**/theme.conf ; do
substituteInPlace $f \
--replace 'background=' "background=$(dirname $f)/"
done
'';
meta = with lib; {
description = "QML based X11 display manager";
homepage = "https://github.com/sddm/sddm";
maintainers = with maintainers; [ abbradar ttuegel ];
platforms = platforms.linux;
license = licenses.gpl2Plus;
};
}
for i in bin/*; do
makeQtWrapper ${unwrapped}/$i $out/$i --set SDDM_GREETER_DIR $out/bin
done
''

View file

@ -0,0 +1,14 @@
diff --git a/src/daemon/Greeter.cpp b/src/daemon/Greeter.cpp
index 07fccde..dd22a07 100644
--- a/src/daemon/Greeter.cpp
+++ b/src/daemon/Greeter.cpp
@@ -83,7 +83,8 @@ namespace SDDM {
QString Greeter::greeterPathForQt(int qtVersion)
{
const QString suffix = qtVersion == 5 ? QString() : QStringLiteral("-qt%1").arg(qtVersion);
- return QStringLiteral(BIN_INSTALL_DIR "/sddm-greeter%1").arg(suffix);
+ const QString greeterDir = qEnvironmentVariable("SDDM_GREETER_DIR", QStringLiteral(BIN_INSTALL_DIR));
+ return QStringLiteral("%1/sddm-greeter%2").arg(greeterDir).arg(suffix);
}
bool Greeter::start() {

View file

@ -0,0 +1,86 @@
{ stdenv, lib, fetchFromGitHub
, cmake, pkg-config, qttools
, libxcb, libXau, pam, qtbase, qtdeclarative
, qtquickcontrols2 ? null, systemd, xkeyboardconfig
}:
let
isQt6 = lib.versions.major qtbase.version == "6";
in stdenv.mkDerivation(finalAttrs: {
pname = "sddm-unwrapped";
version = "0.21.0";
src = fetchFromGitHub {
owner = "sddm";
repo = "sddm";
rev = "v${finalAttrs.version}";
hash = "sha256-r5mnEWham2WnoEqRh5tBj/6rn5mN62ENOCmsLv2Ht+w=";
};
patches = [
./greeter-path.patch
./sddm-ignore-config-mtime.patch
./sddm-default-session.patch
];
postPatch = ''
substituteInPlace src/greeter/waylandkeyboardbackend.cpp \
--replace "/usr/share/X11/xkb/rules/evdev.xml" "${xkeyboardconfig}/share/X11/xkb/rules/evdev.xml"
'';
nativeBuildInputs = [ cmake pkg-config qttools ];
buildInputs = [
libxcb
libXau
pam
qtbase
qtdeclarative
qtquickcontrols2
systemd
];
# We will wrap manually later
dontWrapQtApps = true;
cmakeFlags = [
(lib.cmakeBool "BUILD_WITH_QT6" isQt6)
"-DCONFIG_FILE=/etc/sddm.conf"
"-DCONFIG_DIR=/etc/sddm.conf.d"
# Set UID_MIN and UID_MAX so that the build script won't try
# to read them from /etc/login.defs (fails in chroot).
# The values come from NixOS; they may not be appropriate
# for running SDDM outside NixOS, but that configuration is
# not supported anyway.
"-DUID_MIN=1000"
"-DUID_MAX=29999"
# we still want to run the DM on VT 7 for the time being, as 1-6 are
# occupied by getties by default
"-DSDDM_INITIAL_VT=7"
"-DQT_IMPORTS_DIR=${placeholder "out"}/${qtbase.qtQmlPrefix}"
"-DCMAKE_INSTALL_SYSCONFDIR=${placeholder "out"}/etc"
"-DSYSTEMD_SYSTEM_UNIT_DIR=${placeholder "out"}/lib/systemd/system"
"-DSYSTEMD_SYSUSERS_DIR=${placeholder "out"}/lib/sysusers.d"
"-DSYSTEMD_TMPFILES_DIR=${placeholder "out"}/lib/tmpfiles.d"
"-DDBUS_CONFIG_DIR=${placeholder "out"}/share/dbus-1/system.d"
];
postInstall = ''
# remove empty scripts
rm "$out/share/sddm/scripts/Xsetup" "$out/share/sddm/scripts/Xstop"
for f in $out/share/sddm/themes/**/theme.conf ; do
substituteInPlace $f \
--replace 'background=' "background=$(dirname $f)/"
done
'';
meta = with lib; {
description = "QML based X11 display manager";
homepage = "https://github.com/sddm/sddm";
maintainers = with maintainers; [ abbradar ttuegel k900 ];
platforms = platforms.linux;
license = licenses.gpl2Plus;
};
})

View file

@ -38,14 +38,14 @@ let
in
stdenv.mkDerivation rec {
pname = "mame";
version = "0.262";
version = "0.263";
srcVersion = builtins.replaceStrings [ "." ] [ "" ] version;
src = fetchFromGitHub {
owner = "mamedev";
repo = "mame";
rev = "mame${srcVersion}";
hash = "sha256-avVHtnmKPUq+mMtxyaqSaGyrdsi5LXF1YS8JAb2QvBo=";
hash = "sha256-6MH4dMGOekiiq4yE68dIAiWWfvQvFcvqKtT/Z1SQ1aY=";
};
outputs = [ "out" "tools" ];

View file

@ -49,13 +49,13 @@ in
stdenv.mkDerivation (finalAttrs: {
pname = "imagemagick";
version = "7.1.1-28";
version = "7.1.1-29";
src = fetchFromGitHub {
owner = "ImageMagick";
repo = "ImageMagick";
rev = finalAttrs.version;
hash = "sha256-WT058DZzMrNKn9E56dH476iCgeOi7QQ3jNBxKAqT6h4=";
hash = "sha256-W9WbHzmTa0dA9+mOxXu88qmN1mO9ORaH0Nj6r2s1Q+E=";
};
outputs = [ "out" "dev" "doc" ]; # bin/ isn't really big

View file

@ -8,7 +8,7 @@
src = fetchurl {
url = "https://github.com/upscayl/upscayl/releases/download/v${version}/upscayl-${version}-linux.AppImage";
hash = "sha256-33jJRMvRQxL7rPJ6VigEKcDhge46CAA0jJUOhzEyWzA=";
hash = "sha256-EoTFvlLsXQYZldXfEHhP3/bHygm+NdeDsf+p138mM8w";
};
appimageContents = appimageTools.extractType2 {

View file

@ -13,7 +13,7 @@ IF YOUR PACKAGE IS NOT LISTED IN `./srcs.nix`, IT DOES NOT GO HERE.
Many of the packages released upstream are not yet built in Nixpkgs due to lack
of demand. To add a Nixpkgs build for an upstream package, copy one of the
existing packages here and modify it as necessary. A simple example package that
still shows most of the available features is in `./gwenview.nix`.
still shows most of the available features is in `./gwenview`.
# Updates
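A minimal sketch of what such a copied package could look like, following the `gwenview` pattern shown further below (the package name and dependency set are hypothetical; the version and source are supplied by this directory's machinery rather than an explicit `src`):

```nix
# Hypothetical skeleton for a new package in this directory; version and src
# come from ./srcs.nix via the local mkDerivation wrapper, so only the
# dependencies and metadata need to be filled in.
{ mkDerivation, lib, extra-cmake-modules, kdoctools, kconfig, kio }:

mkDerivation {
  pname = "some-kde-app";  # assumption: must match the upstream release name
  nativeBuildInputs = [ extra-cmake-modules kdoctools ];
  buildInputs = [ kconfig kio ];
  meta = with lib; {
    license = licenses.gpl2Plus;
    maintainers = [ ];
  };
}
```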
@ -92,7 +92,7 @@ let
ghostwriter = callPackage ./ghostwriter.nix {};
granatier = callPackage ./granatier.nix {};
grantleetheme = callPackage ./grantleetheme {};
gwenview = callPackage ./gwenview.nix {};
gwenview = callPackage ./gwenview {};
incidenceeditor = callPackage ./incidenceeditor.nix {};
itinerary = callPackage ./itinerary.nix {};
juk = callPackage ./juk.nix {};

View file

@ -16,6 +16,10 @@ mkDerivation {
maintainers = [ lib.maintainers.ttuegel ];
mainProgram = "gwenview";
};
# Fix build with versioned kImageAnnotator
patches = [./kimageannotator.patch];
nativeBuildInputs = [ extra-cmake-modules kdoctools ];
buildInputs = [
baloo kactivities kio kitemmodels kparts libkdcraw libkipi phonon

View file

@ -0,0 +1,56 @@
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 01db0fb1..06319c54 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -166,12 +166,12 @@ if(NOT WITHOUT_X11)
endif()
if (QT_MAJOR_VERSION STREQUAL "5")
- find_package(kImageAnnotator)
- set_package_properties(kImageAnnotator PROPERTIES URL "https://github.com/ksnip/kImageAnnotator" DESCRIPTION "The kImageAnnotator library provides tools to annotate" TYPE REQUIRED)
- if(kImageAnnotator_FOUND)
+ find_package(kImageAnnotator-Qt5)
+ set_package_properties(kImageAnnotator-Qt5 PROPERTIES URL "https://github.com/ksnip/kImageAnnotator" DESCRIPTION "The kImageAnnotator library provides tools to annotate" TYPE REQUIRED)
+ if(kImageAnnotator-Qt5_FOUND)
set(KIMAGEANNOTATOR_FOUND 1)
- find_package(kColorPicker REQUIRED)
- if(NOT kImageAnnotator_VERSION VERSION_LESS 0.5.0)
+ find_package(kColorPicker-Qt5 REQUIRED)
+ if(NOT kImageAnnotator-Qt5_VERSION VERSION_LESS 0.5.0)
set(KIMAGEANNOTATOR_CAN_LOAD_TRANSLATIONS 1)
endif()
endif()
diff --git a/app/CMakeLists.txt b/app/CMakeLists.txt
index 8c136835..ef4cff74 100644
--- a/app/CMakeLists.txt
+++ b/app/CMakeLists.txt
@@ -157,6 +157,6 @@ target_link_libraries(slideshowfileitemaction
KF${QT_MAJOR_VERSION}::KIOWidgets
KF${QT_MAJOR_VERSION}::Notifications)
-if(kImageAnnotator_FOUND)
+if(kImageAnnotator-Qt5_FOUND)
target_link_libraries(gwenview kImageAnnotator::kImageAnnotator)
endif()
diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt
index 05a2ea67..4167a1bb 100644
--- a/lib/CMakeLists.txt
+++ b/lib/CMakeLists.txt
@@ -157,7 +157,7 @@ set(gwenviewlib_SRCS
touch/touch_helper.cpp
${GV_JPEG_DIR}/transupp.c
)
-if (kImageAnnotator_FOUND)
+if (kImageAnnotator-Qt5_FOUND)
set(gwenviewlib_SRCS ${gwenviewlib_SRCS}
annotate/annotatedialog.cpp
annotate/annotateoperation.cpp
@@ -338,7 +338,7 @@ if (GWENVIEW_SEMANTICINFO_BACKEND_BALOO)
)
endif()
-if(kImageAnnotator_FOUND)
+if(kImageAnnotator-Qt5_FOUND)
target_link_libraries(gwenviewlib kImageAnnotator::kImageAnnotator)
endif()

View file

@ -12,6 +12,14 @@ let
libX11 libXrender libXrandr libxcb libXmu libpthreadstubs libXext libXdmcp
libXxf86vm libXinerama libSM libXv libXaw libXi libXcursor libXcomposite
];
gstreamerDeps = pkgs: with pkgs.gst_all_1; [
gstreamer
gst-plugins-base
gst-plugins-good
gst-plugins-ugly
gst-plugins-bad
gst-libav
];
in buildFHSEnv {
name = "lutris";
@ -121,6 +129,7 @@ in buildFHSEnv {
# Winetricks
fribidi
] ++ xorgDeps pkgs
++ gstreamerDeps pkgs
++ extraLibraries pkgs;
extraInstallCommands = ''

View file

@ -1,5 +1,5 @@
{ lib, stdenv, fetchFromGitHub, meson, ninja, pkg-config, scdoc
, systemd, pango, cairo, gdk-pixbuf, jq
, systemd, pango, cairo, gdk-pixbuf, jq, bash
, wayland, wayland-protocols
, wrapGAppsHook }:
@ -25,7 +25,7 @@ stdenv.mkDerivation rec {
preFixup = ''
gappsWrapperArgs+=(
--prefix PATH : "${lib.makeBinPath [ systemd /* for busctl */ jq ]}"
--prefix PATH : "${lib.makeBinPath [ systemd /* for busctl */ jq bash ]}"
)
'';

View file

@ -17,13 +17,13 @@
stdenv.mkDerivation rec {
pname = "rofi-emoji";
version = "3.2.0";
version = "3.3.0";
src = fetchFromGitHub {
owner = "Mange";
repo = pname;
rev = "v${version}";
sha256 = "sha256-P7AHLwqicKYj5I0Rl9B5mdD/v9iW9aihkNo7enonRF4=";
sha256 = "sha256-Y+E5TViKFVtqXMLVEcq1VMLPzG04vfZyThUM7a/fFGM=";
};
patches = [

View file

@ -1,9 +1,8 @@
{ mkDerivation
, lib
{ lib
, stdenv
, fetchFromGitHub
, substituteAll
, qtbase
, qtsvg
, qtwebengine
, qtdeclarative
, extra-cmake-modules
@ -46,6 +45,7 @@ stdenv.mkDerivation (finalAttrs: {
buildInputs = [
qtbase
qtsvg
cpp-utilities
qtutilities
boost
@ -74,7 +74,7 @@ stdenv.mkDerivation (finalAttrs: {
doCheck = !stdenv.isDarwin;
preCheck = ''
export QT_QPA_PLATFORM=offscreen
export QT_PLUGIN_PATH="${qtbase.bin}/${qtbase.qtPluginPrefix}"
export QT_PLUGIN_PATH="${lib.getBin qtbase}/${qtbase.qtPluginPrefix}"
'';
# don't test --help on Darwin because output is .app
doInstallCheck = !stdenv.isDarwin;
@ -83,6 +83,8 @@ stdenv.mkDerivation (finalAttrs: {
'';
cmakeFlags = [
"-DQT_PACKAGE_PREFIX=Qt${lib.versions.major qtbase.version}"
"-DKF_PACKAGE_PREFIX=KF${lib.versions.major qtbase.version}"
"-DBUILD_TESTING=ON"
# See https://github.com/Martchus/syncthingtray/issues/208
"-DEXCLUDE_TESTS_FROM_ALL=OFF"

View file

@ -7,16 +7,16 @@
buildGoModule rec {
pname = "typioca";
version = "2.9.0";
version = "2.10.0";
src = fetchFromGitHub {
owner = "bloznelis";
repo = "typioca";
rev = version;
hash = "sha256-N7+etRqHxLX0eVvdOofXQ1fqEUTsck7UAL5mX6NUsOU=";
hash = "sha256-D6I1r+8cvUerqXR2VyBL33lapWAs5Cl5yvYOsmUBnHo=";
};
vendorHash = "sha256-FKLAbrZVtF8gj90NU7m47pG+BBKYkPjJKax5nZmpehY=";
vendorHash = "sha256-j/nyAHNwUoNkcdNJqcaUuhQk5a2VHQw/XgYIoTR9ctQ=";
ldflags = [
"-s"

View file

@ -15,13 +15,13 @@
stdenv.mkDerivation rec {
pname = "asn";
version = "0.76.0";
version = "0.76.1";
src = fetchFromGitHub {
owner = "nitefood";
repo = "asn";
rev = "refs/tags/v${version}";
hash = "sha256-pdtRf9VKEdNg1UeYSaLNLm9O057dT+n5g3Dd0bcP4EI=";
hash = "sha256-9UDd0tgRKEFC1V1+1s9Ghev0I48L8UR9/YbZKX3F1MU=";
};
nativeBuildInputs = [

View file

@ -1,11 +1,11 @@
{
stable = {
chromedriver = {
hash_darwin = "sha256-qo7eiMC4MR4pskSim6twkC2QDeqe3qfZsIEe5mjS7jg=";
hash_darwin = "sha256-iv370BYH8HobUxeYbsV4/A6JyZG2feEuVbJCLVZc3Og=";
hash_darwin_aarch64 =
"sha256-RHqu0wNeAx34LTkVgNjBfXrSWvZ1G7OkNAIGA4WUhmw=";
hash_linux = "sha256-K4QeHFp520Z3KjefvVsJf8V7gz7gTf2BCSW4Jxz/H9M=";
version = "122.0.6261.69";
"sha256-Gc0OXG7dx5Mvy0aAsnqVIJtPFK8OYmFr8Ofy+UXacM4=";
hash_linux = "sha256-ZcN/v7co08aRWM88s93nBU/FLEfE7JGf/hEH0tk3qy8=";
version = "122.0.6261.94";
};
deps = {
gn = {
@ -15,9 +15,9 @@
version = "2024-01-22";
};
};
hash = "sha256-uEN1hN6DOLgw4QDrMBZdiLLPx+yKQc5MimIf/vbCC84=";
hash_deb_amd64 = "sha256-k3/Phs72eIMB6LAU4aU0+ze/cRu6KlRhpBshKhmq9N4=";
version = "122.0.6261.69";
hash = "sha256-7fIs8qQon9L0iNmM/cHuyqtVm09qf7L4j9qb6KSbw2w=";
hash_deb_amd64 = "sha256-hOm7YZ9ya/SmwKhj6uIPkdgIDv5bIbss398szBYHuXk=";
version = "122.0.6261.94";
};
ungoogled-chromium = {
deps = {

View file

@ -7,7 +7,7 @@
((buildMozillaMach rec {
pname = "floorp";
packageVersion = "11.10.2";
packageVersion = "11.10.5";
applicationName = "Floorp";
binaryName = "floorp";
branding = "browser/branding/official";
@ -20,7 +20,7 @@
repo = "Floorp";
fetchSubmodules = true;
rev = "v${packageVersion}";
hash = "sha256-fjLYR59AZaR6S1zcAT+DNpdsCdrW+3NdkRQBoVNdwYw=";
hash = "sha256-uKgN74xn0v86E/YfqbJNnMIR3gS+3dhdgLJ5VUerurQ=";
};
extraConfigureFlags = [

View file

@ -8,16 +8,16 @@
buildGoModule rec {
pname = "karmor";
version = "1.1.0";
version = "1.1.1";
src = fetchFromGitHub {
owner = "kubearmor";
repo = "kubearmor-client";
rev = "v${version}";
hash = "sha256-HQJHtRi/ddKD+CNG3Ea61jz8zKcACBYCUR+qKbzADcI=";
hash = "sha256-NeLMHecfDyMhXmq1HO3qRIWeYpkoj9Od5wWStZEkHYU=";
};
vendorHash = "sha256-Lzp6n66oMrzTk4oWERa8Btb3FwiASpSj8hdQmYxYges=";
vendorHash = "sha256-EIvwzgpC9Ls43RJEhxNYDlF4luKthFgJleaXcYzOYow=";
nativeBuildInputs = [ installShellFiles ];

View file

@ -33,11 +33,11 @@
in
stdenv.mkDerivation rec {
pname = "suricata";
version = "7.0.2";
version = "7.0.3";
src = fetchurl {
url = "https://www.openinfosecfoundation.org/download/${pname}-${version}.tar.gz";
hash = "sha256-tOtgSDjvmag5a8i3u1TK0R8kQsvXy7MA5/WqsZCXvE0=";
hash = "sha256-6gdC16mHg/GvSldmGvYGi8LYUKw+ygSzIE0ozhZeNf8=";
};
nativeBuildInputs = [

View file

@ -4,7 +4,7 @@ let
if stdenv.isLinux then {
stable = "0.0.43";
ptb = "0.0.71";
canary = "0.0.282";
canary = "0.0.285";
development = "0.0.13";
} else {
stable = "0.0.294";
@ -25,7 +25,7 @@ let
};
canary = fetchurl {
url = "https://dl-canary.discordapp.net/apps/linux/${version}/discord-canary-${version}.tar.gz";
hash = "sha256-+Ijl/yPa7DVzVKOWTxCu6FxIsschIqYa+tYBnnKdCBA=";
hash = "sha256-dfBwe/YOzUUAfBrf51mNXtpyGL3Mp235e6TfQM4h04s=";
};
development = fetchurl {
url = "https://dl-development.discordapp.net/apps/linux/${version}/discord-development-${version}.tar.gz";

View file

@ -1,9 +1,9 @@
{
"version" = "1.11.58";
"version" = "1.11.59";
"hashes" = {
"desktopSrcHash" = "sha256-otZNhe6V/kGErx6J0+TcIwk5ASD/H4K/pYtm864VTIE=";
"desktopYarnHash" = "1pdja3rw4ykf9pgk937i4n0w8dj1r64fz7nzk9fsqlq8ciygabsq";
"webSrcHash" = "sha256-IAIsg9dvZMFfWst1xeVQLp+8URUauiaL3j2ui4lpKaY=";
"webYarnHash" = "0gv0vrgb62hgw58lgrmn6yywvrl9a5v5msd4l06n5wgnbbqi0i5j";
"desktopSrcHash" = "sha256-dasRfLsa8Jc6Vyay02f6IytjvYs3xbSFB2fU5bxi79E=";
"desktopYarnHash" = "00jvid2li68ji1xkbbpdiy39fzvhmw7ypnr3x82wbqqafkc5vil6";
"webSrcHash" = "sha256-UpRRTPrNiFsqXKD072jXVIqS8ZiuKt/BUzx1oja90VA=";
"webYarnHash" = "1s9lp2dd3slpp70rrbmsbmzphm6fwglnrqwk9fgylzqa1ds8nfjd";
};
}

File diff suppressed because it is too large

View file

@ -21,14 +21,14 @@
stdenv.mkDerivation rec {
pname = "flare";
version = "0.12.0";
version = "0.13.0";
src = fetchFromGitLab {
domain = "gitlab.com";
owner = "schmiddi-on-mobile";
repo = "flare";
rev = version;
hash = "sha256-Dg5UhVTmxiwPIbU8fG/ehX9Zp8WI2V+JoOEI7P1Way4=";
hash = "sha256-WfW2xUlF1vCaYFVP6ds06+niULKZgMMxgAOm66LK2xQ=";
};
cargoDeps = rustPlatform.importCargoLock {
@ -36,8 +36,8 @@ stdenv.mkDerivation rec {
outputHashes = {
"curve25519-dalek-4.0.0" = "sha256-KUXvYXeVvJEQ/+dydKzXWCZmA2bFa2IosDzaBL6/Si0=";
"libsignal-protocol-0.1.0" = "sha256-FCrJO7porlY5FrwZ2c67UPd4tgN7cH2/3DTwfPjihwM=";
"libsignal-service-0.1.0" = "sha256-lzyUUP1mhxxIU+xCr+5VAoeEO6FlDgeEJtWhm9avJb8=";
"presage-0.6.0-dev" = "sha256-PqMz6jJuL/4LVY3kNFQ9NmKt3D6cwQkGiPs2QJsL01A=";
"libsignal-service-0.1.0" = "sha256-XkCb83IvlnmvhHD8Vi9D5fNuBOoR9yX0/Vlb+YhrDz8=";
"presage-0.6.0-dev" = "sha256-zot92dlGtB7B423BU74oqpPzQKvLm2Dw9P8lCWkbsoE=";
};
};

View file

@ -2,7 +2,7 @@
callPackage ./generic.nix {} rec {
pname = "signal-desktop-beta";
dir = "Signal Beta";
version = "7.0.0-beta.1";
version = "7.0.0-beta.2";
url = "https://updates.signal.org/desktop/apt/pool/s/signal-desktop-beta/signal-desktop-beta_${version}_amd64.deb";
hash = "sha256-mMwOQVPihko/+ukEsaSu8l2u7obuY6gkTLAhSoWAVLo=";
hash = "sha256-yfa82JI/CKyQNT+oq0laupLyMIrq9Xs99M/xxgM9eQs=";
}

View file

@ -11,16 +11,16 @@
rustPlatform.buildRustPackage rec {
pname = "twitch-tui";
version = "2.6.3";
version = "2.6.4";
src = fetchFromGitHub {
owner = "Xithrius";
repo = pname;
rev = "refs/tags/v${version}";
hash = "sha256-h8qpsrMFFb49yfNb5mKEYRpul0hB0m1rDCvVW6jW+Pg=";
hash = "sha256-8jb5SrPAPas4TG4RbsaiL7bdbl/o/KX2WpDu/avPxp8=";
};
cargoHash = "sha256-L7psqmU4Zd7c0mbd4pK/tmPslTaxIhQoWtN0/RRMerA=";
cargoHash = "sha256-riDRGoPMLoDuBD1iFxoTgr5pgZLYkL18sidtQM5HYNk=";
nativeBuildInputs = [
pkg-config

View file

@ -27,7 +27,7 @@ stdenv.mkDerivation rec {
These modes were all designed for making reliable, confirmed ham radio
contacts under extreme weak-signal conditions.
'';
homepage = "https://physics.princeton.edu/pulsar/k1jt/wsjtx.html";
homepage = "https://wsjt.sourceforge.io";
license = with licenses; [ gpl3Plus ];
platforms = platforms.linux;
maintainers = with maintainers; [ lasandell numinit melling ];

View file

@ -7,13 +7,13 @@
stdenv.mkDerivation rec {
pname = "eigenmath";
version = "unstable-2024-02-04";
version = "unstable-2024-02-25";
src = fetchFromGitHub {
owner = "georgeweigt";
repo = pname;
rev = "3e37263611e181e2927d63b97b7656790c7f4fe1";
hash = "sha256-gjmz9Ma7OLQyIry6i2HMNy4Ai5Wh5NUzDKPO2a9Hp+s=";
rev = "4391a5bfe22d095cdf9fc12f376f64a8ffccccd9";
hash = "sha256-p+dnu35HGX8SgVpq5NczoZVehzfcuN+uucGurT7lWYM=";
};
checkPhase = let emulator = stdenv.hostPlatform.emulator buildPackages; in ''

View file

@ -8,16 +8,16 @@
rustPlatform.buildRustPackage rec {
pname = "git-gone";
version = "1.0.0";
version = "1.1.0";
src = fetchFromGitHub {
owner = "swsnr";
repo = "git-gone";
rev = "v${version}";
hash = "sha256-cEMFbG7L48s1SigLD/HfQ2NplGZPpO+KIgs3oV3rgQQ=";
hash = "sha256-Mc9/P4VBmLOC05xqdx/yopbhvdpQS3uejc4YA7BIgug=";
};
cargoHash = "sha256-CCPVjOWM59ELd4AyT968v6kvGdVwkMxxLZGDiJlLkzA=";
cargoHash = "sha256-NyyficEDJReMLAw2VAK2fOXNIwHilnUqQRACGck+0Vo=";
nativeBuildInputs = [ installShellFiles ];

View file

@ -1,4 +1,4 @@
{ lib, buildKodiAddon, fetchFromGitHub, addonUpdateScript, myconnpy }:
{ lib, buildKodiAddon, fetchFromGitHub, myconnpy }:
buildKodiAddon rec {
pname = "mediathekview";
@ -16,14 +16,10 @@ buildKodiAddon rec {
myconnpy
];
passthru.updateScript = addonUpdateScript {
attrPath = "kodi.packages.mediathekview";
};
meta = with lib; {
homepage = "https://github.com/mediathekview/plugin.video.mediathekview";
description = "Access media libraries of German speaking broadcasting stations";
license = licenses.mit;
maintainers = teams.kodi.members;
maintainers = teams.kodi.members ++ [ lib.maintainers.dschrempf ];
};
}

View file

@ -3,13 +3,13 @@
buildKodiAddon rec {
pname = "youtube";
namespace = "plugin.video.youtube";
version = "7.0.3";
version = "7.0.3.2";
src = fetchFromGitHub {
owner = "anxdpanic";
repo = "plugin.video.youtube";
rev = "v${version}";
hash = "sha256-dD9jl/W8RDfYHv13TBniOeRyc4cocj8160BHWz3MKlE=";
hash = "sha256-gJ7RGB0pSG/iLdpmXHpQOoQTisXnMl1Mgd0KYFgg2qI=";
};
propagatedBuildInputs = [

View file

@ -1,7 +1,6 @@
{ stdenv, nixosTests, lib, edk2, util-linux, nasm, acpica-tools, llvmPackages
, fetchurl, python3, pexpect, xorriso, qemu, dosfstools, mtools
, csmSupport ? false, seabios
, fdSize2MB ? csmSupport
, fdSize2MB ? false
, fdSize4MB ? secureBoot
, secureBoot ? false
, systemManagementModeRequired ? secureBoot && stdenv.hostPlatform.isx86
@ -99,7 +98,6 @@ edk2.mkDerivation projectDscPath (finalAttrs: {
++ lib.optionals sourceDebug [ "-D SOURCE_DEBUG_ENABLE=TRUE" ]
++ lib.optionals secureBoot [ "-D SECURE_BOOT_ENABLE=TRUE" ]
++ lib.optionals systemManagementModeRequired [ "-D SMM_REQUIRE=TRUE" ]
++ lib.optionals csmSupport [ "-D CSM_ENABLE" ]
++ lib.optionals fdSize2MB ["-D FD_SIZE_2MB"]
++ lib.optionals fdSize4MB ["-D FD_SIZE_4MB"]
++ lib.optionals httpSupport [ "-D NETWORK_HTTP_ENABLE=TRUE" "-D NETWORK_HTTP_BOOT_ENABLE=TRUE" ]
@ -115,10 +113,6 @@ edk2.mkDerivation projectDscPath (finalAttrs: {
unpackFile ${debian-edk-src}
'';
postPatch = lib.optionalString csmSupport ''
cp ${seabios}/share/seabios/Csm16.bin OvmfPkg/Csm/Csm16/Csm16.bin
'';
postConfigure = lib.optionalDrvAttr msVarsTemplate ''
tr -d '\n' < ${vendorPkKek} | sed \
-e 's/.*-----BEGIN CERTIFICATE-----/${OvmfPkKek1AppPrefix}:/' \

View file

@ -9,31 +9,31 @@
}:
let
version = "0.19.1";
version = "0.20.1";
dist = {
aarch64-darwin = rec {
archSuffix = "Darwin-arm64";
url = "https://github.com/lima-vm/lima/releases/download/v${version}/lima-${version}-${archSuffix}.tar.gz";
sha256 = "0dfcf3a39782baf1c2ea43cf026f8df0321c671d914c105fbb78de507aa8bda4";
sha256 = "a561a457d3620965e017fc750805dd2fb99db1c21b2f14e8f044dfaa042de76f";
};
x86_64-darwin = rec {
archSuffix = "Darwin-x86_64";
url = "https://github.com/lima-vm/lima/releases/download/v${version}/lima-${version}-${archSuffix}.tar.gz";
sha256 = "ac8827479f66ef1b288b31f164b22f6433faa14c44ce5bbebe09e6e913582479";
sha256 = "c57d2b317e5488c96b642b05146146a5ec94d0407cccba0f31401f52824d404d";
};
aarch64-linux = rec {
archSuffix = "Linux-aarch64";
url = "https://github.com/lima-vm/lima/releases/download/v${version}/lima-${version}-${archSuffix}.tar.gz";
sha256 = "c55e57ddbefd9988d0f3676bb873bcc6e0f7b3c3d47a1f07599ee151c5198d96";
sha256 = "1d93b5fc0bde1369fce3029c917934ef57514fa23a715f8fb7fb333c1db9ec41";
};
x86_64-linux = rec {
archSuffix = "Linux-x86_64";
url = "https://github.com/lima-vm/lima/releases/download/v${version}/lima-${version}-${archSuffix}.tar.gz";
sha256 = "7d18b1716aae14bf98d6ea93a703e8877b0c3142f7ba2e87401d47d5d0fe3ff1";
sha256 = "e7093ca1889d2dab436d9f0e6b53d65336f75cf8ebd54f583085eca462a1fc4b";
};
};
in

View file

@ -11,16 +11,16 @@
buildGoModule rec {
pname = "lima";
version = "0.19.1";
version = "0.20.1";
src = fetchFromGitHub {
owner = "lima-vm";
repo = pname;
rev = "v${version}";
sha256 = "sha256-0EKVWXNxOnz7j+f1ExkwQW69khhazj2Uz7RBAvwSjmQ=";
sha256 = "sha256-MeTFATaAGRSaUXmC1fv9/gMFWafvkteKVJS6MHaqt8A=";
};
vendorHash = "sha256-SfN4gj5nC9TEVD7aogsUv1um5w5Hvdy1eOSSNjGmnEw=";
vendorHash = "sha256-wd7YiEo4Gy2kHF7aCRoNGlbOQUxqQnKqP3znzMqS2PI=";
nativeBuildInputs = [ makeWrapper installShellFiles ]
++ lib.optionals stdenv.isDarwin [ xcbuild.xcrun sigtool ];

View file

@ -125,7 +125,7 @@ callPackage (import ./generic.nix (rec {
++ optional (withSeabios) "--with-system-seabios=${seabios}/share/seabios"
++ optional (!withInternalSeabios && !withSeabios) "--disable-seabios"
++ optional (withOVMF) "--with-system-ovmf=${OVMF.fd}/FV/OVMF.fd"
++ optional (withOVMF) "--with-system-ovmf=${OVMF.firmware}"
++ optional (withInternalOVMF) "--enable-ovmf";
NIX_CFLAGS_COMPILE = toString [

View file

@ -890,41 +890,26 @@ rec {
})
);
# Arguments are documented in ../../../doc/build-helpers/images/dockertools.section.md
streamLayeredImage = lib.makeOverridable (
{
# Image Name
name
, # Image tag, the Nix's output hash will be used if null
tag ? null
, # Parent image, to append to.
fromImage ? null
, # Files to put on the image (a nix store path or list of paths).
contents ? [ ]
, # Docker config; e.g. what command to run on the container.
config ? { }
, # Image architecture, defaults to the architecture of the `hostPlatform` when unset
architecture ? defaultArchitecture
, # Time of creation of the image. Passing "now" will make the
# created date be the time of building.
created ? "1970-01-01T00:00:01Z"
, # Optional bash script to run on the files prior to fixturizing the layer.
extraCommands ? ""
, # Optional bash script to run inside fakeroot environment.
# Could be used for changing ownership of files in customisation layer.
fakeRootCommands ? ""
, # Whether to run fakeRootCommands in fakechroot as well, so that they
# appear to run inside the image, but have access to the normal Nix store.
# Perhaps this could be enabled on by default on pkgs.stdenv.buildPlatform.isLinux
enableFakechroot ? false
, # We pick 100 to ensure there is plenty of room for extension. I
# believe the actual maximum is 128.
maxLayers ? 100
, # Whether to include store paths in the image. You generally want to leave
# this on, but tooling may disable this to insert the store paths more
# efficiently via other means, such as bind mounting the host store.
includeStorePaths ? true
, # Passthru arguments for the underlying derivation.
passthru ? {}
, tag ? null
, fromImage ? null
, contents ? [ ]
, config ? { }
, architecture ? defaultArchitecture
, created ? "1970-01-01T00:00:01Z"
, uid ? 0
, gid ? 0
, uname ? "root"
, gname ? "root"
, maxLayers ? 100
, extraCommands ? ""
, fakeRootCommands ? ""
, enableFakechroot ? false
, includeStorePaths ? true
, passthru ? {}
,
}:
assert
@ -1007,7 +992,7 @@ rec {
conf = runCommand "${baseName}-conf.json"
{
inherit fromImage maxLayers created;
inherit fromImage maxLayers created uid gid uname gname;
imageName = lib.toLower name;
preferLocalBuild = true;
passthru.imageTag =
@ -1086,14 +1071,22 @@ rec {
"store_layers": $store_layers[0],
"customisation_layer", $customisation_layer,
"repo_tag": $repo_tag,
"created": $created
"created": $created,
"uid": $uid,
"gid": $gid,
"uname": $uname,
"gname": $gname
}
' --arg store_dir "${storeDir}" \
--argjson from_image ${if fromImage == null then "null" else "'\"${fromImage}\"'"} \
--slurpfile store_layers store_layers.json \
--arg customisation_layer ${customisationLayer} \
--arg repo_tag "$imageName:$imageTag" \
--arg created "$created" |
--arg created "$created" \
--arg uid "$uid" \
--arg gid "$gid" \
--arg uname "$uname" \
--arg gname "$gname" |
tee $out
'';
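The new `uid`/`gid`/`uname`/`gname` arguments are threaded through to the layer builder above; a hedged sketch of how they might be used (the image name and contents are illustrative, not taken from this diff):

```nix
# Illustrative only: an image whose store paths are owned by an unprivileged
# user, e.g. so Nix can be used in single-user mode inside the container.
pkgs.dockerTools.streamLayeredImage {
  name = "nix-unprivileged";   # hypothetical image name
  contents = [ pkgs.nix ];     # hypothetical contents
  uid = 1000;
  gid = 1000;
  uname = "user";
  gname = "user";
}
```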

View file

@ -9,6 +9,8 @@ image as an uncompressed tarball to stdout:
the fields with the same name on the image spec [2].
* "created" can be "now".
* "created" is also used as mtime for files added to the image.
* "uid", "gid", "uname", "gname" is the file ownership, for example,
0, 0, "root", "root".
* "store_layers" is a list of layers in ascending order, where each
layer is the list of store paths to include in that layer.
@ -45,7 +47,7 @@ from datetime import datetime, timezone
from collections import namedtuple
def archive_paths_to(obj, paths, mtime):
def archive_paths_to(obj, paths, mtime, uid, gid, uname, gname):
"""
Writes the given store paths as a tar file to the given stream.
@ -61,14 +63,14 @@ def archive_paths_to(obj, paths, mtime):
def apply_filters(ti):
ti.mtime = mtime
ti.uid = 0
ti.gid = 0
ti.uname = "root"
ti.gname = "root"
ti.uid = uid
ti.gid = gid
ti.uname = uname
ti.gname = gname
return ti
def nix_root(ti):
ti.mode = 0o0555 # r-xr-xr-x
ti.mode = 0o0755 # rwxr-xr-x
return ti
def dir(path):
@ -208,7 +210,7 @@ def overlay_base_config(from_image, final_config):
return final_config
def add_layer_dir(tar, paths, store_dir, mtime):
def add_layer_dir(tar, paths, store_dir, mtime, uid, gid, uname, gname):
"""
Appends given store paths to a TarFile object as a new layer.
@ -231,7 +233,7 @@ def add_layer_dir(tar, paths, store_dir, mtime):
archive_paths_to(
extract_checksum,
paths,
mtime=mtime,
mtime, uid, gid, uname, gname
)
(checksum, size) = extract_checksum.extract()
@ -247,7 +249,7 @@ def add_layer_dir(tar, paths, store_dir, mtime):
archive_paths_to(
write,
paths,
mtime=mtime,
mtime, uid, gid, uname, gname
)
write.close()
@ -324,6 +326,10 @@ def main():
else datetime.fromisoformat(conf["created"])
)
mtime = int(created.timestamp())
uid = int(conf["uid"])
gid = int(conf["gid"])
uname = conf["uname"]
gname = conf["gname"]
store_dir = conf["store_dir"]
from_image = load_from_image(conf["from_image"])
@ -336,7 +342,8 @@ def main():
for num, store_layer in enumerate(conf["store_layers"], start=start):
print("Creating layer", num, "from paths:", store_layer,
file=sys.stderr)
info = add_layer_dir(tar, store_layer, store_dir, mtime=mtime)
info = add_layer_dir(tar, store_layer, store_dir,
mtime, uid, gid, uname, gname)
layers.append(info)
print("Creating layer", len(layers) + 1, "with customisation...",

View file

@ -1,14 +1,58 @@
{ stdenvNoCC }:
{ lib, stdenvNoCC }:
/*
This is a wrapper around `substitute` in the stdenv.
Attribute arguments:
- `name` (optional): The name of the resulting derivation
- `src`: The path to the file to substitute
- `substitutions`: The list of substitution arguments to pass
See https://nixos.org/manual/nixpkgs/stable/#fun-substitute
- `replacements`: Deprecated version of `substitutions`
that doesn't support spaces in arguments.
Example:
```nix
{ substitute }:
substitute {
src = ./greeting.txt;
substitutions = [
"--replace"
"world"
"paul"
];
}
```
See ../../test/substitute for tests
*/
args:
# This is a wrapper around `substitute` in the stdenv.
# The `replacements` attribute should be a list of list of arguments
# to `substitute`, such as `[ "--replace" "sourcetext" "replacementtext" ]`
stdenvNoCC.mkDerivation ({
let
name = if args ? name then args.name else baseNameOf (toString args.src);
deprecationReplacement = lib.pipe args.replacements [
lib.toList
(map (lib.splitString " "))
lib.concatLists
(lib.concatMapStringsSep " " lib.strings.escapeNixString)
];
optionalDeprecationWarning =
# substitutions is only available starting 24.05.
# TODO: Remove support for replacements sometime after the next release
lib.warnIf (args ? replacements && lib.isInOldestRelease 2405) ''
pkgs.substitute: For "${name}", `replacements` is used, which is deprecated since it doesn't support arguments with spaces. Use `substitutions` instead:
substitutions = [ ${deprecationReplacement} ];'';
in
optionalDeprecationWarning
stdenvNoCC.mkDerivation ({
inherit name;
builder = ./substitute.sh;
inherit (args) src;
preferLocalBuild = true;
allowSubstitutes = false;
} // args // { replacements = args.replacements; })
} // args // lib.optionalAttrs (args ? substitutions) {
substitutions =
assert lib.assertMsg (lib.isList args.substitutions) ''
pkgs.substitute: For "${name}", `substitutions` is passed, which is expected to be a list, but it's a ${builtins.typeOf args.substitutions} instead.'';
lib.escapeShellArgs args.substitutions;
})
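A hedged sketch of migrating a caller from the deprecated `replacements` attribute to `substitutions`, reusing the greeting example from the documentation above (the exact strings are illustrative):

```nix
# Before (deprecated): arguments are joined with spaces, so replacement
# values containing spaces cannot be expressed reliably.
substitute {
  src = ./greeting.txt;
  replacements = [ "--replace" "world" "paul" ];
}

# After: each argument is its own list element, so spaces are safe.
substitute {
  src = ./greeting.txt;
  substitutions = [ "--replace" "world" "dear paul" ];
}
```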

View file

@ -8,7 +8,13 @@ if test -n "$dir"; then
mkdir -p $out/$dir
fi
substitute $src $target $replacements
substitutionsList=($replacements)
if [[ -v substitutions ]]; then
eval "substitutionsList+=($substitutions)"
fi
substitute $src $target "${substitutionsList[@]}"
if test -n "$isExecutable"; then
chmod +x $target

View file

@ -1,9 +1,9 @@
{
"owner": "advplyr",
"repo": "audiobookshelf",
"rev": "90f4833c9e0957f08799af15966d1909516b335e",
"hash": "sha256-m+CwUV3Bu9sHvRKCA1vFXYYRx48bxZ8N3BornO1tLQ0=",
"version": "2.7.2",
"depsHash": "sha256-1623oXtkOp43xQvHI3GbJpEQLvgr5WD5FpfNO+d0RR8=",
"clientDepsHash": "sha256-ugf9C/L5aBTO7gCy561kV06Ihb/mg/ZW916NKngIYXI="
"rev": "85fecbd1b9fc424d8bfd1b63cbae45b8b23a7d34",
"hash": "sha256-GWaaoVa1UJptbYAZ99LbrzfKEksSqK0GSsl3Vh2xKKs=",
"version": "2.8.0",
"depsHash": "sha256-vznd+ZKn0nx0Q7/lsMfWRUZUsK2LtxQor/3C4fQc0Ss=",
"clientDepsHash": "sha256-oqINZO4v5WeRRiLQnnChrnK8VeIzLg1MRhG/gEjkv58="
}

View file

@ -1,5 +1,5 @@
#!/usr/bin/env nix-shell
#!nix-shell -i nu -p nushell common-updater-scripts prefetch-npm-deps
#!nix-shell -i nu -p nushell common-updater-scripts prefetch-npm-deps nix-prefetch-github
def main [] {
let sourceFile = $"(pwd)/pkgs/by-name/au/audiobookshelf/source.json"

View file

@ -6,16 +6,16 @@
buildGoModule rec {
pname = "bitmagnet";
version = "0.6.2";
version = "0.7.0";
src = fetchFromGitHub {
owner = "bitmagnet-io";
repo = "bitmagnet";
rev = "v${version}";
hash = "sha256-17jRktEqBCAXiddx8FnqHg3+c/03nqKHC8BQc9AhQA0=";
hash = "sha256-lomTfG6Fo4IywI8VMRvv4mBNRxLCq6IQGIuaR61UwOE=";
};
vendorHash = "sha256-YfsSz72CeHdrh5610Ilo1NYxlCT993hxWRWh0OsvEQc=";
vendorHash = "sha256-tKU4GoaEwwdbpWjojx+Z/mWxXKjceJPYRg5UTpYzad4=";
ldflags = [ "-s" "-w" ];

View file

@ -29,7 +29,7 @@ let
icon = "bitwarden";
electron = electron_28;
in buildNpmPackage rec {
pname = "bitwarden";
pname = "bitwarden-desktop";
version = "2024.2.0";
src = fetchFromGitHub {
@ -41,8 +41,7 @@ in buildNpmPackage rec {
patches = [
(fetchpatch2 {
# https://github.com/bitwarden/clients/pull/7508
url = "https://github.com/amarshall/bitwarden-clients/commit/e85fa4ef610d9dd05bd22a9b93d54b0c7901776d.patch";
url = "https://github.com/bitwarden/clients/commit/746bf0a4745423b9e70c2c54dcf76a95ffb62e11.patch";
hash = "sha256-P9MTsiNbAb2kKo/PasIm9kGm0lQjuVUxAJ3Fh1DfpzY=";
})
];
@ -68,7 +67,7 @@ in buildNpmPackage rec {
patches;
patchFlags = [ "-p4" ];
sourceRoot = "${src.name}/${cargoRoot}";
hash = "sha256-KJUz5hvdsurnohUWRZedXvuWMnLtR0dcdTeHtJGrZBs=";
hash = "sha256-LjwtOmIJlwtOiy36Y0pP+jJEwfmCGTN4RhqgmD3Yj6E=";
};
cargoRoot = "apps/desktop/desktop_native";

View file

@ -9,16 +9,16 @@
rustPlatform.buildRustPackage rec {
pname = "bpftop";
version = "0.2.1";
version = "0.2.2";
src = fetchFromGitHub {
owner = "Netflix";
repo = "bpftop";
rev = "v${version}";
hash = "sha256-HP8ubzCfBNgISrAyLACylH4PHxLhJPzIQFmIWEL5gjo=";
hash = "sha256-1Wgfe+M1s3hxcN9g1KiBeZycdgpMiHy5FWlE0jlNq/U=";
};
cargoHash = "sha256-+zh7GZ/fbhxLNQkkHFZqtJxy2IeS+KX5s2Qi5N21u/0=";
cargoHash = "sha256-CrAH3B3dCg3GsxvRrVp/jx3YSpmEg4/jyNuXUO/zeq0=";
buildInputs = [
elfutils
@ -34,7 +34,10 @@ rustPlatform.buildRustPackage rec {
description = "A dynamic real-time view of running eBPF programs";
homepage = "https://github.com/Netflix/bpftop";
license = lib.licenses.asl20;
maintainers = with lib.maintainers; [ mfrw ];
maintainers = with lib.maintainers; [
_0x4A6F
mfrw
];
mainProgram = "bpftop";
};
}

Some files were not shown because too many files have changed in this diff