Merge branch 'master' into amazon-ec2-amis

This commit is contained in:
Rok Garbas 2024-03-13 00:37:12 +01:00 committed by GitHub
commit 6f11ba9ffe
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
2631 changed files with 60557 additions and 35085 deletions

View file

@ -17,6 +17,10 @@ end_of_line = unset
insert_final_newline = unset
trim_trailing_whitespace = unset
# We want readFile .version to return the version without a newline.
[.version]
insert_final_newline = false
# see https://nixos.org/nixpkgs/manual/#chap-conventions
# Match json/lockfiles/markdown/nix/perl/python/ruby/shell/docbook files, set indent to spaces

View file

@ -19,7 +19,7 @@ jobs:
# we don't limit this action to only NixOS repo since the checks are cheap and useful developer feedback
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
- uses: cachix/install-nix-action@8887e596b4ee1134dae06b98d573bd674693f47c # v26
- uses: cachix/cachix-action@18cf96c7c98e048e10a83abd92116114cd8504be # v14
with:
# This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.

View file

@ -98,7 +98,7 @@ jobs:
base=$(mktemp -d)
git worktree add "$base" "$(git rev-parse HEAD^1)"
echo "base=$base" >> "$GITHUB_ENV"
- uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
- uses: cachix/install-nix-action@8887e596b4ee1134dae06b98d573bd674693f47c # v26
- name: Fetching the pinned tool
# Update the pinned version using pkgs/test/nixpkgs-check-by-name/scripts/update-pinned-tool.sh
run: |

View file

@ -16,7 +16,7 @@ jobs:
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
- uses: cachix/install-nix-action@8887e596b4ee1134dae06b98d573bd674693f47c # v26
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true

View file

@ -28,7 +28,7 @@ jobs:
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
- uses: cachix/install-nix-action@8887e596b4ee1134dae06b98d573bd674693f47c # v26
with:
# nixpkgs commit is pinned so that it doesn't break
# editorconfig-checker 2.4.0

View file

@ -18,7 +18,7 @@ jobs:
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
- uses: cachix/install-nix-action@8887e596b4ee1134dae06b98d573bd674693f47c # v26
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true

View file

@ -19,7 +19,7 @@ jobs:
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
- uses: cachix/install-nix-action@8887e596b4ee1134dae06b98d573bd674693f47c # v26
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true

View file

@ -29,7 +29,7 @@ jobs:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
if: ${{ env.CHANGED_FILES && env.CHANGED_FILES != '' }}
- uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
- uses: cachix/install-nix-action@8887e596b4ee1134dae06b98d573bd674693f47c # v26
with:
nix_path: nixpkgs=channel:nixpkgs-unstable
- name: Parse all changed or added nix files

View file

@ -17,7 +17,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
- uses: cachix/install-nix-action@8887e596b4ee1134dae06b98d573bd674693f47c # v26
with:
nix_path: nixpkgs=channel:nixpkgs-unstable
- name: setup
@ -46,7 +46,7 @@ jobs:
run: |
git clean -f
- name: create PR
uses: peter-evans/create-pull-request@153407881ec5c347639a548ade7d8ad1d6740e38 # v5.0.2
uses: peter-evans/create-pull-request@a4f52f8033a6168103c2538976c07b467e8163bc # v6.0.1
with:
body: |
Automatic update by [update-terraform-providers](https://github.com/NixOS/nixpkgs/blob/master/.github/workflows/update-terraform-providers.yml) action.

2
.gitignore vendored
View file

@ -11,12 +11,12 @@ outputs/
result-*
result
repl-result-*
tags
!pkgs/development/python-modules/result
/doc/NEWS.html
/doc/NEWS.txt
/doc/manual.html
/doc/manual.pdf
/result
/source/
.version-suffix

View file

@ -1 +0,0 @@
24.05

1
.version Symbolic link
View file

@ -0,0 +1 @@
lib/.version

View file

@ -262,6 +262,10 @@ or
***
```
This function should only be used by non-redistributable software with an unfree license that we need to require the user to download manually.
It produces packages that cannot be built automatically.
## `fetchtorrent` {#fetchtorrent}
`fetchtorrent` expects two arguments. `url` which can either be a Magnet URI (Magnet Link) such as `magnet:?xt=urn:btih:dd8255ecdc7ca55fb0bbf81323d87062db1f6d1c` or an HTTP URL pointing to a `.torrent` file. It can also take a `config` argument which will craft a `settings.json` configuration file and give it to `transmission`, the underlying program that is performing the fetch. The available config options for `transmission` can be found [here](https://github.com/transmission/transmission/blob/main/docs/Editing-Configuration-Files.md#options)

View file

@ -7,7 +7,9 @@ Like [`stdenv.mkDerivation`](#sec-using-stdenv), each of these build helpers cre
`runCommand :: String -> AttrSet -> String -> Derivation`
`runCommand name drvAttrs buildCommand` returns a derivation that is built by running the specified shell commands.
The result of `runCommand name drvAttrs buildCommand` is a derivation that is built by running the specified shell commands.
By default `runCommand` runs in a stdenv with no compiler environment, whereas [`runCommandCC`](#trivial-builder-runCommandCC) uses the default stdenv, `pkgs.stdenv`.
`name :: String`
: The name that Nix will append to the store path in the same way that `stdenv.mkDerivation` uses its `name` attribute.
@ -153,6 +155,12 @@ Write a text file to the Nix store.
Default: `true`
`derivationArgs` (Attribute set, _optional_)
: Extra arguments to pass to the underlying call to `stdenv.mkDerivation`.
Default: `{}`
The resulting store path will include some variation of the name, and it will be a file unless `destination` is used, in which case it will be a directory.
::: {.example #ex-writeTextFile}

View file

@ -6,8 +6,9 @@ All generators follow a similar call interface: `generatorName configFunctions d
Generators can be fine-tuned to produce exactly the file format required by your application/service. One example is an INI-file format which uses `: ` as separator, the strings `"yes"`/`"no"` as boolean values and requires all string values to be quoted:
```nix
with lib;
let
inherit (lib) generators isString;
customToINI = generators.toINI {
# specifies how to format a key/value pair
mkKeyValue = generators.mkKeyValueDefault {

View file

@ -55,7 +55,13 @@ Here is a simple package example. It is a pure Coq library, thus it depends on C
```nix
{ lib, mkCoqDerivation, version ? null
, coq, mathcomp, mathcomp-finmap, mathcomp-bigenough }:
with lib; mkCoqDerivation {
let
inherit (lib) licenses maintainers switch;
inherit (lib.versions) range;
in
mkCoqDerivation {
/* namePrefix leads to e.g. `name = coq8.11-mathcomp1.11-multinomials-1.5.2` */
namePrefix = [ "coq" "mathcomp" ];
pname = "multinomials";

View file

@ -210,11 +210,11 @@ buildDotnetGlobalTool {
nugetSha256 = "sha256-ZG2HFyKYhVNVYd2kRlkbAjZJq88OADe3yjxmLuxXDUo=";
meta = with lib; {
meta = {
homepage = "https://cmd.petabridge.com/index.html";
changelog = "https://cmd.petabridge.com/articles/RELEASE_NOTES.html";
license = licenses.unfree;
platforms = platforms.linux;
license = lib.licenses.unfree;
platforms = lib.platforms.linux;
};
}
```

View file

@ -51,11 +51,11 @@ pet = buildGoModule rec {
vendorHash = "sha256-ciBIR+a1oaYH+H1PcC8cD8ncfJczk1IiJ8iYNM+R6aA=";
meta = with lib; {
meta = {
description = "Simple command-line snippet manager, written in Go";
homepage = "https://github.com/knqyf263/pet";
license = licenses.mit;
maintainers = with maintainers; [ kalbasit ];
license = lib.licenses.mit;
maintainers = with lib.maintainers; [ kalbasit ];
};
}
```

View file

@ -93,11 +93,11 @@ build-idris-package {
hash = "sha256-h28F9EEPuvab6zrfeE+0k1XGQJGwINnsJEG8yjWIl7w=";
};
meta = with lib; {
meta = {
description = "Idris YAML lib";
homepage = "https://github.com/Heather/Idris.Yaml";
license = licenses.mit;
maintainers = [ maintainers.brainrape ];
license = lib.licenses.mit;
maintainers = [ lib.maintainers.brainrape ];
};
}
```

View file

@ -184,11 +184,11 @@ buildNpmPackage rec {
NODE_OPTIONS = "--openssl-legacy-provider";
meta = with lib; {
meta = {
description = "A modern web UI for various torrent clients with a Node.js backend and React frontend";
homepage = "https://flood.js.org";
license = licenses.gpl3Only;
maintainers = with maintainers; [ winter ];
license = lib.licenses.gpl3Only;
maintainers = with lib.maintainers; [ winter ];
};
}
```
@ -233,6 +233,37 @@ sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
It returns a derivation with all `package-lock.json` dependencies downloaded into `$out/`, usable as an npm cache.
#### importNpmLock {#javascript-buildNpmPackage-importNpmLock}
`importNpmLock` is a Nix function that requires the following optional arguments:
- `npmRoot`: Path to package directory containing the source tree
- `package`: Parsed contents of `package.json`
- `packageLock`: Parsed contents of `package-lock.json`
- `pname`: Package name
- `version`: Package version
It returns a derivation with a patched `package.json` & `package-lock.json` with all dependencies resolved to Nix store paths.
This function is analogous to using `fetchNpmDeps`, but instead of specifying `hash` it uses metadata from `package.json` & `package-lock.json`.
Note that `npmHooks.npmConfigHook` cannot be used with `importNpmLock`. You will instead need to use `importNpmLock.npmConfigHook`:
```nix
{ buildNpmPackage, importNpmLock }:
buildNpmPackage {
pname = "hello";
version = "0.1.0";
npmDeps = importNpmLock {
npmRoot = ./.;
};
npmConfigHook = importNpmLock.npmConfigHook;
}
```
### corepack {#javascript-corepack}
This package puts the corepack wrappers for pnpm and yarn in your PATH, and they will honor the `packageManager` setting in the `package.json`.

View file

@ -1,8 +1,8 @@
# Users Guide to Lua Infrastructure {#users-guide-to-lua-infrastructure}
# Lua {#lua}
## Using Lua {#using-lua}
## Using Lua {#lua-userguide}
### Overview of Lua {#overview-of-lua}
### Overview of Lua {#lua-overview}
Several versions of the Lua interpreter are available: luajit, lua 5.1, 5.2, 5.3.
The attribute `lua` refers to the default interpreter, it is also possible to refer to specific versions, e.g. `lua5_2` refers to Lua 5.2.
@ -118,7 +118,7 @@ Again, it is possible to launch the interpreter from the shell.
The Lua interpreter has the attribute `pkgs` which contains all Lua libraries for that specific interpreter.
## Developing with Lua {#developing-with-lua}
## Developing with lua {#lua-developing}
Now that you know how to get a working Lua environment with Nix, it is time
to go forward and start actually developing with Lua. There are two ways to
@ -193,10 +193,10 @@ luaposix = buildLuarocksPackage {
disabled = (luaOlder "5.1") || (luaAtLeast "5.4");
propagatedBuildInputs = [ bit32 lua std_normalize ];
meta = with lib; {
meta = {
homepage = "https://github.com/luaposix/luaposix/";
description = "Lua bindings for POSIX";
maintainers = with maintainers; [ vyp lblasc ];
maintainers = with lib.maintainers; [ vyp lblasc ];
license.fullName = "MIT/X11";
};
};
@ -234,30 +234,20 @@ The `lua.withPackages` takes a function as an argument that is passed the set of
Using the `withPackages` function, the previous example for the luafilesystem environment can be written like this:
```nix
with import <nixpkgs> {};
lua.withPackages (ps: [ps.luafilesystem])
```
`withPackages` passes the correct package set for the specific interpreter version as an argument to the function. In the above example, `ps` equals `luaPackages`.
But you can also easily switch to using `lua5_2`:
But you can also easily switch to using `lua5_1`:
```nix
with import <nixpkgs> {};
lua5_2.withPackages (ps: [ps.lua])
lua5_1.withPackages (ps: [ps.lua])
```
Now, `ps` is set to `lua52Packages`, matching the version of the interpreter.
Now, `ps` is set to `lua5_1.pkgs`, matching the version of the interpreter.
### Possible Todos {#possible-todos}
* export/use version specific variables such as `LUA_PATH_5_2`/`LUAROCKS_CONFIG_5_2`
* let luarocks check for dependencies via exporting the different rocktrees in temporary config
### Lua Contributing guidelines {#lua-contributing-guidelines}
### Lua Contributing guidelines {#lua-contributing}
Following rules should be respected:
* Make sure libraries build for all Lua interpreters.
* Commit names of Lua libraries should reflect that they are Lua libraries, so write for example `luaPackages.luafilesystem: 1.11 -> 1.12`.

View file

@ -34,11 +34,11 @@ maven.buildMavenPackage rec {
--add-flags "-jar $out/share/jd-cli/jd-cli.jar"
'';
meta = with lib; {
meta = {
description = "Simple command line wrapper around JD Core Java Decompiler project";
homepage = "https://github.com/intoolswetrust/jd-cli";
license = licenses.gpl3Plus;
maintainers = with maintainers; [ majiir ];
license = lib.licenses.gpl3Plus;
maintainers = with lib.maintainers; [ majiir ];
};
}:
```

View file

@ -110,11 +110,11 @@ buildDunePackage rec {
hash = "sha256-d5/3KUBAWRj8tntr4RkJ74KWW7wvn/B/m1nx0npnzyc=";
};
meta = with lib; {
meta = {
homepage = "https://github.com/flowtype/ocaml-wtf8";
description = "WTF-8 is a superset of UTF-8 that allows unpaired surrogates.";
license = licenses.mit;
maintainers = [ maintainers.eqyiel ];
license = lib.licenses.mit;
maintainers = [ lib.maintainers.eqyiel ];
};
}
```

View file

@ -7,7 +7,6 @@
| Package | Aliases | Interpreter |
|------------|-----------------|-------------|
| python27 | python2, python | CPython 2.7 |
| python38 | | CPython 3.8 |
| python39 | | CPython 3.9 |
| python310 | | CPython 3.10 |
| python311 | python3 | CPython 3.11 |
@ -60,7 +59,6 @@ sets are
* `pkgs.python27Packages`
* `pkgs.python3Packages`
* `pkgs.python38Packages`
* `pkgs.python39Packages`
* `pkgs.python310Packages`
* `pkgs.python311Packages`
@ -132,12 +130,12 @@ buildPythonPackage rec {
hypothesis
];
meta = with lib; {
meta = {
changelog = "https://github.com/pytest-dev/pytest/releases/tag/${version}";
description = "Framework for writing tests";
homepage = "https://github.com/pytest-dev/pytest";
license = licenses.mit;
maintainers = with maintainers; [ domenkozar lovek323 madjar lsix ];
license = lib.licenses.mit;
maintainers = with lib.maintainers; [ domenkozar lovek323 madjar lsix ];
};
}
```
@ -314,7 +312,7 @@ python3Packages.buildPythonApplication rec {
python-daemon
];
meta = with lib; {
meta = {
# ...
};
}
@ -901,12 +899,12 @@ buildPythonPackage rec {
"toolz.dicttoolz"
];
meta = with lib; {
meta = {
changelog = "https://github.com/pytoolz/toolz/releases/tag/${version}";
homepage = "https://github.com/pytoolz/toolz";
description = "List processing tools and functional utilities";
license = licenses.bsd3;
maintainers = with maintainers; [ fridh ];
license = lib.licenses.bsd3;
maintainers = with lib.maintainers; [ fridh ];
};
}
```
@ -1036,12 +1034,12 @@ buildPythonPackage rec {
pytest
];
meta = with lib; {
meta = {
changelog = "https://github.com/blaze/datashape/releases/tag/${version}";
homepage = "https://github.com/ContinuumIO/datashape";
description = "A data description language";
license = licenses.bsd2;
maintainers = with maintainers; [ fridh ];
license = lib.licenses.bsd2;
maintainers = with lib.maintainers; [ fridh ];
};
}
```
@ -1086,12 +1084,12 @@ buildPythonPackage rec {
libxslt
];
meta = with lib; {
meta = {
changelog = "https://github.com/lxml/lxml/releases/tag/lxml-${version}";
description = "Pythonic binding for the libxml2 and libxslt libraries";
homepage = "https://lxml.de";
license = licenses.bsd3;
maintainers = with maintainers; [ sjourdois ];
license = lib.licenses.bsd3;
maintainers = with lib.maintainers; [ sjourdois ];
};
}
```
@ -1157,12 +1155,12 @@ buildPythonPackage rec {
# Tests cannot import pyfftw. pyfftw works fine though.
doCheck = false;
meta = with lib; {
meta = {
changelog = "https://github.com/pyFFTW/pyFFTW/releases/tag/v${version}";
description = "A pythonic wrapper around FFTW, the FFT library, presenting a unified interface for all the supported transforms";
homepage = "http://hgomersall.github.com/pyFFTW";
license = with licenses; [ bsd2 bsd3 ];
maintainers = with maintainers; [ fridh ];
license = with lib.licenses; [ bsd2 bsd3 ];
maintainers = with lib.maintainers; [ fridh ];
};
}
```
@ -1532,12 +1530,12 @@ buildPythonPackage rec {
wheel
];
meta = with lib; {
meta = {
changelog = "https://github.com/pytoolz/toolz/releases/tag/${version}";
homepage = "https://github.com/pytoolz/toolz/";
description = "List processing tools and functional utilities";
license = licenses.bsd3;
maintainers = with maintainers; [ fridh ];
license = lib.licenses.bsd3;
maintainers = with lib.maintainers; [ fridh ];
};
}
```

View file

@ -12,7 +12,7 @@ an extra indirection.
## Nix expression for a Qt package (default.nix) {#qt-default-nix}
```nix
{ stdenv, lib, qt6, wrapQtAppsHook }:
{ stdenv, qt6 }:
stdenv.mkDerivation {
pname = "myapp";
@ -23,10 +23,12 @@ stdenv.mkDerivation {
}
```
The same goes for Qt 5 where libraries and tools are under `libsForQt5`.
Any Qt package should include `wrapQtAppsHook` in `nativeBuildInputs`, or explicitly set `dontWrapQtApps` to bypass generating the wrappers.
::: {.note}
Graphical Linux applications should also include `qtwayland` in `buildInputs`, to ensure the Wayland platform plugin is available.
Qt 6 graphical applications should also include `qtwayland` in `buildInputs` on Linux (but not on platforms e.g. Darwin, where `qtwayland` is not available), to ensure the Wayland platform plugin is available.
This may become default in the future, see [NixOS/nixpkgs#269674](https://github.com/NixOS/nixpkgs/pull/269674).
:::

View file

@ -35,10 +35,10 @@ rustPlatform.buildRustPackage rec {
cargoHash = "sha256-jtBw4ahSl88L0iuCXxQgZVm1EcboWRJMNtjxLVTtzts=";
meta = with lib; {
meta = {
description = "A fast line-oriented regex search tool, similar to ag and ack";
homepage = "https://github.com/BurntSushi/ripgrep";
license = licenses.unlicense;
license = lib.licenses.unlicense;
maintainers = [];
};
}
@ -903,8 +903,8 @@ with import <nixpkgs>
};
let
rustPlatform = makeRustPlatform {
cargo = rust-bin.stable.latest.minimal;
rustc = rust-bin.stable.latest.minimal;
cargo = rust-bin.selectLatestNightlyWith (toolchain: toolchain.default);
rustc = rust-bin.selectLatestNightlyWith (toolchain: toolchain.default);
};
in
@ -923,11 +923,11 @@ rustPlatform.buildRustPackage rec {
doCheck = false;
meta = with lib; {
meta = {
description = "A fast line-oriented regex search tool, similar to ag and ack";
homepage = "https://github.com/BurntSushi/ripgrep";
license = with licenses; [ mit unlicense ];
maintainers = with maintainers; [];
license = with lib.licenses; [ mit unlicense ];
maintainers = with lib.maintainers; [];
};
}
```

View file

@ -181,11 +181,11 @@ let
runHook postInstall
'';
meta = with lib; {
meta = {
description = "A LaTeX2e class for overhead transparencies";
license = licenses.unfreeRedistributable;
maintainers = with maintainers; [ veprbl ];
platforms = platforms.all;
license = lib.licenses.unfreeRedistributable;
maintainers = with lib.maintainers; [ veprbl ];
platforms = lib.platforms.all;
};
};

View file

@ -3,16 +3,16 @@
Nix packages can declare *meta-attributes* that contain information about a package such as a description, its homepage, its license, and so on. For instance, the GNU Hello package has a `meta` declaration like this:
```nix
meta = with lib; {
meta = {
description = "A program that produces a familiar, friendly greeting";
longDescription = ''
GNU Hello is a program that prints "Hello, world!" when you run it.
It is fully customizable.
'';
homepage = "https://www.gnu.org/software/hello/manual/";
license = licenses.gpl3Plus;
maintainers = with maintainers; [ eelco ];
platforms = platforms.all;
license = lib.licenses.gpl3Plus;
maintainers = with lib.maintainers; [ eelco ];
platforms = lib.platforms.all;
};
```

1
lib/.version Normal file
View file

@ -0,0 +1 @@
24.05

View file

@ -2,47 +2,87 @@
rec {
/* Throw if pred is false, else return pred.
Intended to be used to augment asserts with helpful error messages.
/**
Throw if pred is false, else return pred.
Intended to be used to augment asserts with helpful error messages.
Example:
assertMsg false "nope"
stderr> error: nope
# Inputs
assert assertMsg ("foo" == "bar") "foo is not bar, silly"; ""
stderr> error: foo is not bar, silly
`pred`
Type:
assertMsg :: Bool -> String -> Bool
: Predicate that needs to succeed, otherwise `msg` is thrown
`msg`
: Message to throw in case `pred` fails
# Type
```
assertMsg :: Bool -> String -> Bool
```
# Examples
:::{.example}
## `lib.asserts.assertMsg` usage example
```nix
assertMsg false "nope"
stderr> error: nope
assert assertMsg ("foo" == "bar") "foo is not bar, silly"; ""
stderr> error: foo is not bar, silly
```
:::
*/
# TODO(Profpatsch): add tests that check stderr
assertMsg =
# Predicate that needs to succeed, otherwise `msg` is thrown
pred:
# Message to throw in case `pred` fails
msg:
pred || builtins.throw msg;
/* Specialized `assertMsg` for checking if `val` is one of the elements
of the list `xs`. Useful for checking enums.
/**
Specialized `assertMsg` for checking if `val` is one of the elements
of the list `xs`. Useful for checking enums.
Example:
let sslLibrary = "libressl";
in assertOneOf "sslLibrary" sslLibrary [ "openssl" "bearssl" ]
stderr> error: sslLibrary must be one of [
stderr> "openssl"
stderr> "bearssl"
stderr> ], but is: "libressl"
# Inputs
Type:
assertOneOf :: String -> ComparableVal -> List ComparableVal -> Bool
`name`
: The name of the variable the user entered `val` into, for inclusion in the error message
`val`
: The value of what the user provided, to be compared against the values in `xs`
`xs`
: The list of valid values
# Type
```
assertOneOf :: String -> ComparableVal -> List ComparableVal -> Bool
```
# Examples
:::{.example}
## `lib.asserts.assertOneOf` usage example
```nix
let sslLibrary = "libressl";
in assertOneOf "sslLibrary" sslLibrary [ "openssl" "bearssl" ]
stderr> error: sslLibrary must be one of [
stderr> "openssl"
stderr> "bearssl"
stderr> ], but is: "libressl"
```
:::
*/
assertOneOf =
# The name of the variable the user entered `val` into, for inclusion in the error message
name:
# The value of what the user provided, to be compared against the values in `xs`
val:
# The list of valid values
xs:
assertMsg
(lib.elem val xs)
@ -50,29 +90,51 @@ rec {
lib.generators.toPretty {} xs}, but is: ${
lib.generators.toPretty {} val}";
/* Specialized `assertMsg` for checking if every one of `vals` is one of the elements
of the list `xs`. Useful for checking lists of supported attributes.
/**
Specialized `assertMsg` for checking if every one of `vals` is one of the elements
of the list `xs`. Useful for checking lists of supported attributes.
Example:
let sslLibraries = [ "libressl" "bearssl" ];
in assertEachOneOf "sslLibraries" sslLibraries [ "openssl" "bearssl" ]
stderr> error: each element in sslLibraries must be one of [
stderr> "openssl"
stderr> "bearssl"
stderr> ], but is: [
stderr> "libressl"
stderr> "bearssl"
stderr> ]
# Inputs
Type:
assertEachOneOf :: String -> List ComparableVal -> List ComparableVal -> Bool
`name`
: The name of the variable the user entered `val` into, for inclusion in the error message
`vals`
: The list of values of what the user provided, to be compared against the values in `xs`
`xs`
: The list of valid values
# Type
```
assertEachOneOf :: String -> List ComparableVal -> List ComparableVal -> Bool
```
# Examples
:::{.example}
## `lib.asserts.assertEachOneOf` usage example
```nix
let sslLibraries = [ "libressl" "bearssl" ];
in assertEachOneOf "sslLibraries" sslLibraries [ "openssl" "bearssl" ]
stderr> error: each element in sslLibraries must be one of [
stderr> "openssl"
stderr> "bearssl"
stderr> ], but is: [
stderr> "libressl"
stderr> "bearssl"
stderr> ]
```
:::
*/
assertEachOneOf =
# The name of the variable the user entered `val` into, for inclusion in the error message
name:
# The list of values of what the user provided, to be compared against the values in `xs`
vals:
# The list of valid values
xs:
assertMsg
(lib.all (val: lib.elem val xs) vals)

View file

@ -216,8 +216,7 @@ rec {
attrPath:
# The nested attribute set to find the value in.
set:
let errorMsg = "cannot find attribute `" + concatStringsSep "." attrPath + "'";
in attrByPath attrPath (abort errorMsg) set;
attrByPath attrPath (abort ("cannot find attribute `" + concatStringsSep "." attrPath + "'")) set;
/* Map each attribute in the given set and merge them into a new attribute set.
@ -680,65 +679,79 @@ rec {
attrsToList = mapAttrsToList nameValuePair;
/* Like `mapAttrs`, except that it recursively applies itself to
the *leaf* attributes of a potentially-nested attribute set:
the second argument of the function will never be an attrset.
Also, the first argument of the argument function is a *list*
of the attribute names that form the path to the leaf attribute.
/**
Like `mapAttrs`, except that it recursively applies itself to the *leaf* attributes of a potentially-nested attribute set:
the second argument of the function will never be an attrset.
Also, the first argument of the mapping function is a *list* of the attribute names that form the path to the leaf attribute.
For a function that gives you control over what counts as a leaf,
see `mapAttrsRecursiveCond`.
For a function that gives you control over what counts as a leaf, see `mapAttrsRecursiveCond`.
Example:
mapAttrsRecursive (path: value: concatStringsSep "-" (path ++ [value]))
{ n = { a = "A"; m = { b = "B"; c = "C"; }; }; d = "D"; }
=> { n = { a = "n-a-A"; m = { b = "n-m-b-B"; c = "n-m-c-C"; }; }; d = "d-D"; }
:::{#map-attrs-recursive-example .example}
# Map over leaf attributes
Type:
mapAttrsRecursive :: ([String] -> a -> b) -> AttrSet -> AttrSet
```nix
mapAttrsRecursive (path: value: concatStringsSep "-" (path ++ [value]))
{ n = { a = "A"; m = { b = "B"; c = "C"; }; }; d = "D"; }
```
evaluates to
```nix
{ n = { a = "n-a-A"; m = { b = "n-m-b-B"; c = "n-m-c-C"; }; }; d = "d-D"; }
```
:::
# Type
```
mapAttrsRecursive :: ([String] -> a -> b) -> AttrSet -> AttrSet
```
*/
mapAttrsRecursive =
# A function, given a list of attribute names and a value, returns a new value.
# A function that, given an attribute path as a list of strings and the corresponding attribute value, returns a new value.
f:
# Set to recursively map over.
# Attribute set to recursively map over.
set:
mapAttrsRecursiveCond (as: true) f set;
/* Like `mapAttrsRecursive`, but it takes an additional predicate
function that tells it whether to recurse into an attribute
set. If it returns false, `mapAttrsRecursiveCond` does not
recurse, but does apply the map function. If it returns true, it
does recurse, and does not apply the map function.
/**
Like `mapAttrsRecursive`, but it takes an additional predicate that tells it whether to recurse into an attribute set.
If the predicate returns false, `mapAttrsRecursiveCond` does not recurse, but instead applies the mapping function.
If the predicate returns true, it does recurse, and does not apply the mapping function.
Example:
# To prevent recursing into derivations (which are attribute
# sets with the attribute "type" equal to "derivation"):
mapAttrsRecursiveCond
(as: !(as ? "type" && as.type == "derivation"))
(x: ... do something ...)
attrs
:::{#map-attrs-recursive-cond-example .example}
# Map over an leaf attributes defined by a condition
Type:
mapAttrsRecursiveCond :: (AttrSet -> Bool) -> ([String] -> a -> b) -> AttrSet -> AttrSet
Map derivations to their `name` attribute.
Derivatons are identified as attribute sets that contain `{ type = "derivation"; }`.
```nix
mapAttrsRecursiveCond
(as: !(as ? "type" && as.type == "derivation"))
(x: x.name)
attrs
```
:::
# Type
```
mapAttrsRecursiveCond :: (AttrSet -> Bool) -> ([String] -> a -> b) -> AttrSet -> AttrSet
```
*/
mapAttrsRecursiveCond =
# A function, given the attribute set the recursion is currently at, determine if to recurse deeper into that attribute set.
# A function that, given the attribute set the recursion is currently at, determines if to recurse deeper into that attribute set.
cond:
# A function, given a list of attribute names and a value, returns a new value.
# A function that, given an attribute path as a list of strings and the corresponding attribute value, returns a new value.
# The attribute value is either an attribute set for which `cond` returns false, or something other than an attribute set.
f:
# Attribute set to recursively map over.
set:
let
recurse = path:
let
g =
name: value:
mapAttrs
(name: value:
if isAttrs value && cond value
then recurse (path ++ [name]) value
else f (path ++ [name]) value;
in mapAttrs g;
in recurse [] set;
then recurse (path ++ [ name ]) value
else f (path ++ [ name ]) value);
in
recurse [ ] set;
/* Generate an attribute set by mapping a function over a list of
@ -870,10 +883,7 @@ rec {
Type:
zipAttrs :: [ AttrSet ] -> AttrSet
*/
zipAttrs =
# List of attribute sets to zip together.
sets:
zipAttrsWith (name: values: values) sets;
zipAttrs = zipAttrsWith (name: values: values);
/*
Merge a list of attribute sets together using the `//` operator.
@ -1138,10 +1148,7 @@ rec {
Type: chooseDevOutputs :: [Derivation] -> [String]
*/
chooseDevOutputs =
# List of packages to pick `dev` outputs from
drvs:
builtins.map getDev drvs;
chooseDevOutputs = builtins.map getDev;
/* Make various Nix tools consider the contents of the resulting
attribute set when looking for what to build, find, etc.

View file

@ -221,9 +221,10 @@ rec {
let
f = if isFunction fn then fn else import fn;
auto = intersectAttrs (functionArgs f) autoArgs;
mirrorArgs = mirrorFunctionArgs f;
origArgs = auto // args;
pkgs = f origArgs;
mkAttrOverridable = name: _: makeOverridable (newArgs: (f newArgs).${name}) origArgs;
mkAttrOverridable = name: _: makeOverridable (mirrorArgs (newArgs: (f newArgs).${name})) origArgs;
in
if isDerivation pkgs then throw
("function `callPackages` was called on a *single* derivation "
@ -305,18 +306,129 @@ rec {
in if drv == null then null else
deepSeq drv' drv';
/* Make a set of packages with a common scope. All packages called
with the provided `callPackage` will be evaluated with the same
arguments. Any package in the set may depend on any other. The
`overrideScope'` function allows subsequent modification of the package
set in a consistent way, i.e. all packages in the set will be
called with the overridden packages. The package sets may be
hierarchical: the packages in the set are called with the scope
provided by `newScope` and the set provides a `newScope` attribute
which can form the parent scope for later package sets.
/**
Make an attribute set (a "scope") from functions that take arguments from that same attribute set.
See [](#ex-makeScope) for how to use it.
Type:
makeScope :: (AttrSet -> ((AttrSet -> a) | Path) -> AttrSet -> a) -> (AttrSet -> AttrSet) -> AttrSet
# Inputs
1. `newScope` (`AttrSet -> ((AttrSet -> a) | Path) -> AttrSet -> a`)
A function that takes an attribute set `attrs` and returns what ends up as `callPackage` in the output.
Typical values are `callPackageWith` or the output attribute `newScope`.
2. `f` (`AttrSet -> AttrSet`)
A function that takes an attribute set as returned by `makeScope newScope f` (a "scope") and returns any attribute set.
This function is used to compute the fixpoint of the resulting scope using `callPackage`.
Its argument is the lazily evaluated reference to the value of that fixpoint, and is typically called `self` or `final`.
See [](#ex-makeScope) for how to use it.
See [](#sec-functions-library-fixedPoints) for details on fixpoint computation.
# Output
`makeScope` returns an attribute set of a form called `scope`, which also contains the final attributes produced by `f`:
```
scope :: {
callPackage :: ((AttrSet -> a) | Path) -> AttrSet -> a
newScope :: AttrSet -> scope
overrideScope :: (scope -> scope -> AttrSet) -> scope
packages :: AttrSet -> AttrSet
}
```
- `callPackage` (`((AttrSet -> a) | Path) -> AttrSet -> a`)
A function that
1. Takes a function `p`, or a path to a Nix file that contains a function `p`, which takes an attribute set and returns value of arbitrary type `a`,
2. Takes an attribute set `args` with explicit attributes to pass to `p`,
3. Calls `f` with attributes from the original attribute set `attrs` passed to `newScope` updated with `args`, i.e. `attrs // args`, if they match the attributes in the argument of `p`.
All such functions `p` will be called with the same value for `attrs`.
See [](#ex-makeScope-callPackage) for how to use it.
- `newScope` (`AttrSet -> scope`)
Takes an attribute set `attrs` and returns a scope that extends the original scope.
- `overrideScope` (`(scope -> scope -> AttrSet) -> scope`)
Takes a function `g` of the form `final: prev: { # attributes }` to act as an overlay on `f`, and returns a new scope with values determined by `extends g f`.
See [](https://nixos.org/manual/nixpkgs/unstable/#function-library-lib.fixedPoints.extends) for details.
This allows subsequent modification of the final attribute set in a consistent way, i.e. all functions `p` invoked with `callPackage` will be called with the modified values.
- `packages` (`AttrSet -> AttrSet`)
The value of the argument `f` to `makeScope`.
- final attributes
The final values returned by `f`.
# Examples
:::{#ex-makeScope .example}
# Create an interdependent package set on top of `pkgs`
The functions in `foo.nix` and `bar.nix` can depend on each other, in the sense that `foo.nix` can contain a function that expects `bar` as an attribute in its argument.
```nix
let
pkgs = import <nixpkgs> { };
in
pkgs.lib.makeScope pkgs.newScope (self: {
foo = self.callPackage ./foo.nix { };
bar = self.callPackage ./bar.nix { };
})
```
evaluates to
```nix
{
callPackage = «lambda»;
newScope = «lambda»;
overrideScope = «lambda»;
packages = «lambda»;
foo = «derivation»;
bar = «derivation»;
}
```
:::
:::{#ex-makeScope-callPackage .example}
# Using `callPackage` from a scope
```nix
let
pkgs = import <nixpkgs> { };
inherit (pkgs) lib;
scope = lib.makeScope lib.callPackageWith (self: { a = 1; b = 2; });
three = scope.callPackage ({ a, b }: a + b) { };
four = scope.callPackage ({ a, b }: a + b) { a = 2; };
in
[ three four ]
```
evaluates to
```nix
[ 3 4 ]
```
:::
# Type
```
makeScope :: (AttrSet -> ((AttrSet -> a) | Path) -> AttrSet -> a) -> (AttrSet -> AttrSet) -> scope
```
*/
makeScope = newScope: f:
let self = f self // {

View file

@ -1,7 +1,20 @@
{ lib }:
let
inherit (lib) throwIfNot;
inherit (lib)
genAttrs
isString
throwIfNot
;
showMaybeAttrPosPre = prefix: attrName: v:
let pos = builtins.unsafeGetAttrPos attrName v;
in if pos == null then "" else "${prefix}${pos.file}:${toString pos.line}:${toString pos.column}";
showMaybePackagePosPre = prefix: pkg:
if pkg?meta.position && isString pkg.meta.position
then "${prefix}${pkg.meta.position}"
else "";
in
{
/*
@ -64,6 +77,11 @@ in
#
# This can be used for adding package attributes, such as `tests`.
passthru ? { }
, # Optional list of assumed outputs. Default: ["out"]
#
# This must match the set of outputs that the returned derivation has.
# You must use this when the derivation has multiple outputs.
outputs ? [ "out" ]
}:
let
# These checks are strict in `drv` and some `drv` attributes, but the
@ -71,11 +89,40 @@ in
# Instead, the individual derivation attributes do depend on it.
checked =
throwIfNot (derivation.type or null == "derivation")
"lazySimpleDerivation: input must be a derivation."
"lazyDerivation: input must be a derivation."
throwIfNot
(derivation.outputs == [ "out" ])
# Supporting multiple outputs should be a matter of inheriting more attrs.
"The derivation ${derivation.name or "<unknown>"} has multiple outputs. This is not supported by lazySimpleDerivation yet. Support could be added, and be useful as long as the set of outputs is known in advance, without evaluating the actual derivation."
# NOTE: Technically we could require our outputs to be a subset of the
# actual ones, or even leave them unchecked and fail on a lazy basis.
# However, consider the case where an output is added in the underlying
# derivation, such as dev. lazyDerivation would remove it and cause it
# to fail as a buildInputs item, without any indication as to what
# happened. Hence the more stringent condition. We could consider
# adding a flag to control this behavior if there's a valid case for it,
# but the documentation must have a note like this.
(derivation.outputs == outputs)
''
lib.lazyDerivation: The derivation ${derivation.name or "<unknown>"} has outputs that don't match the assumed outputs.
Assumed outputs passed to lazyDerivation${showMaybeAttrPosPre ",\n at " "outputs" args}:
${lib.generators.toPretty { multiline = false; } outputs};
Actual outputs of the derivation${showMaybePackagePosPre ",\n defined at " derivation}:
${lib.generators.toPretty { multiline = false; } derivation.outputs}
If the outputs are known ahead of evaluating the derivation,
then update the lazyDerivation call to match the actual outputs, in the same order.
If lazyDerivation is passed a literal value, just change it to the actual outputs.
As a result it will work as before / as intended.
Otherwise, when the outputs are dynamic and can't be known ahead of time, it won't
be possible to add laziness, but lib.lazyDerivation may still be useful for trimming
the attributes.
If you want to keep trimming the attributes, make sure that the package is in a
variable (don't evaluate it twice!) and pass the variable and its outputs attribute
to lib.lazyDerivation. This largely defeats laziness, but keeps the trimming.
If none of the above works for you, replace the lib.lazyDerivation call by the
expression in the derivation argument.
''
derivation;
in
{
@ -92,12 +139,15 @@ in
# A fixed set of derivation values, so that `lazyDerivation` can return
# its attrset before evaluating `derivation`.
# This must only list attributes that are available on _all_ derivations.
inherit (checked) outputs out outPath outputName drvPath name system;
inherit (checked) outPath outputName drvPath name system;
inherit outputs;
# The meta attribute can either be taken from the derivation, or if the
# `lazyDerivation` caller knew a shortcut, be taken from there.
meta = args.meta or checked.meta;
} // passthru;
}
// genAttrs outputs (outputName: checked.${outputName})
// passthru;
/* Conditionally set a derivation attribute.

View file

@ -412,6 +412,11 @@ in mkLicense lset) ({
fullName = "Detection Rule License 1.0";
};
dtoa = {
spdxId = "dtoa";
fullName = "dtoa License";
};
eapl = {
fullName = "EPSON AVASYS PUBLIC LICENSE";
url = "https://avasys.jp/hp/menu000000700/hpg000000603.htm";
@ -1066,6 +1071,11 @@ in mkLicense lset) ({
url = "https://sources.debian.org/copyright/license/debianutils/4.9.1/";
};
smlnj = {
spdxId = "SMLNJ";
fullName = "Standard ML of New Jersey License";
};
sspl = {
shortName = "SSPL";
fullName = "Server Side Public License";
@ -1215,6 +1225,11 @@ in mkLicense lset) ({
url = "https://mcj.sourceforge.net/authors.html#xfig";
};
xinetd = {
spdxId = "xinetd";
fullName = "xinetd License";
};
zlib = {
spdxId = "Zlib";
fullName = "zlib License";
@ -1229,6 +1244,11 @@ in mkLicense lset) ({
spdxId = "ZPL-2.1";
fullName = "Zope Public License 2.1";
};
xskat = {
spdxId = "XSkat";
fullName = "XSkat License";
};
} // {
# TODO: remove legacy aliases
agpl3 = {

View file

@ -1038,30 +1038,32 @@ rec {
toInt "3.14"
=> error: floating point JSON numbers are not supported
*/
toInt = str:
toInt =
let
matchStripInput = match "[[:space:]]*(-?[[:digit:]]+)[[:space:]]*";
matchLeadingZero = match "0[[:digit:]]+";
in
str:
let
# RegEx: Match any leading whitespace, possibly a '-', one or more digits,
# and finally match any trailing whitespace.
strippedInput = match "[[:space:]]*(-?[[:digit:]]+)[[:space:]]*" str;
strippedInput = matchStripInput str;
# RegEx: Match a leading '0' then one or more digits.
isLeadingZero = match "0[[:digit:]]+" (head strippedInput) == [];
isLeadingZero = matchLeadingZero (head strippedInput) == [];
# Attempt to parse input
parsedInput = fromJSON (head strippedInput);
generalError = "toInt: Could not convert ${escapeNixString str} to int.";
octalAmbigError = "toInt: Ambiguity in interpretation of ${escapeNixString str}"
+ " between octal and zero padded integer.";
in
# Error on presence of non digit characters.
if strippedInput == null
then throw generalError
# Error on presence of leading zero/octal ambiguity.
else if isLeadingZero
then throw octalAmbigError
then throw "toInt: Ambiguity in interpretation of ${escapeNixString str} between octal and zero padded integer."
# Error if parse function fails.
else if !isInt parsedInput
then throw generalError
@ -1089,15 +1091,20 @@ rec {
toIntBase10 "3.14"
=> error: floating point JSON numbers are not supported
*/
toIntBase10 = str:
toIntBase10 =
let
matchStripInput = match "[[:space:]]*0*(-?[[:digit:]]+)[[:space:]]*";
matchZero = match "0+";
in
str:
let
# RegEx: Match any leading whitespace, then match any zero padding,
# capture possibly a '-' followed by one or more digits,
# and finally match any trailing whitespace.
strippedInput = match "[[:space:]]*0*(-?[[:digit:]]+)[[:space:]]*" str;
strippedInput = matchStripInput str;
# RegEx: Match at least one '0'.
isZero = match "0+" (head strippedInput) == [];
isZero = matchZero (head strippedInput) == [];
# Attempt to parse input
parsedInput = fromJSON (head strippedInput);

View file

@ -55,6 +55,24 @@ runTests {
expected = { a = false; b = false; c = true; };
};
testCallPackageWithOverridePreservesArguments =
let
f = { a ? 0, b }: {};
f' = callPackageWith { a = 1; b = 2; } f {};
in {
expr = functionArgs f'.override;
expected = functionArgs f;
};
testCallPackagesWithOverridePreservesArguments =
let
f = { a ? 0, b }: { nested = {}; };
f' = callPackagesWith { a = 1; b = 2; } f {};
in {
expr = functionArgs f'.nested.override;
expected = functionArgs f;
};
# TRIVIAL
testId = {
@ -1973,6 +1991,24 @@ runTests {
}).drvPath;
};
testLazyDerivationMultiOutputReturnsDerivationAttrs = let
derivation = {
type = "derivation";
outputs = ["out" "dev"];
dev = "test dev";
out = "test out";
outPath = "test outPath";
outputName = "out";
drvPath = "test drvPath";
name = "test name";
system = "test system";
meta.position = "/hi:23";
};
in {
expr = lazyDerivation { inherit derivation; outputs = ["out" "dev"]; passthru.meta.position = "/hi:23"; };
expected = derivation;
};
testTypeDescriptionInt = {
expr = (with types; int).description;
expected = "signed integer";

View file

@ -6,12 +6,19 @@
{ config, lib, ... }:
with lib;
let
inherit (lib)
mkAliasOptionModule
mkForce
mkOption
types
;
in
{
options = {
# A simple boolean option that can be enabled or disabled.
enable = lib.mkOption {
enable = mkOption {
type = types.nullOr types.bool;
default = null;
example = true;
@ -41,7 +48,7 @@ with lib;
# should override the next import.
( { config, lib, ... }:
{
enableAlias = lib.mkForce false;
enableAlias = mkForce false;
}
)

View file

@ -6,12 +6,19 @@
{ config, lib, ... }:
with lib;
let
inherit (lib)
mkAliasOptionModule
mkDefault
mkOption
types
;
in
{
options = {
# A simple boolean option that can be enabled or disabled.
enable = lib.mkOption {
enable = mkOption {
type = types.nullOr types.bool;
default = null;
example = true;
@ -41,7 +48,7 @@ with lib;
# should be able to be overridden by the next import.
( { config, lib, ... }:
{
enableAlias = lib.mkDefault false;
enableAlias = mkDefault false;
}
)

View file

@ -2,7 +2,14 @@
, extendModules
, ...
}:
with lib;
let
inherit (lib)
mkOption
mkOverride
types
;
in
{
imports = [

View file

@ -53,6 +53,12 @@ pkgs.runCommand "nixpkgs-lib-tests-nix-${nix.version}" {
echo "Running lib/tests/modules.sh"
bash lib/tests/modules.sh
echo "Checking lib.version"
nix-instantiate lib -A version --eval || {
echo "lib.version does not evaluate when lib is isolated from the rest of the nixpkgs tree"
exit 1
}
echo "Running lib/tests/filesystem.sh"
TEST_LIB=$PWD/lib bash lib/tests/filesystem.sh

View file

@ -159,7 +159,7 @@ in {
version = release + versionSuffix;
/* Returns the current nixpkgs release number as string. */
release = lib.strings.fileContents ../.version;
release = lib.strings.fileContents ./.version;
/* The latest release that is supported, at the time of release branch-off,
if applicable.

View file

@ -681,7 +681,7 @@
};
ajs124 = {
email = "nix@ajs124.de";
matrix = "@andreas.schraegle:helsinki-systems.de";
matrix = "@ajs124:ajs124.de";
github = "ajs124";
githubId = 1229027;
name = "Andreas Schrägle";
@ -4247,6 +4247,12 @@
githubId = 49398;
name = "Daniël de Kok";
};
daniel-fahey = {
name = "Daniel Fahey";
email = "daniel.fahey+nixpkgs@pm.me";
github = "daniel-fahey";
githubId = 7294692;
};
danielfullmer = {
email = "danielrf12@gmail.com";
github = "danielfullmer";
@ -5594,6 +5600,12 @@
githubId = 5737945;
name = "Elia Argentieri";
};
elisesouche = {
email = "elise@souche.one";
github = "elisesouche";
githubId = 161958668;
name = "Élise Souche";
};
elitak = {
email = "elitak@gmail.com";
github = "elitak";
@ -6560,6 +6572,11 @@
githubId = 726447;
name = "Francisco Demartino";
};
frankp = {
github = "MDM23";
githubId = 10290864;
name = "Peter Frank";
};
franzmondlichtmann = {
name = "Franz Schroepf";
email = "franz-schroepf@t-online.de";
@ -7037,6 +7054,12 @@
githubId = 37602871;
name = "Galois";
};
ggg = {
email = "gggkiller2@gmail.com";
github = "GGG-KILLER";
githubId = 5892127;
name = "GGG";
};
ggpeti = {
email = "ggpeti@gmail.com";
matrix = "@ggpeti:ggpeti.com";
@ -7404,6 +7427,12 @@
githubId = 21156405;
name = "GuangTao Zhang";
};
Guanran928 = {
email = "guanran928@outlook.com";
github = "Guanran928";
githubId = 68757440;
name = "Guanran928";
};
guekka = {
github = "Guekka";
githubId = 39066502;
@ -7450,6 +7479,13 @@
githubId = 443978;
name = "Gabriel Volpe";
};
gwg313 = {
email = "gwg313@pm.me";
matrix = "@gwg313:matrix.org";
github = "gwg313";
githubId = 70684146;
name = "Glen Goodwin";
};
gytis-ivaskevicius = {
name = "Gytis Ivaskevicius";
email = "me@gytis.io";
@ -7936,6 +7972,12 @@
githubId = 1614615;
name = "Hendrik Schaeidt";
};
hsjobeki = {
email = "hsjobeki@gmail.com";
github = "hsjobeki";
githubId = 50398876;
name = "Johannes Kirschbauer";
};
htr = {
email = "hugo@linux.com";
github = "htr";
@ -8226,6 +8268,13 @@
github = "ilyakooo0";
githubId = 6209627;
};
imadnyc = {
email = "me@imad.nyc";
github = "imadnyc";
githubId = 113966166;
name = "Abdullah Imad";
matrix = "@dre:imad.nyc";
};
imalison = {
email = "IvanMalison@gmail.com";
github = "colonelpanic8";
@ -9130,6 +9179,12 @@
githubId = 8900;
name = "Johan Magnus Jonsson";
};
jmarmstrong1207 = {
name = "James Armstrong";
email = "jm.armstrong1207@gmail.com";
github = "jmarmstrong1207";
githubId = 32995055;
};
jmbaur = {
email = "jaredbaur@fastmail.com";
github = "jmbaur";
@ -9200,6 +9255,15 @@
githubId = 1102396;
name = "Jussi Maki";
};
joaquintrinanes = {
email = "hi@joaquint.io";
github = "JoaquinTrinanes";
name = "Joaquín Triñanes";
githubId = 1385934;
keys = [{
fingerprint = "3A13 5C15 E1D5 850D 2F90 AB25 6E14 46DD 451C 6BAF";
}];
};
jobojeha = {
email = "jobojeha@jeppener.de";
github = "jobojeha";
@ -9374,6 +9438,13 @@
githubId = 392720;
name = "Jon Banafato";
};
jonas-w = {
email = "nixpkgs@03j.de";
github = "jonas-w";
githubId = 32615971;
name = "Jonas Wunderlich";
matrix = "@matrix:03j.de";
};
jonathanmarler = {
email = "johnnymarler@gmail.com";
github = "marler8997";
@ -10515,6 +10586,15 @@
githubId = 70764075;
name = "kud";
};
kugland = {
email = "kugland@gmail.com";
github = "kugland";
githubId = 1173932;
name = "André Kugland";
keys = [{
fingerprint = "6A62 5E60 E3FF FCAE B3AA 50DC 1DA9 3817 80CD D833";
}];
};
kupac = {
github = "Kupac";
githubId = 8224569;
@ -10538,6 +10618,12 @@
githubId = 449813;
name = "Roman Kuznetsov";
};
kuznetsss = {
email = "kuzzz99@gmail.com";
github = "kuznetsss";
githubId = 15742918;
name = "Sergey Kuznetsov";
};
kwohlfahrt = {
email = "kai.wohlfahrt@gmail.com";
github = "kwohlfahrt";
@ -13923,6 +14009,11 @@
githubId = 3159451;
name = "Nicolas Schneider";
};
NIS = {
name = "NSC IT Solutions";
github = "dev-nis";
githubId = 132921300;
};
nitsky = {
name = "nitsky";
github = "nitsky";
@ -14877,6 +14968,12 @@
githubId = 8641;
name = "Pierre Carrier";
};
pcasaretto = {
email = "pcasaretto@gmail.com";
github = "pcasaretto";
githubId = 817039;
name = "Paulo Casaretto";
};
pedrohlc = {
email = "root@pedrohlc.com";
github = "PedroHLC";
@ -17020,6 +17117,15 @@
githubId = 132835;
name = "Samuel Dionne-Riel";
};
samuelefacenda = {
name = "Samuele Facenda";
email = "samuele.facenda@gmail.com";
github = "SamueleFacenda";
githubId = 92163673;
keys = [{
fingerprint = "3BA5 A3DB 3239 E2AC 1F3B 68A0 0DB8 3F58 B259 6271";
}];
};
samuel-martineau = {
name = "Samuel Martineau";
email = "samuel@smartineau.me";
@ -17062,6 +17168,12 @@
githubId = 1153271;
name = "Sander van der Burg";
};
Sanskarzz = {
email = "sanskar.gur@gmail.com";
github = "Sanskarzz";
githubId = 92817635;
name = "Sanskar Gurdasani";
};
sarcasticadmin = {
email = "rob@sarcasticadmin.com";
github = "sarcasticadmin";
@ -17074,6 +17186,15 @@
githubId = 178904;
name = "Daniel Ehlers";
};
sascha8a = {
email = "sascha@localhost.systems";
github = "sascha8a";
githubId = 6937965;
name = "Alexander Lampalzer";
keys = [{
fingerprint = "0350 3136 E22C C561 30E3 A4AE 2087 9CCA CD5C D670";
}];
};
saschagrunert = {
email = "mail@saschagrunert.de";
github = "saschagrunert";
@ -17098,6 +17219,12 @@
githubId = 8534888;
name = "Savanni D'Gerinel";
};
savedra1 = {
email = "michaelsavedra@gmail.com";
github = "savedra1";
githubId = 99875823;
name = "Michael Savedra";
};
savyajha = {
email = "savya.jha@hawkradius.com";
github = "savyajha";
@ -17712,6 +17839,7 @@
};
sikmir = {
email = "sikmir@disroot.org";
matrix = "@sikmir:matrix.org";
github = "sikmir";
githubId = 688044;
name = "Nikolay Korotkiy";
@ -18023,6 +18151,12 @@
githubId = 55726;
name = "Stanislav Ochotnický";
};
sodiboo = {
name = "sodiboo";
github = "sodiboo";
githubId = 37938646;
matrix = "@sodiboo:arcticfoxes.net";
};
softinio = {
email = "code@softinio.com";
github = "softinio";
@ -19140,6 +19274,12 @@
githubId = 3105057;
name = "Jan Beinke";
};
themaxmur = {
name = "Maxim Muravev";
email = "muravjev.mak@yandex.ru";
github = "TheMaxMur";
githubId = 31189199;
};
thenonameguy = {
email = "thenonameguy24@gmail.com";
name = "Krisztian Szabo";
@ -21067,6 +21207,12 @@
githubId = 973709;
name = "Jairo Llopis";
};
yamashitax = {
email = "hello@yamashit.ax";
github = "yamashitax";
githubId = 99486674;
name = "";
};
yana = {
email = "yana@riseup.net";
github = "yanateras";

View file

@ -93,6 +93,7 @@ CROSS_TARGETS=(
mips64el-unknown-linux-gnuabi64
mips64el-unknown-linux-gnuabin32
mipsel-unknown-linux-gnu
powerpc64-unknown-linux-gnuabielfv2
powerpc64le-unknown-linux-gnu
riscv64-unknown-linux-gnu
)

View file

@ -1,9 +1,8 @@
# Evaluate `release.nix' like Hydra would. Too bad nix-instantiate
# can't to do this.
with import ../../lib;
# Evaluate `release.nix' like Hydra would. Too bad nix-instantiate can't do this.
let
inherit (import ../../lib) isDerivation mapAttrs;
trace = if builtins.getEnv "VERBOSE" == "1" then builtins.trace else (x: y: y);
rel = removeAttrs (import ../../pkgs/top-level/release.nix { }) [ "tarball" "unstable" "xbursttools" ];

View file

@ -1,11 +1,22 @@
# This expression returns a list of all fetchurl calls used by expr.
with import ../.. { };
with lib;
{ expr }:
{ expr, lib ? import ../../lib }:
let
inherit (lib)
addErrorContext
attrNames
concatLists
const
filter
genericClosure
isAttrs
isDerivation
isList
mapAttrsToList
optional
optionals
;
root = expr;

View file

@ -4,6 +4,7 @@ import base64
import binascii
import json
import pathlib
from typing import Optional
from urllib.parse import urlparse
import bs4
@ -57,19 +58,26 @@ def to_sri(hash):
),
default=pathlib.Path(__file__).parent.parent.parent.parent
)
def main(set: str, version: str, nixpkgs: pathlib.Path):
@click.option(
"--sources-url",
type=str,
default=None,
)
def main(set: str, version: str, nixpkgs: pathlib.Path, sources_url: Optional[str]):
root_dir = nixpkgs / "pkgs/kde"
set_dir = root_dir / set
generated_dir = root_dir / "generated"
metadata = utils.KDERepoMetadata.from_json(generated_dir)
set_url = {
"frameworks": "kf",
"gear": "releases",
"plasma": "plasma",
}[set]
if sources_url is None:
set_url = {
"frameworks": "kf",
"gear": "releases",
"plasma": "plasma",
}[set]
sources_url = f"https://kde.org/info/sources/source-{set_url}-{version}.html"
sources = httpx.get(f"https://kde.org/info/sources/source-{set_url}-{version}.html")
sources = httpx.get(sources_url)
sources.raise_for_status()
bs = bs4.BeautifulSoup(sources.text, features="html.parser")
@ -101,7 +109,7 @@ def main(set: str, version: str, nixpkgs: pathlib.Path):
set_dir.mkdir(parents=True, exist_ok=True)
with (set_dir / "default.nix").open("w") as fd:
fd.write(ROOT_TEMPLATE.render(packages=results.keys()) + "\n")
fd.write(ROOT_TEMPLATE.render(packages=sorted(results.keys())) + "\n")
sources_dir = generated_dir / "sources"
sources_dir.mkdir(parents=True, exist_ok=True)

View file

@ -188,6 +188,7 @@ with lib.maintainers; {
ivar
mdarocha
corngood
ggg
raphaelr
jamiemagee
anpin
@ -429,7 +430,6 @@ with lib.maintainers; {
helsinki-systems = {
# Verify additions to this team with at least one already existing member of the team.
members = [
ajs124
das_j
];
scope = "Group registration for packages maintained by Helsinki Systems";
@ -494,6 +494,7 @@ with lib.maintainers; {
members = [
aanderse
cpages
dschrempf
edwtjo
minijackson
peterhoeg
@ -927,6 +928,18 @@ with lib.maintainers; {
shortName = "Serokell employees";
};
steam = {
members = [
atemu
eclairevoyant
jonringer
k900
mkg20001
];
scope = "Maintain steam module and packages";
shortName = "Steam";
};
systemd = {
members = [ ];
githubTeams = [

View file

@ -150,6 +150,7 @@ Or if you have an older card, you may have to use one of the legacy
drivers:
```nix
services.xserver.videoDrivers = [ "nvidiaLegacy470" ];
services.xserver.videoDrivers = [ "nvidiaLegacy390" ];
services.xserver.videoDrivers = [ "nvidiaLegacy340" ];
services.xserver.videoDrivers = [ "nvidiaLegacy304" ];

View file

@ -47,9 +47,8 @@ without having to know its implementation details.
```nix
{ config, lib, pkgs, ... }:
with lib;
let
inherit (lib) mkIf mkOption types;
cfg = config.programs.man;
in

View file

@ -18,3 +18,13 @@ you can view a log of the test:
```ShellSession
$ nix-store --read-log result
```
## System Requirements {#sec-running-nixos-tests-requirements}
NixOS tests require virtualization support.
This means that the machine must have `kvm` in its [system features](https://nixos.org/manual/nix/stable/command-ref/conf-file.html?highlight=system-features#conf-system-features) list, or `apple-virt` in case of macOS.
These features are autodetected locally, but `apple-virt` is only autodetected since Nix 2.19.0.
Features of **remote builders** must additionally be configured manually on the client, e.g. on NixOS with [`nix.buildMachines.*.supportedFeatures`](https://search.nixos.org/options?show=nix.buildMachines.*.supportedFeatures&sort=alpha_asc&query=nix.buildMachines) or through general [Nix configuration](https://nixos.org/manual/nix/stable/advanced-topics/distributed-builds).
If you run the tests on a **macOS** machine, you also need a "remote" builder for Linux; possibly a VM. [nix-darwin](https://daiderd.com/nix-darwin/) users may enable [`nix.linux-builder.enable`](https://daiderd.com/nix-darwin/manual/index.html#opt-nix.linux-builder.enable) to launch such a VM.

View file

@ -104,9 +104,8 @@ functions system environment substitution should *not* be disabled explicitly.
```nix
{ config, lib, pkgs, ... }:
with lib;
let
inherit (lib) concatStringsSep mkIf mkOption optionalString types;
cfg = config.services.locate;
in {
options.services.locate = {
@ -163,9 +162,7 @@ in {
::: {#exec-escaping-example .example}
### Escaping in Exec directives
```nix
{ config, lib, pkgs, utils, ... }:
with lib;
{ config, pkgs, utils, ... }:
let
cfg = config.services.echo;

View file

@ -272,6 +272,9 @@ update /etc/fstab.
# parted /dev/sda -- mkpart ESP fat32 1MB 512MB
# parted /dev/sda -- set 3 esp on
```
::: {.note}
In case you decided to not create a swap partition, replace `3` by `2`. To be sure of the id number of ESP, run `parted --list`.
:::
Once complete, you can follow with
[](#sec-installation-manual-partitioning-formatting).

View file

@ -127,6 +127,10 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
- The `power.ups` module now generates `upsd.conf`, `upsd.users` and `upsmon.conf` automatically from a set of new configuration options. This breaks compatibility with existing `power.ups` setups where these files were created manually. Back up these files before upgrading NixOS.
- `unrar` was updated to v7. See [changelog](https://www.rarlab.com/unrar7notes.htm) for more information.
- `k3s` was updated to [v1.29](https://github.com/k3s-io/k3s/releases/tag/v1.29.1%2Bk3s2). See [changelog and upgrade notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#urgent-upgrade-notes) for more information.
- `k9s` was updated to v0.31. There have been various breaking changes in the config file format,
check out the changelog of [v0.29](https://github.com/derailed/k9s/releases/tag/v0.29.0),
[v0.30](https://github.com/derailed/k9s/releases/tag/v0.30.0) and
@ -139,6 +143,11 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
The list in `nixos/modules/virtualisation/amazon-ec2-amis.nix` will stop
being updated and will be removed in the future.
- The option `services.postgresql.ensureUsers._.ensurePermissions` has been removed as it's
not declarative and is broken with newer postgresql versions. Consider using
[](#opt-services.postgresql.ensureUsers._.ensureDBOwnership)
instead or a tool that's more suited for managing the data inside a postgresql database.
- `idris2` was updated to v0.7.0. This version introduces breaking changes. Check out the [changelog](https://github.com/idris-lang/Idris2/blob/v0.7.0/CHANGELOG.md#v070) for details.
- `neo4j` has been updated to 5, you may want to read the [release notes for Neo4j 5](https://neo4j.com/release-notes/database/neo4j-5/)
@ -162,6 +171,10 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
- `paperless`' `services.paperless.extraConfig` setting has been removed and converted to the freeform type and option named `services.paperless.settings`.
- `services.homepage-dashboard` now takes it's configuration using native Nix expressions, rather than dumping templated configurations into `/var/lib/homepage-dashboard` where they were previously managed manually. There are now new options which allow the configuration of bookmarks, services, widgets and custom CSS/JS natively in Nix.
- `hare` may now be cross-compiled. For that to work, however, `haredoc` needed to stop being built together with it. Thus, the latter is now its own package with the name of `haredoc`.
- The legacy and long deprecated systemd target `network-interfaces.target` has been removed. Use `network.target` instead.
- `services.frp.settings` now generates the frp configuration file in TOML format as [recommended by upstream](https://github.com/fatedier/frp#configuration-files), instead of the legacy INI format. This has also introduced other changes in the configuration file structure and options.
@ -174,6 +187,10 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
release notes of [v19](https://github.com/systemd/mkosi/releases/tag/v19) and
[v20](https://github.com/systemd/mkosi/releases/tag/v20) for a list of changes.
- The `services.vikunja` systemd service now uses `vikunja` as dynamic user instead of `vikunja-api`. Database users might need to be changed.
- The `services.vikunja.setupNginx` setting has been removed. Users now need to setup the webserver configuration on their own with a proxy pass to the vikunja service.
- The `woodpecker-*` packages have been updated to v2 which includes [breaking changes](https://woodpecker-ci.org/docs/next/migrations#200).
- `services.nginx` will no longer advertise HTTP/3 availability automatically. This must now be manually added, preferably to each location block.
@ -356,6 +373,8 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
- A new hardening flag, `zerocallusedregs` was made available, corresponding to the gcc/clang option `-fzero-call-used-regs=used-gpr`.
- A new hardening flag, `trivialautovarinit` was made available, corresponding to the gcc/clang option `-ftrivial-auto-var-init=pattern`.
- New options were added to the dnsdist module to enable and configure a DNSCrypt endpoint (see `services.dnsdist.dnscrypt.enable`, etc.).
The module can generate the DNSCrypt provider key pair, certificates and also performs their rotation automatically with no downtime.
@ -370,6 +389,9 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
- [Nginx virtual hosts](#opt-services.nginx.virtualHosts) using `forceSSL` or
`globalRedirect` can now have redirect codes other than 301 through
- `bacula` now allows to configure `TLS` for encrypted communication.
`redirectCode`.
- `libjxl` 0.9.0 [dropped support for the butteraugli API](https://github.com/libjxl/libjxl/pull/2576). You will no longer be able to set `enableButteraugli` on `libaom`.

View file

@ -1,4 +1,4 @@
{ stdenv, closureInfo, xorriso, syslinux, libossp_uuid
{ lib, stdenv, callPackage, closureInfo, xorriso, syslinux, libossp_uuid, squashfsTools
, # The file name of the resulting ISO image.
isoName ? "cd.iso"
@ -16,6 +16,17 @@
# symlink to `object' that will be added to the CD.
storeContents ? []
, # In addition to `contents', the closure of the store paths listed
# in `squashfsContents' is compressed as squashfs and the result is
# placed in /nix-store.squashfs on the CD.
# FIXME: This is a performance optimization to avoid Hydra copying
# the squashfs between builders and should be removed when Hydra
# is smarter about scheduling.
squashfsContents ? []
, # Compression settings for squashfs
squashfsCompression ? "xz -Xdict-size 100%"
, # Whether this should be an El-Torito bootable CD.
bootable ? false
@ -45,12 +56,20 @@ assert bootable -> bootImage != "";
assert efiBootable -> efiBootImage != "";
assert usbBootable -> isohybridMbrImage != "";
let
# Whether a /nix-store.squashfs has to be built and placed on the CD.
needSquashfs = squashfsContents != [];
# Derivation packing the closure of `squashfsContents' into a squashfs
# image; only its nativeBuildInputs and buildCommand are reused by the
# ISO build below, the derivation itself is never built separately.
makeSquashfsDrv = callPackage ./make-squashfs.nix {
storeContents = squashfsContents;
comp = squashfsCompression;
};
in
stdenv.mkDerivation {
name = isoName;
__structuredAttrs = true;
buildCommandPath = ./make-iso9660-image.sh;
nativeBuildInputs = [ xorriso syslinux zstd libossp_uuid ];
nativeBuildInputs = [ xorriso syslinux zstd libossp_uuid ]
++ lib.optionals needSquashfs makeSquashfsDrv.nativeBuildInputs;
inherit isoName bootable bootImage compressImage volumeID efiBootImage efiBootable isohybridMbrImage usbBootable;
@ -60,6 +79,8 @@ stdenv.mkDerivation {
objects = map (x: x.object) storeContents;
symlinks = map (x: x.symlink) storeContents;
squashfsCommand = lib.optionalString needSquashfs makeSquashfsDrv.buildCommand;
# For obtaining the closure of `storeContents'.
closureInfo = closureInfo { rootPaths = map (x: x.object) storeContents; };
}

View file

@ -68,6 +68,11 @@ for i in $(< $closureInfo/store-paths); do
addPath "${i:1}" "$i"
done
# If needed, build a squashfs and add that
if [[ -n "$squashfsCommand" ]]; then
(out="nix-store.squashfs" eval "$squashfsCommand")
addPath "nix-store.squashfs" "nix-store.squashfs"
fi
# Also include a manifest of the closures in a format suitable for
# nix-store --load-db.

View file

@ -1,6 +1,3 @@
# mypy: disable-error-code="no-untyped-call"
# drop the above line when mypy is upgraded to include
# https://github.com/python/typeshed/commit/49b717ca52bf0781a538b04c0d76a5513f7119b8
import codecs
import os
import sys
@ -10,6 +7,7 @@ from contextlib import contextmanager
from queue import Empty, Queue
from typing import Any, Dict, Iterator
from xml.sax.saxutils import XMLGenerator
from xml.sax.xmlreader import AttributesImpl
from colorama import Fore, Style
@ -22,7 +20,7 @@ class Logger:
self.queue: "Queue[Dict[str, str]]" = Queue()
self.xml.startDocument()
self.xml.startElement("logfile", attrs={})
self.xml.startElement("logfile", attrs=AttributesImpl({}))
self._print_serial_logs = True
@ -44,7 +42,7 @@ class Logger:
return message
def log_line(self, message: str, attributes: Dict[str, str]) -> None:
self.xml.startElement("line", attributes)
self.xml.startElement("line", attrs=AttributesImpl(attributes))
self.xml.characters(message)
self.xml.endElement("line")
@ -89,8 +87,8 @@ class Logger:
)
)
self.xml.startElement("nest", attrs={})
self.xml.startElement("head", attributes)
self.xml.startElement("nest", attrs=AttributesImpl({}))
self.xml.startElement("head", attrs=AttributesImpl(attributes))
self.xml.characters(message)
self.xml.endElement("head")

View file

@ -66,7 +66,7 @@ with lib;
networkmanager-sstp = super.networkmanager-vpnc.override { withGnome = false; };
networkmanager-vpnc = super.networkmanager-vpnc.override { withGnome = false; };
pango = super.pango.override { x11Support = false; };
pinentry = super.pinentry.override { enabledFlavors = [ "curses" "tty" "emacs" ]; withLibsecret = false; };
pinentry-curses = super.pinentry-curses.override { withLibsecret = false; };
pipewire = super.pipewire.override { vulkanSupport = false; x11Support = false; };
pythonPackagesExtensions = super.pythonPackagesExtensions ++ [
(python-final: python-prev: {

View file

@ -704,6 +704,11 @@ in {
in stringAfter [ "users" ] ''
if [ -e ${lingerDir} ] ; then
cd ${lingerDir}
for user in ${lingerDir}/*; do
if ! id "$user" >/dev/null 2>&1; then
rm --force -- "$user"
fi
done
ls ${lingerDir} | sort | comm -3 -1 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl disable-linger
ls ${lingerDir} | sort | comm -3 -2 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl enable-linger
fi

View file

@ -3,6 +3,7 @@
{ lib
, runCommand
, runCommandLocal
, python3
, black
, ruff
@ -33,6 +34,7 @@
, seed
, definitionsDirectory
, sectorSize
, mkfsEnv ? {}
}:
let
@ -50,6 +52,11 @@ let
mypy --strict $out
'';
amendedRepartDefinitions = runCommandLocal "amended-repart.d" {} ''
definitions=$(${amendRepartDefinitions} ${partitions} ${definitionsDirectory})
cp -r $definitions $out
'';
fileSystemToolMapping = {
"vfat" = [ dosfstools mtools ];
"ext4" = [ e2fsprogs.bin ];
@ -74,28 +81,39 @@ in
runCommand imageFileBasename
{
__structuredAttrs = true;
nativeBuildInputs = [
systemd
fakeroot
util-linux
compressionPkg
] ++ fileSystemTools;
} ''
amendedRepartDefinitions=$(${amendRepartDefinitions} ${partitions} ${definitionsDirectory})
env = mkfsEnv;
systemdRepartFlags = [
"--dry-run=no"
"--empty=create"
"--size=auto"
"--seed=${seed}"
"--definitions=${amendedRepartDefinitions}"
"--split=${lib.boolToString split}"
"--json=pretty"
] ++ lib.optionals (sectorSize != null) [
"--sector-size=${toString sectorSize}"
];
passthru = {
inherit amendRepartDefinitions amendedRepartDefinitions;
};
} ''
mkdir -p $out
cd $out
echo "Building image with systemd-repart..."
unshare --map-root-user fakeroot systemd-repart \
--dry-run=no \
--empty=create \
--size=auto \
--seed="${seed}" \
--definitions="$amendedRepartDefinitions" \
--split="${lib.boolToString split}" \
--json=pretty \
${lib.optionalString (sectorSize != null) "--sector-size=${toString sectorSize}"} \
''${systemdRepartFlags[@]} \
${imageFileBasename}.raw \
| tee repart-output.json

View file

@ -60,6 +60,11 @@ let
};
};
};
# Translate the `mkfsOptions` attrset (filesystem type -> list of flags)
# into the SYSTEMD_REPART_MKFS_OPTIONS_<FSTYPE> environment variables that
# systemd-repart understands.
mkfsOptionsToEnv = lib.mapAttrs' (
  fsType: flags:
    lib.nameValuePair
      "SYSTEMD_REPART_MKFS_OPTIONS_${lib.toUpper fsType}"
      (builtins.concatStringsSep " " flags)
);
in
{
options.image.repart = {
@ -183,6 +188,29 @@ in
'';
};
mkfsOptions = lib.mkOption {
type = with lib.types; attrsOf (listOf str);
default = {};
example = lib.literalExpression ''
{
vfat = [ "-S 512" "-c" ];
}
'';
description = lib.mdDoc ''
Specify extra options for created file systems. The specified options
are converted to individual environment variables of the format
`SYSTEMD_REPART_MKFS_OPTIONS_<FSTYPE>`.
See [upstream systemd documentation](https://github.com/systemd/systemd/blob/v255/docs/ENVIRONMENT.md?plain=1#L575-L577)
for information about the usage of these environment variables.
The example would produce the following environment variable:
```
SYSTEMD_REPART_MKFS_OPTIONS_VFAT="-S 512 -c"
```
'';
};
};
config = {
@ -239,11 +267,13 @@ in
(lib.mapAttrs (_n: v: { Partition = v.repartConfig; }) finalPartitions);
partitions = pkgs.writeText "partitions.json" (builtins.toJSON finalPartitions);
mkfsEnv = mkfsOptionsToEnv cfg.mkfsOptions;
in
pkgs.callPackage ./repart-image.nix {
systemd = cfg.package;
inherit (cfg) imageFileBasename compression split seed sectorSize;
inherit fileSystems definitionsDirectory partitions;
inherit fileSystems definitionsDirectory partitions mkfsEnv;
};
meta.maintainers = with lib.maintainers; [ nikstur ];

View file

@ -811,12 +811,6 @@ in
optional config.isoImage.includeSystemBuildDependencies
config.system.build.toplevel.drvPath;
# Create the squashfs image that contains the Nix store.
system.build.squashfsStore = pkgs.callPackage ../../../lib/make-squashfs.nix {
storeContents = config.isoImage.storeContents;
comp = config.isoImage.squashfsCompression;
};
# Individual files to be included on the CD, outside of the Nix
# store on the CD.
isoImage.contents =
@ -827,9 +821,6 @@ in
{ source = config.system.build.initialRamdisk + "/" + config.system.boot.loader.initrdFile;
target = "/boot/" + config.system.boot.loader.initrdFile;
}
{ source = config.system.build.squashfsStore;
target = "/nix-store.squashfs";
}
{ source = pkgs.writeText "version" config.system.nixos.label;
target = "/version.txt";
}
@ -878,6 +869,8 @@ in
bootable = config.isoImage.makeBiosBootable;
bootImage = "/isolinux/isolinux.bin";
syslinux = if config.isoImage.makeBiosBootable then pkgs.syslinux else null;
squashfsContents = config.isoImage.storeContents;
squashfsCompression = config.isoImage.squashfsCompression;
} // optionalAttrs (config.isoImage.makeUsbBootable && config.isoImage.makeBiosBootable) {
usbBootable = true;
isohybridMbrImage = "${pkgs.syslinux}/share/syslinux/isohdpfx.bin";

View file

@ -163,6 +163,7 @@
./programs/clash-verge.nix
./programs/cnping.nix
./programs/command-not-found/command-not-found.nix
./programs/coolercontrol.nix
./programs/criu.nix
./programs/darling.nix
./programs/dconf.nix
@ -719,6 +720,7 @@
./services/misc/libreddit.nix
./services/misc/lidarr.nix
./services/misc/lifecycled.nix
./services/misc/llama-cpp.nix
./services/misc/logkeys.nix
./services/misc/mame.nix
./services/misc/mbpfan.nix
@ -781,6 +783,7 @@
./services/misc/svnserve.nix
./services/misc/synergy.nix
./services/misc/sysprof.nix
./services/misc/tabby.nix
./services/misc/tandoor-recipes.nix
./services/misc/taskserver
./services/misc/tautulli.nix

View file

@ -98,6 +98,24 @@ in
}
'';
};
initialPrefs = mkOption {
type = types.attrs;
description = lib.mdDoc ''
Initial preferences are used to configure the browser for the first run.
Unlike {option}`programs.chromium.extraOpts`, initialPrefs can be changed by users in the browser settings.
More information can be found in the Chromium documentation:
<https://www.chromium.org/administrators/configuring-other-preferences/>
'';
default = {};
example = literalExpression ''
{
"first_run_tabs" = [
"https://nixos.org/"
];
}
'';
};
};
};
@ -110,6 +128,7 @@ in
{ source = "${cfg.plasmaBrowserIntegrationPackage}/etc/chromium/native-messaging-hosts/org.kde.plasma.browser_integration.json"; };
"chromium/policies/managed/default.json" = lib.mkIf (defaultProfile != {}) { text = builtins.toJSON defaultProfile; };
"chromium/policies/managed/extra.json" = lib.mkIf (cfg.extraOpts != {}) { text = builtins.toJSON cfg.extraOpts; };
"chromium/initial_preferences" = lib.mkIf (cfg.initialPrefs != {}) { text = builtins.toJSON cfg.initialPrefs; };
# for google-chrome https://www.chromium.org/administrators/linux-quick-start
"opt/chrome/native-messaging-hosts/org.kde.plasma.browser_integration.json" = lib.mkIf cfg.enablePlasmaBrowserIntegration
{ source = "${cfg.plasmaBrowserIntegrationPackage}/etc/opt/chrome/native-messaging-hosts/org.kde.plasma.browser_integration.json"; };

View file

@ -3,6 +3,7 @@
{
options.programs.clash-verge = {
enable = lib.mkEnableOption (lib.mdDoc "Clash Verge");
package = lib.mkPackageOption pkgs "clash-verge" {};
autoStart = lib.mkEnableOption (lib.mdDoc "Clash Verge auto launch");
tunMode = lib.mkEnableOption (lib.mdDoc "Clash Verge TUN mode");
};
@ -14,10 +15,10 @@
lib.mkIf cfg.enable {
environment.systemPackages = [
pkgs.clash-verge
cfg.package
(lib.mkIf cfg.autoStart (pkgs.makeAutostartItem {
name = "clash-verge";
package = pkgs.clash-verge;
package = cfg.package;
}))
];
@ -25,7 +26,7 @@
owner = "root";
group = "root";
capabilities = "cap_net_bind_service,cap_net_admin=+ep";
source = "${lib.getExe pkgs.clash-verge}";
source = "${lib.getExe cfg.package}";
};
};

View file

@ -0,0 +1,37 @@
# NixOS module for CoolerControl: installs the GUI and enables the
# coolercontrold / coolercontrol-liqctld background daemons.
{ config
, lib
, pkgs
, ...
}:
let
# Shorthand for this module's option set.
cfg = config.programs.coolercontrol;
in
{
##### interface
options = {
programs.coolercontrol.enable = lib.mkEnableOption (lib.mdDoc "CoolerControl GUI & its background services");
};
##### implementation
config = lib.mkIf cfg.enable {
# The GUI is the user-facing entry point.
environment.systemPackages = with pkgs.coolercontrol; [
coolercontrol-gui
];
systemd = {
# Ship the daemons' upstream systemd unit files.
packages = with pkgs.coolercontrol; [
coolercontrol-liqctld
coolercontrold
];
# https://github.com/NixOS/nixpkgs/issues/81138
# Units coming from `systemd.packages` are not started automatically,
# so the wantedBy relation has to be declared explicitly here.
services = {
coolercontrol-liqctld.wantedBy = [ "multi-user.target" ];
coolercontrold.wantedBy = [ "multi-user.target" ];
};
};
};
meta.maintainers = with lib.maintainers; [ OPNA2608 codifryed ];
}

View file

@ -1,8 +1,7 @@
{ config, lib, pkgs, ... }:
with lib;
let
inherit (lib) mkRemovedOptionModule mkOption mkPackageOption types mkIf optionalString;
cfg = config.programs.gnupg;
@ -26,8 +25,10 @@ let
"curses";
in
{
imports = [
(mkRemovedOptionModule [ "programs" "gnupg" "agent" "pinentryFlavor" ] "Use programs.gnupg.agent.pinentryPackage instead")
];
options.programs.gnupg = {
package = mkPackageOption pkgs "gnupg" { };
@ -66,17 +67,17 @@ in
'';
};
agent.pinentryFlavor = mkOption {
type = types.nullOr (types.enum pkgs.pinentry.flavors);
example = "gnome3";
default = defaultPinentryFlavor;
defaultText = literalMD ''matching the configured desktop environment'';
agent.pinentryPackage = mkOption {
type = types.nullOr types.package;
example = lib.literalMD "pkgs.pinentry-gnome3";
default = pkgs.pinentry-curses;
defaultText = lib.literalMD "matching the configured desktop environment or `pkgs.pinentry-curses`";
description = lib.mdDoc ''
Which pinentry interface to use. If not null, the path to the
pinentry binary will be set in /etc/gnupg/gpg-agent.conf.
If not set at all, it'll pick an appropriate flavor depending on the
system configuration (qt flavor for lxqt and plasma5, gtk2 for xfce
4.12, gnome3 on all other systems with X enabled, ncurses otherwise).
Which pinentry package to use. The path to the mainProgram as defined in
the package's meta attriutes will be set in /etc/gnupg/gpg-agent.conf.
If not set by the user, it'll pick an appropriate flavor depending on the
system configuration (qt flavor for lxqt and plasma5, gtk2 for xfce,
gnome3 on all other systems with X enabled, curses otherwise).
'';
};
@ -102,9 +103,8 @@ in
};
config = mkIf cfg.agent.enable {
programs.gnupg.agent.settings = {
pinentry-program = lib.mkIf (cfg.agent.pinentryFlavor != null)
"${pkgs.pinentry.${cfg.agent.pinentryFlavor}}/bin/pinentry";
programs.gnupg.agent.settings = mkIf (cfg.agent.pinentryPackage != null) {
pinentry-program = lib.getExe cfg.agent.pinentryPackage;
};
environment.etc."gnupg/gpg-agent.conf".source =
@ -207,9 +207,9 @@ in
wantedBy = [ "sockets.target" ];
};
services.dbus.packages = mkIf (cfg.agent.pinentryFlavor == "gnome3") [ pkgs.gcr ];
services.dbus.packages = mkIf (lib.elem "gnome3" (cfg.agent.pinentryPackage.flavors or [])) [ pkgs.gcr ];
environment.systemPackages = with pkgs; [ cfg.package ];
environment.systemPackages = [ cfg.package ];
environment.interactiveShellInit = ''
# Bind gpg-agent to this TTY if gpg commands are used.
@ -230,12 +230,10 @@ in
'';
assertions = [
{ assertion = cfg.agent.enableSSHSupport -> !config.programs.ssh.startAgent;
{
assertion = cfg.agent.enableSSHSupport -> !config.programs.ssh.startAgent;
message = "You can't use ssh-agent and GnuPG agent with SSH support enabled at the same time!";
}
];
};
# uses attributes of the linked package
meta.buildDocsInSandbox = false;
}

View file

@ -43,6 +43,9 @@ in {
}
'';
apply = steam: steam.override (prev: {
extraEnv = (lib.optionalAttrs (cfg.extraCompatPackages != [ ]) {
STEAM_EXTRA_COMPAT_TOOLS_PATHS = makeBinPath cfg.extraCompatPackages;
}) // (prev.extraEnv or {});
extraLibraries = pkgs: let
prevLibs = if prev ? extraLibraries then prev.extraLibraries pkgs else [ ];
additionalLibs = with config.hardware.opengl;
@ -56,6 +59,8 @@ in {
# use the setuid wrapped bubblewrap
bubblewrap = "${config.security.wrapperDir}/..";
};
} // optionalAttrs cfg.extest.enable {
extraEnv.LD_PRELOAD = "${pkgs.pkgsi686Linux.extest}/lib/libextest.so";
});
description = lib.mdDoc ''
The Steam package to use. Additional libraries are added from the system
@ -66,6 +71,16 @@ in {
'';
};
extraCompatPackages = mkOption {
type = types.listOf types.package;
default = [ ];
description = lib.mdDoc ''
Extra packages to be used as compatibility tools for Steam on Linux. Packages will be included
in the `STEAM_EXTRA_COMPAT_TOOLS_PATHS` environmental variable. For more information see
<https://github.com/ValveSoftware/steam-for-linux/issues/6310">.
'';
};
remotePlay.openFirewall = mkOption {
type = types.bool;
default = false;
@ -114,6 +129,11 @@ in {
};
};
};
extest.enable = mkEnableOption (lib.mdDoc ''
Load the extest library into Steam, to translate X11 input events to
uinput events (e.g. for using Steam Input on Wayland)
'');
};
config = mkIf cfg.enable {
@ -167,5 +187,5 @@ in {
];
};
meta.maintainers = with maintainers; [ mkg20001 ];
meta.maintainers = teams.steam;
}

View file

@ -152,6 +152,7 @@ in {
'';
}
];
environment = {
systemPackages = optional (cfg.package != null) cfg.package ++ cfg.extraPackages;
# Needed for the default wallpaper:
@ -166,8 +167,12 @@ in {
"sway/config".source = mkOptionDefault "${cfg.package}/etc/sway/config";
};
};
programs.gnupg.agent.pinentryPackage = lib.mkDefault pkgs.pinentry-gnome3;
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1050913
xdg.portal.config.sway.default = mkDefault [ "wlr" "gtk" ];
# To make a Sway session available if a display manager like SDDM is enabled:
services.xserver.displayManager.sessionPackages = optionals (cfg.package != null) [ cfg.package ]; }
(import ./wayland-session.nix { inherit lib pkgs; })

View file

@ -4,11 +4,36 @@
# TODO: test configuration when building nixexpr (use -t parameter)
# TODO: support sqlite3 (is it deprecated?) and mysql
with lib;
let
inherit (lib)
concatStringsSep
literalExpression
mapAttrsToList
mdDoc
mkIf
mkOption
optional
optionalString
types
;
# State directory shared by all Bacula daemons.
libDir = "/var/lib/bacula";
# Render a Nix boolean as the "yes"/"no" strings Bacula's config format uses.
yes_no = bool: if bool then "yes" else "no";
# Render a TLS options submodule (see `tlsOptions`) into the TLS directives
# of a Bacula resource block; yields the empty string when TLS is disabled.
# Settings left at null / [ ] are omitted from the generated config.
tls_conf = tls_cfg: optionalString tls_cfg.enable (
concatStringsSep
"\n"
(
["TLS Enable = yes;"]
++ optional (tls_cfg.require != null) "TLS Require = ${yes_no tls_cfg.require};"
++ optional (tls_cfg.certificate != null) ''TLS Certificate = "${tls_cfg.certificate}";''
++ [''TLS Key = "${tls_cfg.key}";'']
++ optional (tls_cfg.verifyPeer != null) "TLS Verify Peer = ${yes_no tls_cfg.verifyPeer};"
++ optional (tls_cfg.allowedCN != [ ]) "TLS Allowed CN = ${concatStringsSep " " (tls_cfg.allowedCN)};"
++ optional (tls_cfg.caCertificateFile != null) ''TLS CA Certificate File = "${tls_cfg.caCertificateFile}";''
)
);
fd_cfg = config.services.bacula-fd;
fd_conf = pkgs.writeText "bacula-fd.conf"
''
@ -18,6 +43,7 @@ let
WorkingDirectory = ${libDir};
Pid Directory = /run;
${fd_cfg.extraClientConfig}
${tls_conf fd_cfg.tls}
}
${concatStringsSep "\n" (mapAttrsToList (name: value: ''
@ -25,6 +51,7 @@ let
Name = "${name}";
Password = ${value.password};
Monitor = ${value.monitor};
${tls_conf value.tls}
}
'') fd_cfg.director)}
@ -44,6 +71,7 @@ let
WorkingDirectory = ${libDir};
Pid Directory = /run;
${sd_cfg.extraStorageConfig}
${tls_conf sd_cfg.tls}
}
${concatStringsSep "\n" (mapAttrsToList (name: value: ''
@ -70,6 +98,7 @@ let
Name = "${name}";
Password = ${value.password};
Monitor = ${value.monitor};
${tls_conf value.tls}
}
'') sd_cfg.director)}
@ -90,6 +119,7 @@ let
Working Directory = ${libDir};
Pid Directory = /run/;
QueryFile = ${pkgs.bacula}/etc/query.sql;
${tls_conf dir_cfg.tls}
${dir_cfg.extraDirectorConfig}
}
@ -108,13 +138,99 @@ let
${dir_cfg.extraConfig}
'';
directorOptions = {...}:
# Build a markdown link to an option anchor in the rendered manual; the
# "<name>" placeholders are mangled to "_name_" the way option ids are.
linkOption = name: destination: "[${name}](#opt-${builtins.replaceStrings [ "<" ">"] ["_" "_"] destination})";
# Link to a TLS sub-option of the submodule located at `submodulePath`.
tlsLink = destination: submodulePath: linkOption "${submodulePath}.${destination}" "${submodulePath}.${destination}";
# Submodule describing the TLS directives shared by all Bacula daemon
# resources.  `submodulePath` is only used to render documentation links
# to sibling options (via `tlsLink`).
tlsOptions = submodulePath: {...}:
  {
    options = {
      enable = mkOption {
        type = types.bool;
        default = false;
        description = mdDoc ''
          Specifies if TLS should be enabled.
          If this is set to `false` TLS will be completely disabled, even if ${tlsLink "tls.require" submodulePath} is true.
        '';
      };
      require = mkOption {
        type = types.nullOr types.bool;
        default = null;
        description = mdDoc ''
          Require TLS or TLS-PSK encryption.
          This directive is ignored unless ${tlsLink "tls.enable" submodulePath} is true or TLS PSK Enable is set to `yes`.
          If TLS is not required while TLS or TLS-PSK are enabled, then the Bacula component
          will connect with other components either with or without TLS or TLS-PSK.
          If ${tlsLink "tls.enable" submodulePath} or TLS-PSK is enabled and TLS is required, then the Bacula
          component will refuse any connection request that does not use TLS.
        '';
      };
      certificate = mkOption {
        type = types.nullOr types.path;
        default = null;
        description = mdDoc ''
          The full path to the PEM encoded TLS certificate.
          It will be used as either a client or server certificate,
          depending on the connection direction.
          This directive is required in a server context, but it may
          not be specified in a client context if ${tlsLink "tls.verifyPeer" submodulePath} is
          `false` in the corresponding server context.
        '';
      };
      key = mkOption {
        type = types.path;
        description = mdDoc ''
          The path of a PEM encoded TLS private key.
          It must correspond to the TLS certificate.
        '';
      };
      verifyPeer = mkOption {
        type = types.nullOr types.bool;
        default = null;
        description = mdDoc ''
          Verify peer certificate.
          Instructs server to request and verify the client's X.509 certificate.
          Any client certificate signed by a known-CA will be accepted.
          Additionally, the client's X509 certificate Common Name must meet the value of the Address directive.
          If ${tlsLink "tls.allowedCN" submodulePath} is used,
          the client's x509 certificate Common Name must also correspond to
          one of the CN specified in the ${tlsLink "tls.allowedCN" submodulePath} directive.
          This directive is valid only for a server and not in client context.
          Standard from Bacula is `true`.
        '';
      };
      allowedCN = mkOption {
        type = types.listOf types.str;
        default = [ ];
        description = mdDoc ''
          Common name attribute of allowed peer certificates.
          This directive is valid for a server and in a client context.
          If this directive is specified, the peer certificate will be verified against this list.
          In the case this directive is configured on a server side, the allowed
          CN list will not be checked if ${tlsLink "tls.verifyPeer" submodulePath} is false.
        '';
      };
      caCertificateFile = mkOption {
        type = types.nullOr types.path;
        default = null;
        description = mdDoc ''
          The path specifying a PEM encoded TLS CA certificate(s).
          Multiple certificates are permitted in the file.
          One of TLS CA Certificate File or TLS CA Certificate Dir are required in a server context, unless
          ${tlsLink "tls.verifyPeer" submodulePath} is false, and are always required in a client context.
        '';
      };
    };
  };
directorOptions = submodulePath:{...}:
{
options = {
password = mkOption {
type = types.str;
# TODO: required?
description = lib.mdDoc ''
description = mdDoc ''
Specifies the password that must be supplied for the default Bacula
Console to be authorized. The same password must appear in the
Director resource of the Console configuration file. For added
@ -135,7 +251,7 @@ let
type = types.enum [ "no" "yes" ];
default = "no";
example = "yes";
description = lib.mdDoc ''
description = mdDoc ''
If Monitor is set to `no`, this director will have
full access to this Storage daemon. If Monitor is set to
`yes`, this director will only be able to fetch the
@ -146,6 +262,13 @@ let
security problems.
'';
};
tls = mkOption {
type = types.submodule (tlsOptions "${submodulePath}.director.<name>");
description = mdDoc ''
TLS Options for the Director in this Configuration.
'';
};
};
};
@ -154,7 +277,7 @@ let
options = {
changerDevice = mkOption {
type = types.str;
description = lib.mdDoc ''
description = mdDoc ''
The specified name-string must be the generic SCSI device name of the
autochanger that corresponds to the normal read/write Archive Device
specified in the Device resource. This generic SCSI device name
@ -173,7 +296,7 @@ let
changerCommand = mkOption {
type = types.str;
description = lib.mdDoc ''
description = mdDoc ''
The name-string specifies an external program to be called that will
automatically change volumes as required by Bacula. Normally, this
directive will be specified only in the AutoChanger resource, which
@ -195,14 +318,14 @@ let
};
devices = mkOption {
description = lib.mdDoc "";
description = mdDoc "";
type = types.listOf types.str;
};
extraAutochangerConfig = mkOption {
default = "";
type = types.lines;
description = lib.mdDoc ''
description = mdDoc ''
Extra configuration to be passed in Autochanger directive.
'';
example = ''
@ -219,7 +342,7 @@ let
archiveDevice = mkOption {
# TODO: required?
type = types.str;
description = lib.mdDoc ''
description = mdDoc ''
The specified name-string gives the system file name of the storage
device managed by this storage daemon. This will usually be the
device file name of a removable storage device (tape drive), for
@ -236,7 +359,7 @@ let
mediaType = mkOption {
# TODO: required?
type = types.str;
description = lib.mdDoc ''
description = mdDoc ''
The specified name-string names the type of media supported by this
device, for example, `DLT7000`. Media type names are
arbitrary in that you set them to anything you want, but they must be
@ -274,7 +397,7 @@ let
extraDeviceConfig = mkOption {
default = "";
type = types.lines;
description = lib.mdDoc ''
description = mdDoc ''
Extra configuration to be passed in Device directive.
'';
example = ''
@ -295,7 +418,7 @@ in {
enable = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
description = mdDoc ''
Whether to enable the Bacula File Daemon.
'';
};
@ -304,7 +427,7 @@ in {
default = "${config.networking.hostName}-fd";
defaultText = literalExpression ''"''${config.networking.hostName}-fd"'';
type = types.str;
description = lib.mdDoc ''
description = mdDoc ''
The client name that must be used by the Director when connecting.
Generally, it is a good idea to use a name related to the machine so
that error messages can be easily identified if you have multiple
@ -315,7 +438,7 @@ in {
port = mkOption {
default = 9102;
type = types.port;
description = lib.mdDoc ''
description = mdDoc ''
This specifies the port number on which the Client listens for
Director connections. It must agree with the FDPort specified in
the Client resource of the Director's configuration file.
@ -324,16 +447,26 @@ in {
director = mkOption {
default = {};
description = lib.mdDoc ''
description = mdDoc ''
This option defines director resources in Bacula File Daemon.
'';
type = with types; attrsOf (submodule directorOptions);
type = types.attrsOf (types.submodule (directorOptions "services.bacula-fd"));
};
tls = mkOption {
type = types.submodule (tlsOptions "services.bacula-fd");
default = { };
description = mdDoc ''
TLS Options for the File Daemon.
Important notice: The backup won't be encrypted.
'';
};
extraClientConfig = mkOption {
default = "";
type = types.lines;
description = lib.mdDoc ''
description = mdDoc ''
Extra configuration to be passed in Client directive.
'';
example = ''
@ -345,7 +478,7 @@ in {
extraMessagesConfig = mkOption {
default = "";
type = types.lines;
description = lib.mdDoc ''
description = mdDoc ''
Extra configuration to be passed in Messages directive.
'';
example = ''
@ -358,7 +491,7 @@ in {
enable = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
description = mdDoc ''
Whether to enable Bacula Storage Daemon.
'';
};
@ -367,7 +500,7 @@ in {
default = "${config.networking.hostName}-sd";
defaultText = literalExpression ''"''${config.networking.hostName}-sd"'';
type = types.str;
description = lib.mdDoc ''
description = mdDoc ''
Specifies the Name of the Storage daemon.
'';
};
@ -375,7 +508,7 @@ in {
port = mkOption {
default = 9103;
type = types.port;
description = lib.mdDoc ''
description = mdDoc ''
Specifies port number on which the Storage daemon listens for
Director connections.
'';
@ -383,32 +516,32 @@ in {
director = mkOption {
default = {};
description = lib.mdDoc ''
description = mdDoc ''
This option defines Director resources in Bacula Storage Daemon.
'';
type = with types; attrsOf (submodule directorOptions);
type = types.attrsOf (types.submodule (directorOptions "services.bacula-sd"));
};
device = mkOption {
default = {};
description = lib.mdDoc ''
description = mdDoc ''
This option defines Device resources in Bacula Storage Daemon.
'';
type = with types; attrsOf (submodule deviceOptions);
type = types.attrsOf (types.submodule deviceOptions);
};
autochanger = mkOption {
default = {};
description = lib.mdDoc ''
description = mdDoc ''
This option defines Autochanger resources in Bacula Storage Daemon.
'';
type = with types; attrsOf (submodule autochangerOptions);
type = types.attrsOf (types.submodule autochangerOptions);
};
extraStorageConfig = mkOption {
default = "";
type = types.lines;
description = lib.mdDoc ''
description = mdDoc ''
Extra configuration to be passed in Storage directive.
'';
example = ''
@ -420,13 +553,21 @@ in {
extraMessagesConfig = mkOption {
default = "";
type = types.lines;
description = lib.mdDoc ''
description = mdDoc ''
Extra configuration to be passed in Messages directive.
'';
example = ''
console = all
'';
};
tls = mkOption {
type = types.submodule (tlsOptions "services.bacula-sd");
default = { };
description = mdDoc ''
TLS Options for the Storage Daemon.
Important notice: The backup won't be encrypted.
'';
};
};
@ -434,7 +575,7 @@ in {
enable = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
description = mdDoc ''
Whether to enable Bacula Director Daemon.
'';
};
@ -443,7 +584,7 @@ in {
default = "${config.networking.hostName}-dir";
defaultText = literalExpression ''"''${config.networking.hostName}-dir"'';
type = types.str;
description = lib.mdDoc ''
description = mdDoc ''
The director name used by the system administrator. This directive is
required.
'';
@ -452,7 +593,7 @@ in {
port = mkOption {
default = 9101;
type = types.port;
description = lib.mdDoc ''
description = mdDoc ''
Specify the port (a positive integer) on which the Director daemon
will listen for Bacula Console connections. This same port number
must be specified in the Director resource of the Console
@ -465,7 +606,7 @@ in {
password = mkOption {
# TODO: required?
type = types.str;
description = lib.mdDoc ''
description = mdDoc ''
Specifies the password that must be supplied for a Director.
'';
};
@ -473,7 +614,7 @@ in {
extraMessagesConfig = mkOption {
default = "";
type = types.lines;
description = lib.mdDoc ''
description = mdDoc ''
Extra configuration to be passed in Messages directive.
'';
example = ''
@ -484,7 +625,7 @@ in {
extraDirectorConfig = mkOption {
default = "";
type = types.lines;
description = lib.mdDoc ''
description = mdDoc ''
Extra configuration to be passed in Director directive.
'';
example = ''
@ -496,13 +637,22 @@ in {
extraConfig = mkOption {
default = "";
type = types.lines;
description = lib.mdDoc ''
description = mdDoc ''
Extra configuration for Bacula Director Daemon.
'';
example = ''
TODO
'';
};
tls = mkOption {
type = types.submodule (tlsOptions "services.bacula-dir");
default = { };
description = mdDoc ''
TLS Options for the Director.
Important notice: The backup won't be encrypted.
'';
};
};
};

View file

@ -134,7 +134,7 @@ in
localSourceAllow = mkOption {
type = types.listOf types.str;
# Permissions snapshot and destroy are in case --no-sync-snap is not used
default = [ "bookmark" "hold" "send" "snapshot" "destroy" ];
default = [ "bookmark" "hold" "send" "snapshot" "destroy" "mount" ];
description = lib.mdDoc ''
Permissions granted for the {option}`services.syncoid.user` user
for local source datasets. See

View file

@ -107,10 +107,25 @@ in
wants = [ "network-online.target" ];
after = [ "network-online.target" ];
wantedBy = [ "multi-user.target" ];
# lldap defaults to a hardcoded `jwt_secret` value if none is provided, which is bad, because
# an attacker could create a valid admin jwt access token fairly trivially.
# Because there are 3 different ways `jwt_secret` can be provided, we check if any one of them is present,
# and if not, bootstrap a secret in `/var/lib/lldap/jwt_secret_file` and give that to lldap.
script = lib.optionalString (!cfg.settings ? jwt_secret) ''
if [[ -z "$LLDAP_JWT_SECRET_FILE" ]] && [[ -z "$LLDAP_JWT_SECRET" ]]; then
if [[ ! -e "./jwt_secret_file" ]]; then
${lib.getExe pkgs.openssl} rand -base64 -out ./jwt_secret_file 32
fi
export LLDAP_JWT_SECRET_FILE="./jwt_secret_file"
fi
'' + ''
${lib.getExe cfg.package} run --config-file ${format.generate "lldap_config.toml" cfg.settings}
'';
serviceConfig = {
ExecStart = "${lib.getExe cfg.package} run --config-file ${format.generate "lldap_config.toml" cfg.settings}";
StateDirectory = "lldap";
StateDirectoryMode = "0750";
WorkingDirectory = "%S/lldap";
UMask = "0027";
User = "lldap";
Group = "lldap";
DynamicUser = true;

View file

@ -37,7 +37,7 @@ in
description = lib.mdDoc "The port to bind to.";
};
enableUnixSocket = mkEnableOption (lib.mdDoc "unix socket at /run/memcached/memcached.sock");
enableUnixSocket = mkEnableOption (lib.mdDoc "Unix Domain Socket at /run/memcached/memcached.sock instead of listening on an IP address and port. The `listen` and `port` options are ignored.");
maxMemory = mkOption {
type = types.ints.unsigned;

View file

@ -161,33 +161,6 @@ in
'';
};
ensurePermissions = mkOption {
type = types.attrsOf types.str;
default = {};
visible = false; # This option has been deprecated.
description = lib.mdDoc ''
This option is DEPRECATED and should not be used in nixpkgs anymore,
use `ensureDBOwnership` instead. It can also break with newer
versions of PostgreSQL ( 15).
Permissions to ensure for the user, specified as an attribute set.
The attribute names specify the database and tables to grant the permissions for.
The attribute values specify the permissions to grant. You may specify one or
multiple comma-separated SQL privileges here.
For more information on how to specify the target
and on which privileges exist, see the
[GRANT syntax](https://www.postgresql.org/docs/current/sql-grant.html).
The attributes are used as `GRANT ''${attrValue} ON ''${attrName}`.
'';
example = literalExpression ''
{
"DATABASE \"nextcloud\"" = "ALL PRIVILEGES";
"ALL TABLES IN SCHEMA public" = "ALL PRIVILEGES";
}
'';
};
ensureDBOwnership = mkOption {
type = types.bool;
default = false;
@ -460,16 +433,6 @@ in
Offender: ${name} has not been found among databases.
'';
}) cfg.ensureUsers;
# `ensurePermissions` is now deprecated, let's avoid it.
warnings = lib.optional (any ({ ensurePermissions, ... }: ensurePermissions != {}) cfg.ensureUsers) "
`services.postgresql.ensureUsers.*.ensurePermissions` is used in your expressions,
this option is known to be broken with newer PostgreSQL versions,
consider migrating to `services.postgresql.ensureUsers.*.ensureDBOwnership` or
consult the release notes or manual for more migration guidelines.
This option will be removed in NixOS 24.05 unless it sees significant
maintenance improvements.
";
services.postgresql.settings =
{
@ -583,11 +546,6 @@ in
concatMapStrings
(user:
let
userPermissions = concatStringsSep "\n"
(mapAttrsToList
(database: permission: ''$PSQL -tAc 'GRANT ${permission} ON ${database} TO "${user.name}"' '')
user.ensurePermissions
);
dbOwnershipStmt = optionalString
user.ensureDBOwnership
''$PSQL -tAc 'ALTER DATABASE "${user.name}" OWNER TO "${user.name}";' '';
@ -599,7 +557,6 @@ in
userClauses = ''$PSQL -tAc 'ALTER ROLE "${user.name}" ${concatStringsSep " " clauseSqlStatements}' '';
in ''
$PSQL -tAc "SELECT 1 FROM pg_roles WHERE rolname='${user.name}'" | grep -q 1 || $PSQL -tAc 'CREATE USER "${user.name}"'
${userPermissions}
${userClauses}
${dbOwnershipStmt}

View file

@ -5,8 +5,7 @@
utils,
...
}: let
xcfg = config.services.xserver;
cfg = xcfg.desktopManager.plasma6;
cfg = config.services.desktopManager.plasma6;
inherit (pkgs) kdePackages;
inherit (lib) literalExpression mkDefault mkIf mkOption mkPackageOptionMD types;
@ -17,7 +16,7 @@
'';
in {
options = {
services.xserver.desktopManager.plasma6 = {
services.desktopManager.plasma6 = {
enable = mkOption {
type = types.bool;
default = false;
@ -44,6 +43,12 @@ in {
};
};
imports = [
(lib.mkRenamedOptionModule [ "services" "xserver" "desktopManager" "plasma6" "enable" ] [ "services" "desktopManager" "plasma6" "enable" ])
(lib.mkRenamedOptionModule [ "services" "xserver" "desktopManager" "plasma6" "enableQt5Integration" ] [ "services" "desktopManager" "plasma6" "enableQt5Integration" ])
(lib.mkRenamedOptionModule [ "services" "xserver" "desktopManager" "plasma6" "notoPackage" ] [ "services" "desktopManager" "plasma6" "notoPackage" ])
];
config = mkIf cfg.enable {
assertions = [
{
@ -161,7 +166,7 @@ in {
in
requiredPackages
++ utils.removePackagesByName optionalPackages config.environment.plasma6.excludePackages
++ lib.optionals config.services.xserver.desktopManager.plasma6.enableQt5Integration [
++ lib.optionals config.services.desktopManager.plasma6.enableQt5Integration [
breeze.qt5
plasma-integration.qt5
pkgs.plasma5Packages.kwayland-integration
@ -175,7 +180,7 @@ in {
++ lib.optional config.powerManagement.enable powerdevil
++ lib.optional config.services.colord.enable colord-kde
++ lib.optional config.services.hardware.bolt.enable plasma-thunderbolt
++ lib.optionals config.services.samba.enable [kdenetwork-filesharing pkgs.samba]
++ lib.optional config.services.samba.enable kdenetwork-filesharing
++ lib.optional config.services.xserver.wacom.enable wacomtablet
++ lib.optional config.services.flatpak.enable flatpak-kcm;
@ -185,7 +190,7 @@ in {
"/libexec" # for drkonqi
];
environment.etc."X11/xkb".source = xcfg.xkb.dir;
environment.etc."X11/xkb".source = config.services.xserver.xkb.dir;
# Add ~/.config/kdedefaults to XDG_CONFIG_DIRS for shells, since Plasma sets that.
# FIXME: maybe we should append to XDG_CONFIG_DIRS in /etc/set-environment instead?
@ -210,6 +215,7 @@ in {
serif = ["Noto Serif"];
};
programs.gnupg.agent.pinentryPackage = pkgs.pinentry-qt;
programs.ssh.askPassword = mkDefault "${kdePackages.ksshaskpass.out}/bin/ksshaskpass";
# Enable helpful DBus services.

View file

@ -56,6 +56,16 @@ in {
description = lib.mdDoc "Set the host to bind on.";
default = "127.0.0.1";
};
extraOptions = mkOption {
type = types.listOf types.str;
default = [];
example = [ "--no-security-headers" ];
description = lib.mdDoc ''
Additional command-line arguments to pass to
{command}`hoogle server`
'';
};
};
config = mkIf cfg.enable {
@ -66,7 +76,10 @@ in {
serviceConfig = {
Restart = "always";
ExecStart = ''${hoogleEnv}/bin/hoogle server --local --port ${toString cfg.port} --home ${cfg.home} --host ${cfg.host}'';
ExecStart = ''
${hoogleEnv}/bin/hoogle server --local --port ${toString cfg.port} --home ${cfg.home} --host ${cfg.host} \
${concatStringsSep " " cfg.extraOptions}
'';
DynamicUser = true;

View file

@ -90,7 +90,9 @@ in
users.groups.nixseparatedebuginfod = { };
nix.settings.extra-allowed-users = [ "nixseparatedebuginfod" ];
nix.settings = lib.optionalAttrs (lib.versionAtLeast config.nix.package.version "2.4") {
extra-allowed-users = [ "nixseparatedebuginfod" ];
};
environment.variables.DEBUGINFOD_URLS = "http://${url}";

View file

@ -14,11 +14,11 @@ let
customEtc = {
"fwupd/fwupd.conf" = {
source = format.generate "fwupd.conf" {
source = format.generate "fwupd.conf" ({
fwupd = cfg.daemonSettings;
} // lib.optionalAttrs (lib.length (lib.attrNames cfg.uefiCapsuleSettings) != 0) {
uefi_capsule = cfg.uefiCapsuleSettings;
};
});
# fwupd tries to chmod the file if it doesn't have the right permissions
mode = "0640";
};

View file

@ -1,37 +1,58 @@
{ config, lib, pkgs }: let
{
addDriverRunpath,
glibc,
jq,
lib,
nvidia-container-toolkit,
nvidia-driver,
runtimeShell,
writeScriptBin,
}:
let
mountOptions = { options = ["ro" "nosuid" "nodev" "bind"]; };
mounts = [
{ hostPath = "${lib.getBin config.hardware.nvidia.package}/bin/nvidia-cuda-mps-control";
# FIXME: Making /usr mounts optional
{ hostPath = lib.getExe' nvidia-driver "nvidia-cuda-mps-control";
containerPath = "/usr/bin/nvidia-cuda-mps-control"; }
{ hostPath = "${lib.getBin config.hardware.nvidia.package}/bin/nvidia-cuda-mps-server";
{ hostPath = lib.getExe' nvidia-driver "nvidia-cuda-mps-server";
containerPath = "/usr/bin/nvidia-cuda-mps-server"; }
{ hostPath = "${lib.getBin config.hardware.nvidia.package}/bin/nvidia-debugdump";
{ hostPath = lib.getExe' nvidia-driver "nvidia-debugdump";
containerPath = "/usr/bin/nvidia-debugdump"; }
{ hostPath = "${lib.getBin config.hardware.nvidia.package}/bin/nvidia-powerd";
{ hostPath = lib.getExe' nvidia-driver "nvidia-powerd";
containerPath = "/usr/bin/nvidia-powerd"; }
{ hostPath = "${lib.getBin config.hardware.nvidia.package}/bin/nvidia-smi";
{ hostPath = lib.getExe' nvidia-driver "nvidia-smi";
containerPath = "/usr/bin/nvidia-smi"; }
{ hostPath = "${pkgs.nvidia-container-toolkit}/bin/nvidia-ctk";
{ hostPath = lib.getExe' nvidia-container-toolkit "nvidia-ctk";
containerPath = "/usr/bin/nvidia-ctk"; }
{ hostPath = "${pkgs.glibc}/lib";
containerPath = "${pkgs.glibc}/lib"; }
{ hostPath = "${pkgs.glibc}/lib64";
containerPath = "${pkgs.glibc}/lib64"; }
{ hostPath = "${lib.getLib glibc}/lib";
containerPath = "${lib.getLib glibc}/lib"; }
# FIXME: use closureinfo
{
hostPath = addDriverRunpath.driverLink;
containerPath = addDriverRunpath.driverLink;
}
{ hostPath = "${lib.getLib glibc}/lib";
containerPath = "${lib.getLib glibc}/lib"; }
{ hostPath = "${lib.getLib glibc}/lib64";
containerPath = "${lib.getLib glibc}/lib64"; }
];
jqAddMountExpression = ".containerEdits.mounts[.containerEdits.mounts | length] |= . +";
mountsToJq = lib.concatMap
(mount:
["${pkgs.jq}/bin/jq '${jqAddMountExpression} ${builtins.toJSON (mount // mountOptions)}'"])
["${lib.getExe jq} '${jqAddMountExpression} ${builtins.toJSON (mount // mountOptions)}'"])
mounts;
in ''
#! ${pkgs.runtimeShell}
in
writeScriptBin "nvidia-cdi-generator"
''
#! ${runtimeShell}
function cdiGenerate {
${pkgs.nvidia-container-toolkit}/bin/nvidia-ctk cdi generate \
${lib.getExe' nvidia-container-toolkit "nvidia-ctk"} cdi generate \
--format json \
--ldconfig-path ${pkgs.glibc.bin}/bin/ldconfig \
--library-search-path ${config.hardware.nvidia.package}/lib \
--nvidia-ctk-path ${pkgs.nvidia-container-toolkit}/bin/nvidia-ctk
--ldconfig-path ${lib.getExe' glibc "ldconfig"} \
--library-search-path ${lib.getLib nvidia-driver}/lib \
--nvidia-ctk-path ${lib.getExe' nvidia-container-toolkit "nvidia-ctk"}
}
cdiGenerate | \

View file

@ -26,9 +26,11 @@
serviceConfig = {
RuntimeDirectory = "cdi";
RemainAfterExit = true;
ExecStart = let
script = (pkgs.writeScriptBin "nvidia-cdi-generator"
(import ./cdi-generate.nix { inherit config lib pkgs; })); in (lib.getExe script);
ExecStart =
let
script = pkgs.callPackage ./cdi-generate.nix { nvidia-driver = config.hardware.nvidia.package; };
in
lib.getExe script;
Type = "oneshot";
};
};

View file

@ -187,7 +187,11 @@ in {
# Indeed, it will try to create all the folders and realize one of them already exist.
# Therefore, we have to create it ourselves.
''${pkgs.coreutils}/bin/mkdir -p "''${STATE_DIRECTORY}/listmonk/uploads"''
"${cfg.package}/bin/listmonk --config ${cfgFile} --idempotent --install --upgrade --yes"
# setup database if not already done
"${cfg.package}/bin/listmonk --config ${cfgFile} --idempotent --install --yes"
# apply db migrations (setup and migrations can not be done in one step
# with "--install --upgrade" listmonk ignores the upgrade)
"${cfg.package}/bin/listmonk --config ${cfgFile} --upgrade --yes"
"${updateDatabaseConfigScript}/bin/update-database-config.sh"
];
ExecStart = "${cfg.package}/bin/listmonk --config ${cfgFile}";

View file

@ -37,7 +37,7 @@ in
type = lib.types.str;
default = "127.0.0.1:8009";
example = "[::]:8008";
description = lib.mdDoc "The interface and port to listen on.";
description = lib.mdDoc "The interface and port or path (for unix socket) to listen on.";
};
SYNCV3_LOG_LEVEL = lib.mkOption {
@ -98,6 +98,7 @@ in
ExecStart = lib.getExe cfg.package;
StateDirectory = "matrix-sliding-sync";
WorkingDirectory = "%S/matrix-sliding-sync";
RuntimeDirectory = "matrix-sliding-sync";
Restart = "on-failure";
RestartSec = "1s";
};

View file

@ -5,9 +5,6 @@ with lib;
let
cfg = config.services.etebase-server;
pythonEnv = pkgs.python3.withPackages (ps: with ps;
[ etebase-server daphne ]);
iniFmt = pkgs.formats.ini {};
configIni = iniFmt.generate "etebase-server.ini" cfg.settings;
@ -46,6 +43,13 @@ in
'';
};
package = mkOption {
type = types.package;
default = pkgs.python3.pkgs.etebase-server;
defaultText = literalExpression "pkgs.python3.pkgs.etebase-server";
description = lib.mdDoc "etebase-server package to use.";
};
dataDir = mkOption {
type = types.str;
default = "/var/lib/etebase-server";
@ -164,7 +168,7 @@ in
(runCommand "etebase-server" {
nativeBuildInputs = [ makeWrapper ];
} ''
makeWrapper ${pythonEnv}/bin/etebase-server \
makeWrapper ${cfg.package}/bin/etebase-server \
$out/bin/etebase-server \
--chdir ${escapeShellArg cfg.dataDir} \
--prefix ETEBASE_EASY_CONFIG_PATH : "${configIni}"
@ -178,8 +182,8 @@ in
systemd.services.etebase-server = {
description = "An Etebase (EteSync 2.0) server";
after = [ "network.target" "systemd-tmpfiles-setup.service" ];
path = [ cfg.package ];
wantedBy = [ "multi-user.target" ];
path = [ pythonEnv ];
serviceConfig = {
User = cfg.user;
Restart = "always";
@ -187,24 +191,26 @@ in
};
environment = {
ETEBASE_EASY_CONFIG_PATH = configIni;
PYTHONPATH = cfg.package.pythonPath;
};
preStart = ''
# Auto-migrate on first run or if the package has changed
versionFile="${cfg.dataDir}/src-version"
if [[ $(cat "$versionFile" 2>/dev/null) != ${pkgs.etebase-server} ]]; then
if [[ $(cat "$versionFile" 2>/dev/null) != ${cfg.package} ]]; then
etebase-server migrate --no-input
etebase-server collectstatic --no-input --clear
echo ${pkgs.etebase-server} > "$versionFile"
echo ${cfg.package} > "$versionFile"
fi
'';
script =
let
python = cfg.package.python;
networking = if cfg.unixSocket != null
then "-u ${cfg.unixSocket}"
else "-b 0.0.0.0 -p ${toString cfg.port}";
then "--uds ${cfg.unixSocket}"
else "--host 0.0.0.0 --port ${toString cfg.port}";
in ''
cd "${pythonEnv}/lib/etebase-server";
daphne ${networking} \
${python.pkgs.uvicorn}/bin/uvicorn ${networking} \
--app-dir ${cfg.package}/${cfg.package.python.sitePackages} \
etebase_server.asgi:application
'';
};

View file

@ -6,6 +6,8 @@
let
cfg = config.services.homepage-dashboard;
# Define the settings format used for this program
settingsFormat = pkgs.formats.yaml { };
in
{
options = {
@ -25,31 +27,217 @@ in
default = 8082;
description = lib.mdDoc "Port for Homepage to bind to.";
};
environmentFile = lib.mkOption {
type = lib.types.str;
description = ''
The path to an environment file that contains environment variables to pass
to the homepage-dashboard service, for the purpose of passing secrets to
the service.
See the upstream documentation:
https://gethomepage.dev/latest/installation/docker/#using-environment-secrets
'';
default = "";
};
customCSS = lib.mkOption {
type = lib.types.lines;
description = lib.mdDoc ''
Custom CSS for styling Homepage.
See https://gethomepage.dev/latest/configs/custom-css-js/.
'';
default = "";
};
customJS = lib.mkOption {
type = lib.types.lines;
description = lib.mdDoc ''
Custom Javascript for Homepage.
See https://gethomepage.dev/latest/configs/custom-css-js/.
'';
default = "";
};
bookmarks = lib.mkOption {
inherit (settingsFormat) type;
description = lib.mdDoc ''
Homepage bookmarks configuration.
See https://gethomepage.dev/latest/configs/bookmarks/.
'';
# Defaults: https://github.com/gethomepage/homepage/blob/main/src/skeleton/bookmarks.yaml
example = [
{
Developer = [
{ Github = [{ abbr = "GH"; href = "https://github.com/"; }]; }
];
}
{
Entertainment = [
{ YouTube = [{ abbr = "YT"; href = "https://youtube.com/"; }]; }
];
}
];
default = [ ];
};
services = lib.mkOption {
inherit (settingsFormat) type;
description = lib.mdDoc ''
Homepage services configuration.
See https://gethomepage.dev/latest/configs/services/.
'';
# Defaults: https://github.com/gethomepage/homepage/blob/main/src/skeleton/services.yaml
example = [
{
"My First Group" = [
{
"My First Service" = {
href = "http://localhost/";
description = "Homepage is awesome";
};
}
];
}
{
"My Second Group" = [
{
"My Second Service" = {
href = "http://localhost/";
description = "Homepage is the best";
};
}
];
}
];
default = [ ];
};
widgets = lib.mkOption {
inherit (settingsFormat) type;
description = lib.mdDoc ''
Homepage widgets configuration.
See https://gethomepage.dev/latest/configs/service-widgets/.
'';
# Defaults: https://github.com/gethomepage/homepage/blob/main/src/skeleton/widgets.yaml
example = [
{
resources = {
cpu = true;
memory = true;
disk = "/";
};
}
{
search = {
provider = "duckduckgo";
target = "_blank";
};
}
];
default = [ ];
};
kubernetes = lib.mkOption {
inherit (settingsFormat) type;
description = lib.mdDoc ''
Homepage kubernetes configuration.
See https://gethomepage.dev/latest/configs/kubernetes/.
'';
default = { };
};
docker = lib.mkOption {
inherit (settingsFormat) type;
description = lib.mdDoc ''
Homepage docker configuration.
See https://gethomepage.dev/latest/configs/docker/.
'';
default = { };
};
settings = lib.mkOption {
inherit (settingsFormat) type;
description = lib.mdDoc ''
Homepage settings.
See https://gethomepage.dev/latest/configs/settings/.
'';
# Defaults: https://github.com/gethomepage/homepage/blob/main/src/skeleton/settings.yaml
default = { };
};
};
};
config = lib.mkIf cfg.enable {
systemd.services.homepage-dashboard = {
description = "Homepage Dashboard";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
config =
let
# If homepage-dashboard is enabled, but none of the configuration values have been updated,
# then default to "unmanaged" configuration which is manually updated in
# var/lib/homepage-dashboard. This is to maintain backwards compatibility, and should be
# deprecated in a future release.
managedConfig = !(
cfg.bookmarks == [ ] &&
cfg.customCSS == "" &&
cfg.customJS == "" &&
cfg.docker == { } &&
cfg.kubernetes == { } &&
cfg.services == [ ] &&
cfg.settings == { } &&
cfg.widgets == [ ]
);
environment = {
HOMEPAGE_CONFIG_DIR = "/var/lib/homepage-dashboard";
PORT = "${toString cfg.listenPort}";
configDir = if managedConfig then "/etc/homepage-dashboard" else "/var/lib/homepage-dashboard";
msg = "using unmanaged configuration for homepage-dashboard is deprecated and will be removed"
+ " in 24.05. please see the NixOS documentation for `services.homepage-dashboard' and add"
+ " your bookmarks, services, widgets, and other configuration using the options provided.";
in
lib.mkIf cfg.enable {
warnings = lib.optional (!managedConfig) msg;
environment.etc = lib.mkIf managedConfig {
"homepage-dashboard/custom.css".text = cfg.customCSS;
"homepage-dashboard/custom.js".text = cfg.customJS;
"homepage-dashboard/bookmarks.yaml".source = settingsFormat.generate "bookmarks.yaml" cfg.bookmarks;
"homepage-dashboard/docker.yaml".source = settingsFormat.generate "docker.yaml" cfg.docker;
"homepage-dashboard/kubernetes.yaml".source = settingsFormat.generate "kubernetes.yaml" cfg.kubernetes;
"homepage-dashboard/services.yaml".source = settingsFormat.generate "services.yaml" cfg.services;
"homepage-dashboard/settings.yaml".source = settingsFormat.generate "settings.yaml" cfg.settings;
"homepage-dashboard/widgets.yaml".source = settingsFormat.generate "widgets.yaml" cfg.widgets;
};
serviceConfig = {
Type = "simple";
DynamicUser = true;
StateDirectory = "homepage-dashboard";
ExecStart = "${lib.getExe cfg.package}";
Restart = "on-failure";
systemd.services.homepage-dashboard = {
description = "Homepage Dashboard";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
environment = {
HOMEPAGE_CONFIG_DIR = configDir;
PORT = toString cfg.listenPort;
LOG_TARGETS = lib.mkIf managedConfig "stdout";
};
serviceConfig = {
Type = "simple";
DynamicUser = true;
EnvironmentFile = lib.mkIf (cfg.environmentFile != null) cfg.environmentFile;
StateDirectory = lib.mkIf (!managedConfig) "homepage-dashboard";
ExecStart = lib.getExe cfg.package;
Restart = "on-failure";
};
};
networking.firewall = lib.mkIf cfg.openFirewall {
allowedTCPPorts = [ cfg.listenPort ];
};
};
networking.firewall = lib.mkIf cfg.openFirewall {
allowedTCPPorts = [ cfg.listenPort ];
};
};
}

View file

@ -342,6 +342,7 @@ in
User = cfg.user;
Restart = "on-failure";
LimitNOFILE = 65536;
# gunicorn needs setuid, liblapack needs mbind
SystemCallFilter = defaultServiceConfig.SystemCallFilter ++ [ "@setuid mbind" ];
# Needs to serve web page

View file

@ -0,0 +1,203 @@
{ config, lib, pkgs, ... }:
let
inherit (lib) types;
cfg = config.services.tabby;
format = pkgs.formats.toml { };
tabbyPackage = cfg.package.override {
inherit (cfg) acceleration;
};
in
{
options = {
services.tabby = {
enable = lib.mkEnableOption (
lib.mdDoc "Self-hosted AI coding assistant using large language models"
);
package = lib.mkPackageOption pkgs "tabby" { };
port = lib.mkOption {
type = types.port;
default = 11029;
description = lib.mdDoc ''
Specifies the bind port on which the tabby server HTTP interface listens.
'';
};
model = lib.mkOption {
type = types.str;
default = "TabbyML/StarCoder-1B";
description = lib.mdDoc ''
Specify the model that tabby will use to generate completions.
This model will be downloaded automatically if it is not already present.
If you want to utilize an existing model that you've already
downloaded you'll need to move it into tabby's state directory which
lives in `/var/lib/tabby`. Because the tabby.service is configured to
use a DyanmicUser the service will need to have been started at least
once before you can move the locally existing model into
`/var/lib/tabby`. You can set the model to 'none' and tabby will
startup and fail to download a model, but will have created the
`/var/lib/tabby` directory. You can then copy over the model manually
into `/var/lib/tabby`, update the model option to the name you just
downloaded and copied over then `nixos-rebuild switch` to start using
it.
$ tabby download --model TabbyML/DeepseekCoder-6.7B
$ find ~/.tabby/ | tail -n1
/home/ghthor/.tabby/models/TabbyML/DeepseekCoder-6.7B/ggml/q8_0.v2.gguf
$ sudo rsync -r ~/.tabby/models/ /var/lib/tabby/models/
$ sudo chown -R tabby:tabby /var/lib/tabby/models/
See for Model Options:
> https://github.com/TabbyML/registry-tabby
'';
};
acceleration = lib.mkOption {
type = types.nullOr (types.enum [ "cpu" "rocm" "cuda" "metal" ]);
default = null;
example = "rocm";
description = lib.mdDoc ''
Specifies the device to use for hardware acceleration.
- `cpu`: no acceleration just use the CPU
- `rocm`: supported by modern AMD GPUs
- `cuda`: supported by modern NVIDIA GPUs
- `metal`: supported on darwin aarch64 machines
Tabby will try and determine what type of acceleration that is
already enabled in your configuration when `acceleration = null`.
- nixpkgs.config.cudaSupport
- nixpkgs.config.rocmSupport
- if stdenv.isDarwin && stdenv.isAarch64
IFF multiple acceleration methods are found to be enabled or if you
haven't set either `cudaSupport or rocmSupport` you will have to
specify the device type manually here otherwise it will default to
the first from the list above or to cpu.
'';
};
settings = lib.mkOption {
inherit (format) type;
default = { };
description = lib.mdDoc ''
Tabby scheduler configuration
See for more details:
> https://tabby.tabbyml.com/docs/configuration/#repository-context-for-code-completion
'';
example = lib.literalExpression ''
settings = {
repositories = [
{ name = "tabby"; git_url = "https://github.com/TabbyML/tabby.git"; }
{ name = "CTranslate2"; git_url = "git@github.com:OpenNMT/CTranslate2.git"; }
# local directory is also supported, but limited by systemd DynamicUser=1
# adding local repositories will need to be done manually
{ name = "repository_a"; git_url = "file:///var/lib/tabby/repository_a"; }
];
};
'';
};
usageCollection = lib.mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
Enable sending anonymous usage data.
See for more details:
> https://tabby.tabbyml.com/docs/configuration#usage-collection
'';
};
indexInterval = lib.mkOption {
type = types.str;
default = "5hours";
example = "5hours";
description = lib.mdDoc ''
Run tabby scheduler to generate the index database at this interval.
Updates by default every 5 hours. This value applies to
`OnUnitInactiveSec`
The format is described in
{manpage}`systemd.time(7)`.
To disable running `tabby scheduler --now` updates, set to `"never"`
'';
};
};
};
# TODO(ghthor): firewall config
config = lib.mkIf cfg.enable {
environment = {
etc."tabby/config.toml".source = format.generate "config.toml" cfg.settings;
systemPackages = [ tabbyPackage ];
};
systemd = let
serviceUser = {
WorkingDirectory = "/var/lib/tabby";
StateDirectory = [ "tabby" ];
ConfigurationDirectory = [ "tabby" ];
DynamicUser = true;
User = "tabby";
Group = "tabby";
};
serviceEnv = lib.mkMerge [
{
TABBY_ROOT = "%S/tabby";
}
(lib.mkIf (!cfg.usageCollection) {
TABBY_DISABLE_USAGE_COLLECTION = "1";
})
];
in {
services.tabby = {
wantedBy = [ "multi-user.target" ];
description = "Self-hosted AI coding assistant using large language models";
after = [ "network.target" ];
environment = serviceEnv;
serviceConfig = lib.mkMerge [
serviceUser
{
ExecStart =
"${lib.getExe tabbyPackage} serve --model ${cfg.model} --port ${toString cfg.port} --device ${tabbyPackage.featureDevice}";
}
];
};
services.tabby-scheduler = lib.mkIf (cfg.indexInterval != "never") {
wantedBy = [ "multi-user.target" ];
description = "Tabby repository indexing service";
after = [ "network.target" ];
environment = serviceEnv;
preStart = "cp -f /etc/tabby/config.toml \${TABBY_ROOT}/config.toml";
serviceConfig = lib.mkMerge [
serviceUser
{
# Type = "oneshot";
ExecStart = "${lib.getExe tabbyPackage} scheduler --now";
}
];
};
timers.tabby-scheduler = lib.mkIf (cfg.indexInterval != "never") {
description = "Update timer for tabby-scheduler";
partOf = [ "tabby-scheduler.service" ];
wantedBy = [ "timers.target" ];
timerConfig.OnUnitInactiveSec = cfg.indexInterval;
};
};
};
meta.maintainers = with lib.maintainers; [ ghthor ];
}

View file

@ -81,7 +81,7 @@ in {
include = mkDefault "/etc/mackerel-agent/conf.d/*.conf";
};
# upstream service file in https://git.io/JUt4Q
# upstream service file in https://github.com/mackerelio/mackerel-agent/blob/master/packaging/rpm/src/mackerel-agent.service
systemd.services.mackerel-agent = {
description = "mackerel.io agent";
wants = [ "network-online.target" ];

View file

@ -93,12 +93,14 @@ in
};
serviceOpts = {
script = ''
export RESTIC_PASSWORD_FILE=$CREDENTIALS_DIRECTORY/RESTIC_PASSWORD_FILE
${pkgs.prometheus-restic-exporter}/bin/restic-exporter.py \
${concatStringsSep " \\\n " cfg.extraFlags}
'';
serviceConfig = {
ExecStart = ''
${pkgs.prometheus-restic-exporter}/bin/restic-exporter.py \
${concatStringsSep " \\\n " cfg.extraFlags}
'';
EnvironmentFile = mkIf (cfg.environmentFile != null) cfg.environmentFile;
LoadCredential = [ "RESTIC_PASSWORD_FILE:${cfg.passwordFile}" ];
};
environment =
let
@ -108,8 +110,7 @@ in
toRcloneVal = v: if lib.isBool v then lib.boolToString v else v;
in
{
RESTIC_REPO_URL = cfg.repository;
RESTIC_REPO_PASSWORD_FILE = cfg.passwordFile;
RESTIC_REPOSITORY = cfg.repository;
LISTEN_ADDRESS = cfg.listenAddress;
LISTEN_PORT = toString cfg.port;
REFRESH_INTERVAL = toString cfg.refreshInterval;

View file

@ -1,5 +1,11 @@
{ config, lib, pkgs, ... }:
let
inherit (lib) maintainers;
inherit (lib.meta) getExe;
inherit (lib.modules) mkIf;
inherit (lib.options) literalExpression mkEnableOption mkOption mkPackageOption;
inherit (lib.types) bool enum nullOr port str submodule;
cfg = config.services.scrutiny;
# Define the settings format used for this program
settingsFormat = pkgs.formats.yaml { };
@ -7,20 +13,16 @@ in
{
options = {
services.scrutiny = {
enable = lib.mkEnableOption "Enables the scrutiny web application.";
enable = mkEnableOption "Scrutiny, a web application for drive monitoring";
package = lib.mkPackageOptionMD pkgs "scrutiny" { };
package = mkPackageOption pkgs "scrutiny" { };
openFirewall = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Open the default ports in the firewall for Scrutiny.";
};
openFirewall = mkEnableOption "opening the default ports in the firewall for Scrutiny";
influxdb.enable = lib.mkOption {
type = lib.types.bool;
influxdb.enable = mkOption {
type = bool;
default = true;
description = lib.mdDoc ''
description = ''
Enables InfluxDB on the host system using the `services.influxdb2` NixOS module
with default options.
@ -29,127 +31,124 @@ in
'';
};
settings = lib.mkOption {
description = lib.mdDoc ''
settings = mkOption {
description = ''
Scrutiny settings to be rendered into the configuration file.
See https://github.com/AnalogJ/scrutiny/blob/master/example.scrutiny.yaml.
'';
default = { };
type = lib.types.submodule {
type = submodule {
freeformType = settingsFormat.type;
options.web.listen.port = lib.mkOption {
type = lib.types.port;
options.web.listen.port = mkOption {
type = port;
default = 8080;
description = lib.mdDoc "Port for web application to listen on.";
description = "Port for web application to listen on.";
};
options.web.listen.host = lib.mkOption {
type = lib.types.str;
options.web.listen.host = mkOption {
type = str;
default = "0.0.0.0";
description = lib.mdDoc "Interface address for web application to bind to.";
description = "Interface address for web application to bind to.";
};
options.web.listen.basepath = lib.mkOption {
type = lib.types.str;
options.web.listen.basepath = mkOption {
type = str;
default = "";
example = "/scrutiny";
description = lib.mdDoc ''
description = ''
If Scrutiny will be behind a path prefixed reverse proxy, you can override this
value to serve Scrutiny on a subpath.
'';
};
options.log.level = lib.mkOption {
type = lib.types.enum [ "INFO" "DEBUG" ];
options.log.level = mkOption {
type = enum [ "INFO" "DEBUG" ];
default = "INFO";
description = lib.mdDoc "Log level for Scrutiny.";
description = "Log level for Scrutiny.";
};
options.web.influxdb.scheme = lib.mkOption {
type = lib.types.str;
options.web.influxdb.scheme = mkOption {
type = str;
default = "http";
description = lib.mdDoc "URL scheme to use when connecting to InfluxDB.";
description = "URL scheme to use when connecting to InfluxDB.";
};
options.web.influxdb.host = lib.mkOption {
type = lib.types.str;
options.web.influxdb.host = mkOption {
type = str;
default = "0.0.0.0";
description = lib.mdDoc "IP or hostname of the InfluxDB instance.";
description = "IP or hostname of the InfluxDB instance.";
};
options.web.influxdb.port = lib.mkOption {
type = lib.types.port;
options.web.influxdb.port = mkOption {
type = port;
default = 8086;
description = lib.mdDoc "The port of the InfluxDB instance.";
description = "The port of the InfluxDB instance.";
};
options.web.influxdb.tls.insecure_skip_verify = lib.mkOption {
type = lib.types.bool;
default = false;
description = lib.mdDoc "Skip TLS verification when connecting to InfluxDB.";
};
options.web.influxdb.tls.insecure_skip_verify = mkEnableOption "skipping TLS verification when connecting to InfluxDB";
options.web.influxdb.token = lib.mkOption {
type = lib.types.nullOr lib.types.str;
options.web.influxdb.token = mkOption {
type = nullOr str;
default = null;
description = lib.mdDoc "Authentication token for connecting to InfluxDB.";
description = "Authentication token for connecting to InfluxDB.";
};
options.web.influxdb.org = lib.mkOption {
type = lib.types.nullOr lib.types.str;
options.web.influxdb.org = mkOption {
type = nullOr str;
default = null;
description = lib.mdDoc "InfluxDB organisation under which to store data.";
description = "InfluxDB organisation under which to store data.";
};
options.web.influxdb.bucket = lib.mkOption {
type = lib.types.nullOr lib.types.str;
options.web.influxdb.bucket = mkOption {
type = nullOr str;
default = null;
description = lib.mdDoc "InfluxDB bucket in which to store data.";
description = "InfluxDB bucket in which to store data.";
};
};
};
collector = {
enable = lib.mkEnableOption "Enables the scrutiny metrics collector.";
enable = mkEnableOption "the Scrutiny metrics collector";
package = lib.mkPackageOptionMD pkgs "scrutiny-collector" { };
package = mkPackageOption pkgs "scrutiny-collector" { };
schedule = lib.mkOption {
type = lib.types.str;
schedule = mkOption {
type = str;
default = "*:0/15";
description = lib.mdDoc ''
description = ''
How often to run the collector in systemd calendar format.
'';
};
settings = lib.mkOption {
description = lib.mdDoc ''
settings = mkOption {
description = ''
Collector settings to be rendered into the collector configuration file.
See https://github.com/AnalogJ/scrutiny/blob/master/example.collector.yaml.
'';
default = { };
type = lib.types.submodule {
type = submodule {
freeformType = settingsFormat.type;
options.host.id = lib.mkOption {
type = lib.types.nullOr lib.types.str;
options.host.id = mkOption {
type = nullOr str;
default = null;
description = lib.mdDoc "Host ID for identifying/labelling groups of disks";
description = "Host ID for identifying/labelling groups of disks";
};
options.api.endpoint = lib.mkOption {
type = lib.types.str;
default = "http://localhost:8080";
description = lib.mdDoc "Scrutiny app API endpoint for sending metrics to.";
options.api.endpoint = mkOption {
type = str;
default = "http://localhost:${toString cfg.settings.web.listen.port}";
defaultText = literalExpression ''"http://localhost:''${config.services.scrutiny.settings.web.listen.port}"'';
description = "Scrutiny app API endpoint for sending metrics to.";
};
options.log.level = lib.mkOption {
type = lib.types.enum [ "INFO" "DEBUG" ];
options.log.level = mkOption {
type = enum [ "INFO" "DEBUG" ];
default = "INFO";
description = lib.mdDoc "Log level for Scrutiny collector.";
description = "Log level for Scrutiny collector.";
};
};
};
@ -157,14 +156,14 @@ in
};
};
config = lib.mkIf (cfg.enable || cfg.collector.enable) {
config = mkIf (cfg.enable || cfg.collector.enable) {
services.influxdb2.enable = cfg.influxdb.enable;
networking.firewall = lib.mkIf cfg.openFirewall {
networking.firewall = mkIf cfg.openFirewall {
allowedTCPPorts = [ cfg.settings.web.listen.port ];
};
services.smartd = lib.mkIf cfg.collector.enable {
services.smartd = mkIf cfg.collector.enable {
enable = true;
extraOptions = [
"-A /var/log/smartd/"
@ -174,7 +173,7 @@ in
systemd = {
services = {
scrutiny = lib.mkIf cfg.enable {
scrutiny = mkIf cfg.enable {
description = "Hard Drive S.M.A.R.T Monitoring, Historical Trends & Real World Failure Thresholds";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
@ -185,14 +184,14 @@ in
};
serviceConfig = {
DynamicUser = true;
ExecStart = "${lib.getExe cfg.package} start --config ${settingsFormat.generate "scrutiny.yaml" cfg.settings}";
ExecStart = "${getExe cfg.package} start --config ${settingsFormat.generate "scrutiny.yaml" cfg.settings}";
Restart = "always";
StateDirectory = "scrutiny";
StateDirectoryMode = "0750";
};
};
scrutiny-collector = lib.mkIf cfg.collector.enable {
scrutiny-collector = mkIf cfg.collector.enable {
description = "Scrutiny Collector Service";
environment = {
COLLECTOR_VERSION = "1";
@ -200,12 +199,12 @@ in
};
serviceConfig = {
Type = "oneshot";
ExecStart = "${lib.getExe cfg.collector.package} run --config ${settingsFormat.generate "scrutiny-collector.yaml" cfg.collector.settings}";
ExecStart = "${getExe cfg.collector.package} run --config ${settingsFormat.generate "scrutiny-collector.yaml" cfg.collector.settings}";
};
};
};
timers = lib.mkIf cfg.collector.enable {
timers = mkIf cfg.collector.enable {
scrutiny-collector = {
timerConfig = {
OnCalendar = cfg.collector.schedule;
@ -217,5 +216,5 @@ in
};
};
meta.maintainers = [ lib.maintainers.jnsgruk ];
meta.maintainers = [ maintainers.jnsgruk ];
}

View file

@ -194,8 +194,8 @@ in
allowedIPs = mkOption {
type = types.listOf types.str;
default = [ ];
example = [ "192.168.25.52" "192.168.25.53" ];
description = lib.mdDoc "List of IPs to allow (default all allowed).";
example = [ "192.168.25.52" "192.168.25.53" "192.168.0.0/24" ];
description = lib.mdDoc "List of IPs or networks to allow (default all allowed).";
};
birdSocket = mkOption {

View file

@ -33,7 +33,7 @@ let
sendversion=${boolToString cfg.sendVersion}
${optionalString (cfg.registerName != "") "registerName=${cfg.registerName}"}
${optionalString (cfg.registerPassword == "") "registerPassword=${cfg.registerPassword}"}
${optionalString (cfg.registerPassword != "") "registerPassword=${cfg.registerPassword}"}
${optionalString (cfg.registerUrl != "") "registerUrl=${cfg.registerUrl}"}
${optionalString (cfg.registerHostname != "") "registerHostname=${cfg.registerHostname}"}

View file

@ -10,6 +10,15 @@ let
format = pkgs.formats.yaml {};
nameToId = netName: "nebula-${netName}";
resolveFinalPort = netCfg:
if netCfg.listen.port == null then
if (netCfg.isLighthouse || netCfg.isRelay) then
4242
else
0
else
netCfg.listen.port;
in
{
# Interface
@ -95,8 +104,15 @@ in
};
listen.port = mkOption {
type = types.port;
default = 4242;
type = types.nullOr types.port;
default = null;
defaultText = lib.literalExpression ''
if (config.services.nebula.networks.''${name}.isLighthouse ||
config.services.nebula.networks.''${name}.isRelay) then
4242
else
0;
'';
description = lib.mdDoc "Port number to listen on.";
};
@ -174,7 +190,7 @@ in
};
listen = {
host = netCfg.listen.host;
port = netCfg.listen.port;
port = resolveFinalPort netCfg;
};
tun = {
disabled = netCfg.tun.disable;
@ -185,7 +201,15 @@ in
outbound = netCfg.firewall.outbound;
};
} netCfg.settings;
configFile = format.generate "nebula-config-${netName}.yml" settings;
configFile = format.generate "nebula-config-${netName}.yml" (
warnIf
((settings.lighthouse.am_lighthouse || settings.relay.am_relay) && settings.listen.port == 0)
''
Nebula network '${netName}' is configured as a lighthouse or relay, and its port is ${builtins.toString settings.listen.port}.
You will likely experience connectivity issues: https://nebula.defined.net/docs/config/listen/#listenport
''
settings
);
in
{
# Create the systemd service for Nebula.
@ -229,7 +253,7 @@ in
# Open the chosen ports for UDP.
networking.firewall.allowedUDPPorts =
unique (mapAttrsToList (netName: netCfg: netCfg.listen.port) enabledNetworks);
unique (filter (port: port > 0) (mapAttrsToList (netName: netCfg: resolveFinalPort netCfg) enabledNetworks));
# Create the service users and groups.
users.users = mkMerge (mapAttrsToList (netName: netCfg:

View file

@ -436,6 +436,7 @@ in
And if you edit a declarative profile NetworkManager will move it to the persistent storage and treat it like a ad-hoc one,
but there will be two profiles as soon as the systemd unit from this option runs again which can be confusing since NetworkManager tools will start displaying two profiles with the same name and probably a bit different settings depending on what you edited.
A profile won't be deleted even if it's removed from the config until the system reboots because that's when NetworkManager clears it's temp directory.
If `networking.resolvconf.enable` is true, attributes affecting the name resolution (such as `ignore-auto-dns`) may not end up changing `/etc/resolv.conf` as expected when other name services (for example `networking.dhcpcd`) are enabled. Run `resolvconf -l` in the terminal to see what each service produces.
'';
};
environmentFiles = mkOption {

View file

@ -76,12 +76,13 @@ in {
checkconf = mkOption {
type = types.bool;
default = !cfg.settings ? include;
defaultText = "!config.services.unbound.settings ? include";
default = !cfg.settings ? include && !cfg.settings ? remote-control;
defaultText = "!services.unbound.settings ? include && !services.unbound.settings ? remote-control";
description = lib.mdDoc ''
Wether to check the resulting config file with unbound checkconf for syntax errors.
If settings.include is used, then this options is disabled, as the import can likely not be resolved at build time.
If settings.include is used, this options is disabled, as the import can likely not be accessed at build time.
If settings.remote-control is used, this option is disabled, too as the control-key-file, server-cert-file and server-key-file cannot be accessed at build time.
'';
};

View file

@ -4,49 +4,33 @@ let
cfg = config.services.esdm;
in
{
imports = [
# removed option 'services.esdm.cuseRandomEnable'
(lib.mkRemovedOptionModule [ "services" "esdm" "cuseRandomEnable" ] ''
Use services.esdm.enableLinuxCompatServices instead.
'')
# removed option 'services.esdm.cuseUrandomEnable'
(lib.mkRemovedOptionModule [ "services" "esdm" "cuseUrandomEnable" ] ''
Use services.esdm.enableLinuxCompatServices instead.
'')
# removed option 'services.esdm.procEnable'
(lib.mkRemovedOptionModule [ "services" "esdm" "procEnable" ] ''
Use services.esdm.enableLinuxCompatServices instead.
'')
# removed option 'services.esdm.verbose'
(lib.mkRemovedOptionModule [ "services" "esdm" "verbose" ] ''
There is no replacement.
'')
];
options.services.esdm = {
enable = lib.mkEnableOption (lib.mdDoc "ESDM service configuration");
package = lib.mkPackageOption pkgs "esdm" { };
serverEnable = lib.mkOption {
enableLinuxCompatServices = lib.mkOption {
type = lib.types.bool;
default = true;
description = lib.mdDoc ''
Enable option for ESDM server service. If serverEnable == false, then the esdm-server
will not start. Also the subsequent services esdm-cuse-random, esdm-cuse-urandom
and esdm-proc will not start as these have the entry Want=esdm-server.service.
'';
};
cuseRandomEnable = lib.mkOption {
type = lib.types.bool;
default = true;
description = lib.mdDoc ''
Enable option for ESDM cuse-random service. Determines if the esdm-cuse-random.service
is started.
'';
};
cuseUrandomEnable = lib.mkOption {
type = lib.types.bool;
default = true;
description = lib.mdDoc ''
Enable option for ESDM cuse-urandom service. Determines if the esdm-cuse-urandom.service
is started.
'';
};
procEnable = lib.mkOption {
type = lib.types.bool;
default = true;
description = lib.mdDoc ''
Enable option for ESDM proc service. Determines if the esdm-proc.service
is started.
'';
};
verbose = lib.mkOption {
type = lib.types.bool;
default = false;
description = lib.mdDoc ''
Enable verbose ExecStart for ESDM. If verbose == true, then the corresponding "ExecStart"
values of the 4 aforementioned services are overwritten with the option
for the highest verbosity.
Enable /dev/random, /dev/urandom and /proc/sys/kernel/random/* userspace wrapper.
'';
};
};
@ -55,46 +39,13 @@ in
lib.mkMerge [
({
systemd.packages = [ cfg.package ];
systemd.services."esdm-server".wantedBy = [ "basic.target" ];
})
# It is necessary to set those options for these services to be started by systemd in NixOS
(lib.mkIf cfg.serverEnable {
systemd.services."esdm-server".wantedBy = [ "basic.target" ];
systemd.services."esdm-server".serviceConfig = lib.mkIf cfg.verbose {
ExecStart = [
" " # unset previous value defined in 'esdm-server.service'
"${cfg.package}/bin/esdm-server -f -vvvvvv"
];
};
})
(lib.mkIf cfg.cuseRandomEnable {
systemd.services."esdm-cuse-random".wantedBy = [ "basic.target" ];
systemd.services."esdm-cuse-random".serviceConfig = lib.mkIf cfg.verbose {
ExecStart = [
" " # unset previous value defined in 'esdm-cuse-random.service'
"${cfg.package}/bin/esdm-cuse-random -f -v 6"
];
};
})
(lib.mkIf cfg.cuseUrandomEnable {
systemd.services."esdm-cuse-urandom".wantedBy = [ "basic.target" ];
systemd.services."esdm-cuse-urandom".serviceConfig = lib.mkIf cfg.verbose {
ExecStart = [
" " # unset previous value defined in 'esdm-cuse-urandom.service'
"${config.services.esdm.package}/bin/esdm-cuse-urandom -f -v 6"
];
};
})
(lib.mkIf cfg.procEnable {
systemd.services."esdm-proc".wantedBy = [ "basic.target" ];
systemd.services."esdm-proc".serviceConfig = lib.mkIf cfg.verbose {
ExecStart = [
" " # unset previous value defined in 'esdm-proc.service'
"${cfg.package}/bin/esdm-proc --relabel -f -o allow_other /proc/sys/kernel/random -v 6"
];
};
(lib.mkIf cfg.enableLinuxCompatServices {
systemd.targets."esdm-linux-compat".wantedBy = [ "basic.target" ];
systemd.services."esdm-server-suspend".wantedBy = [ "sleep.target" "suspend.target" "hibernate.target" ];
systemd.services."esdm-server-resume".wantedBy = [ "sleep.target" "suspend.target" "hibernate.target" ];
})
]);

View file

@ -180,7 +180,6 @@ in {
users.groups.vaultwarden = { };
systemd.services.vaultwarden = {
aliases = [ "bitwarden_rs.service" ];
after = [ "network.target" ];
path = with pkgs; [ openssl ];
serviceConfig = {
@ -202,7 +201,6 @@ in {
};
systemd.services.backup-vaultwarden = mkIf (cfg.backupDir != null) {
aliases = [ "backup-bitwarden_rs.service" ];
description = "Backup vaultwarden";
environment = {
DATA_FOLDER = "/var/lib/bitwarden_rs";
@ -222,7 +220,6 @@ in {
};
systemd.timers.backup-vaultwarden = mkIf (cfg.backupDir != null) {
aliases = [ "backup-bitwarden_rs.timer" ];
description = "Backup vaultwarden on time";
timerConfig = {
OnCalendar = mkDefault "23:00";
@ -240,6 +237,9 @@ in {
};
};
# uses attributes of the linked package
meta.buildDocsInSandbox = false;
meta = {
# uses attributes of the linked package
buildDocsInSandbox = false;
maintainers = with lib.maintainers; [ dotlambda SuperSandro2000 ];
};
}

View file

@ -6,9 +6,6 @@ with lib;
let
cfg = config.services.yubikey-agent;
# reuse the pinentryFlavor option from the gnupg module
pinentryFlavor = config.programs.gnupg.agent.pinentryFlavor;
in
{
###### interface
@ -40,14 +37,9 @@ in
# This overrides the systemd user unit shipped with the
# yubikey-agent package
systemd.user.services.yubikey-agent = mkIf (pinentryFlavor != null) {
path = [ pkgs.pinentry.${pinentryFlavor} ];
wantedBy = [
(if pinentryFlavor == "tty" || pinentryFlavor == "curses" then
"default.target"
else
"graphical-session.target")
];
systemd.user.services.yubikey-agent = mkIf (config.programs.gnupg.agent.pinentryPackage != null) {
path = [ config.programs.gnupg.agent.pinentryPackage ];
wantedBy = [ "default.target" ];
};
# Yubikey-agent expects pcsd to be running in order to function.

View file

@ -16,10 +16,20 @@ in
{
options = {
services.miniflux = {
enable = mkEnableOption (lib.mdDoc "miniflux and creates a local postgres database for it");
enable = mkEnableOption (lib.mdDoc "miniflux");
package = mkPackageOption pkgs "miniflux" { };
createDatabaseLocally = lib.mkOption {
type = lib.types.bool;
default = true;
description = ''
Whether a PostgreSQL database should be automatically created and
configured on the local host. If set to `false`, you need provision a
database yourself and make sure to create the hstore extension in it.
'';
};
config = mkOption {
type = with types; attrsOf (oneOf [ str int ]);
example = literalExpression ''
@ -38,7 +48,7 @@ in
'';
};
adminCredentialsFile = mkOption {
adminCredentialsFile = mkOption {
type = types.path;
description = lib.mdDoc ''
File containing the ADMIN_USERNAME and
@ -51,14 +61,14 @@ in
};
config = mkIf cfg.enable {
services.miniflux.config = {
services.miniflux.config = {
LISTEN_ADDR = mkDefault defaultAddress;
DATABASE_URL = "user=miniflux host=/run/postgresql dbname=miniflux";
DATABASE_URL = lib.mkIf cfg.createDatabaseLocally "user=miniflux host=/run/postgresql dbname=miniflux";
RUN_MIGRATIONS = 1;
CREATE_ADMIN = 1;
};
services.postgresql = {
services.postgresql = lib.mkIf cfg.createDatabaseLocally {
enable = true;
ensureUsers = [ {
name = "miniflux";
@ -67,7 +77,7 @@ in
ensureDatabases = [ "miniflux" ];
};
systemd.services.miniflux-dbsetup = {
systemd.services.miniflux-dbsetup = lib.mkIf cfg.createDatabaseLocally {
description = "Miniflux database setup";
requires = [ "postgresql.service" ];
after = [ "network.target" "postgresql.service" ];
@ -81,8 +91,9 @@ in
systemd.services.miniflux = {
description = "Miniflux service";
wantedBy = [ "multi-user.target" ];
requires = [ "miniflux-dbsetup.service" ];
after = [ "network.target" "postgresql.service" "miniflux-dbsetup.service" ];
requires = lib.optional cfg.createDatabaseLocally "miniflux-dbsetup.service";
after = [ "network.target" ]
++ lib.optionals cfg.createDatabaseLocally [ "postgresql.service" "miniflux-dbsetup.service" ];
serviceConfig = {
ExecStart = "${cfg.package}/bin/miniflux";
@ -129,6 +140,7 @@ in
include "${pkgs.apparmorRulesFromClosure { name = "miniflux"; } cfg.package}"
r ${cfg.package}/bin/miniflux,
r @{sys}/kernel/mm/transparent_hugepage/hpage_pmd_size,
rw /run/miniflux/**,
}
'';
};

View file

@ -45,7 +45,7 @@ let
};
};
webroot = pkgs.runCommand
webroot = pkgs.runCommandLocal
"${cfg.package.name or "nextcloud"}-with-apps"
{ }
''

View file

@ -104,6 +104,7 @@ in
StateDirectory = "photoprism";
WorkingDirectory = "/var/lib/photoprism";
RuntimeDirectory = "photoprism";
ReadWritePaths = [ cfg.originalsPath cfg.importPath cfg.storagePath ];
LoadCredential = lib.optionalString (cfg.passwordFile != null)
"PHOTOPRISM_ADMIN_PASSWORD:${cfg.passwordFile}";

View file

@ -9,10 +9,13 @@ let
useMysql = cfg.database.type == "mysql";
usePostgresql = cfg.database.type == "postgres";
in {
imports = [
(mkRemovedOptionModule [ "services" "vikunja" "setupNginx" ] "services.vikunja no longer supports the automatic set up of a nginx virtual host. Set up your own webserver config with a proxy pass to the vikunja service.")
];
options.services.vikunja = with lib; {
enable = mkEnableOption (lib.mdDoc "vikunja service");
package-api = mkPackageOption pkgs "vikunja-api" { };
package-frontend = mkPackageOption pkgs "vikunja-frontend" { };
package = mkPackageOption pkgs "vikunja" { };
environmentFiles = mkOption {
type = types.listOf types.path;
default = [ ];
@ -21,25 +24,10 @@ in {
For example passwords should be set in one of these files.
'';
};
setupNginx = mkOption {
type = types.bool;
default = config.services.nginx.enable;
defaultText = literalExpression "config.services.nginx.enable";
description = lib.mdDoc ''
Whether to setup NGINX.
Further nginx configuration can be done by changing
{option}`services.nginx.virtualHosts.<frontendHostname>`.
This does not enable TLS or ACME by default. To enable this, set the
{option}`services.nginx.virtualHosts.<frontendHostname>.enableACME` to
`true` and if appropriate do the same for
{option}`services.nginx.virtualHosts.<frontendHostname>.forceSSL`.
'';
};
frontendScheme = mkOption {
type = types.enum [ "http" "https" ];
description = lib.mdDoc ''
Whether the site is available via http or https.
This does not configure https or ACME in nginx!
'';
};
frontendHostname = mkOption {
@ -104,42 +92,27 @@ in {
};
};
systemd.services.vikunja-api = {
description = "vikunja-api";
systemd.services.vikunja = {
description = "vikunja";
after = [ "network.target" ] ++ lib.optional usePostgresql "postgresql.service" ++ lib.optional useMysql "mysql.service";
wantedBy = [ "multi-user.target" ];
path = [ cfg.package-api ];
path = [ cfg.package ];
restartTriggers = [ configFile ];
serviceConfig = {
Type = "simple";
DynamicUser = true;
StateDirectory = "vikunja";
ExecStart = "${cfg.package-api}/bin/vikunja";
ExecStart = "${cfg.package}/bin/vikunja";
Restart = "always";
EnvironmentFile = cfg.environmentFiles;
};
};
services.nginx.virtualHosts."${cfg.frontendHostname}" = mkIf cfg.setupNginx {
locations = {
"/" = {
root = cfg.package-frontend;
tryFiles = "try_files $uri $uri/ /";
};
"~* ^/(api|dav|\\.well-known)/" = {
proxyPass = "http://localhost:${toString cfg.port}";
extraConfig = ''
client_max_body_size 20M;
'';
};
};
};
environment.etc."vikunja/config.yaml".source = configFile;
environment.systemPackages = [
cfg.package-api # for admin `vikunja` CLI
cfg.package # for admin `vikunja` CLI
];
};
}

Some files were not shown because too many files have changed in this diff Show more