Merge master into staging-next

Commit dc4a7c97b0 by github-actions[bot], 2024-01-18 18:00:55 +00:00, committed by GitHub
GPG key ID: B5690EEEBB952194 (no known key found for this signature in database)
102 changed files with 1880 additions and 613 deletions


@ -19,8 +19,8 @@ jobs:
# we don't limit this action to only NixOS repo since the checks are cheap and useful developer feedback
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: cachix/install-nix-action@7ac1ec25491415c381d9b62f0657c7a028df52a7 # v24
- uses: cachix/cachix-action@6a2e08b5ebf7a9f285ff57b1870a4262b06e0bee # v13
- uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
- uses: cachix/cachix-action@18cf96c7c98e048e10a83abd92116114cd8504be # v14
with:
# This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.
name: nixpkgs-ci


@ -90,7 +90,7 @@ jobs:
base=$(mktemp -d)
git worktree add "$base" "$(git rev-parse HEAD^1)"
echo "base=$base" >> "$GITHUB_ENV"
- uses: cachix/install-nix-action@7ac1ec25491415c381d9b62f0657c7a028df52a7 # v24
- uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
- name: Fetching the pinned tool
# Update the pinned version using pkgs/test/nixpkgs-check-by-name/scripts/update-pinned-tool.sh
run: |


@ -16,7 +16,7 @@ jobs:
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@7ac1ec25491415c381d9b62f0657c7a028df52a7 # v24
- uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true


@ -28,7 +28,7 @@ jobs:
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@7ac1ec25491415c381d9b62f0657c7a028df52a7 # v24
- uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
with:
# nixpkgs commit is pinned so that it doesn't break
# editorconfig-checker 2.4.0


@ -18,11 +18,11 @@ jobs:
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@7ac1ec25491415c381d9b62f0657c7a028df52a7 # v24
- uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true
- uses: cachix/cachix-action@6a2e08b5ebf7a9f285ff57b1870a4262b06e0bee # v13
- uses: cachix/cachix-action@18cf96c7c98e048e10a83abd92116114cd8504be # v14
with:
# This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.
name: nixpkgs-ci


@ -19,11 +19,11 @@ jobs:
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@7ac1ec25491415c381d9b62f0657c7a028df52a7 # v24
- uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true
- uses: cachix/cachix-action@6a2e08b5ebf7a9f285ff57b1870a4262b06e0bee # v13
- uses: cachix/cachix-action@18cf96c7c98e048e10a83abd92116114cd8504be # v14
with:
# This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.
name: nixpkgs-ci


@ -29,7 +29,7 @@ jobs:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
if: ${{ env.CHANGED_FILES && env.CHANGED_FILES != '' }}
- uses: cachix/install-nix-action@7ac1ec25491415c381d9b62f0657c7a028df52a7 # v24
- uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
with:
nix_path: nixpkgs=channel:nixpkgs-unstable
- name: Parse all changed or added nix files


@ -17,7 +17,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: cachix/install-nix-action@7ac1ec25491415c381d9b62f0657c7a028df52a7 # v24
- uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
with:
nix_path: nixpkgs=channel:nixpkgs-unstable
- name: setup


@ -1,33 +1,228 @@
# pkgs.dockerTools {#sec-pkgs-dockerTools}
`pkgs.dockerTools` is a set of functions for creating and manipulating Docker images according to the [Docker Image Specification v1.2.0](https://github.com/moby/moby/blob/master/image/spec/v1.2.md#docker-image-specification-v120). Docker itself is not used to perform any of the operations done by these functions.
`pkgs.dockerTools` is a set of functions for creating and manipulating Docker images according to the [Docker Image Specification v1.3.0](https://github.com/moby/moby/blob/46f7ab808b9504d735d600e259ca0723f76fb164/image/spec/spec.md#image-json-field-descriptions).
Docker itself is not used to perform any of the operations done by these functions.
## buildImage {#ssec-pkgs-dockerTools-buildImage}
This function is analogous to the `docker build` command, in that it can be used to build a Docker-compatible repository tarball containing a single image with one or multiple layers. As such, the result is suitable for being loaded in Docker with `docker load`.
This function builds a Docker-compatible repository tarball containing a single image.
As such, the result is suitable for being loaded in Docker with `docker load` (see [](#ex-dockerTools-buildImage) for how to do this).
The parameters of `buildImage` with relative example values are described below:
This function will create a single layer for all files (and dependencies) that are specified in its argument.
Only new dependencies that are not already in the existing layers will be copied.
If you prefer to create multiple layers for the files and dependencies you want to add to the image, see [](#ssec-pkgs-dockerTools-buildLayeredImage) or [](#ssec-pkgs-dockerTools-streamLayeredImage) instead.
[]{#ex-dockerTools-buildImage}
[]{#ex-dockerTools-buildImage-runAsRoot}
This function allows a script to be run during the layer generation process, allowing custom behaviour to affect the final results of the image (see the documentation of the `runAsRoot` and `extraCommands` attributes).
The resulting repository tarball will list a single image as specified by the `name` and `tag` attributes.
By default, that image will use a static creation date (see documentation for the `created` attribute).
This allows `buildImage` to produce reproducible images.
:::{.tip}
When running an image built with `buildImage`, you might encounter certain errors depending on what you included in the image, especially if you did not start with any base image.
If you encounter errors similar to `getProtocolByName: does not exist (no such protocol name: tcp)`, you may need to add the contents of `pkgs.iana-etc` in the `copyToRoot` attribute.
Similarly, if you encounter errors similar to `Error_Protocol ("certificate has unknown CA",True,UnknownCa)`, you may need to add the contents of `pkgs.cacert` in the `copyToRoot` attribute.
:::
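As a rough illustration of the tip above, the following sketch adds `iana-etc` and `cacert` next to a hypothetical service package `myService` (an assumed name), so that `/etc/protocols`, `/etc/services` and the CA bundle end up in the image:
```nix
{ dockerTools, buildEnv, iana-etc, cacert, myService }:
dockerTools.buildImage {
  name = "my-service";
  tag = "latest";
  copyToRoot = buildEnv {
    name = "image-root";
    # iana-etc provides /etc/protocols and /etc/services; cacert provides the CA bundle
    paths = [ myService iana-etc cacert ];
    pathsToLink = [ "/bin" "/etc" ];
  };
}
```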
### Inputs {#ssec-pkgs-dockerTools-buildImage-inputs}
`buildImage` expects an argument with the following attributes:
`name` (String)
: The name of the generated image.
`tag` (String or Null; _optional_)
: Tag of the generated image.
If `null`, the hash of the nix derivation will be used as the tag.
_Default value:_ `null`.
`fromImage` (Path or Null; _optional_)
: The repository tarball of an image to be used as the base for the generated image.
It must be a valid Docker image, such as one exported by `docker save`, or another image built with the `dockerTools` utility functions.
This can be seen as an equivalent of `FROM fromImage` in a `Dockerfile`.
A value of `null` can be seen as an equivalent of `FROM scratch`.
If specified, the layer created by `buildImage` will be appended to the layers defined in the base image, resulting in an image with at least two layers (one or more layers from the base image, and the layer created by `buildImage`).
Otherwise, the resulting image will contain the single layer created by `buildImage`.
_Default value:_ `null`.
`fromImageName` (String or Null; _optional_)
: Used to specify the image within the repository tarball in case it contains multiple images.
A value of `null` means that `buildImage` will use the first image available in the repository (see the sketch after this list of inputs for an example of selecting a specific image).
:::{.note}
This must be used with `fromImageTag`. Using only `fromImageName` without `fromImageTag` will make `buildImage` use the first image available in the repository.
:::
_Default value:_ `null`.
`fromImageTag` (String or Null; _optional_)
: Used to specify the image within the repository tarball in case it contains multiple images.
A value of `null` means that `buildImage` will use the first image available in the repository.
:::{.note}
This must be used with `fromImageName`. Using only `fromImageTag` without `fromImageName` will make `buildImage` use the first image available in the repository.
:::
_Default value:_ `null`.
`copyToRoot` (Path, List of Paths, or Null; _optional_)
: Files to add to the generated image.
Anything that coerces to a path (e.g. a derivation) can also be used.
This can be seen as an equivalent of `ADD contents/ /` in a `Dockerfile`.
_Default value:_ `null`.
`keepContentsDirlinks` (Boolean; _optional_)
: When adding files to the generated image (as specified by `copyToRoot`), this attribute controls whether to preserve symlinks to directories.
If `false`, the symlinks will be transformed into directories.
This behaves the same as `rsync -k` when `keepContentsDirlinks` is `false`, and the same as `rsync -K` when `keepContentsDirlinks` is `true`.
_Default value:_ `false`.
`runAsRoot` (String or Null; _optional_)
: A bash script that will run as root inside a VM that contains the existing layers of the base image and the new generated layer (including the files from `copyToRoot`).
The script will be run with a working directory of `/`.
This can be seen as an equivalent of `RUN ...` in a `Dockerfile`.
A value of `null` means that this step in the image generation process will be skipped.
See [](#ex-dockerTools-buildImage-runAsRoot) for how to work with this attribute.
:::{.caution}
Using this attribute requires the `kvm` device to be available, see [`system-features`](https://nixos.org/manual/nix/stable/command-ref/conf-file.html#conf-system-features).
If the `kvm` device isn't available, you should consider using [`buildLayeredImage`](#ssec-pkgs-dockerTools-buildLayeredImage) or [`streamLayeredImage`](#ssec-pkgs-dockerTools-streamLayeredImage) instead.
Those functions allow scripts to be run as root without access to the `kvm` device.
:::
:::{.note}
At the time the script in `runAsRoot` is run, the files specified directly in `copyToRoot` will be present in the VM, but their dependencies might not be there yet.
Copying their dependencies into the generated image is a step that happens after `runAsRoot` finishes running.
:::
_Default value:_ `null`.
`extraCommands` (String; _optional_)
: A bash script that will run before the layer created by `buildImage` is finalised.
The script will be run on some (opaque) working directory which will become `/` once the layer is created.
This is similar to `runAsRoot`, but the script specified in `extraCommands` is **not** run as root, and does not involve creating a VM.
It is simply run as part of building the derivation that outputs the layer created by `buildImage`.
See [](#ex-dockerTools-buildImage-extraCommands) for how to work with this attribute, and subtle differences compared to `runAsRoot`.
_Default value:_ `""`.
`config` (Attribute Set; _optional_)
: Used to specify the configuration of the containers that will be started off the generated image.
Must be an attribute set, with each attribute as listed in the [Docker Image Specification v1.3.0](https://github.com/moby/moby/blob/46f7ab808b9504d735d600e259ca0723f76fb164/image/spec/spec.md#image-json-field-descriptions).
_Default value:_ `null`.
`architecture` (String; _optional_)
: Used to specify the image architecture.
This is useful for multi-architecture builds that don't need cross compiling.
If specified, its value should follow the [OCI Image Configuration Specification](https://github.com/opencontainers/image-spec/blob/main/config.md#properties), which should still be compatible with Docker.
According to the linked specification, all possible values for `$GOARCH` in [the Go docs](https://go.dev/doc/install/source#environment) should be valid, but will commonly be one of `386`, `amd64`, `arm`, or `arm64`.
_Default value:_ the same value from `pkgs.go.GOARCH`.
`diskSize` (Number; _optional_)
: Controls the disk size (in megabytes) of the VM used to run the script specified in `runAsRoot`.
This attribute is ignored if `runAsRoot` is `null`.
_Default value:_ 1024.
`buildVMMemorySize` (Number; _optional_)
: Controls the amount of memory (in megabytes) provisioned for the VM used to run the script specified in `runAsRoot`.
This attribute is ignored if `runAsRoot` is `null`.
_Default value:_ 512.
`created` (String; _optional_)
: Specifies the time of creation of the generated image.
This should be either a date and time formatted according to [ISO-8601](https://en.wikipedia.org/wiki/ISO_8601) or `"now"`, in which case `buildImage` will use the current date.
See [](#ex-dockerTools-buildImage-creatednow) for how to use `"now"`.
:::{.caution}
Using `"now"` means that the generated image will not be reproducible anymore (because the date will always change whenever it's built).
:::
_Default value:_ `"1970-01-01T00:00:01Z"`.
`uid` (Number; _optional_)
: The uid of the user that will own the files packed in the new layer built by `buildImage`.
_Default value:_ 0.
`gid` (Number; _optional_)
: The gid of the group that will own the files packed in the new layer built by `buildImage`.
_Default value:_ 0.
`contents` **DEPRECATED**
: This attribute is deprecated, and users are encouraged to use `copyToRoot` instead.
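The base image attributes above (`fromImage`, `fromImageName` and `fromImageTag`) can be combined as in the following sketch; the tarball path and the image name and tag inside it are assumptions for illustration only:
```nix
{ dockerTools, buildEnv, redis }:
dockerTools.buildImage {
  name = "redis-on-base";
  tag = "latest";
  # ./base-image.tar.gz is an assumed repository tarball (e.g. produced by `docker save`)
  # containing several images; fromImageName and fromImageTag select one of them.
  fromImage = ./base-image.tar.gz;
  fromImageName = "my-base"; # hypothetical image name inside the tarball
  fromImageTag = "stable";   # hypothetical tag inside the tarball
  copyToRoot = buildEnv {
    name = "image-root";
    paths = [ redis ];
    pathsToLink = [ "/bin" ];
  };
}
```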
### Passthru outputs {#ssec-pkgs-dockerTools-buildImage-passthru-outputs}
`buildImage` defines a few [`passthru`](#var-stdenv-passthru) attributes:
`buildArgs` (Attribute Set)
: The argument passed to `buildImage` itself.
This allows you to inspect all attributes specified in the argument, as described above.
`layer` (Attribute Set)
: The derivation with the layer created by `buildImage`.
This allows easier inspection of the contents added by `buildImage` in the generated image.
`imageTag` (String)
: The tag of the generated image.
This is useful if no tag was specified in the attributes of the argument to `buildImage`, because an automatic tag will be used instead.
`imageTag` allows you to retrieve the value of the tag used in this case.
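For instance, here is a minimal sketch of how `imageTag` might be used when no explicit tag was set; the registry name and the helper script are assumptions, not part of `dockerTools`:
```nix
{ dockerTools, buildEnv, redis, writeShellScriptBin }:
let
  image = dockerTools.buildImage {
    name = "redis";
    # no tag is specified, so an automatic tag will be used
    copyToRoot = buildEnv {
      name = "image-root";
      paths = [ redis ];
      pathsToLink = [ "/bin" ];
    };
  };
in
# hypothetical helper that loads the image and pushes it under its automatic tag
writeShellScriptBin "load-and-push" ''
  docker load -i ${image}
  docker tag redis:${image.imageTag} registry.example.com/redis:${image.imageTag}
  docker push registry.example.com/redis:${image.imageTag}
''
```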
### Examples {#ssec-pkgs-dockerTools-buildImage-examples}
:::{.example #ex-dockerTools-buildImage}
# Building a Docker image
The following package builds a Docker image that runs the `redis-server` executable from the `redis` package.
The Docker image will have name `redis` and tag `latest`.
```nix
buildImage {
{ dockerTools, buildEnv, redis }:
dockerTools.buildImage {
name = "redis";
tag = "latest";
fromImage = someBaseImage;
fromImageName = null;
fromImageTag = "latest";
copyToRoot = pkgs.buildEnv {
copyToRoot = buildEnv {
name = "image-root";
paths = [ pkgs.redis ];
paths = [ redis ];
pathsToLink = [ "/bin" ];
};
runAsRoot = ''
#!${pkgs.runtimeShell}
mkdir -p /data
'';
@ -36,68 +231,111 @@ buildImage {
WorkingDir = "/data";
Volumes = { "/data" = { }; };
};
diskSize = 1024;
buildVMMemorySize = 512;
}
```
The above example will build a Docker image `redis/latest` from the given base image. Loading and running this image in Docker results in `redis-server` being started automatically.
The result of building this package is a `.tar.gz` file that can be loaded into Docker:
- `name` specifies the name of the resulting image. This is the only required argument for `buildImage`.
```shell
$ nix-build
(some output removed for clarity)
building '/nix/store/yw0adm4wpsw1w6j4fb5hy25b3arr9s1v-docker-image-redis.tar.gz.drv'...
Adding layer...
tar: Removing leading `/' from member names
Adding meta...
Cooking the image...
Finished.
/nix/store/p4dsg62inh9d2ksy3c7bv58xa851dasr-docker-image-redis.tar.gz
- `tag` specifies the tag of the resulting image. By default it's `null`, which indicates that the nix output hash will be used as tag.
- `fromImage` is the repository tarball containing the base image. It must be a valid Docker image, such as exported by `docker save`. By default it's `null`, which can be seen as equivalent to `FROM scratch` of a `Dockerfile`.
- `fromImageName` can be used to further specify the base image within the repository, in case it contains multiple images. By default it's `null`, in which case `buildImage` will pick the first image available in the repository.
- `fromImageTag` can be used to further specify the tag of the base image within the repository, in case an image contains multiple tags. By default it's `null`, in which case `buildImage` will pick the first tag available for the base image.
- `copyToRoot` is a derivation that will be copied in the new layer of the resulting image. This can be similarly seen as `ADD contents/ /` in a `Dockerfile`. By default it's `null`.
- `runAsRoot` is a bash script that will run as root in an environment that overlays the existing layers of the base image with the new resulting layer, including the previously copied `contents` derivation. This can be similarly seen as `RUN ...` in a `Dockerfile`.
> **_NOTE:_** Using this parameter requires the `kvm` device to be available.
- `config` is used to specify the configuration of the containers that will be started off the built image in Docker. The available options are listed in the [Docker Image Specification v1.2.0](https://github.com/moby/moby/blob/master/image/spec/v1.2.md#image-json-field-descriptions).
- `architecture` is _optional_ and used to specify the image architecture, this is useful for multi-architecture builds that don't need cross compiling. If not specified it will default to `hostPlatform`.
- `diskSize` is used to specify the disk size of the VM used to build the image in megabytes. By default it's 1024 MiB.
- `buildVMMemorySize` is used to specify the memory size of the VM to build the image in megabytes. By default it's 512 MiB.
After the new layer has been created, its closure (to which `contents`, `config` and `runAsRoot` contribute) will be copied in the layer itself. Only new dependencies that are not already in the existing layers will be copied.
At the end of the process, only one new single layer will be produced and added to the resulting image.
The resulting repository will only list the single image `image/tag`. In the case of [the `buildImage` example](#ex-dockerTools-buildImage), it would be `redis/latest`.
It is possible to inspect the arguments with which an image was built using its `buildArgs` attribute.
> **_NOTE:_** If you see errors similar to `getProtocolByName: does not exist (no such protocol name: tcp)` you may need to add `pkgs.iana-etc` to `contents`.
> **_NOTE:_** If you see errors similar to `Error_Protocol ("certificate has unknown CA",True,UnknownCa)` you may need to add `pkgs.cacert` to `contents`.
By default `buildImage` will use a static date of one second past the UNIX Epoch. This allows `buildImage` to produce binary reproducible images. When listing images with `docker images`, the newly created images will be listed like this:
```ShellSession
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
hello latest 08c791c7846e 48 years ago 25.2MB
$ docker load -i /nix/store/p4dsg62inh9d2ksy3c7bv58xa851dasr-docker-image-redis.tar.gz
(some output removed for clarity)
Loaded image: redis:latest
```
:::
You can break binary reproducibility but have a sorted, meaningful `CREATED` column by setting `created` to `now`.
:::{.example #ex-dockerTools-buildImage-runAsRoot}
# Building a Docker image with `runAsRoot`
The following package builds a Docker image with the `hello` executable from the `hello` package.
It uses `runAsRoot` to create a directory and a file inside the image.
This works the same as [](#ex-dockerTools-buildImage-extraCommands), but uses `runAsRoot` instead of `extraCommands`.
```nix
pkgs.dockerTools.buildImage {
{ dockerTools, buildEnv, hello }:
dockerTools.buildImage {
name = "hello";
tag = "latest";
created = "now";
copyToRoot = pkgs.buildEnv {
copyToRoot = buildEnv {
name = "image-root";
paths = [ pkgs.hello ];
paths = [ hello ];
pathsToLink = [ "/bin" ];
};
runAsRoot = ''
mkdir -p /data
echo "some content" > my-file
'';
config = {
Cmd = [ "/bin/hello" ];
WorkingDir = "/data";
};
}
```
:::
:::{.example #ex-dockerTools-buildImage-extraCommands}
# Building a Docker image with `extraCommands`
The following package builds a Docker image with the `hello` executable from the `hello` package.
It uses `extraCommands` to create a directory and a file inside the image.
This works the same as [](#ex-dockerTools-buildImage-runAsRoot), but uses `extraCommands` instead of `runAsRoot`.
Note that with `extraCommands`, we can't directly reference `/` and must create files and directories as if we were already on `/`.
```nix
{ dockerTools, buildEnv, hello }:
dockerTools.buildImage {
name = "hello";
tag = "latest";
copyToRoot = buildEnv {
name = "image-root";
paths = [ hello ];
pathsToLink = [ "/bin" ];
};
extraCommands = ''
mkdir -p data
echo "some content" > my-file
'';
config = {
Cmd = [ "/bin/hello" ];
WorkingDir = "/data";
};
}
```
:::
:::{.example #ex-dockerTools-buildImage-creatednow}
# Building a Docker image with a creation date set to the current time
Note that using a value of `"now"` in the `created` attribute will break reproducibility.
```nix
{ dockerTools, buildEnv, hello }:
dockerTools.buildImage {
name = "hello";
tag = "latest";
created = "now";
copyToRoot = buildEnv {
name = "image-root";
paths = [ hello ];
pathsToLink = [ "/bin" ];
};
@ -105,139 +343,376 @@ pkgs.dockerTools.buildImage {
}
```
Now the Docker CLI will display a reasonable date and sort the images as expected:
After importing the generated repository tarball with Docker, its CLI will display a reasonable date and sort the images as expected:
```ShellSession
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
hello latest de2bf4786de6 About a minute ago 25.2MB
```
However, the produced images will not be binary reproducible.
:::
## buildLayeredImage {#ssec-pkgs-dockerTools-buildLayeredImage}
Create a Docker image with many of the store paths being on their own layer to improve sharing between images. The image is realized into the Nix store as a gzipped tarball. Depending on the intended usage, many users might prefer to use `streamLayeredImage` instead, which this function uses internally.
`buildLayeredImage` uses [`streamLayeredImage`](#ssec-pkgs-dockerTools-streamLayeredImage) underneath to build a compressed Docker-compatible repository tarball.
Basically, `buildLayeredImage` runs the script created by `streamLayeredImage` to save the compressed image in the Nix store.
`buildLayeredImage` supports the same options as `streamLayeredImage`, see [`streamLayeredImage`](#ssec-pkgs-dockerTools-streamLayeredImage) for details.
`name`
:::{.note}
Despite the similar name, [`buildImage`](#ssec-pkgs-dockerTools-buildImage) works completely differently from `buildLayeredImage` and `streamLayeredImage`.
: The name of the resulting image.
Even though some of the arguments may seem related, they cannot be interchanged.
:::
`tag` _optional_
You can use this function to load an image in Docker with `docker load`.
See [](#ex-dockerTools-buildLayeredImage-hello) to see how to do that.
: Tag of the generated image.
### Examples {#ssec-pkgs-dockerTools-buildLayeredImage-examples}
*Default:* the output path's hash
:::{.example #ex-dockerTools-buildLayeredImage-hello}
# Building a layered Docker image
`fromImage` _optional_
: The repository tarball containing the base image. It must be a valid Docker image, such as one exported by `docker save`.
*Default:* `null`, which can be seen as equivalent to `FROM scratch` of a `Dockerfile`.
`contents` _optional_
: Top-level paths in the container. Either a single derivation, or a list of derivations.
*Default:* `[]`
`config` _optional_
`architecture` is _optional_ and used to specify the image architecture, this is useful for multi-architecture builds that don't need cross compiling. If not specified it will default to `hostPlatform`.
: Run-time configuration of the container. A full list of the options available is in the [Docker Image Specification v1.2.0](https://github.com/moby/moby/blob/master/image/spec/v1.2.md#image-json-field-descriptions).
*Default:* `{}`
`created` _optional_
: Date and time the layers were created. Follows the same `now` exception supported by `buildImage`.
*Default:* `1970-01-01T00:00:01Z`
`maxLayers` _optional_
: Maximum number of layers to create.
*Default:* `100`
*Maximum:* `125`
`extraCommands` _optional_
: Shell commands to run while building the final layer, without access to most of the layer contents. Changes to this layer are "on top" of all the other layers, so can create additional directories and files.
`fakeRootCommands` _optional_
: Shell commands to run while creating the archive for the final layer in a fakeroot environment. Unlike `extraCommands`, you can run `chown` to change the owners of the files in the archive, changing fakeroot's state instead of the real filesystem. The latter would require privileges that the build user does not have. Static binaries do not interact with the fakeroot environment. By default all files in the archive will be owned by root.
`enableFakechroot` _optional_
: Whether to run `fakeRootCommands` in `fakechroot`, making programs behave as though `/` is the root of the image being created, while files in the Nix store are available as usual. This allows scripts that perform installation in `/` to work as expected. Considering that `fakechroot` is implemented via the same mechanism as `fakeroot`, the same caveats apply.
*Default:* `false`
### Behavior of `contents` in the final image {#dockerTools-buildLayeredImage-arg-contents}
Each path directly listed in `contents` will have a symlink in the root of the image.
For example:
The following package builds a layered Docker image that runs the `hello` executable from the `hello` package.
The Docker image will have name `hello` and tag `latest`.
```nix
pkgs.dockerTools.buildLayeredImage {
{ dockerTools, hello }:
dockerTools.buildLayeredImage {
name = "hello";
contents = [ pkgs.hello ];
tag = "latest";
contents = [ hello ];
config.Cmd = [ "/bin/hello" ];
}
```
will create symlinks for all the paths in the `hello` package:
The result of building this package is a `.tar.gz` file that can be loaded into Docker:
```ShellSession
/bin/hello -> /nix/store/h1zb1padqbbb7jicsvkmrym3r6snphxg-hello-2.10/bin/hello
/share/info/hello.info -> /nix/store/h1zb1padqbbb7jicsvkmrym3r6snphxg-hello-2.10/share/info/hello.info
/share/locale/bg/LC_MESSAGES/hello.mo -> /nix/store/h1zb1padqbbb7jicsvkmrym3r6snphxg-hello-2.10/share/locale/bg/LC_MESSAGES/hello.mo
```shell
$ nix-build
(some output removed for clarity)
building '/nix/store/bk8bnrbw10nq7p8pvcmdr0qf57y6scha-hello.tar.gz.drv'...
No 'fromImage' provided
Creating layer 1 from paths: ['/nix/store/i93s7xxblavsacpy82zdbn4kplsyq48l-libunistring-1.1']
Creating layer 2 from paths: ['/nix/store/ji01n9vinnj22nbrb86nx8a1ssgpilx8-libidn2-2.3.4']
Creating layer 3 from paths: ['/nix/store/ldrslljw4rg026nw06gyrdwl78k77vyq-xgcc-12.3.0-libgcc']
Creating layer 4 from paths: ['/nix/store/9y8pmvk8gdwwznmkzxa6pwyah52xy3nk-glibc-2.38-27']
Creating layer 5 from paths: ['/nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1']
Creating layer 6 with customisation...
Adding manifests...
Done.
/nix/store/hxcz7snvw7f8rzhbh6mv8jq39d992905-hello.tar.gz
$ docker load -i /nix/store/hxcz7snvw7f8rzhbh6mv8jq39d992905-hello.tar.gz
(some output removed for clarity)
Loaded image: hello:latest
```
### Automatic inclusion of `config` references {#dockerTools-buildLayeredImage-arg-config}
The closure of `config` is automatically included in the closure of the final image.
This allows you to make very simple Docker images with very little code. This container will start up and run `hello`:
```nix
pkgs.dockerTools.buildLayeredImage {
name = "hello";
config.Cmd = [ "${pkgs.hello}/bin/hello" ];
}
```
### Adjusting `maxLayers` {#dockerTools-buildLayeredImage-arg-maxLayers}
Increasing the `maxLayers` increases the number of layers which have a chance to be shared between different images.
Modern Docker installations support up to 128 layers, but older versions support as few as 42.
If the produced image will not be extended by other Docker builds, it is safe to set `maxLayers` to `128`. However, it will be impossible to extend the image further.
The first (`maxLayers-2`) most "popular" paths will have their own individual layers, then layer \#`maxLayers-1` will contain all the remaining "unpopular" paths, and finally layer \#`maxLayers` will contain the Image configuration.
Docker's Layers are not inherently ordered; they are content-addressable and are not explicitly layered until they are composed into an Image.
:::
## streamLayeredImage {#ssec-pkgs-dockerTools-streamLayeredImage}
Builds a script which, when run, will stream an uncompressed tarball of a Docker image to stdout. The arguments to this function are as for `buildLayeredImage`. This method of constructing an image does not realize the image into the Nix store, so it saves on IO and disk/cache space, particularly with large images.
`streamLayeredImage` builds a **script** which, when run, will stream to stdout a Docker-compatible repository tarball containing a single image, using multiple layers to improve sharing between images.
This means that `streamLayeredImage` does not output an image into the Nix store, but only a script that builds the image, saving on IO and disk/cache space, particularly with large images.
The image produced by running the output script can be piped directly into `docker load`, to load it into the local docker daemon:
You can use this function to load an image in Docker with `docker load`.
See [](#ex-dockerTools-streamLayeredImage-hello) to see how to do that.
```ShellSession
$(nix-build) | docker load
For this function, you specify a [store path](https://nixos.org/manual/nix/stable/store/store-path) or a list of store paths to be added to the image, and the function will automatically include any dependencies of those paths in the image.
The function will attempt to create one layer per object in the Nix store that needs to be added to the image.
In case there are more objects to include than available layers, the function will put the most ["popular"](https://github.com/NixOS/nixpkgs/tree/release-23.11/pkgs/build-support/references-by-popularity) objects in their own layers, and group all remaining objects into a single layer.
An additional layer will be created with symlinks to the store paths you specified to be included in the image.
These symlinks are built with [`symlinkJoin`](#trivial-builder-symlinkJoin), so they will be included in the root of the image.
See [](#ex-dockerTools-streamLayeredImage-exploringlayers) to understand how these symlinks are laid out in the generated image.
`streamLayeredImage` allows scripts to be run when creating the additional layer with symlinks, allowing custom behaviour to affect the final results of the image (see the documentation of the `extraCommands` and `fakeRootCommands` attributes).
The resulting repository tarball will list a single image as specified by the `name` and `tag` attributes.
By default, that image will use a static creation date (see documentation for the `created` attribute).
This allows the function to produce reproducible images.
### Inputs {#ssec-pkgs-dockerTools-streamLayeredImage-inputs}
`streamLayeredImage` expects one argument with the following attributes:
`name` (String)
: The name of the generated image.
`tag` (String; _optional_)
: Tag of the generated image.
If `null`, the hash of the nix derivation will be used as the tag.
_Default value:_ `null`.
`fromImage` (Path or Null; _optional_)
: The repository tarball of an image to be used as the base for the generated image.
It must be a valid Docker image, such as one exported by `docker save`, or another image built with the `dockerTools` utility functions.
This can be seen as an equivalent of `FROM fromImage` in a `Dockerfile`.
A value of `null` can be seen as an equivalent of `FROM scratch`.
If specified, the created layers will be appended to the layers defined in the base image.
_Default value:_ `null`.
`contents` (Path or List of Paths; _optional_) []{#dockerTools-buildLayeredImage-arg-contents}
: Directories whose contents will be added to the generated image.
Things that coerce to paths (e.g. a derivation) can also be used.
This can be seen as an equivalent of `ADD contents/ /` in a `Dockerfile`.
All the contents specified by `contents` will be added as a final layer in the generated image.
They will be added as links to the actual files (e.g. links to the store paths).
The actual files will be added in previous layers.
_Default value:_ `[]`
`config` (Attribute Set; _optional_) []{#dockerTools-buildLayeredImage-arg-config}
: Used to specify the configuration of the containers that will be started off the generated image.
Must be an attribute set, with each attribute as listed in the [Docker Image Specification v1.3.0](https://github.com/moby/moby/blob/46f7ab808b9504d735d600e259ca0723f76fb164/image/spec/spec.md#image-json-field-descriptions).
If any packages are used directly in `config`, they will be automatically included in the generated image.
See [](#ex-dockerTools-streamLayeredImage-configclosure) for an example.
_Default value:_ `null`.
`architecture` (String; _optional_)
: Used to specify the image architecture.
This is useful for multi-architecture builds that don't need cross compiling.
If specified, its value should follow the [OCI Image Configuration Specification](https://github.com/opencontainers/image-spec/blob/main/config.md#properties), which should still be compatible with Docker.
According to the linked specification, all possible values for `$GOARCH` in [the Go docs](https://go.dev/doc/install/source#environment) should be valid, but will commonly be one of `386`, `amd64`, `arm`, or `arm64`.
_Default value:_ the same value from `pkgs.go.GOARCH`.
`created` (String; _optional_)
: Specifies the time of creation of the generated image.
This should be either a date and time formatted according to [ISO-8601](https://en.wikipedia.org/wiki/ISO_8601) or `"now"`, in which case the current date will be used.
:::{.caution}
Using `"now"` means that the generated image will not be reproducible anymore (because the date will always change whenever it's built).
:::
_Default value:_ `"1970-01-01T00:00:01Z"`.
`maxLayers` (Number; _optional_) []{#dockerTools-buildLayeredImage-arg-maxLayers}
: The maximum number of layers that will be used by the generated image.
If a `fromImage` was specified, the number of layers used by `fromImage` will be subtracted from `maxLayers` to ensure that the image generated will have at most `maxLayers`.
:::{.caution}
Depending on the tool/runtime where the image will be used, there might be a limit to the number of layers that an image can have.
For Docker, see [this issue on GitHub](https://github.com/docker/docs/issues/8230).
:::
_Default value:_ 100.
`extraCommands` (String; _optional_)
: A bash script that will run in the context of the layer created with the contents specified by `contents`.
At the moment this script runs, only the contents directly specified by `contents` will be available as links.
_Default value:_ `""`.
`fakeRootCommands` (String; _optional_)
: A bash script that will run in the context of the layer created with the contents specified by `contents`.
During the process to generate that layer, the script in `extraCommands` will be run first, if specified.
After that, a {manpage}`fakeroot(1)` environment will be entered.
The script specified in `fakeRootCommands` runs inside the fakeroot environment, and the layer is then generated from the view of the files inside the fakeroot environment.
This is useful for changing the owners of the files in the layer (by running `chown`, for example), or for performing other privileged operations related to file manipulation. By default, all files in the layer will be owned by root, and the build environment doesn't have enough privileges to perform such operations on them directly. See the sketch after this list of inputs for a small example.
For more details, see the manpage for {manpage}`fakeroot(1)`.
:::{.caution}
Due to how fakeroot works, static binaries cannot perform privileged file operations in `fakeRootCommands`, unless `enableFakechroot` is set to `true`.
:::
_Default value:_ `""`.
`enableFakechroot` (Boolean; _optional_)
: By default, the script specified in `fakeRootCommands` only runs inside a fakeroot environment.
If `enableFakechroot` is `true`, a more complete chroot environment will be created using [`proot`](https://proot-me.github.io/) before running the script in `fakeRootCommands`.
Files in the Nix store will be available.
This allows scripts that perform installation in `/` to work as expected.
This can be seen as an equivalent of `RUN ...` in a `Dockerfile`.
_Default value:_ `false`
`includeStorePaths` (Boolean; _optional_)
: The files specified in `contents` are put into layers in the generated image.
If `includeStorePaths` is `false`, the actual files will not be included in the generated image, and only links to them will be added instead.
It is **not recommended** to set this to `false` unless you have other tooling to insert the store paths via other means (such as bind mounting the host store) when running containers with the generated image.
If you don't provide any extra tooling, the generated image won't run properly.
See [](#ex-dockerTools-streamLayeredImage-exploringlayers) to understand the impact of setting `includeStorePaths` to `false`.
_Default value:_ `true`
`passthru` (Attribute Set; _optional_)
: Use this to pass any attributes as [passthru](#var-stdenv-passthru) for the resulting derivation.
_Default value:_ `{}`
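As a small illustration of `fakeRootCommands` and `enableFakechroot` described above, the following sketch creates a state directory owned by an unprivileged user; the directory and the uid/gid are assumed values:
```nix
{ dockerTools, hello }:
dockerTools.streamLayeredImage {
  name = "hello";
  tag = "latest";
  contents = [ hello ];
  # With enableFakechroot, the script sees `/` as the root of the image being built,
  # so absolute paths can be used directly.
  enableFakechroot = true;
  fakeRootCommands = ''
    mkdir -p /var/lib/hello
    chown -R 1000:1000 /var/lib/hello # assumed unprivileged uid/gid
  '';
}
```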
### Passthru outputs {#ssec-pkgs-dockerTools-streamLayeredImage-passthru-outputs}
`streamLayeredImage` also defines its own [`passthru`](#var-stdenv-passthru) attributes:
`imageTag` (String)
: The tag of the generated image.
This is useful if no tag was specified in the attributes of the argument to the function, because an automatic tag will be used instead.
`imageTag` allows you to retrieve the value of the tag used in this case.
### Examples {#ssec-pkgs-dockerTools-streamLayeredImage-examples}
:::{.example #ex-dockerTools-streamLayeredImage-hello}
# Streaming a layered Docker image
The following package builds a **script** which, when run, will stream a layered Docker image that runs the `hello` executable from the `hello` package.
The Docker image will have name `hello` and tag `latest`.
```nix
{ dockerTools, hello }:
dockerTools.streamLayeredImage {
name = "hello";
tag = "latest";
contents = [ hello ];
config.Cmd = [ "/bin/hello" ];
}
```
Alternatively, the image can be piped via `gzip` into `skopeo`, e.g., to copy it into a registry:
The result of building this package is a script.
Running this script and piping it into `docker load` gives you the same image that was built in [](#ex-dockerTools-buildLayeredImage-hello).
Note that in this case, the image is never added to the Nix store, but instead streamed directly into Docker.
```ShellSession
$(nix-build) | gzip --fast | skopeo copy docker-archive:/dev/stdin docker://some_docker_registry/myimage:tag
```shell
$ nix-build
(output removed for clarity)
/nix/store/wsz2xl8ckxnlb769irvq6jv1280dfvxd-stream-hello
$ /nix/store/wsz2xl8ckxnlb769irvq6jv1280dfvxd-stream-hello | docker load
No 'fromImage' provided
Creating layer 1 from paths: ['/nix/store/i93s7xxblavsacpy82zdbn4kplsyq48l-libunistring-1.1']
Creating layer 2 from paths: ['/nix/store/ji01n9vinnj22nbrb86nx8a1ssgpilx8-libidn2-2.3.4']
Creating layer 3 from paths: ['/nix/store/ldrslljw4rg026nw06gyrdwl78k77vyq-xgcc-12.3.0-libgcc']
Creating layer 4 from paths: ['/nix/store/9y8pmvk8gdwwznmkzxa6pwyah52xy3nk-glibc-2.38-27']
Creating layer 5 from paths: ['/nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1']
Creating layer 6 with customisation...
Adding manifests...
Done.
(some output removed for clarity)
Loaded image: hello:latest
```
:::
:::{.example #ex-dockerTools-streamLayeredImage-exploringlayers}
# Exploring the layers in an image built with `streamLayeredImage`
Assume the following package, which builds a layered Docker image with the `hello` package.
```nix
{ dockerTools, hello }:
dockerTools.streamLayeredImage {
name = "hello";
contents = [ hello ];
}
```
The `hello` package depends on 4 other packages:
```shell
$ nix-store --query -R $(nix-build -A hello)
/nix/store/i93s7xxblavsacpy82zdbn4kplsyq48l-libunistring-1.1
/nix/store/ji01n9vinnj22nbrb86nx8a1ssgpilx8-libidn2-2.3.4
/nix/store/ldrslljw4rg026nw06gyrdwl78k77vyq-xgcc-12.3.0-libgcc
/nix/store/9y8pmvk8gdwwznmkzxa6pwyah52xy3nk-glibc-2.38-27
/nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1
```
This means that all these packages will be included in the image generated by `streamLayeredImage`.
It will put each package in its own layer, for a total of 5 layers with actual files in them.
A final layer will be created only with symlinks for the `hello` package.
The image generated will have the following directory structure (some directories were collapsed for readability):
```
├── bin
│ └── hello → /nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1/bin/hello
├── nix
│ └── store
│ ├─⊕ 9y8pmvk8gdwwznmkzxa6pwyah52xy3nk-glibc-2.38-27
│ ├─⊕ i93s7xxblavsacpy82zdbn4kplsyq48l-libunistring-1.1
│ ├─⊕ ji01n9vinnj22nbrb86nx8a1ssgpilx8-libidn2-2.3.4
│ ├─⊕ ldrslljw4rg026nw06gyrdwl78k77vyq-xgcc-12.3.0-libgcc
│ └─⊕ zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1
└── share
├── info
│ └── hello.info → /nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1/share/info/hello.info
├─⊕ locale
└── man
└── man1
└── hello.1.gz → /nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1/share/man/man1/hello.1.gz
```
Each of the packages in `/nix/store` comes from a layer in the image.
The final layer adds the `/bin` and `/share` directories, but they only contain links to the actual files in `/nix/store`.
If our package sets `includeStorePaths` to `false`, we'll end up with only the final layer with the links, but the actual files won't exist in the image:
```nix
{ dockerTools, hello }:
dockerTools.streamLayeredImage {
name = "hello";
contents = [ hello ];
includeStorePaths = false;
}
```
After building this package, the image will have the following directory structure:
```
├── bin
│ └── hello → /nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1/bin/hello
└── share
├── info
│ └── hello.info → /nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1/share/info/hello.info
├─⊕ locale
└── man
└── man1
└── hello.1.gz → /nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1/share/man/man1/hello.1.gz
```
Note how the links point to paths in `/nix/store`, but they're not included in the image itself.
This is why you need extra tooling when setting `includeStorePaths` to `false`:
a container created from such an image won't find any of the files it needs to run otherwise.
:::
::: {.example #ex-dockerTools-streamLayeredImage-configclosure}
# Building a layered Docker image with packages directly in `config`
The closure of `config` is automatically included in the generated image.
The following package shows a more compact way to create the same output generated in [](#ex-dockerTools-streamLayeredImage-hello).
```nix
{ dockerTools, hello, lib }:
dockerTools.streamLayeredImage {
name = "hello";
tag = "latest";
config.Cmd = [ "${lib.getExe hello}" ];
}
```
:::
## pullImage {#ssec-pkgs-dockerTools-fetchFromRegistry}


@ -29,6 +29,10 @@ pkgs.mkShell {
... all the attributes of `stdenv.mkDerivation`.
## Variants {#sec-pkgs-mkShell-variants}
`pkgs.mkShellNoCC` is a variant that uses `stdenvNoCC` instead of `stdenv` as base environment. This is useful if no C compiler is needed in the shell environment.
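For instance, a minimal sketch of a shell without a C compiler; the packages listed are only an assumption for illustration:
```nix
{ pkgs ? import <nixpkgs> { } }:
pkgs.mkShellNoCC {
  # no C compiler toolchain is pulled in; only the listed tools are available
  packages = [ pkgs.pandoc pkgs.git ];
}
```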
## Building the shell {#sec-pkgs-mkShell-building}
This derivation output will contain a text file that contains a reference to


@ -354,6 +354,7 @@ mkYarnPackage rec {
- The `echo 9` steps comes from this answer: <https://stackoverflow.com/a/49139496>
- Exporting the headers in `npm_config_nodedir` comes from this issue: <https://github.com/nodejs/node-gyp/issues/1191#issuecomment-301243919>
- `offlineCache` (described [above](#javascript-yarn2nix-preparation)) must be specified to avoid [Import From Derivation](#ssec-import-from-derivation) (IFD) when used inside Nixpkgs.
## Outside Nixpkgs {#javascript-outside-nixpkgs}


@ -26,6 +26,17 @@ It is important to import Qt modules directly, that is: `qtbase`, `qtdeclarative
Additionally all Qt packages must include `wrapQtAppsHook` in `nativeBuildInputs`, or you must explicitly set `dontWrapQtApps`.
`pkgs.callPackage` does not provide injections for `qtbase` or the like.
Instead you want to either use `pkgs.libsForQt5.callPackage`, or `pkgs.qt6Packages.callPackage`, depending on the Qt version you want to use.
For example (from [here](https://github.com/NixOS/nixpkgs/blob/2f9286912cb215969ece465147badf6d07aa43fe/pkgs/top-level/all-packages.nix#L30106))
```nix
zeal-qt5 = libsForQt5.callPackage ../data/documentation/zeal { };
zeal-qt6 = qt6Packages.callPackage ../data/documentation/zeal { };
zeal = zeal-qt5;
```
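For reference, a minimal sketch of a Qt package built this way; the project name, source URL and hash are placeholders:
```nix
{ lib, stdenv, fetchurl, qtbase, wrapQtAppsHook }:
stdenv.mkDerivation {
  pname = "my-qt-app"; # hypothetical package
  version = "1.0";
  src = fetchurl {
    url = "https://example.org/my-qt-app-1.0.tar.gz"; # placeholder URL
    hash = lib.fakeHash; # placeholder; replace with the real hash
  };
  buildInputs = [ qtbase ];
  # wrapQtAppsHook wraps the installed binaries so they find Qt plugins at runtime
  nativeBuildInputs = [ wrapQtAppsHook ];
}
```
Such a file would then be called with `pkgs.libsForQt5.callPackage` or `pkgs.qt6Packages.callPackage` as shown in the example above.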
## Locating runtime dependencies {#qt-runtime-dependencies}
Qt applications must be wrapped to find runtime dependencies.


@ -44,21 +44,22 @@ rustPlatform.buildRustPackage rec {
}
```
`buildRustPackage` requires either the `cargoSha256` or the
`cargoHash` attribute which is computed over all crate sources of this
package. `cargoSha256` is used for traditional Nix SHA-256 hashes,
such as the one in the example above. `cargoHash` should instead be
used for [SRI](https://www.w3.org/TR/SRI/) hashes. For example:
Exception: If the application has cargo `git` dependencies, the `cargoHash`/`cargoSha256`
approach will not work, and you will need to copy the `Cargo.lock` file of the application
to nixpkgs and continue with the next section for specifying the options of the `cargoLock`
section.
`buildRustPackage` requires either the `cargoHash` or the `cargoSha256`
attribute which is computed over all crate sources of this package.
`cargoSha256` is used for traditional Nix SHA-256 hashes. `cargoHash` should
instead be used for [SRI](https://www.w3.org/TR/SRI/) hashes and should be
preferred. For example:
```nix
cargoHash = "sha256-l1vL2ZdtDRxSGvP0X/l3nMw8+6WF67KPutJEzUROjg8=";
```
Exception: If the application has cargo `git` dependencies, the `cargoHash`/`cargoSha256`
approach will not work, and you will need to copy the `Cargo.lock` file of the application
to nixpkgs and continue with the next section for specifying the options of the `cargoLock`
section.
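That `cargoLock` section is not part of this hunk, but a minimal sketch of such a declaration might look like this; the package name, source, and hashes are placeholders (assuming `lib` is in scope):
```nix
rustPlatform.buildRustPackage rec {
  pname = "my-tool"; # hypothetical package
  version = "0.1.0";
  src = ./.; # placeholder source
  cargoLock = {
    lockFile = ./Cargo.lock;
    # git dependencies must have their hashes declared explicitly:
    outputHashes = {
      "some-git-dep-0.2.0" = lib.fakeHash; # placeholder
    };
  };
}
```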
Both types of hashes are permitted when contributing to nixpkgs. The
Cargo hash is obtained by inserting a fake checksum into the
expression and building the package once. The correct checksum can


@ -475,11 +475,11 @@ A script to be run by `maintainers/scripts/update.nix` when the package is match
```nix
passthru.updateScript = writeScript "update-zoom-us" ''
#!/usr/bin/env nix-shell
#!nix-shell -i bash -p curl pcre common-updater-scripts
#!nix-shell -i bash -p curl pcre2 common-updater-scripts
set -eu -o pipefail
version="$(curl -sI https://zoom.us/client/latest/zoom_x86_64.tar.xz | grep -Fi 'Location:' | pcregrep -o1 '/(([0-9]\.?)+)/')"
version="$(curl -sI https://zoom.us/client/latest/zoom_x86_64.tar.xz | grep -Fi 'Location:' | pcre2grep -o1 '/(([0-9]\.?)+)/')"
update-source-version zoom-us "$version"
'';
```


@ -77,7 +77,7 @@ Linux foo 3.4.82 #1-NixOS SMP Thu Mar 20 14:44:05 UTC 2014 x86_64 GNU/Linux
There are several ways to change the configuration of the container.
First, on the host, you can edit
`/var/lib/container/name/etc/nixos/configuration.nix`, and run
`/var/lib/nixos-containers/foo/etc/nixos/configuration.nix`, and run
```ShellSession
# nixos-container update foo


@ -63,3 +63,42 @@ checks:
is **restart**ed with the others. If it is set, both the service and the
socket are **stop**ped and the socket is **start**ed, leaving socket
activation to start the service when it's needed.
## Sysinit reactivation {#sec-sysinit-reactivation}
[`sysinit.target`](https://www.freedesktop.org/software/systemd/man/latest/systemd.special.html#sysinit.target)
is a systemd target that encodes system initialization (i.e. early startup). A
few units that need to run very early in the bootup process are ordered to
finish before this target is reached. Probably the most notable one of these is
`systemd-tmpfiles-setup.service`. We will refer to these units as "sysinit
units".
"Normal" systemd units, by default, are ordered AFTER `sysinit.target`. In
other words, these "normal" units expect all services ordered before
`sysinit.target` to have finished without explicitly declaring this dependency
relationship for each dependency. See the [systemd
bootup](https://www.freedesktop.org/software/systemd/man/latest/bootup.html)
for more details on the bootup process.
Restarting both a unit ordered before `sysinit.target` and one ordered after it
presents a problem: they would be started at the same time, because they do not
explicitly declare their dependency relations.
To solve this, NixOS has an artificial `sysinit-reactivation.target` which
allows you to ensure that services ordered before `sysinit.target` are
restarted correctly. This applies both to the ordering between these sysinit
services as well as ensuring that sysinit units are restarted before "normal"
units.
To make an existing sysinit service restart correctly during system switch, you
have to declare:
```nix
systemd.services.my-sysinit = {
requiredBy = [ "sysinit-reactivation.target" ];
before = [ "sysinit-reactivation.target" ];
restartTriggers = [ config.environment.etc."my-sysinit.d".source ];
};
```
You need to configure appropriate `restartTriggers` specific to your service.


@ -37,7 +37,7 @@ of actions is always the same:
- Forget about the failed state of units (`systemctl reset-failed`)
- Reload systemd (`systemctl daemon-reload`)
- Reload systemd user instances (`systemctl --user daemon-reload`)
- Set up tmpfiles (`systemd-tmpfiles --create`)
- Reactivate sysinit (`systemctl restart sysinit-reactivation.target`)
- Reload units (`systemctl reload`)
- Restart units (`systemctl restart`)
- Start units (`systemctl start`)


@ -116,6 +116,11 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
- The executable file names for `firefox-devedition`, `firefox-beta`, `firefox-esr` now match their package names, which is consistent with the `firefox-*-bin` packages. The desktop entries are also updated so that you can have multiple editions of firefox in your app launcher.
- switch-to-configuration does not directly call systemd-tmpfiles anymore.
Instead, the new artificial sysinit-reactivation.target is introduced, which
allows restarting multiple services that are ordered before sysinit.target
while respecting the ordering between them.
- The `systemd.oomd` module behavior is changed as:
- Raise ManagedOOMMemoryPressureLimit from 50% to 80%. This should make systemd-oomd kill things less often, and fix issues like [this](https://pagure.io/fedora-workstation/issue/358).


@ -9,17 +9,17 @@
# This file is made to be used as follow:
#
# $ nix-instantiate ./option-usage.nix --argstr testOption service.xserver.enable -A txtContent --eval
# $ nix-instantiate ./option-usages.nix --argstr testOption service.xserver.enable -A txtContent --eval
#
# or
#
# $ nix-build ./option-usage.nix --argstr testOption service.xserver.enable -A txt -o service.xserver.enable._txt
# $ nix-build ./option-usages.nix --argstr testOption service.xserver.enable -A txt -o service.xserver.enable._txt
#
# Other targets exists such as `dotContent`, `dot`, and `pdf`. If you are
# looking for the option usage of multiple options, you can provide a list
# as argument.
#
# $ nix-build ./option-usage.nix --arg testOptions \
# $ nix-build ./option-usages.nix --arg testOptions \
# '["boot.loader.gummiboot.enable" "boot.loader.gummiboot.timeout"]' \
# -A txt -o gummiboot.list
#


@ -889,9 +889,15 @@ while (my $f = <$list_active_users>) {
close($list_active_users) || die("Unable to close the file handle to loginctl");
# Set the new tmpfiles
print STDERR "setting up tmpfiles\n";
system("$new_systemd/bin/systemd-tmpfiles", "--create", "--remove", "--exclude-prefix=/dev") == 0 or $res = 3;
# Restart sysinit-reactivation.target.
# This target only exists to restart services ordered before sysinit.target. We
# cannot use X-StopOnReconfiguration to restart sysinit.target because then ALL
# services of the system would be restarted since all normal services have a
# default dependency on sysinit.target. sysinit-reactivation.target ensures
# that services ordered BEFORE sysinit.target get re-started in the correct
# order. Ordering between these services is respected.
print STDERR "restarting sysinit-reactivation.target\n";
system("$new_systemd/bin/systemctl", "restart", "sysinit-reactivation.target") == 0 or $res = 4;
# Before reloading we need to ensure that the units are still active. They may have been
# deactivated because one of their requirements got stopped. If they are inactive


@ -569,6 +569,13 @@ in
unitConfig.X-StopOnReconfiguration = true;
};
# This target only exists so that services ordered before sysinit.target
# are restarted in the correct order, notably BEFORE the other services,
# when switching configurations.
systemd.targets.sysinit-reactivation = {
description = "Reactivate sysinit units";
};
systemd.units =
mapAttrs' (n: v: nameValuePair "${n}.path" (pathToUnit n v)) cfg.paths
// mapAttrs' (n: v: nameValuePair "${n}.service" (serviceToUnit n v)) cfg.services


@ -150,6 +150,41 @@ in
"systemd-tmpfiles-setup.service"
];
# Allow systemd-tmpfiles to be restarted by switch-to-configuration. This
# service is not pulled into the normal boot process. It only exists for
# switch-to-configuration.
#
# This needs to be a separate unit because it does not execute
# systemd-tmpfiles with `--boot` as that is supposed to only be executed
# once at boot time.
#
# Keep this aligned with the upstream `systemd-tmpfiles-setup.service` unit.
systemd.services."systemd-tmpfiles-resetup" = {
description = "Re-setup tmpfiles on a system that is already running.";
requiredBy = [ "sysinit-reactivation.target" ];
after = [ "local-fs.target" "systemd-sysusers.service" "systemd-journald.service" ];
before = [ "sysinit-reactivation.target" "shutdown.target" ];
conflicts = [ "shutdown.target" ];
restartTriggers = [ config.environment.etc."tmpfiles.d".source ];
unitConfig.DefaultDependencies = false;
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
ExecStart = "systemd-tmpfiles --create --remove --exclude-prefix=/dev";
SuccessExitStatus = "DATAERR CANTCREAT";
ImportCredential = [
"tmpfiles.*"
"loging.motd"
"login.issue"
"network.hosts"
"ssh.authorized_keys.root"
];
};
};
environment.etc = {
"tmpfiles.d".source = (pkgs.symlinkJoin {
name = "tmpfiles.d";


@ -150,10 +150,12 @@ in
after = [
"network-online.target"
"lxcfs.service"
] ++ (lib.optional cfg.socketActivation "incus.socket");
"incus.socket"
];
requires = [
"lxcfs.service"
] ++ (lib.optional cfg.socketActivation "incus.socket");
"incus.socket"
];
wants = [
"network-online.target"
];
@ -183,7 +185,7 @@ in
};
};
systemd.sockets.incus = lib.mkIf cfg.socketActivation {
systemd.sockets.incus = {
description = "Incus UNIX socket";
wantedBy = [ "sockets.target" ];
@ -191,7 +193,6 @@ in
ListenStream = "/var/lib/incus/unix.socket";
SocketMode = "0660";
SocketGroup = "incus-admin";
Service = "incus.service";
};
};


@ -214,16 +214,14 @@ in {
LimitNPROC = "infinity";
TasksMax = "infinity";
Restart = "on-failure";
TimeoutStartSec = "${cfg.startTimeout}s";
TimeoutStopSec = "30s";
# By default, `lxd` loads configuration files from hard-coded
# `/usr/share/lxc/config` - since this is a no-go for us, we have to
# explicitly tell it where the actual configuration files are
Environment = lib.mkIf (config.virtualisation.lxc.lxcfs.enable)
"LXD_LXC_TEMPLATE_CONFIG=${pkgs.lxcfs}/share/lxc/config";
};
unitConfig.ConditionPathExists = "!/var/lib/incus/.migrated-from-lxd";
};
systemd.services.lxd-preseed = lib.mkIf (cfg.preseed != null) {

View file

@ -820,6 +820,7 @@ in {
syncthing-init = handleTest ./syncthing-init.nix {};
syncthing-many-devices = handleTest ./syncthing-many-devices.nix {};
syncthing-relay = handleTest ./syncthing-relay.nix {};
sysinit-reactivation = runTest ./sysinit-reactivation.nix;
systemd = handleTest ./systemd.nix {};
systemd-analyze = handleTest ./systemd-analyze.nix {};
systemd-binfmt = handleTestOn ["x86_64-linux"] ./systemd-binfmt.nix {};

View file

@ -6,9 +6,8 @@
}:
{
container = import ./container.nix { inherit system pkgs; };
lxd-to-incus = import ./lxd-to-incus.nix { inherit system pkgs; };
preseed = import ./preseed.nix { inherit system pkgs; };
socket-activated = import ./socket-activated.nix { inherit system pkgs; };
virtual-machine = handleTestOn [ "x86_64-linux" ] ./virtual-machine.nix {
inherit system pkgs;
};
virtual-machine = handleTestOn [ "x86_64-linux" ] ./virtual-machine.nix { inherit system pkgs; };
}

View file

@ -0,0 +1,112 @@
import ../make-test-python.nix (
{ pkgs, lib, ... }:
let
releases = import ../../release.nix { configuration.documentation.enable = lib.mkForce false; };
container-image-metadata = releases.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
container-image-rootfs = releases.lxdContainerImage.${pkgs.stdenv.hostPlatform.system};
in
{
name = "lxd-to-incus";
meta = {
maintainers = lib.teams.lxc.members;
};
nodes.machine =
{ lib, ... }:
{
environment.systemPackages = [ pkgs.lxd-to-incus ];
virtualisation = {
diskSize = 6144;
cores = 2;
memorySize = 2048;
lxd.enable = true;
lxd.preseed = {
networks = [
{
name = "nixostestbr0";
type = "bridge";
config = {
"ipv4.address" = "10.0.100.1/24";
"ipv4.nat" = "true";
};
}
];
profiles = [
{
name = "default";
devices = {
eth0 = {
name = "eth0";
network = "nixostestbr0";
type = "nic";
};
root = {
path = "/";
pool = "nixostest_pool";
size = "35GiB";
type = "disk";
};
};
}
{
name = "nixos_notdefault";
devices = { };
}
];
storage_pools = [
{
name = "nixostest_pool";
driver = "dir";
}
];
};
incus.enable = true;
};
};
testScript = ''
def lxd_wait_for_preseed(_) -> bool:
_, output = machine.systemctl("is-active lxd-preseed.service")
return ("inactive" in output)
def lxd_instance_is_up(_) -> bool:
status, _ = machine.execute("lxc exec container --disable-stdin --force-interactive /run/current-system/sw/bin/true")
return status == 0
def incus_instance_is_up(_) -> bool:
status, _ = machine.execute("incus exec container --disable-stdin --force-interactive /run/current-system/sw/bin/true")
return status == 0
with machine.nested("initialize lxd and resources"):
machine.wait_for_unit("sockets.target")
machine.wait_for_unit("lxd.service")
retry(lxd_wait_for_preseed)
machine.succeed("lxc image import ${container-image-metadata}/*/*.tar.xz ${container-image-rootfs}/*/*.tar.xz --alias nixos")
machine.succeed("lxc launch nixos container")
retry(lxd_instance_is_up)
machine.wait_for_unit("incus.service")
with machine.nested("run migration"):
machine.succeed("lxd-to-incus --yes")
with machine.nested("verify resources migrated to incus"):
machine.succeed("incus config show container")
retry(incus_instance_is_up)
machine.succeed("incus exec container -- true")
machine.succeed("incus profile show default | grep nixostestbr0")
machine.succeed("incus profile show default | grep nixostest_pool")
machine.succeed("incus profile show nixos_notdefault")
machine.succeed("incus storage show nixostest_pool")
machine.succeed("incus network show nixostestbr0")
'';
}
)

View file

@ -0,0 +1,107 @@
# This covers two scenarios in one test:
# - A post-sysinit service needs to be restarted AFTER tmpfiles is restarted.
# - A service needs to be restarted BEFORE tmpfiles is restarted.
{ lib, ... }:
let
makeGeneration = generation: {
"${generation}".configuration = {
systemd.services.pre-sysinit-before-tmpfiles.environment.USER =
lib.mkForce "${generation}-tmpfiles-user";
systemd.services.pre-sysinit-after-tmpfiles.environment = {
NEEDED_PATH = lib.mkForce "/run/${generation}-needed-by-pre-sysinit-after-tmpfiles";
PATH_TO_CREATE = lib.mkForce "/run/${generation}-needed-by-post-sysinit";
};
systemd.services.post-sysinit.environment = {
NEEDED_PATH = lib.mkForce "/run/${generation}-needed-by-post-sysinit";
PATH_TO_CREATE = lib.mkForce "/run/${generation}-created-by-post-sysinit";
};
systemd.tmpfiles.settings.test = lib.mkForce {
"/run/${generation}-needed-by-pre-sysinit-after-tmpfiles".f.user =
"${generation}-tmpfiles-user";
};
};
};
in
{
name = "sysinit-reactivation";
meta.maintainers = with lib.maintainers; [ nikstur ];
nodes.machine = { config, lib, pkgs, ... }: {
systemd.services.pre-sysinit-before-tmpfiles = {
wantedBy = [ "sysinit.target" ];
requiredBy = [ "sysinit-reactivation.target" ];
before = [ "systemd-tmpfiles-setup.service" "systemd-tmpfiles-resetup.service" ];
unitConfig.DefaultDependencies = false;
serviceConfig.Type = "oneshot";
serviceConfig.RemainAfterExit = true;
environment.USER = "tmpfiles-user";
script = "${pkgs.shadow}/bin/useradd $USER";
};
systemd.services.pre-sysinit-after-tmpfiles = {
wantedBy = [ "sysinit.target" ];
requiredBy = [ "sysinit-reactivation.target" ];
after = [ "systemd-tmpfiles-setup.service" "systemd-tmpfiles-resetup.service" ];
unitConfig.DefaultDependencies = false;
serviceConfig.Type = "oneshot";
serviceConfig.RemainAfterExit = true;
environment = {
NEEDED_PATH = "/run/needed-by-pre-sysinit-after-tmpfiles";
PATH_TO_CREATE = "/run/needed-by-post-sysinit";
};
script = ''
if [[ -e $NEEDED_PATH ]]; then
touch $PATH_TO_CREATE
fi
'';
};
systemd.services.post-sysinit = {
wantedBy = [ "default.target" ];
serviceConfig.Type = "oneshot";
serviceConfig.RemainAfterExit = true;
environment = {
NEEDED_PATH = "/run/needed-by-post-sysinit";
PATH_TO_CREATE = "/run/created-by-post-sysinit";
};
script = ''
if [[ -e $NEEDED_PATH ]]; then
touch $PATH_TO_CREATE
fi
'';
};
systemd.tmpfiles.settings.test = {
"/run/needed-by-pre-sysinit-after-tmpfiles".f.user =
"tmpfiles-user";
};
specialisation = lib.mkMerge [
(makeGeneration "second")
(makeGeneration "third")
];
};
testScript = { nodes, ... }: ''
def switch(generation):
toplevel = "${nodes.machine.system.build.toplevel}";
machine.succeed(f"{toplevel}/specialisation/{generation}/bin/switch-to-configuration switch")
machine.wait_for_unit("default.target")
machine.succeed("test -e /run/created-by-post-sysinit")
switch("second")
machine.succeed("test -e /run/second-created-by-post-sysinit")
switch("third")
machine.succeed("test -e /run/third-created-by-post-sysinit")
'';
}

View file

@ -969,9 +969,28 @@
dependencies = with self; [ nvim-lspconfig ];
};
nvim-spectre = super.nvim-spectre.overrideAttrs {
dependencies = with self; [ plenary-nvim ];
nvim-spectre = super.nvim-spectre.overrideAttrs (old:
let
spectre_oxi = rustPlatform.buildRustPackage {
pname = "spectre_oxi";
inherit (old) version src;
sourceRoot = "source/spectre_oxi";
cargoHash = "sha256-y2ZIgOApIShkIesXmItPKDO6XjFrG4GS5HCPncJUmN8=";
preCheck = ''
mkdir tests/tmp/
'';
};
in
(lib.optionalAttrs stdenv.isLinux {
dependencies = with self;
[ plenary-nvim ];
postInstall = ''
ln -s ${spectre_oxi}/lib/libspectre_oxi.* $out/lua/spectre_oxi.so
'';
}));
nvim-teal-maker = super.nvim-teal-maker.overrideAttrs {
postPatch = ''

View file

@ -9,7 +9,7 @@
let
pname = "1password";
version = if channel == "stable" then "8.10.23" else "8.10.24-6.BETA";
version = if channel == "stable" then "8.10.23" else "8.10.24-35.BETA";
sources = {
stable = {
@ -33,19 +33,19 @@ let
beta = {
x86_64-linux = {
url = "https://downloads.1password.com/linux/tar/beta/x86_64/1password-${version}.x64.tar.gz";
hash = "sha256-vrC+JzcRQnXTB0KDoIpYTJjoQCNFgFaZuV+8BXTwwmk=";
hash = "sha256-NO8jxXvdjDn7uTyboav8UnHfc0plHDLoKQ/FHZJqpsE=";
};
aarch64-linux = {
url = "https://downloads.1password.com/linux/tar/beta/aarch64/1password-${version}.arm64.tar.gz";
hash = "sha256-4v5gtaPWjyBs5VV5quuq77MzjcYQN1k/Ju0NYB44gYM=";
hash = "sha256-9qnODNE3kNRZyj5+2nfoz9zBmY2MqxVPo3rpLOCFAsI=";
};
x86_64-darwin = {
url = "https://downloads.1password.com/mac/1Password-${version}-x86_64.zip";
hash = "sha256-SSGg8zLiEaYFTWRb4K145nG/dDQCQw2di8bD59xoTrA=";
hash = "sha256-gU11xBIGOCRbQshOQ4ktYVgHe6dxJ0GnONkVnZkCiEE=";
};
aarch64-darwin = {
url = "https://downloads.1password.com/mac/1Password-${version}-aarch64.zip";
hash = "sha256-SgTv1gYPBAr/LPeAtHGBZUw35TegpaVW1M84maT8BdY=";
hash = "sha256-YcnVIgV+2MZOS+a+3lFuNMgnLaGVrOP53B/k70zRoTI=";
};
};
};

View file

@ -19,15 +19,15 @@
python3.pkgs.buildPythonApplication rec {
pname = "gpu-viewer";
version = "2.26";
version = "2.32";
format = "other";
src = fetchFromGitHub {
owner = "arunsivaramanneo";
repo = pname;
rev = "v${version}";
hash = "sha256-3GYJq76g/pU8dt+OMGBeDcw47z5Xv3AGkLsACcBCELs=";
rev = "refs/tags/v${version}";
hash = "sha256-zv53tvFQ0NAqFPYp7qZVmbuM1fBJwC4t43YJDZdqSPU=";
};
nativeBuildInputs = [

View file

@ -14,11 +14,11 @@ stdenv.mkDerivation (finalAttrs: let
in {
pname = "logseq";
version = "0.10.3";
version = "0.10.4";
src = fetchurl {
url = "https://github.com/logseq/logseq/releases/download/${version}/logseq-linux-x64-${version}.AppImage";
hash = "sha256-aduFqab5cpoXR3oFOHzsXJwogm1bZ9KgT2Mt6G9kbBA=";
hash = "sha256-vFCNhnhfxlSLeieB1DJgym5nbzPKO1ngArTUXvf+DAU=";
name = "${pname}-${version}.AppImage";
};

View file

@ -2,16 +2,16 @@
buildGoModule rec {
pname = "atmos";
version = "1.53.0";
version = "1.54.0";
src = fetchFromGitHub {
owner = "cloudposse";
repo = pname;
rev = "v${version}";
sha256 = "sha256-2T5LCtycTBnJntcKQoJqNwTczWR8bC1SBAqjMN+3Qd4=";
sha256 = "sha256-WGOuFqkrX3/5RINdsegTSxJ28W4iEMPuLVrCjtmCkTw=";
};
vendorHash = "sha256-piK9IVwGAidDhBNAEnu9hD7Ng67ZKxZMcNqgOXLCkq0=";
vendorHash = "sha256-kR13BVbjgQoEjb2xwH8LkxLeMp30h6mbWum9RbzzSGE=";
ldflags = [ "-s" "-w" "-X github.com/cloudposse/atmos/cmd.Version=v${version}" ];

View file

@ -167,9 +167,9 @@ rec {
mkTerraform = attrs: pluggable (generic attrs);
terraform_1 = mkTerraform {
version = "1.6.6";
hash = "sha256-fYFmHypzSbSgut9Wij6Sz8xR97DVOwPLQap6pan7IRA=";
vendorHash = "sha256-fQsxTX1v8HsMDIkofeCVfNitJAaTWHwppC7DniXlvT4=";
version = "1.7.0";
hash = "sha256-oF0osIC/ti9ZkWDTBIQuBHreIBVfeo4f/naGFdaMxJE=";
vendorHash = "sha256-77W0x6DENB+U3yB4LI3PwJU9bTuH7Eqz2a9FNoERuJg=";
patches = [ ./provider-path-0_15.patch ];
passthru = {
inherit plugins;

View file

@ -13,11 +13,11 @@
stdenv.mkDerivation rec {
pname = "appflowy";
version = "0.4.1";
version = "0.4.3";
src = fetchzip {
url = "https://github.com/AppFlowy-IO/appflowy/releases/download/${version}/AppFlowy-${version}-linux-x86_64.tar.gz";
hash = "sha256-9wv7/3wtR1xiOHRYXP29Qbom1Xl9xZbhCFEPf0LJitg=";
hash = "sha256-JrcqVPlFr8zD9ZSBxk9WqN7KCLKq+yCjMfA4QbIfDZE=";
stripRoot = false;
};

View file

@ -6,14 +6,18 @@
python3.pkgs.buildPythonApplication rec {
pname = "snakemake";
version = "8.0.1";
version = "8.2.1";
format = "setuptools";
src = fetchFromGitHub {
owner = "snakemake";
repo = pname;
rev = "refs/tags/v${version}";
hash = "sha256-F4c/lgp7J6LLye+f3FpzaXz3zM7R+jXxTziPlVbxFxA=";
hash = "sha256-NpsDJuxH+NHvE735OCHaISPSOhYDxWiKqCb4Yk9DHf4=";
# https://github.com/python-versioneer/python-versioneer/issues/217
postFetch = ''
sed -i "$out"/snakemake/_version.py -e 's#git_refnames = ".*"#git_refnames = " (tag: v${version})"#'
'';
};
postPatch = ''

View file

@ -23,13 +23,13 @@
stdenv.mkDerivation rec {
pname = "advanced-scene-switcher";
version = "1.24.0";
version = "1.24.2";
src = fetchFromGitHub {
owner = "WarmUpTill";
repo = "SceneSwitcher";
rev = version;
hash = "sha256-Xnf8Vz6I5EfiiVoG0JRd0f0IJHw1IVkTLL4Th/hWYrc=";
hash = "sha256-J5Qcs2eoKMeO1O/MCsR5wfmfbtndRaZmHrbleEZqqOo=";
};
nativeBuildInputs = [

View file

@ -4,13 +4,13 @@ stdenv.mkDerivation rec {
pname = "obs-ndi";
version = "4.13.0";
nativeBuildInputs = [ cmake ];
nativeBuildInputs = [ cmake qtbase ];
buildInputs = [ obs-studio qtbase ndi ];
src = fetchFromGitHub {
owner = "Palakis";
repo = "obs-ndi";
rev = "dummy-tag-${version}";
rev = version;
sha256 = "sha256-ugAMSTXbbIZ61oWvoggVJ5kZEgp/waEcWt89AISrSdE=";
};
@ -19,8 +19,8 @@ stdenv.mkDerivation rec {
];
postPatch = ''
# Add path (variable added in hardcode-ndi-path.patch)
sed -i -e s,@NDI@,${ndi},g src/obs-ndi.cpp
# Add path (variable added in hardcode-ndi-path.patch)
sed -i -e s,@NDI@,${ndi},g src/plugin-main.cpp
# Replace bundled NDI SDK with the upstream version
# (This fixes soname issues)
@ -28,12 +28,7 @@ stdenv.mkDerivation rec {
ln -s ${ndi}/include lib/ndi
'';
postInstall = ''
mkdir $out/lib $out/share
mv $out/obs-plugins/64bit $out/lib/obs-plugins
rm -rf $out/obs-plugins
mv $out/data $out/share/obs
'';
cmakeFlags = [ "-DENABLE_QT=ON" ];
dontWrapQtApps = true;

View file

@ -1,19 +1,16 @@
diff --git a/src/obs-ndi.cpp b/src/obs-ndi.cpp
index 1a8aeb3..9a36ea9 100644
--- a/src/obs-ndi.cpp
+++ b/src/obs-ndi.cpp
@@ -132,13 +132,7 @@ const NDIlib_v5 *load_ndilib()
const char *redistFolder = std::getenv(NDILIB_REDIST_FOLDER);
if (redistFolder)
libraryLocations.push_back(redistFolder);
diff --git a/src/plugin-main.cpp b/src/plugin-main.cpp
index 0d94add..617af73 100644
--- a/src/plugin-main.cpp
+++ b/src/plugin-main.cpp
@@ -244,10 +244,7 @@ const NDIlib_v4 *load_ndilib()
if (!path.isEmpty()) {
locations << path;
}
-#if defined(__linux__) || defined(__APPLE__)
- libraryLocations.push_back("/usr/lib");
- libraryLocations.push_back("/usr/lib64");
- libraryLocations.push_back("/usr/lib/x86_64-linux-gnu");
- libraryLocations.push_back("/usr/local/lib");
- libraryLocations.push_back("/usr/local/lib64");
- locations << "/usr/lib";
- locations << "/usr/local/lib";
-#endif
+ libraryLocations.push_back("@NDI@/lib");
for (std::string path : libraryLocations) {
blog(LOG_DEBUG, "[load_ndilib] Trying library path: '%s'", path.c_str());
+ locations << "@NDI@/lib";
for (QString location : locations) {
path = QDir::cleanPath(
QDir(location).absoluteFilePath(NDILIB_LIBRARY_NAME));

View file

@ -13,10 +13,10 @@ let
}.${system} or throwSystem;
hash = {
x86_64-linux = "sha256-zJsgYjmnGT9Ye5hnhqtv5piGM1/HT+DFhVivKLlvE1Q=";
aarch64-linux = "sha256-RjIiSgSxkejS+Dun1xMCZ6C9SPH9AahudQMICH3thC0=";
x86_64-darwin = "sha256-PrfHusjA6o1L60eMblnydTKAYe8vKvK2W3jQZYp5dPc=";
aarch64-darwin = "sha256-LpyXsdjPpdoIqFzm3sLOlBBQdJgrNl8cPehNAVqFvXg=";
x86_64-linux = "sha256-vr/c7kYXoKlZh7+f1ZPHcmIGw0nB8x1wJt/iR2F9bQI=";
aarch64-linux = "sha256-mKLbxj5LSztjHtLWdZFlW4T6S+kN56SZnJNxKZDQIQ4=";
x86_64-darwin = "sha256-AllKEadf+1s3XGCXD0PRycvDUyYNL6HLaViBwwaYswU=";
aarch64-darwin = "sha256-6Pik3uYLfbeAW4Q4ZxJFt90IH+jhXWKY6kpDA6NAmaA=";
}.${system} or throwSystem;
bin = "$out/bin/codeium_language_server";
@ -24,7 +24,7 @@ let
in
stdenv.mkDerivation (finalAttrs: {
pname = "codeium";
version = "1.6.22";
version = "1.6.23";
src = fetchurl {
name = "${finalAttrs.pname}-${finalAttrs.version}.gz";
url = "https://github.com/Exafunction/codeium/releases/download/language-server-v${finalAttrs.version}/language_server_${plat}.gz";

View file

@ -5,16 +5,16 @@
buildGoModule rec {
pname = "flarectl";
version = "0.85.0";
version = "0.86.0";
src = fetchFromGitHub {
owner = "cloudflare";
repo = "cloudflare-go";
rev = "v${version}";
hash = "sha256-mXbWiHU28MlcYbS+RLHToJZpVMWsQ7qY6dAyY+ulwjw=";
hash = "sha256-BGjay9DTlIU563bCSjprq5YwF47Xqj+ZulCda5t2t5I=";
};
vendorHash = "sha256-v6xhhufqxfFvY3BpcM6Qvpljf/vE8ZwPG47zhx+ilb0=";
vendorHash = "sha256-Bn2SDvFWmmMYDpOe+KBuzyTZLpdDtYDPc8HixgEgX+M=";
subPackages = [ "cmd/flarectl" ];

View file

@ -0,0 +1,54 @@
{ lib
, fetchFromGitHub
, buildGoModule
, oath-toolkit
, openldap
}:
buildGoModule rec {
pname = "glauth";
version = "2.3.0";
src = fetchFromGitHub {
owner = "glauth";
repo = "glauth";
rev = "v${version}";
hash = "sha256-XYNNR3bVLNtAl+vbGRv0VhbLf+em8Ay983jqcW7KDFU=";
};
vendorHash = "sha256-SFmGgxDokIbVl3ANDPMCqrB0ck8Wyva2kSV2mgNRogo=";
nativeCheckInputs = [
oath-toolkit
openldap
];
modRoot = "v2";
# Disable go workspaces to fix build.
env.GOWORK = "off";
# Fix this build error:
# main module (github.com/glauth/glauth/v2) does not contain package github.com/glauth/glauth/v2/vendored/toml
excludedPackages = [ "vendored/toml" ];
# Based on ldflags in <glauth>/Makefile.
ldflags = [
"-s"
"-w"
"-X main.GitClean=1"
"-X main.LastGitTag=v${version}"
"-X main.GitTagIsCommit=1"
];
# Tests fail in the sandbox.
doCheck = false;
meta = with lib; {
description = "A lightweight LDAP server for development, home use, or CI";
homepage = "https://github.com/glauth/glauth";
license = licenses.mit;
maintainers = with maintainers; [ bjornfor ];
mainProgram = "glauth";
};
}

View file

@ -0,0 +1,61 @@
{ lib
, cargo
, desktop-file-utils
, fetchFromGitLab
, glib
, gtk4
, libadwaita
, meson
, ninja
, pkg-config
, rustPlatform
, rustc
, stdenv
, wrapGAppsHook4
}:
stdenv.mkDerivation rec {
pname = "lorem";
version = "1.3";
src = fetchFromGitLab {
domain = "gitlab.gnome.org";
owner = "World/design";
repo = pname;
rev = version;
hash = "sha256-+Dp/o1rZSHWihLLLe6CzV6c7uUnSsE8Ct3tbLNqlGF0=";
};
cargoDeps = rustPlatform.fetchCargoTarball {
inherit src;
name = "${pname}-${version}";
hash = "sha256-YYjPhlPp211i+ECPu1xgDumz8nVqWRO8YzcZXy8uunI=";
};
nativeBuildInputs = [
cargo
desktop-file-utils
meson
ninja
pkg-config
rustPlatform.cargoSetupHook
rustc
wrapGAppsHook4
];
buildInputs = [
glib
gtk4
libadwaita
];
meta = with lib; {
description = "Generate placeholder text";
homepage = "https://gitlab.gnome.org/World/design/lorem";
changelog = "https://gitlab.gnome.org/World/design/lorem/-/releases/${version}";
license = licenses.gpl3Plus;
maintainers = with maintainers; [ michaelgrahamevans ];
mainProgram = "lorem";
platforms = platforms.linux;
};
}

View file

@ -1,6 +1,7 @@
{ lib
, buildGoModule
, fetchFromGitHub
, fetchpatch
, nix-update-script
}:
@ -15,6 +16,14 @@ buildGoModule rec {
hash = "sha256-crWepf5j3Gd1lhya2DGIh/to7l+AnjKJPR+qUd9WOzw=";
};
patches = [
# create migration touch file; remove when version > 0.4.0
(fetchpatch {
url = "https://github.com/lxc/incus/commit/edc5fd2a9baccfb7b6814a440e2947cbb580afcf.diff";
hash = "sha256-ffQfMFrKDPuLU4jVbG/VGHSO3DmeHw30ATJ8yxJAoHQ=";
})
];
modRoot = "cmd/lxd-to-incus";
vendorHash = "sha256-cBAqJz3Y4CqyxTt7u/4mXoQPKmKgQ3gYJV1NiC/H+TA=";

View file

@ -405,6 +405,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3362992a0d9f1dd7c3d0e89e0ab2bb540b7a95fea8cd798090e758fda2899b5e"
dependencies = [
"codespan-reporting",
"serde",
]
[[package]]
@ -424,6 +425,7 @@ version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e"
dependencies = [
"serde",
"termcolor",
"unicode-width",
]
@ -1642,7 +1644,7 @@ dependencies = [
[[package]]
name = "nickel-lang-cli"
version = "1.3.0"
version = "1.4.0"
dependencies = [
"clap 4.4.7",
"clap_complete",
@ -1660,7 +1662,7 @@ dependencies = [
[[package]]
name = "nickel-lang-core"
version = "0.3.0"
version = "0.4.0"
dependencies = [
"ansi_term",
"assert_matches",
@ -1706,7 +1708,7 @@ dependencies = [
"toml",
"topiary",
"topiary-queries",
"tree-sitter-nickel 0.1.0",
"tree-sitter-nickel",
"typed-arena",
"unicode-segmentation",
"void",
@ -1715,7 +1717,7 @@ dependencies = [
[[package]]
name = "nickel-lang-lsp"
version = "1.3.0"
version = "1.4.0"
dependencies = [
"anyhow",
"assert_cmd",
@ -1760,7 +1762,7 @@ dependencies = [
[[package]]
name = "nickel-wasm-repl"
version = "0.3.0"
version = "0.4.0"
dependencies = [
"nickel-lang-core",
]
@ -2106,7 +2108,7 @@ dependencies = [
[[package]]
name = "pyckel"
version = "1.3.0"
version = "1.4.0"
dependencies = [
"codespan-reporting",
"nickel-lang-core",
@ -2984,8 +2986,8 @@ dependencies = [
[[package]]
name = "topiary"
version = "0.2.3"
source = "git+https://github.com/tweag/topiary.git?rev=8299a04bf83c4a2774cbbff7a036c022efa939b3#8299a04bf83c4a2774cbbff7a036c022efa939b3"
version = "0.3.0"
source = "git+https://github.com/tweag/topiary.git?rev=9ae9ef49c2fa968d15107b817864ff6627e0983e#9ae9ef49c2fa968d15107b817864ff6627e0983e"
dependencies = [
"clap 4.4.7",
"futures",
@ -3001,7 +3003,7 @@ dependencies = [
"tree-sitter-bash",
"tree-sitter-facade",
"tree-sitter-json",
"tree-sitter-nickel 0.0.1",
"tree-sitter-nickel",
"tree-sitter-ocaml",
"tree-sitter-ocamllex",
"tree-sitter-query",
@ -3013,8 +3015,8 @@ dependencies = [
[[package]]
name = "topiary-queries"
version = "0.2.3"
source = "git+https://github.com/tweag/topiary.git?rev=8299a04bf83c4a2774cbbff7a036c022efa939b3#8299a04bf83c4a2774cbbff7a036c022efa939b3"
version = "0.3.0"
source = "git+https://github.com/tweag/topiary.git?rev=9ae9ef49c2fa968d15107b817864ff6627e0983e#9ae9ef49c2fa968d15107b817864ff6627e0983e"
[[package]]
name = "tree-sitter"
@ -3056,20 +3058,10 @@ dependencies = [
"tree-sitter",
]
[[package]]
name = "tree-sitter-nickel"
version = "0.0.1"
source = "git+https://github.com/nickel-lang/tree-sitter-nickel?rev=b1a4718601ebd29a62bf3a7fd1069a99ccf48093#b1a4718601ebd29a62bf3a7fd1069a99ccf48093"
dependencies = [
"cc",
"tree-sitter",
]
[[package]]
name = "tree-sitter-nickel"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e95267764f0648c768e4da3e4c31b96bc5716446497dfa8b6296924b149f64a"
source = "git+https://github.com/nickel-lang/tree-sitter-nickel?rev=091b5dcc7d138901bcc162da9409c0bb626c0d27#091b5dcc7d138901bcc162da9409c0bb626c0d27"
dependencies = [
"cc",
"tree-sitter",
@ -3078,7 +3070,7 @@ dependencies = [
[[package]]
name = "tree-sitter-ocaml"
version = "0.20.4"
source = "git+https://github.com/tree-sitter/tree-sitter-ocaml.git#694c57718fd85d514f8b81176038e7a4cfabcaaf"
source = "git+https://github.com/tree-sitter/tree-sitter-ocaml.git#4abfdc1c7af2c6c77a370aee974627be1c285b3b"
dependencies = [
"cc",
"tree-sitter",
@ -3105,7 +3097,7 @@ dependencies = [
[[package]]
name = "tree-sitter-rust"
version = "0.20.4"
source = "git+https://github.com/tree-sitter/tree-sitter-rust.git#48e053397b587de97790b055a1097b7c8a4ef846"
source = "git+https://github.com/tree-sitter/tree-sitter-rust.git#79456e6080f50fc1ca7c21845794308fa5d35a51"
dependencies = [
"cc",
"tree-sitter",
@ -3197,9 +3189,9 @@ checksum = "e1766d682d402817b5ac4490b3c3002d91dfa0d22812f341609f97b08757359c"
[[package]]
name = "unsafe-libyaml"
version = "0.2.9"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f28467d3e1d3c6586d8f25fa243f544f5800fec42d97032474e17222c2b75cfa"
checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b"
[[package]]
name = "url"
@ -3566,18 +3558,18 @@ checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec"
[[package]]
name = "zerocopy"
version = "0.7.18"
version = "0.7.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ede7d7c7970ca2215b8c1ccf4d4f354c4733201dfaaba72d44ae5b37472e4901"
checksum = "1c4061bedbb353041c12f413700357bec76df2c7e2ca8e4df8bac24c6bf68e3d"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.7.18"
version = "0.7.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b27b1bb92570f989aac0ab7e9cbfbacdd65973f7ee920d9f0e71ebac878fd0b"
checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a"
dependencies = [
"proc-macro2 1.0.69",
"quote 1.0.33",

View file

@ -8,28 +8,28 @@
rustPlatform.buildRustPackage rec {
pname = "nickel";
version = "1.3.0";
version = "1.4.0";
src = fetchFromGitHub {
owner = "tweag";
repo = "nickel";
rev = "refs/tags/${version}";
hash = "sha256-MBonps3yFEpw9l3EAJ6BXNNjY2fUGzWCP+7h0M8LEAY=";
hash = "sha256-YPS+Szj0T8mbcrYBdAuoQupv1x0EIq4rFS2Wk5oYVsY=";
};
cargoLock = {
lockFile = ./Cargo.lock;
outputHashes = {
"topiary-0.2.3" = "sha256-EgDFjJeGJb36je/be7DXvzvpBYDUaupOiQxtL7bN/+Q=";
"topiary-0.3.0" = "sha256-1leQLRohX0iDiOOO96ETM2L3yOElW8OwR5IcrsoxfOo=";
"tree-sitter-bash-0.20.4" = "sha256-VP7rJfE/k8KV1XN1w5f0YKjCnDMYU1go/up0zj1mabM=";
"tree-sitter-facade-0.9.3" = "sha256-M/npshnHJkU70pP3I4WMXp3onlCSWM5mMIqXP45zcUs=";
"tree-sitter-nickel-0.0.1" = "sha256-aYsEx1Y5oDEqSPCUbf1G3J5Y45ULT9OkD+fn6stzrOU=";
"tree-sitter-nickel-0.1.0" = "sha256-HyHdameEgET5UXKMgw7EJvZsJxToc9Qz26XHvc5qmU0=";
"tree-sitter-query-0.1.0" = "sha256-5N7FT0HTK3xzzhAlk3wBOB9xlEpKSNIfakgFnsxEi18=";
"tree-sitter-json-0.20.1" = "sha256-Msnct7JzPBIR9+PIBZCJTRdVMUzhaDTKkl3JaDUKAgo=";
"tree-sitter-ocaml-0.20.4" = "sha256-j3Hv2qOMxeBNOW+WIgIYzG3zMIFWPQpoHe94b2rT+A8=";
"tree-sitter-ocaml-0.20.4" = "sha256-ycmjIKfrsVSVHmPP3HCxfk5wcBIF/JFH8OnU8mY1Cc8=";
"tree-sitter-ocamllex-0.20.2" = "sha256-YhmEE7I7UF83qMuldHqc/fD/no/7YuZd6CaAIaZ1now=";
"tree-sitter-toml-0.5.1" = "sha256-5nLNBxFeOGE+gzbwpcrTVnuL1jLUA0ZLBVw2QrOLsDQ=";
"tree-sitter-rust-0.20.4" = "sha256-ht0l1a3esvBbVHNbUosItmqxwL7mDp+QyhIU6XTUiEk=";
"tree-sitter-rust-0.20.4" = "sha256-57CuGp7gP+AVYIR3HbMXnmmSAbtlpWrOHRYpMbmWfds=";
"web-tree-sitter-sys-1.3.0" = "sha256-9rKB0rt0y9TD/HLRoB9LjEP9nO4kSWR9ylbbOXo2+2M=";
};

View file

@ -42,6 +42,12 @@ stdenv.mkDerivation rec {
dpkg -x $src $out
cp -av $out/opt/ocenaudio/* $out
rm -rf $out/opt
mv $out/usr/share $out/share
rm -rf $out/usr
substituteInPlace $out/share/applications/ocenaudio.desktop \
--replace "/opt/ocenaudio/bin/ocenaudio" "ocenaudio"
mkdir -p $out/share/licenses/ocenaudio
mv $out/bin/ocenaudio_license.txt $out/share/licenses/ocenaudio/LICENSE
# Create symlink bzip2 library
ln -s ${bzip2.out}/lib/libbz2.so.1 $out/lib/libbz2.so.1.0

View file

@ -0,0 +1,52 @@
{ autoreconfHook
, fetchFromGitHub
, fftwMpi
, lib
, llvmPackages
, mpi
, precision ? "double"
, stdenv
}:
assert lib.elem precision [ "single" "double" "long-double" ];
let
fftw' = fftwMpi.override { inherit precision; };
in
stdenv.mkDerivation (finalAttrs: {
pname = "pfft-${precision}";
version = "1.0.8-alpha";
src = fetchFromGitHub {
owner = "mpip";
repo = "pfft";
rev = "v${finalAttrs.version}";
hash = "sha256-T5nPlkPKjYYRCuT1tSzXNJTPs/o6zwJMv9lPCWOwabw=";
};
outputs = [ "out" "dev" ];
nativeBuildInputs = [ autoreconfHook ];
preConfigure = ''
export FCFLAGS="-I${lib.getDev fftw'}/include"
'';
configureFlags = [
"--enable-portable-binary"
] ++ lib.optional (precision != "double") "--enable-${precision}";
buildInputs = lib.optional stdenv.cc.isClang llvmPackages.openmp;
propagatedBuildInputs = [ fftw' mpi ];
doCheck = true;
meta = {
description = "Parallel fast Fourier transforms";
homepage = "https://www-user.tu-chemnitz.de/~potts/workgroup/pippig/software.php.en#pfft";
license = lib.licenses.gpl3Plus;
maintainers = with lib.maintainers; [ hmenke ];
platforms = lib.platforms.linux;
};
})
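A short usage sketch for the precision parameter introduced above, following the same override pattern the pnfft derivation below uses; the attribute name pfftSingle is hypothetical:
# Hypothetical overlay fragment; only the `precision` argument comes from the derivation above.
pfftSingle = pfft.override { precision = "single"; };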

View file

@ -0,0 +1,53 @@
{ autoreconfHook
, fetchurl
, fftwMpi
, gsl
, lib
, llvmPackages
, pfft
, precision ? "double"
, stdenv
}:
assert lib.elem precision [ "single" "double" "long-double" ];
let
fftw' = fftwMpi.override { inherit precision; };
pfft' = pfft.override { inherit precision; };
in
stdenv.mkDerivation (finalAttrs: {
pname = "pnfft-${precision}";
version = "1.0.7-alpha";
src = fetchurl {
url = "https://www-user.tu-chemnitz.de/~potts/workgroup/pippig/software/pnfft-${finalAttrs.version}.tar.gz";
hash = "sha256-/aVY/1fuMRl1Q2O7bmc5M4aA0taGD+fcQgCdhVYr1no=";
};
outputs = [ "out" "dev" ];
nativeBuildInputs = [ autoreconfHook ];
preConfigure = ''
export FCFLAGS="-I${lib.getDev fftw'}/include -I${lib.getDev pfft'}/include"
'';
configureFlags = [
"--enable-threads"
"--enable-portable-binary"
] ++ lib.optional (precision != "double") "--enable-${precision}";
buildInputs = [ gsl ] ++ lib.optional stdenv.cc.isClang llvmPackages.openmp;
propagatedBuildInputs = [ pfft' ];
doCheck = true;
meta = {
description = "Parallel nonequispaced fast Fourier transforms";
homepage = "https://www-user.tu-chemnitz.de/~potts/workgroup/pippig/software.php.en#pnfft";
license = lib.licenses.gpl3Plus;
maintainers = with lib.maintainers; [ hmenke ];
platforms = lib.platforms.linux;
};
})

View file

@ -0,0 +1,12 @@
diff --git a/src/poptracker.cpp b/src/poptracker.cpp
index dbf477b..6ccfac2 100644
--- a/src/poptracker.cpp
+++ b/src/poptracker.cpp
@@ -217,6 +217,7 @@ PopTracker::PopTracker(int argc, char** argv, bool cli, const json& args)
Pack::addOverrideSearchPath(os_pathcat(appPath, "user-override")); // portable/system overrides
Assets::addSearchPath(os_pathcat(appPath, "assets")); // system assets
}
+ Assets::addSearchPath("@assets@");
_asio = new asio::io_service();
HTTP::certfile = asset("cacert.pem"); // https://curl.se/docs/caextract.html

View file

@ -0,0 +1,74 @@
{ lib
, stdenv
, fetchFromGitHub
, util-linux
, SDL2
, SDL2_ttf
, SDL2_image
, openssl
, which
, libsForQt5
, makeWrapper
}:
stdenv.mkDerivation (finalAttrs: {
pname = "poptracker";
version = "0.25.7";
src = fetchFromGitHub {
owner = "black-sliver";
repo = "PopTracker";
rev = "v${finalAttrs.version}";
hash = "sha256-wP2d8cWNg80KUyw1xPQMriNRg3UyXgKaSoJ17U5vqCE=";
fetchSubmodules = true;
};
patches = [ ./assets-path.diff ];
postPatch = ''
substituteInPlace src/poptracker.cpp --replace "@assets@" "$out/share/$pname/"
'';
enableParallelBuilding = true;
nativeBuildInputs = [
util-linux
makeWrapper
];
buildInputs = [
SDL2
SDL2_ttf
SDL2_image
openssl
];
buildFlags = [
"native"
"CONF=RELEASE"
"VERSION=v${finalAttrs.version}"
];
installPhase = ''
runHook preInstall
install -m555 -Dt $out/bin build/linux-x86_64/poptracker
install -m444 -Dt $out/share/${finalAttrs.pname} assets/*
wrapProgram $out/bin/poptracker --prefix PATH : ${lib.makeBinPath [ which libsForQt5.kdialog ]}
runHook postInstall
'';
meta = with lib; {
description = "Scriptable tracker for randomized games";
longDescription = ''
Universal, scriptable randomizer tracking solution that is open source. Supports auto-tracking.
PopTracker packs should be placed in `~/PopTracker/packs` or `./packs`.
'';
homepage = "https://github.com/black-sliver/PopTracker";
changelog = "https://github.com/black-sliver/PopTracker/releases/tag/v${finalAttrs.version}";
license = licenses.gpl3Only;
maintainers = with maintainers; [ freyacodes ];
mainProgram = "poptracker";
platforms = [ "x86_64-linux" ];
};
})

View file

@ -6,17 +6,17 @@
}:
buildGoModule rec {
pname = "rimgo";
version = "1.2.1";
version = "1.2.3";
src = fetchFromGitea {
domain = "codeberg.org";
owner = "rimgo";
repo = "rimgo";
rev = "v${version}";
hash = "sha256-C6xixULZCDs+rIP7IWBVQNo34Yk/8j9ell2D0nUoHBg=";
hash = "sha256-nokXM+lnTiaWKwglmFYLBpnGHJn1yFok76tqb0nulVA=";
};
vendorHash = "sha256-u5N7aI9RIQ3EmiyHv0qhMcKkvmpp+5G7xbzdQcbhybs=";
vendorHash = "sha256-wDTSqfp1Bb1Jb9XX3A3/p5VUcjr5utpe6l/3pXfZpsg=";
nativeBuildInputs = [ tailwindcss ];

View file

@ -0,0 +1,40 @@
{ lib
, stdenv
, fetchFromGitHub
, libbsd
}:
stdenv.mkDerivation {
pname = "sha2wordlist";
version = "unstable-2023-02-20";
src = fetchFromGitHub {
owner = "kirei";
repo = "sha2wordlist";
rev = "2017b7ac786cfb5ad7f35f3f9068333b426d65f7";
hash = "sha256-A5KIXvwllzUcUm52lhw0QDjhEkCVTcbLQGFZWmHrFpU=";
};
postPatch = ''
substituteInPlace Makefile \
--replace "gcc" "$CC"
'';
buildInputs = [
libbsd
];
installPhase = ''
mkdir -p $out/bin
install -m 755 sha2wordlist $out/bin
'';
meta = with lib; {
description = "Display SHA-256 as PGP words";
homepage = "https://github.com/kirei/sha2wordlist";
maintainers = with maintainers; [ baloo ];
license = [ licenses.bsd2 ];
platforms = platforms.all;
mainProgram = "sha2wordlist";
};
}

View file

@ -11,18 +11,18 @@
buildGoModule rec {
pname = "usql";
version = "0.17.4";
version = "0.17.5";
src = fetchFromGitHub {
owner = "xo";
repo = "usql";
rev = "v${version}";
hash = "sha256-mEx0RMfPNRvsgjVcZDTzr74G7l5C8UcTZ15INNX4Kuo=";
hash = "sha256-Lh5CProffPB/GEYvU1h7St8zgmnS1QOjBgvdUXlsGzc=";
};
buildInputs = [ unixODBC icu ];
vendorHash = "sha256-zVSgrlTWDaN5uhA0iTcYMer4anly+m0BRTa6uuiLIjk=";
vendorHash = "sha256-IdqSTwQeMRjB5sE53VvTVAXPyIyN+pMj4XziIT31rV0=";
proxyVendor = true;
# Exclude broken genji, hive & impala drivers (bad group)

View file

@ -34,6 +34,7 @@ let
#### Services
biometryd = callPackage ./services/biometryd { };
content-hub = callPackage ./services/content-hub { };
hfd-service = callPackage ./services/hfd-service { };
history-service = callPackage ./services/history-service { };
lomiri-download-manager = callPackage ./services/lomiri-download-manager { };

View file

@ -0,0 +1,179 @@
{ stdenv
, lib
, fetchFromGitLab
, fetchpatch
, fetchpatch2
, gitUpdater
, testers
, cmake
, cmake-extras
, dbus-test-runner
, gettext
, glib
, gsettings-qt
, gtest
, libapparmor
, libnotify
, lomiri-api
, lomiri-app-launch
, lomiri-download-manager
, lomiri-ui-toolkit
, pkg-config
, properties-cpp
, qtbase
, qtdeclarative
, qtfeedback
, qtgraphicaleffects
, wrapGAppsHook
, xvfb-run
}:
stdenv.mkDerivation (finalAttrs: {
pname = "content-hub";
version = "1.1.0";
src = fetchFromGitLab {
owner = "ubports";
repo = "development/core/content-hub";
rev = finalAttrs.version;
hash = "sha256-IntEpgPCBmOL6K6TU+UhgGb6OHVA9pYurK5VN3woIIw=";
};
outputs = [
"out"
"dev"
"examples"
];
patches = [
# Remove when https://gitlab.com/ubports/development/core/content-hub/-/merge_requests/33 merged & in release
(fetchpatch {
name = "0001-content-hub-Migrate-to-GetConnectionCredentials.patch";
url = "https://gitlab.com/ubports/development/core/content-hub/-/commit/9c0eae42d856b4b6e24fa609ade0e674c7a84cfe.patch";
hash = "sha256-IWoCQKSCCk26n7133oG0Ht+iEjavn/IiOVUM+tCLX2U=";
})
# Remove when https://gitlab.com/ubports/development/core/content-hub/-/merge_requests/34 merged & in release
(fetchpatch {
name = "0002-content-hub-import-Lomiri-Content-CMakeLists-Drop-qt-argument-to-qmlplugindump.patch";
url = "https://gitlab.com/ubports/development/core/content-hub/-/commit/63a4baf1469de31c4fd50c69ed85d061f5e8e80a.patch";
hash = "sha256-T+6T9lXne6AhDFv9d7L8JNwdl8f0wjDmvSoNVPkHza4=";
})
# Remove when https://gitlab.com/ubports/development/core/content-hub/-/merge_requests/35 merged & in release
# fetchpatch2 due to renames, https://github.com/NixOS/nixpkgs/issues/32084
(fetchpatch2 {
name = "0003-content-hub-Add-more-better-GNUInstallDirs-variables-usage.patch";
url = "https://gitlab.com/ubports/development/core/content-hub/-/commit/3c5ca4a8ec125e003aca78c14521b70140856c25.patch";
hash = "sha256-kYN0eLwMyM/9yK+zboyEsoPKZMZ4SCXodVYsvkQr2F8=";
})
];
postPatch = ''
substituteInPlace import/*/Content/CMakeLists.txt \
--replace "\''${CMAKE_INSTALL_LIBDIR}/qt5/qml" "\''${CMAKE_INSTALL_PREFIX}/${qtbase.qtQmlPrefix}"
# Look for peer files in running system
substituteInPlace src/com/lomiri/content/service/registry-updater.cpp \
--replace '/usr' '/run/current-system/sw'
# Don't override default theme search path (which honours XDG_DATA_DIRS) with a FHS assumption
substituteInPlace import/Lomiri/Content/contenthubplugin.cpp \
--replace 'QIcon::setThemeSearchPaths(QStringList() << ("/usr/share/icons/"));' ""
'';
strictDeps = true;
nativeBuildInputs = [
cmake
gettext
pkg-config
qtdeclarative # qmlplugindump
wrapGAppsHook
];
buildInputs = [
cmake-extras
glib
gsettings-qt
libapparmor
libnotify
lomiri-api
lomiri-app-launch
lomiri-download-manager
lomiri-ui-toolkit
properties-cpp
qtbase
qtdeclarative
qtfeedback
qtgraphicaleffects
];
nativeCheckInputs = [
dbus-test-runner
xvfb-run
];
checkInputs = [
gtest
];
dontWrapQtApps = true;
cmakeFlags = [
(lib.cmakeBool "GSETTINGS_COMPILE" true)
(lib.cmakeBool "GSETTINGS_LOCALINSTALL" true)
(lib.cmakeBool "ENABLE_TESTS" finalAttrs.finalPackage.doCheck)
(lib.cmakeBool "ENABLE_DOC" false) # needs Qt5 qdoc: https://github.com/NixOS/nixpkgs/pull/245379
(lib.cmakeBool "ENABLE_UBUNTU_COMPAT" true) # in case something still depends on it
];
preBuild = let
listToQtVar = list: suffix: lib.strings.concatMapStringsSep ":" (drv: "${lib.getBin drv}/${suffix}") list;
in ''
# Executes qmlplugindump
export QT_PLUGIN_PATH=${listToQtVar [ qtbase ] qtbase.qtPluginPrefix}
export QML2_IMPORT_PATH=${listToQtVar [ qtdeclarative lomiri-ui-toolkit qtfeedback qtgraphicaleffects ] qtbase.qtQmlPrefix}
'';
doCheck = stdenv.buildPlatform.canExecute stdenv.hostPlatform;
# Starts & talks to D-Bus services, breaks under parallelism
enableParallelChecking = false;
preFixup = ''
for exampleExe in content-hub-test-{importer,exporter,sharer}; do
moveToOutput bin/$exampleExe $examples
moveToOutput share/applications/$exampleExe.desktop $examples
done
moveToOutput share/icons $examples
'';
postFixup = ''
for exampleBin in $examples/bin/*; do
wrapGApp $exampleBin
done
'';
passthru = {
tests.pkg-config = testers.testMetaPkgConfig finalAttrs.finalPackage;
updateScript = gitUpdater { };
};
meta = with lib; {
description = "Content sharing/picking service";
longDescription = ''
content-hub is a mediation service to let applications share content between them,
even if they are not running at the same time.
'';
homepage = "https://gitlab.com/ubports/development/core/content-hub";
license = with licenses; [ gpl3Only lgpl3Only ];
mainProgram = "content-hub-service";
maintainers = teams.lomiri.members;
platforms = platforms.linux;
pkgConfigModules = [
"libcontent-hub"
"libcontent-hub-glib"
];
};
})

View file

@ -17,10 +17,10 @@
mkXfceDerivation {
category = "panel-plugins";
pname = "xfce4-whiskermenu-plugin";
version = "2.8.2";
version = "2.8.3";
rev-prefix = "v";
odd-unstable = false;
sha256 = "sha256-v1YvmdL1AUyzJjbU9/yIYAAuQfbVlJCcdagM5yhKMuU=";
sha256 = "sha256-xRLvjRu/I+wsTWXUhrJUcrQz+JkZCYqoJSqYAYOztgg=";
nativeBuildInputs = [
cmake

View file

@ -12,13 +12,13 @@
rustPlatform.buildRustPackage rec {
pname = "gleam";
version = "0.33.0";
version = "0.34.0";
src = fetchFromGitHub {
owner = "gleam-lang";
repo = pname;
rev = "refs/tags/v${version}";
hash = "sha256-fAI4GKdMg2FlNLqXtqAEpmvi63RApRZdQEWPqEf+Dyw=";
hash = "sha256-cqJNNSN3x2tr6/i7kXAlvIaU9SfyPWBE4c6twc/p1lY=";
};
nativeBuildInputs = [ git pkg-config ];
@ -26,7 +26,7 @@ rustPlatform.buildRustPackage rec {
buildInputs = [ openssl ] ++
lib.optionals stdenv.isDarwin [ Security SystemConfiguration ];
cargoHash = "sha256-Ogjt6lIOvoTPWQhtNFqMgACNrH/27+8JRDlFb//9oUg=";
cargoHash = "sha256-mCMfVYbpUik8oc7TLLAXPBmBUchy+quAZLmd9pqCZ7Y=";
passthru.updateScript = nix-update-script { };

View file

@ -8,25 +8,25 @@ let
glslang = fetchFromGitHub {
owner = "KhronosGroup";
repo = "glslang";
rev = "728c689574fba7e53305b475cd57f196c1a21226";
hash = "sha256-BAgDQosiO3e4yy2DpQ6SjrJNrHTUDSduHFRvzWvd4v0=";
rev = "a91631b260cba3f22858d6c6827511e636c2458a";
hash = "sha256-7kIIU45pe+IF7lGltpIKSvQBmcXR+TWFvmx7ztMNrpc=";
};
spirv-tools = fetchFromGitHub {
owner = "KhronosGroup";
repo = "SPIRV-Tools";
rev = "d9446130d5165f7fafcb3599252a22e264c7d4bd";
hash = "sha256-fuYhzfkWXDm1icLHifc32XZCNQ6Dj5f5WJslT2JoMbc=";
rev = "f0cc85efdbbe3a46eae90e0f915dc1509836d0fc";
hash = "sha256-RzGvoDt1Qc+f6mZsfs99MxX4YB3yFc5FP92Yx/WGrsI=";
};
spirv-headers = fetchFromGitHub {
owner = "KhronosGroup";
repo = "SPIRV-Headers";
rev = "c214f6f2d1a7253bb0e9f195c2dc5b0659dc99ef";
hash = "sha256-/9EDOiqN6ZzDhRKP/Kv8D/BT2Cs7G8wyzEsGATLpmrA=";
rev = "1c6bb2743599e6eb6f37b2969acc0aef812e32e3";
hash = "sha256-/I9dJlBE0kvFvqooKuqMETtOE72Jmva3zIGnq0o4+aE=";
};
in
stdenv.mkDerivation rec {
pname = "shaderc";
version = "2022.4";
version = "2023.8";
outputs = [ "out" "lib" "bin" "dev" "static" ];
@ -34,13 +34,14 @@ stdenv.mkDerivation rec {
owner = "google";
repo = "shaderc";
rev = "v${version}";
hash = "sha256-/p2gJ7Lnh8IfvwBwHPDtmfLJ8j+Rbv+Oxu9lxY6fxfk=";
hash = "sha256-c8mJ361DY2VlSFZ4/RCrV+nqB9HblbOdfMkI4cM1QzM=";
};
patchPhase = ''
cp -r --no-preserve=mode ${glslang} third_party/glslang
cp -r --no-preserve=mode ${spirv-tools} third_party/spirv-tools
ln -s ${spirv-headers} third_party/spirv-tools/external/spirv-headers
patchShebangs --build utils/
'';
nativeBuildInputs = [ cmake python3 ]

View file

@ -3,6 +3,8 @@
, python3
}:
let cln' = cln.override { gccStdenv = stdenv; }; in
stdenv.mkDerivation rec {
pname = "cvc4";
version = "1.6";
@ -15,7 +17,7 @@ stdenv.mkDerivation rec {
# Build fails with GNUmake 4.4
nativeBuildInputs = [ autoreconfHook gnumake42 pkg-config ];
buildInputs = [ gmp swig libantlr3c boost python3 ]
++ lib.optionals stdenv.isLinux [ cln ];
++ lib.optionals stdenv.isLinux [ cln' ];
configureFlags = [
"--enable-language-bindings=c"

View file

@ -300,8 +300,8 @@ in {
};
ruby_3_2 = generic {
version = rubyVersion "3" "2" "2" "";
hash = "sha256-lsV1WIcaZ0jeW8nydOk/S1qtBs2PN776Do2U57ikI7w=";
version = rubyVersion "3" "2" "3" "";
hash = "sha256-r38XV9ndtjA0WYgTkhHx/VcP9bqDDe8cx8Rorptlybo=";
cargoHash = "sha256-6du7RJo0DH+eYMOoh3L31F3aqfR5+iG1iKauSV1uNcQ=";
};

View file

@ -1,20 +1,15 @@
{ lib, stdenv, fetchurl
, pkg-config
, SDL2, libpng, libjpeg, libtiff, giflib, libwebp, libXpm, zlib, Foundation
, version ? "2.8.2"
, hash ? "sha256-j0hrv7z4Rk3VjJ5dkzlKsCVc5otRxalmqRgkSCCnbdw="
}:
let
stdenv.mkDerivation (finalAttrs: {
pname = "SDL2_image";
in
stdenv.mkDerivation {
inherit pname version;
version = "2.8.2";
src = fetchurl {
url = "https://www.libsdl.org/projects/SDL_image/release/${pname}-${version}.tar.gz";
inherit hash;
url = "https://www.libsdl.org/projects/SDL_image/release/SDL2_image-${finalAttrs.version}.tar.gz";
hash = "sha256-j0hrv7z4Rk3VjJ5dkzlKsCVc5otRxalmqRgkSCCnbdw=";
};
nativeBuildInputs = [ pkg-config ];
@ -44,4 +39,4 @@ stdenv.mkDerivation {
license = licenses.zlib;
maintainers = with maintainers; [ cpages ];
};
}
})

View file

@ -4,13 +4,13 @@
stdenv.mkDerivation rec {
pname = "faudio";
version = "23.12";
version = "24.01";
src = fetchFromGitHub {
owner = "FNA-XNA";
repo = "FAudio";
rev = version;
sha256 = "sha256-bftS5gcIzvJlv9K2hKIIXl5lzP4RVwSK5/kxpQrJe/A=";
sha256 = "sha256-9/hgGrMtEz2CXZUPVMT1aSwDMlb+eQ9soTp1X1uME7I=";
};
nativeBuildInputs = [cmake];

View file

@ -4,11 +4,11 @@
, gitUpdater
, testers
, cmake
, cmake-extras
, glib
, gobject-introspection
, gtest
, intltool
, lomiri
, pkg-config
, systemd
, vala
@ -28,7 +28,7 @@ stdenv.mkDerivation (finalAttrs: {
postPatch = ''
# Queries via pkg_get_variable, can't override prefix
substituteInPlace data/CMakeLists.txt \
--replace 'DESTINATION "''${SYSTEMD_USER_UNIT_DIR}"' 'DESTINATION "${placeholder "out"}/lib/systemd/user"'
--replace 'pkg_get_variable(SYSTEMD_USER_UNIT_DIR systemd systemd_user_unit_dir)' 'set(SYSTEMD_USER_UNIT_DIR ''${CMAKE_INSTALL_PREFIX}/lib/systemd/user)'
'';
strictDeps = true;
@ -42,8 +42,9 @@ stdenv.mkDerivation (finalAttrs: {
];
buildInputs = [
cmake-extras
lomiri.cmake-extras
glib
lomiri.lomiri-url-dispatcher
systemd
];
@ -53,7 +54,7 @@ stdenv.mkDerivation (finalAttrs: {
cmakeFlags = [
"-DENABLE_TESTS=${lib.boolToString finalAttrs.finalPackage.doCheck}"
"-DENABLE_LOMIRI_FEATURES=OFF"
"-DENABLE_LOMIRI_FEATURES=ON"
"-DGSETTINGS_LOCALINSTALL=ON"
"-DGSETTINGS_COMPILE=ON"
];

View file

@ -2,6 +2,12 @@
let
versionJSON = lib.importJSON ./version.json;
ndiPlatform =
if stdenv.isAarch64 then "aarch64-rpi4-linux-gnueabi"
else if stdenv.isAarch32 then "arm-rpi2-linux-gnueabihf"
else if stdenv.isx86_64 then "x86_64-linux-gnu"
else if stdenv.isi686 then "i686-linux-gnu"
else throw "unsupported platform for NDI SDK";
in
stdenv.mkDerivation rec {
pname = "ndi";
@ -35,12 +41,13 @@ stdenv.mkDerivation rec {
installPhase = ''
mkdir $out
mv bin/x86_64-linux-gnu $out/bin
mv bin/${ndiPlatform} $out/bin
for i in $out/bin/*; do
if [ -L "$i" ]; then continue; fi
patchelf --set-interpreter "$(cat $NIX_CC/nix-support/dynamic-linker)" "$i"
done
patchelf --set-rpath "${avahi}/lib:${stdenv.cc.libc}/lib" $out/bin/ndi-record
mv lib/x86_64-linux-gnu $out/lib
mv lib/${ndiPlatform} $out/lib
for i in $out/lib/*; do
if [ -L "$i" ]; then continue; fi
patchelf --set-rpath "${avahi}/lib:${stdenv.cc.libc}/lib" "$i"
@ -48,7 +55,6 @@ stdenv.mkDerivation rec {
mv include examples $out/
mkdir -p $out/share/doc/${pname}-${version}
mv licenses $out/share/doc/${pname}-${version}/licenses
mv logos $out/share/doc/${pname}-${version}/logos
mv documentation/* $out/share/doc/${pname}-${version}/
'';
@ -61,9 +67,9 @@ stdenv.mkDerivation rec {
passthru.updateScript = ./update.py;
meta = with lib; {
homepage = "https://ndi.tv/sdk/";
homepage = "https://ndi.video/ndi-sdk/";
description = "NDI Software Developer Kit";
platforms = ["x86_64-linux"];
platforms = ["x86_64-linux" "i686-linux" "aarch64-linux" "armv7l-linux"];
hydraPlatforms = [];
sourceProvenance = with sourceTypes; [ binaryNativeCode ];
license = licenses.unfree;

View file

@ -1 +1 @@
{"hash": "sha256:70e04c2e7a629a9854de2727e0f978175b7a4ec6cf4cd9799a22390862f6fa27", "version": "5.5.2"}
{"hash": "sha256:4ff4b92f2c5f42d234aa7d142e2de7e9b045c72b46ad5149a459d48efd9218de", "version": "5.6.0"}

View file

@ -1,56 +0,0 @@
{ lib
, buildPythonPackage
, certifi
, cryptography
, fetchFromGitHub
, pylsqpack
, pyopenssl
, pytestCheckHook
, pythonOlder
, service-identity
, setuptools
, wheel
}:
buildPythonPackage rec {
pname = "aioquic-mitmproxy";
version = "0.9.21.1";
pyproject = true;
disabled = pythonOlder "3.8";
src = fetchFromGitHub {
owner = "meitinger";
repo = "aioquic_mitmproxy";
rev = "refs/tags/${version}";
hash = "sha256-eD3eICE9jS1jyqMgWwcv6w3gkR0EyGcKwgSXhasXNeA=";
};
nativeBuildInputs = [
setuptools
wheel
];
propagatedBuildInputs = [
certifi
cryptography
pylsqpack
pyopenssl
service-identity
];
nativeCheckInputs = [
pytestCheckHook
];
pythonImportsCheck = [
"aioquic"
];
meta = with lib; {
description = "QUIC and HTTP/3 implementation in Python";
homepage = "https://github.com/meitinger/aioquic_mitmproxy";
license = licenses.bsd3;
maintainers = with maintainers; [ fab ];
};
}

View file

@ -11,21 +11,20 @@
, segno
, setuptools
, trustme
, wheel
}:
buildPythonPackage rec {
pname = "aiounifi";
version = "68";
format = "pyproject";
version = "69";
pyproject = true;
disabled = pythonOlder "3.11";
src = fetchFromGitHub {
owner = "Kane610";
repo = pname;
repo = "aiounifi";
rev = "refs/tags/v${version}";
hash = "sha256-fMTkk2+4RQzE8V4Nemkh2/0Keum+3eMKO5LlPQB9kOU=";
hash = "sha256-XYwdnG3OprHRZm3zQgoPw4VOzvvVflsQzi7+XQiASAU=";
};
postPatch = ''
@ -38,7 +37,6 @@ buildPythonPackage rec {
nativeBuildInputs = [
setuptools
wheel
];
propagatedBuildInputs = [

View file

@ -9,7 +9,7 @@
buildPythonPackage rec {
pname = "botocore-stubs";
version = "1.34.20";
version = "1.34.21";
format = "pyproject";
disabled = pythonOlder "3.7";
@ -17,7 +17,7 @@ buildPythonPackage rec {
src = fetchPypi {
pname = "botocore_stubs";
inherit version;
hash = "sha256-6FwnFoWMvtW5NRM/1oFTe2S7mRrU+0PVUpXt//r0lOk=";
hash = "sha256-xc3pikb8lNUNTs1GXdXGRQEiHJT+KJWmBt5cReyDdkM=";
};
nativeBuildInputs = [

View file

@ -7,6 +7,7 @@
, astroid
, pytestCheckHook
, hypothesis
, pythonRelaxDepsHook
}:
buildPythonPackage rec {
@ -25,8 +26,12 @@ buildPythonPackage rec {
nativeBuildInputs = [
flit-core
pythonRelaxDepsHook
];
# z3 does not provide a dist-info, so python-runtime-deps-check will fail
pythonRemoveDeps = [ "z3-solver" ];
postPatch = ''
substituteInPlace pyproject.toml \
--replace "\"--cov=deal_solver\"," "" \

View file

@ -1,7 +1,6 @@
{ lib
, buildPythonPackage
, fetchPypi
, fetchpatch
# propagates
, django-gravatar2
@ -25,17 +24,6 @@ buildPythonPackage rec {
hash = "sha256-uIjJaZHWL2evj+oISLprvKWT5Sm5f2EKgUD1twL1VbQ=";
};
patches = [
(fetchpatch {
url = "https://gitlab.com/mailman/django-mailman3/-/commit/840d0d531a0813de9a30e72427e202aea21b40fe.patch";
hash = "sha256-vltvsIP/SWpQZeXDUB+GWlTu+ghFMUqIT8i6CrYcmGo=";
})
(fetchpatch {
url = "https://gitlab.com/mailman/django-mailman3/-/commit/25c55e31d28f2fa8eb23f0e83c12f9b0a05bfbf0.patch";
hash = "sha256-ug5tBmnVfJTn5ufDDVg/cEtsZM59jQYJpQZV51T3qIc=";
})
];
postPatch = ''
substituteInPlace setup.py \
--replace 'django>=3.2,<4.2' 'django>=3.2,<4.3'

View file

@ -16,6 +16,7 @@
, pytestCheckHook
, pythonOlder
, redis
, setuptools
}:
buildPythonPackage rec {
@ -40,6 +41,7 @@ buildPythonPackage rec {
nativeBuildInputs = [
poetry-core
setuptools
];
propagatedBuildInputs = [

View file

@ -10,7 +10,7 @@
buildPythonPackage rec {
pname = "karton-core";
version = "5.3.0";
version = "5.3.2";
format = "setuptools";
disabled = pythonOlder "3.7";
@ -19,7 +19,7 @@ buildPythonPackage rec {
owner = "CERT-Polska";
repo = "karton";
rev = "refs/tags/v${version}";
hash = "sha256-sf8O4Y/yMoTFCibQRtNDX3pXdQ0Xzor3WqeU4xp3WuU=";
hash = "sha256-/MPD83sBo9n/dI1uXbHbjvz6upJSJrssMGmGwfQ+KE8=";
};
propagatedBuildInputs = [

View file

@ -3,8 +3,9 @@
, fetchFromGitHub
, buildPythonPackage
, pythonOlder
, pythonRelaxDepsHook
# Mitmproxy requirements
, aioquic-mitmproxy
, aioquic
, asgiref
, blinker
, brotli
@ -56,8 +57,16 @@ buildPythonPackage rec {
hash = "sha256-BO7oQ4TVuZ4dCtROq2M24V6HVo0jzyBdQfb67dYA07U=";
};
nativeBuildInputs = [
pythonRelaxDepsHook
];
pythonRelaxDeps = [
"aioquic"
];
propagatedBuildInputs = [
aioquic-mitmproxy
aioquic
asgiref
blinker
brotli
@ -109,22 +118,14 @@ buildPythonPackage rec {
"test_get_version"
# https://github.com/mitmproxy/mitmproxy/commit/36ebf11916704b3cdaf4be840eaafa66a115ac03
# Tests require terminal
"test_integration"
"test_commands_exist"
"test_contentview_flowview"
"test_flowview"
# ValueError: Exceeds the limit (4300) for integer string conversion
"test_roundtrip_big_integer"
"test_wireguard"
"test_commands_exist"
"test_integration"
"test_statusbar"
# AssertionError: Playbook mismatch!
"test_untrusted_cert"
"test_mitmproxy_ca_is_untrusted"
];
disabledTestPaths = [
# teardown of half the tests broken
"test/mitmproxy/addons/test_onboarding.py"
# FileNotFoundError: [Errno 2] No such file or directory
# likely wireguard is also not working in the sandbox
"test_wireguard"
];
dontUsePytestXdist = true;
@ -136,6 +137,6 @@ buildPythonPackage rec {
homepage = "https://mitmproxy.org/";
changelog = "https://github.com/mitmproxy/mitmproxy/blob/${version}/CHANGELOG.md";
license = licenses.mit;
maintainers = with maintainers; [ kamilchm SuperSandro2000 ];
maintainers = with maintainers; [ SuperSandro2000 ];
};
}

View file

@ -4,12 +4,13 @@
, pytestCheckHook
, requests
, pythonOlder
, setuptools
}:
buildPythonPackage rec {
pname = "pytado";
version = "0.17.3";
format = "setuptools";
version = "0.17.4";
pyproject = true;
disabled = pythonOlder "3.7";
@ -17,9 +18,13 @@ buildPythonPackage rec {
owner = "wmalgadey";
repo = "PyTado";
rev = "refs/tags/${version}";
sha256 = "sha256-whpNYiAb2cqKI4m0HJN2lPt51FLuEzrkrRTSWs6uznU=";
hash = "sha256-Wdd9HdsQjaYlL8knhMuO87+dom+aTsmrLRK0UdrpsbQ=";
};
nativeBuildInputs = [
setuptools
];
propagatedBuildInputs = [
requests
];

View file

@ -24,7 +24,7 @@
buildPythonPackage rec {
pname = "pytensor";
version = "2.18.5";
version = "2.18.6";
pyproject = true;
disabled = pythonOlder "3.9";
@ -33,7 +33,7 @@ buildPythonPackage rec {
owner = "pymc-devs";
repo = "pytensor";
rev = "refs/tags/rel-${version}";
hash = "sha256-0xwzFmYsec7uQaq6a4BAA6MYy2zIVZ0cTwodVJQ6yMs=";
hash = "sha256-SMh4wVZwmc87ztFn2OOI234VP3JzmxVMBkn7lYwVu6M=";
};
postPatch = ''

View file

@ -14,16 +14,16 @@
buildPythonPackage rec {
pname = "sfrbox-api";
version = "0.0.8";
format = "pyproject";
version = "0.0.9";
pyproject = true;
disabled = pythonOlder "3.8";
src = fetchFromGitHub {
owner = "hacf-fr";
repo = pname;
repo = "sfrbox-api";
rev = "refs/tags/v${version}";
hash = "sha256-yvVoWBupHRbMoXmun/pj0bPpujWKfH1SknEhvgIsPzk=";
hash = "sha256-rMfX9vA8IuWxXvVs4WYNHO6neeoie/3gABwhXyJoAF8=";
};
postPatch = ''
@ -36,17 +36,22 @@ buildPythonPackage rec {
];
propagatedBuildInputs = [
click
defusedxml
httpx
pydantic
];
passthru.optional-dependencies = {
cli = [
click
];
};
nativeCheckInputs = [
pytest-asyncio
pytestCheckHook
respx
];
] ++ lib.flatten (builtins.attrValues passthru.optional-dependencies);
pythonImportsCheck = [
"sfrbox_api"

View file

@ -1,24 +0,0 @@
From 3ae04e8b9be879cf25fb5b51a48c8a1263a4844d Mon Sep 17 00:00:00 2001
From: Gaetan Lepage <gaetan@glepage.com>
Date: Mon, 15 Jan 2024 10:05:40 +0100
Subject: [PATCH] remove-illegal-name-from-extra-dependencies
---
setup.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/setup.py b/setup.py
index 968c32d6..c98ee9f8 100755
--- a/setup.py
+++ b/setup.py
@@ -190,6 +190,7 @@ def _prepare_extras(skip_pattern: str = "^_", skip_files: Tuple[str] = ("base.tx
# create an 'all' keyword that install all possible dependencies
extras_req["all"] = list(chain([pkgs for k, pkgs in extras_req.items() if k not in ("_test", "_tests")]))
extras_req["dev"] = extras_req["all"] + extras_req["_tests"]
+ extras_req.pop("_tests")
return extras_req
--
2.42.0

View file

@ -20,7 +20,7 @@
let
pname = "torchmetrics";
version = "1.3.0";
version = "1.3.0.post";
in
buildPythonPackage {
inherit pname version;
@ -32,16 +32,9 @@ buildPythonPackage {
owner = "Lightning-AI";
repo = "torchmetrics";
rev = "refs/tags/v${version}";
hash = "sha256-xDUT9GSOn6ZNDFRsFws3NLxBsILKDHPKeEANwM8NXj8=";
hash = "sha256-InwXOeQ/u7sdq/+gjm0CSCiuB/9YXP+rPVbvOSH16Dk=";
};
patches = [
# The extra dependencies dictionary contains an illegally named entry '_tests'.
# The build fails because of this.
# Issue has been opened upstream: https://github.com/Lightning-AI/torchmetrics/issues/2305
./0001-remove-illegal-name-from-extra-dependencies.patch
];
propagatedBuildInputs = [
numpy
lightning-utilities

View file

@ -13,7 +13,7 @@
buildPythonPackage rec {
pname = "vallox-websocket-api";
version = "4.0.2";
version = "4.0.3";
format = "pyproject";
disabled = pythonOlder "3.8";
@ -22,7 +22,7 @@ buildPythonPackage rec {
owner = "yozik04";
repo = "vallox_websocket_api";
rev = "refs/tags/${version}";
hash = "sha256-a9cYYRAKX9sY9fQhefLWgyvk0vQl7Ao3zvw0SAtFW/Q=";
hash = "sha256-L6uLA8iVYzh3wFVSwxzleHhu22sQeomq9N9A1oAxpf4=";
};
nativeBuildInputs = [

View file

@ -5,14 +5,14 @@
python3.pkgs.buildPythonApplication rec {
pname = "checkov";
version = "3.1.63";
version = "3.1.66";
pyproject = true;
src = fetchFromGitHub {
owner = "bridgecrewio";
repo = "checkov";
rev = "refs/tags/${version}";
hash = "sha256-MQAREb3ivMTQGE/ktHDxz6r2t7LnsVoIEoZtv7rfC2U=";
hash = "sha256-hvl29/K4qHvDiXM0Ufmi3ExMq+2JXQbSzaFYCCP0OhU=";
};
patches = [

View file

@ -10,13 +10,13 @@
stdenv.mkDerivation rec {
pname = "corrosion";
version = "0.4.5";
version = "0.4.6";
src = fetchFromGitHub {
owner = "corrosion-rs";
repo = "corrosion";
rev = "v${version}";
hash = "sha256-eE3RNLK5xKOjXeA+vDQmM1hvw92TbmPEDLdeqimgwcA=";
hash = "sha256-WPMxewswSRc1ULBgGTrdZmWeFDWVzHk2jzqGChkRYKE=";
};
cargoRoot = "generator";
@ -25,7 +25,7 @@ stdenv.mkDerivation rec {
inherit src;
sourceRoot = "${src.name}/${cargoRoot}";
name = "${pname}-${version}";
hash = "sha256-j9tsRho/gWCGwXUYZSbs3rudT6nYHh0FSfBCAemZHmw=";
hash = "sha256-R09sgCjwqc22zXg1T7iMx9qmyMz9xlnEuOelPB4O7jw=";
};
buildInputs = lib.optional stdenv.isDarwin libiconv;

View file

@ -11,12 +11,17 @@ python3.pkgs.buildPythonApplication rec {
src = fetchFromGitHub {
owner = "Riverside-Healthcare";
repo = "djlint";
rev = "v${version}";
rev = "refs/tags/v${version}";
hash = "sha256-p9RIzX9zoZxBrhiNaIeCX9OgfQm/lXNwYsh6IcsnIVk=";
};
nativeBuildInputs = [
python3.pkgs.poetry-core
nativeBuildInputs = with python3.pkgs; [
poetry-core
pythonRelaxDepsHook
];
pythonRelaxDeps = [
"pathspec"
];
propagatedBuildInputs = with python3.pkgs; [

View file

@ -10,14 +10,14 @@
buildGoModule rec {
pname = "symfony-cli";
version = "5.8.1";
version = "5.8.2";
vendorHash = "sha256-bscRqFYV2qzTmu04l00/iMsFQR5ITPBFVr9BQwVGFU8=";
src = fetchFromGitHub {
owner = "symfony-cli";
repo = "symfony-cli";
rev = "v${version}";
hash = "sha256-GJPUYza1LhWZP9U3JKoe3i0npLgypo3DkKex9DFo1U4=";
hash = "sha256-P5VitZL6KYplMpWdwTkzJEqf5UoSB5HaH/0kL2CbUEA=";
};
ldflags = [

View file

@ -1,26 +1,27 @@
{ lib, stdenv, fetchgit, fetchpatch, autoreconfHook, libdrm, libX11, libGL, mesa, pkg-config }:
{ lib, stdenv, fetchFromGitLab, meson, ninja, libdrm, libX11, libGL, mesa, pkg-config, gst_all_1 }:
stdenv.mkDerivation {
pname = "kmscube";
version = "unstable-2018-06-17";
version = "unstable-2023-09-25";
src = fetchgit {
url = "git://anongit.freedesktop.org/mesa/kmscube";
rev = "9dcce71e603616ee7a54707e932f962cdf8fb20a";
sha256 = "1q5b5yvyfj3127385mp1bfmcbnpnbdswdk8gspp7g4541xk4k933";
src = fetchFromGitLab {
domain = "gitlab.freedesktop.org";
owner = "mesa";
repo = "kmscube";
rev = "96d63eb59e34c647cda1cbb489265f8c536ae055";
hash = "sha256-kpnn4JBNvwatrcCF/RGk/fQ7qiKD26iLBr9ovDmAKBo=";
};
patches = [
# Pull upstream patch for -fno-common toolchains.
(fetchpatch {
name = "fno-common.patch";
url = "https://gitlab.freedesktop.org/mesa/kmscube/-/commit/908ef39864442c0807954af5d3f88a3da1a6f8a5.patch";
sha256 = "1gxn3b50mvjlc25234839v5z29r8fd9di4176a3yx4gbsz8cc5vi";
})
];
nativeBuildInputs = [ autoreconfHook pkg-config ];
buildInputs = [ libdrm libX11 libGL mesa ];
nativeBuildInputs = [ meson pkg-config ninja ];
buildInputs = [
libdrm
libX11
libGL
mesa
] ++ (with gst_all_1; [
gstreamer
gst-plugins-base
]);
meta = with lib; {
description = "Example OpenGL app using KMS/GBM";

View file

@ -43,6 +43,11 @@ rustPlatform.buildRustPackage rec {
buildInputs = [ openssl ]
++ lib.optionals stdenv.isDarwin [ SystemConfiguration ];
checkFlags = [
# flaky
"--skip=ws_integration::none::merge"
];
passthru.tests.version = testers.testVersion {
package = surrealdb;
command = "surreal version";

View file

@ -99,7 +99,6 @@ let
azure-mgmt-advisor = overrideAzureMgmtPackage super.azure-mgmt-advisor "9.0.0" "zip" "sha256-/ECLNzFf6EeBtRkST4yxuKwQsvQkHkOdDT4l/WyhjXs=";
azure-mgmt-apimanagement = overrideAzureMgmtPackage super.azure-mgmt-apimanagement "4.0.0" "zip" "sha256-AiTjLJ28g80xnrRFLfPUevJgeaxLpuGmvkd3+FskNiw=";
azure-mgmt-authorization = overrideAzureMgmtPackage super.azure-mgmt-authorization "4.0.0" "zip" "sha256-abhavAmuZPxyl1vUNDEXDYx+tdFmdUuYqsXzhF3lfcQ=";
azure-mgmt-batch = overrideAzureMgmtPackage super.azure-mgmt-batch "17.0.0" "zip" "sha256-hkM4WVLuwxj4qgXsY8Ya7zu7/v37gKdP0Xbf2EqrsWo=";
azure-mgmt-billing = overrideAzureMgmtPackage super.azure-mgmt-billing "6.0.0" "zip" "sha256-1PXFpBiKRW/h6zK2xF9VyiBpx0vkHrdpIYQLOfL1wH8=";
azure-mgmt-botservice = overrideAzureMgmtPackage super.azure-mgmt-botservice "2.0.0b3" "zip" "sha256-XZGQOeMw8usyQ1tl8j57fZ3uqLshomHY9jO/rbpQOvM=";
azure-mgmt-cognitiveservices = overrideAzureMgmtPackage super.azure-mgmt-cognitiveservices "13.5.0" "zip" "sha256-RK8LGbH4J+nN6gnGBUweZgkqUcMrwe9aVtvZtAvFeBU=";
@ -139,6 +138,11 @@ let
azure-mgmt-appcontainers = overrideAzureMgmtPackage super.azure-mgmt-appcontainers "2.0.0" "zip"
"sha256-ccdIdvdgTYPWEZCWqkLc8lEuMuAEERvl5B1huJyBkvU=";
azure-mgmt-batch = (overrideAzureMgmtPackage super.azure-mgmt-batch "17.0.0" "zip"
"sha256-hkM4WVLuwxj4qgXsY8Ya7zu7/v37gKdP0Xbf2EqrsWo=").overridePythonAttrs (attrs: {
propagatedBuildInputs = attrs.propagatedBuildInputs or [ ] ++ [ self.msrest ];
});
azure-mgmt-batchai = overrideAzureMgmtPackage super.azure-mgmt-batchai "7.0.0b1" "zip"
"sha256-mT6vvjWbq0RWQidugR229E8JeVEiobPD3XA/nDM3I6Y=";
@ -204,8 +208,10 @@ let
azure-mgmt-applicationinsights = overrideAzureMgmtPackage super.azure-mgmt-applicationinsights "1.0.0" "zip"
"sha256-woeix9703hn5LAwxugKGf6xvW433G129qxkoi7RV/Fs=";
azure-mgmt-servicefabric = overrideAzureMgmtPackage super.azure-mgmt-servicefabric "1.0.0" "zip"
"sha256-3jXhF5EoMsGp6TEJqNJMq5T1VwOpCHsuscWwZVs7GRM=";
azure-mgmt-servicefabric = (overrideAzureMgmtPackage super.azure-mgmt-servicefabric "1.0.0" "zip"
"sha256-3jXhF5EoMsGp6TEJqNJMq5T1VwOpCHsuscWwZVs7GRM=").overridePythonAttrs (attrs: {
propagatedBuildInputs = attrs.propagatedBuildInputs or [ ] ++ [ self.msrest ];
});
azure-mgmt-servicelinker = overrideAzureMgmtPackage super.azure-mgmt-servicelinker "1.2.0b1" "zip"
"sha256-RK1Q51Q0wAG55oKrFmv65/2AUKl+gRdp27t/EcuMONk=";

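Both azure-mgmt-batch and azure-mgmt-servicefabric get msrest re-added through overridePythonAttrs, which edits the already-built Python derivation rather than the arguments of the local override helper. The pattern, reduced to its shape (somePkg and basePkg are placeholders; msrest comes from the package set's fixpoint as in the surrounding file):

somePkg = basePkg.overridePythonAttrs (attrs: {
  # Attribute selection with an `or` default binds tighter than `++`,
  # so this appends msrest to whatever was already propagated (or to [ ]).
  propagatedBuildInputs = attrs.propagatedBuildInputs or [ ] ++ [ self.msrest ];
});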
View file

@ -2,16 +2,16 @@
buildGoModule rec {
pname = "copilot-cli";
version = "1.32.1";
version = "1.33.0";
src = fetchFromGitHub {
owner = "aws";
repo = pname;
rev = "v${version}";
hash = "sha256-OdzycH+52F6lfCErKlsVFiPE2gxU22ySV5uPA6zBXUg=";
hash = "sha256-4LDeilWi3FzvrvHjEyQKQi1GxouSlzDY96yBuMfpsXM=";
};
vendorHash = "sha256-5Nlo5Ol4YdO3XI5RhpFfBgprVUV5DUkySvCXeFZqulk=";
vendorHash = "sha256-EqgOyjb2raE5hW3h+czbsi/F9SVNDwPWM1L6GC7v6IY=";
nativeBuildInputs = [ installShellFiles ];

View file

@ -76,6 +76,7 @@ buildGoModule rec {
'';
passthru.tests.lxd = nixosTests.lxd;
passthru.tests.lxd-to-incus = nixosTests.incus.lxd-to-incus;
passthru.ui = callPackage ./ui.nix { };
passthru.updateScript = gitUpdater {
url = "https://github.com/canonical/lxd.git";

View file

@ -8,16 +8,16 @@
buildGoModule rec {
pname = "qovery-cli";
version = "0.80.0";
version = "0.81.0";
src = fetchFromGitHub {
owner = "Qovery";
repo = "qovery-cli";
rev = "refs/tags/v${version}";
hash = "sha256-HEOv58cUF/U/fa52cxre4HXXXNONSfHqbInI5nYvk0Q=";
hash = "sha256-Me2UIyBJ/TFP6M7zqQvJ/NDYoiOWop8Lkh8e1KbD9eU=";
};
vendorHash = "sha256-Vvc2YoZnoCzIU/jE6XSg/eVkWTwl6i04Fd5RHTaS1WM=";
vendorHash = "sha256-IDKJaWnQsOtghpCh7UyO6RzWgSZS0S0jdF5hVV7xVbs=";
nativeBuildInputs = [
installShellFiles

View file

@ -1,6 +1,7 @@
{ lib
, buildGoModule
, fetchFromGitHub
, fetchpatch
}:
buildGoModule rec {
@ -14,6 +15,15 @@ buildGoModule rec {
sha256 = "sha256-YMMHj6wctKtJi/rrcMIrLmNw/uvO6wCwokgYRQxcsFw=";
};
patches = [
# Add support for Nix files. Upstream is slow with responding to PRs,
# patch backported from PR https://github.com/google/addlicense/pull/153.
(fetchpatch {
url = "https://github.com/google/addlicense/commit/e0fb3f44cc7670dcc5cbcec2211c9ad238c5f9f1.patch";
hash = "sha256-XCAvL+HEa1hGc0GAnl+oYHKzBJ3I5ArS86vgABrP/Js=";
})
];
vendorHash = "sha256-2mncc21ecpv17Xp8PA9GIodoaCxNBacbbya/shU8T9Y=";
subPackages = [ "." ];

View file

@ -8,7 +8,7 @@
python3.pkgs.buildPythonApplication rec {
pname = "dooit";
version = "2.1.1";
format = "pyproject";
pyproject = true;
src = fetchFromGitHub {
owner = "kraanzu";
@ -19,6 +19,11 @@ python3.pkgs.buildPythonApplication rec {
nativeBuildInputs = with python3.pkgs; [
poetry-core
pythonRelaxDepsHook
];
pythonRelaxDeps = [
"tzlocal"
];
propagatedBuildInputs = with python3.pkgs; [

View file

@ -1,11 +1,20 @@
{ stdenv, lib, fetchurl, ncurses, autoreconfHook }:
{ stdenv
, lib
, fetchFromGitHub
, ncurses
, autoreconfHook
}:
stdenv.mkDerivation rec {
pname = "xstow";
version = "1.1.0";
version = "1.1.1";
src = fetchurl {
url = "http://downloads.sourceforge.net/sourceforge/${pname}/${pname}-${version}.tar.bz2";
sha256 = "sha256-wXQ5XSmogAt1torfarrqIU4nBYj69MGM/HBYqeIE+dw=";
src = fetchFromGitHub {
owner = "majorkingleo";
repo = "xstow";
rev = version;
fetchSubmodules = true;
hash = "sha256-c89+thw5N3Cgl1Ww+W7c3YsyhNJMLlreedvdWJFY3WY=";
};
nativeBuildInputs = [ autoreconfHook ];
@ -23,9 +32,8 @@ stdenv.mkDerivation rec {
];
meta = with lib; {
broken = stdenv.isDarwin;
description = "A replacement of GNU Stow written in C++";
homepage = "https://xstow.sourceforge.net";
homepage = "https://github.com/majorkingleo/xstow";
license = licenses.gpl2Only;
maintainers = with maintainers; [ nzbr ];
platforms = platforms.unix;

View file

@ -2,27 +2,24 @@
, clang
, fetchFromGitHub
, buildGoModule
, installShellFiles
}:
buildGoModule rec {
pname = "dae";
version = "0.5.0";
version = "0.4.0";
src = fetchFromGitHub {
owner = "daeuniverse";
repo = "dae";
rev = "v${version}";
hash = "sha256-DxGKfxu13F7+5zV/31GP9gkbGHrz5RdRe84J3DQ0iUs=";
hash = "sha256-hvAuWCacaWxXwxx5ktj57hnWt8fcnwD6rUuRj1+ZtFA=";
fetchSubmodules = true;
};
vendorHash = "sha256-UQRM3/JSsPDAGqYZ43bVYVvSLvqqZ/BJE6hwx5wzfcQ=";
vendorHash = "sha256-qK+x6ciAebwIWHRjRpNXCAqsfnmEx37evS4+7kwcFIs=";
proxyVendor = true;
nativeBuildInputs = [ clang installShellFiles ];
CGO_ENABLED = 0;
nativeBuildInputs = [ clang ];
ldflags = [
"-s"
@ -44,7 +41,6 @@ buildGoModule rec {
install -Dm444 install/dae.service $out/lib/systemd/system/dae.service
substituteInPlace $out/lib/systemd/system/dae.service \
--replace /usr/bin/dae $out/bin/dae
installShellCompletion install/shell-completion/dae.{bash,zsh,fish}
'';
meta = with lib; {

View file

@ -5,16 +5,20 @@
python3.pkgs.buildPythonApplication rec {
pname = "dnstwist";
version = "20230918";
format = "setuptools";
version = "20240116";
pyproject = true;
src = fetchFromGitHub {
owner = "elceef";
repo = pname;
repo = "dnstwist";
rev = "refs/tags/${version}";
hash = "sha256-LGeDb0++9Zsal9HOXjfjF18RFQS+6i578EfD3YTtlS4=";
hash = "sha256-areFRDi728SedArhUy/rbPzhoFabNoT/WdyyN+6OQK0=";
};
nativeBuildInputs = with python3.pkgs; [
setuptools
];
propagatedBuildInputs = with python3.pkgs; [
dnspython
geoip

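dnstwist also moves from format = "setuptools" to pyproject = true, i.e. from the legacy setup.py code path to the PEP 517 build pipeline. With that switch the build backend has to be declared explicitly, which is why setuptools now appears under nativeBuildInputs. In reduced form:

pyproject = true;  # build and install through the PEP 517 pipeline

nativeBuildInputs = with python3.pkgs; [
  setuptools  # the build backend must now be listed explicitly
];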
View file

@ -12,7 +12,7 @@
let
pname = "ockam";
version = "0.115.0";
version = "0.116.0";
in
rustPlatform.buildRustPackage {
inherit pname version;
@ -21,10 +21,10 @@ rustPlatform.buildRustPackage {
owner = "build-trust";
repo = pname;
rev = "ockam_v${version}";
sha256 = "sha256-DPRMPGxOuF4FwDXyVNxv9j2qy3K1p/9AVmrp0pPUQXM=";
sha256 = "sha256-dcSH/mO3cUamjOCuvEB/C24n7K5T1KnUMvTn8fVu+YM=";
};
cargoHash = "sha256-SeBv2yO0E60C4xMGf/7LOOyTOXf8vZCxIBC1dU2CAX0=";
cargoHash = "sha256-9UwPPOKg+Im+vfQFiYKS68tONYkKz1TqX7ukbtmLcRk=";
nativeBuildInputs = [ git pkg-config ];
buildInputs = [ openssl dbus ]
++ lib.optionals stdenv.isDarwin [ Security ];

View file

@ -2,16 +2,16 @@
rustPlatform.buildRustPackage rec {
pname = "sniffglue";
version = "0.15.0";
version = "0.16.0";
src = fetchFromGitHub {
owner = "kpcyrd";
repo = pname;
rev = "v${version}";
sha256 = "sha256-8SkwdPaKHf0ZE/MeM4yOe2CpQvZzIHf5d06iM7KPAT8=";
sha256 = "sha256-MOw0WBdpo6dYXsjbUrqoIJl/sjQ4wSAcm4dPxDgTYgY=";
};
cargoSha256 = "sha256-UGvFLW48sakNuV3eXBpCxaHOrveQPXkynOayMK6qs4g=";
cargoHash = "sha256-vnfviiXJ4L/j5M3N+LegOIvLuD6vYJB1QeBgZJVfDnI=";
nativeBuildInputs = [ pkg-config ];

View file

@ -1,49 +0,0 @@
{ lib
, buildPythonApplication
, fetchPypi
, python-slugify
, requests
, urllib3
, six
, setuptools
, gitpython
, pythonRelaxDepsHook
}:
buildPythonApplication rec {
pname = "transifex-client";
version = "0.14.4";
src = fetchPypi {
inherit pname version;
sha256 = "11dc95cefe90ebf0cef3749c8c7d85b9d389c05bd0e3389bf117685df562bd5c";
};
# https://github.com/transifex/transifex-client/issues/323
nativeBuildInputs = [
pythonRelaxDepsHook
];
pythonRelaxDeps = [
"python-slugify"
];
propagatedBuildInputs = [
gitpython
python-slugify
requests
setuptools
six
urllib3
];
# Requires external resources
doCheck = false;
meta = with lib; {
description = "Transifex translation service client";
homepage = "https://www.transifex.com/";
license = licenses.gpl2Only;
maintainers = with maintainers; [ sikmir ];
};
}

View file

@ -1036,6 +1036,7 @@ mapAliases ({
tokodon = plasma5Packages.tokodon;
tor-browser-bundle-bin = tor-browser; # Added 2023-09-23
transfig = fig2dev; # Added 2022-02-15
transifex-client = transifex-cli; # Added 2023-12-29
trezor_agent = trezor-agent; # Added 2024-01-07
trustedGrub = throw "trustedGrub has been removed, because it is not maintained upstream anymore"; # Added 2023-05-10
trustedGrub-for-HP = throw "trustedGrub-for-HP has been removed, because it is not maintained upstream anymore"; # Added 2023-05-10

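With the Python transifex-client removed, its attribute is kept alive as an alias so existing references keep evaluating. The aliases file supports two shapes, both visible in this hunk: a forwarding alias and a throw for packages removed outright. Schematically (the second entry is illustrative, not part of this change):

# Forwarding alias: the old attribute keeps evaluating, to the replacement package.
transifex-client = transifex-cli; # Added 2023-12-29

# Removal alias: evaluation aborts with an explanation rather than a
# bare "attribute missing" error.
somePkg = throw "somePkg has been removed because it is no longer maintained";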
View file

@ -14077,8 +14077,6 @@ with pkgs;
tracefilesim = callPackage ../development/tools/analysis/garcosim/tracefilesim { };
transifex-client = python39.pkgs.callPackage ../tools/text/transifex-client { };
transifex-cli = callPackage ../applications/misc/transifex-cli { };
translatelocally = callPackage ../applications/misc/translatelocally { };
@ -22433,9 +22431,7 @@ with pkgs;
libavif = callPackage ../development/libraries/libavif { };
libayatana-common = callPackage ../development/libraries/libayatana-common {
inherit (lomiri) cmake-extras;
};
libayatana-common = callPackage ../development/libraries/libayatana-common { };
libb2 = callPackage ../development/libraries/libb2 { };
@ -24889,15 +24885,22 @@ with pkgs;
SDL2_image = callPackage ../development/libraries/SDL2_image {
inherit (darwin.apple_sdk.frameworks) Foundation;
};
SDL2_image_2_0_5 = SDL2_image.override({ # Pinned for pygame, toppler
# Pinned for pygame, toppler
SDL2_image_2_0 = SDL2_image.overrideAttrs (oldAttrs: {
version = "2.0.5";
src = fetchurl {
inherit (oldAttrs.src) url;
hash = "sha256-vdX24CZoL31+G+C2BRsgnaL0AqLdi9HEvZwlrSYxCNA";
};
});
SDL2_image_2_6 = SDL2_image.override({
# Pinned for hedgewars:
# https://github.com/NixOS/nixpkgs/pull/274185#issuecomment-1856764786
SDL2_image_2_6 = SDL2_image.overrideAttrs (oldAttrs: {
version = "2.6.3";
src = fetchurl {
inherit (oldAttrs.src) url;
hash = "sha256-kxyb5b8dfI+um33BV4KLfu6HTiPH8ktEun7/a0g2MSw=";
};
});
SDL2_mixer = callPackage ../development/libraries/SDL2_mixer {
@ -38391,7 +38394,7 @@ with pkgs;
tome4 = callPackage ../games/tome4 { };
toppler = callPackage ../games/toppler {
SDL2_image = SDL2_image_2_0_5;
SDL2_image = SDL2_image_2_0;
};
torus-trooper = callPackage ../games/torus-trooper { };

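The SDL2_image pins switch from .override, which only re-invokes the function with different callPackage arguments, to .overrideAttrs, which edits the attributes fed to mkDerivation; that is what makes it possible to swap in an older version and src while reusing the rest of the package. The general shape, with placeholder name, version, URL and hash:

myPinnedPkg = somePkg.overrideAttrs (oldAttrs: {
  version = "1.2.3";
  src = fetchurl {
    url = "https://example.org/somepkg-1.2.3.tar.gz";  # placeholder URL
    hash = "sha256-...";                               # placeholder hash
  };
});

Consumers then simply reference the pinned attribute, as the toppler hunk above does with SDL2_image = SDL2_image_2_0.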
Some files were not shown because too many files have changed in this diff.