{ lib
, glibc
, fetchFromGitHub
, makeWrapper
, buildGoPackage
, linkFarm
, writeShellScript
, containerRuntimePath   # runc binary the runtime should delegate to
, configTemplate         # config.toml template with an @glibcbin@ substitution var
}:

let
  # Expose the provided container runtime under the name "runc" in a
  # directory of its own, so it can be put on PATH without dragging in
  # anything else from its original store path.
  isolatedContainerRuntimePath = linkFarm "isolated_container_runtime_path" [
    {
      name = "runc";
      path = containerRuntimePath;
    }
  ];

  # Emit a (yellow) warning on stderr when the user has XDG_CONFIG_HOME set,
  # since that overrides the default configuration shipped in $out/etc.
  warnIfXdgConfigHomeIsSet = writeShellScript "warn_if_xdg_config_home_is_set" ''
    set -eo pipefail

    if [ -n "$XDG_CONFIG_HOME" ]; then
      echo >&2 "$(tput setaf 3)warning: \$XDG_CONFIG_HOME=$XDG_CONFIG_HOME$(tput sgr 0)"
    fi
  '';
in
buildGoPackage rec {
  pname = "nvidia-container-runtime";
  version = "3.4.0";

  src = fetchFromGitHub {
    owner = "NVIDIA";
    repo = pname;
    rev = "v${version}";
    sha256 = "095mks0r4079vawi50pk4zb5jk0g6s9idg2s1w55a0d27jkknldr";
  };

  goPackagePath = "github.com/${pname}/src";

  # Strip symbol table and DWARF debug info from the resulting binary.
  ldflags = [ "-s" "-w" ];

  nativeBuildInputs = [ makeWrapper ];

  postInstall = ''
    # buildGoPackage names the binary after the last path component of
    # goPackagePath ("src"); rename it to the real tool name.
    mv $out/bin/{src,nvidia-container-runtime}
    mkdir -p $out/etc/nvidia-container-runtime

    # nvidia-container-runtime invokes docker-runc or runc if that isn't
    # available on PATH.
    #
    # Also set XDG_CONFIG_HOME if it isn't already to allow overriding
    # configuration. This in turn allows users to have the nvidia container
    # runtime enabled for any number of higher level runtimes like docker and
    # podman, i.e., there's no need to have mutually exclusivity on what high
    # level runtime can enable the nvidia runtime because each high level
    # runtime has its own config.toml file.
    wrapProgram $out/bin/nvidia-container-runtime \
      --run "${warnIfXdgConfigHomeIsSet}" \
      --prefix PATH : ${isolatedContainerRuntimePath} \
      --set-default XDG_CONFIG_HOME $out/etc

    cp ${configTemplate} $out/etc/nvidia-container-runtime/config.toml

    # Point the config at the glibc "bin" output (ldconfig etc.).
    substituteInPlace $out/etc/nvidia-container-runtime/config.toml \
      --subst-var-by glibcbin ${lib.getBin glibc}
  '';

  meta = with lib; {
    homepage = "https://github.com/NVIDIA/nvidia-container-runtime";
    description = "NVIDIA container runtime";
    license = licenses.asl20;
    platforms = platforms.linux;
    maintainers = with maintainers; [ cpcloud ];
  };
}