
Merge pull request #108862 from cpcloud/refactor-nvidia-containers

Jörg Thalheim merged this on 2021-01-15 11:10:09 +00:00 (committed by GitHub)
commit f3042e3078
11 changed files with 155 additions and 97 deletions

@@ -213,9 +213,6 @@ in
message = "Option enableNvidia requires 32bit support libraries";
}];
}
(mkIf cfg.enableNvidia {
environment.etc."nvidia-container-runtime/config.toml".source = "${pkgs.nvidia-docker}/etc/config.toml";
})
]);
imports = [
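
For reference, a minimal NixOS configuration sketch that exercises the option this hunk touches. The driSupport32Bit line is an assumption about what the "32bit support libraries" assertion checks:

{
  virtualisation.docker = {
    enable = true;
    enableNvidia = true;
  };
  # assumption: the assertion above is satisfied by enabling 32-bit driver support
  hardware.opengl.driSupport32Bit = true;
}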

@@ -2,7 +2,6 @@
let
cfg = config.virtualisation.podman;
toml = pkgs.formats.toml { };
nvidia-docker = pkgs.nvidia-docker.override { containerRuntimePath = "${pkgs.runc}/bin/runc"; };
inherit (lib) mkOption types;
@@ -100,8 +99,8 @@ in
containersConf.extraConfig = lib.optionalString cfg.enableNvidia
(builtins.readFile (toml.generate "podman.nvidia.containers.conf" {
engine = {
conmon_env_vars = [ "PATH=${lib.makeBinPath [ nvidia-docker ]}" ];
runtimes.nvidia = [ "${nvidia-docker}/bin/nvidia-container-runtime" ];
conmon_env_vars = [ "PATH=${lib.makeBinPath [ pkgs.nvidia-podman ]}" ];
runtimes.nvidia = [ "${pkgs.nvidia-podman}/bin/nvidia-container-runtime" ];
};
}));
};
@@ -111,14 +110,7 @@ in
assertion = cfg.dockerCompat -> !config.virtualisation.docker.enable;
message = "Option dockerCompat conflicts with docker";
}
{
assertion = cfg.enableNvidia -> !config.virtualisation.docker.enableNvidia;
message = "Option enableNvidia conflicts with docker.enableNvidia";
}
];
}
(lib.mkIf cfg.enableNvidia {
environment.etc."nvidia-container-runtime/config.toml".source = "${nvidia-docker}/etc/podman-config.toml";
})
]);
}
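
Because the conflicting assertion is dropped here, Docker and Podman can enable the NVIDIA runtime side by side. A minimal sketch using only the options shown in the two modules above:

{
  virtualisation.docker = {
    enable = true;
    enableNvidia = true;
  };
  virtualisation.podman = {
    enable = true;
    enableNvidia = true;
  };
}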

@@ -7,6 +7,7 @@
, libseccomp
, rpcsvc-proto
, libtirpc
, makeWrapper
}:
let
modp-ver = "450.57";
@@ -23,7 +24,7 @@ stdenv.mkDerivation rec {
src = fetchFromGitHub {
owner = "NVIDIA";
repo = "libnvidia-container";
repo = pname;
rev = "v${version}";
sha256 = "0j6b8z9x9hrrs4xp11zyjjd7kyl7fzcicpiis8k1qb1q2afnqsrq";
};
@@ -64,17 +65,23 @@ stdenv.mkDerivation rec {
popd
'';
postInstall = ''
wrapProgram $out/bin/nvidia-container-cli \
--prefix LD_LIBRARY_PATH : /run/opengl-driver/lib:/run/opengl-driver-32/lib
'';
NIX_CFLAGS_COMPILE = [ "-I${libtirpc.dev}/include/tirpc" ];
NIX_LDFLAGS = [ "-L${libtirpc.dev}/lib" "-ltirpc" ];
nativeBuildInputs = [ pkgconfig rpcsvc-proto ];
nativeBuildInputs = [ pkgconfig rpcsvc-proto makeWrapper ];
buildInputs = [ libelf libcap libseccomp libtirpc ];
meta = with lib; {
homepage = "https://github.com/NVIDIA/libnvidia-container";
description = "NVIDIA container runtime library";
license = licenses.bsd3;
license = licenses.asl20;
platforms = platforms.linux;
maintainers = with maintainers; [ cpcloud ];
};
}

@@ -0,0 +1,72 @@
{ lib
, glibc
, fetchFromGitHub
, makeWrapper
, buildGoPackage
, linkFarm
, writeShellScript
, containerRuntimePath
, configTemplate
}:
let
isolatedContainerRuntimePath = linkFarm "isolated_container_runtime_path" [
{
name = "runc";
path = containerRuntimePath;
}
];
warnIfXdgConfigHomeIsSet = writeShellScript "warn_if_xdg_config_home_is_set" ''
set -eo pipefail
if [ -n "$XDG_CONFIG_HOME" ]; then
echo >&2 "$(tput setaf 3)warning: \$XDG_CONFIG_HOME=$XDG_CONFIG_HOME$(tput sgr 0)"
fi
'';
in
buildGoPackage rec {
pname = "nvidia-container-runtime";
version = "3.4.0";
src = fetchFromGitHub {
owner = "NVIDIA";
repo = pname;
rev = "v${version}";
sha256 = "095mks0r4079vawi50pk4zb5jk0g6s9idg2s1w55a0d27jkknldr";
};
goPackagePath = "github.com/${pname}/src";
buildFlagsArray = [ "-ldflags=" "-s -w" ];
nativeBuildInputs = [ makeWrapper ];
postInstall = ''
mv $out/bin/{src,nvidia-container-runtime}
mkdir -p $out/etc/nvidia-container-runtime
# nvidia-container-runtime invokes docker-runc, or falls back to runc if
# docker-runc isn't available on PATH.
#
# Also set XDG_CONFIG_HOME if it isn't already set, to allow overriding the
# configuration. This in turn allows users to have the nvidia container
# runtime enabled for any number of higher-level runtimes such as docker and
# podman; i.e., there's no need for mutual exclusivity on which high-level
# runtime can enable the nvidia runtime, because each high-level runtime has
# its own config.toml file.
wrapProgram $out/bin/nvidia-container-runtime \
--run "${warnIfXdgConfigHomeIsSet}" \
--prefix PATH : ${isolatedContainerRuntimePath} \
--set-default XDG_CONFIG_HOME $out/etc
cp ${configTemplate} $out/etc/nvidia-container-runtime/config.toml
substituteInPlace $out/etc/nvidia-container-runtime/config.toml \
--subst-var-by glibcbin ${lib.getBin glibc}
'';
meta = with lib; {
homepage = "https://github.com/NVIDIA/nvidia-container-runtime";
description = "NVIDIA container runtime";
license = licenses.asl20;
platforms = platforms.linux;
maintainers = with maintainers; [ cpcloud ];
};
}
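
The new expression is parameterised on containerRuntimePath and configTemplate rather than hard-coding Docker's runc. A sketch of calling it directly, mirroring the arguments that mkNvidiaContainerPkg passes in the final hunk below; the relative path and the template file are assumptions:

nvidia-container-runtime = callPackage ../applications/virtualization/nvidia-container-runtime {
  # e.g. for podman; the template is a hypothetical TOML file containing an @glibcbin@ placeholder
  containerRuntimePath = "${runc}/bin/runc";
  configTemplate = ./config.toml;
};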

@@ -0,0 +1,37 @@
{ lib
, fetchFromGitHub
, buildGoModule
, makeWrapper
, nvidia-container-runtime
}:
buildGoModule rec {
pname = "nvidia-container-toolkit";
version = "1.3.0";
src = fetchFromGitHub {
owner = "NVIDIA";
repo = pname;
rev = "v${version}";
sha256 = "04284bhgx4j55vg9ifvbji2bvmfjfy3h1lq7q356ffgw3yr9n0hn";
};
vendorSha256 = "17zpiyvf22skfcisflsp6pn56y6a793jcx89kw976fq2x5br1bz7";
buildFlagsArray = [ "-ldflags=" "-s -w" ];
nativeBuildInputs = [ makeWrapper ];
postInstall = ''
mv $out/bin/{pkg,${pname}}
ln -s $out/bin/nvidia-container-{toolkit,runtime-hook}
wrapProgram $out/bin/nvidia-container-toolkit \
--add-flags "-config ${nvidia-container-runtime}/etc/nvidia-container-runtime/config.toml"
'';
meta = with lib; {
homepage = "https://github.com/NVIDIA/nvidia-container-toolkit";
description = "NVIDIA container runtime hook";
license = licenses.asl20;
platforms = platforms.linux;
maintainers = with maintainers; [ cpcloud ];
};
}

@@ -1,106 +1,32 @@
{ stdenv
, lib
, fetchFromGitHub
, fetchpatch
, callPackage
, makeWrapper
, buildGoModule
, buildGoPackage
, glibc
, docker
, linkFarm
, containerRuntimePath ? "${docker}/libexec/docker/runc"
}:
with lib; let
libnvidia-container = callPackage ./libnvc.nix { };
isolatedContainerRuntimePath = linkFarm "isolated_container_runtime_path" [
{
name = "runc";
path = containerRuntimePath;
}
];
nvidia-container-runtime = buildGoPackage rec {
pname = "nvidia-container-runtime";
version = "3.4.0";
src = fetchFromGitHub {
owner = "NVIDIA";
repo = "nvidia-container-runtime";
rev = "v${version}";
sha256 = "095mks0r4079vawi50pk4zb5jk0g6s9idg2s1w55a0d27jkknldr";
};
goPackagePath = "github.com/nvidia-container-runtime/src";
buildFlagsArray = [ "-ldflags=" "-s -w" ];
postInstall = ''
mv $out/bin/{src,nvidia-container-runtime}
'';
};
nvidia-container-toolkit = buildGoModule rec {
pname = "nvidia-container-toolkit";
version = "1.3.0";
src = fetchFromGitHub {
owner = "NVIDIA";
repo = "nvidia-container-toolkit";
rev = "v${version}";
sha256 = "04284bhgx4j55vg9ifvbji2bvmfjfy3h1lq7q356ffgw3yr9n0hn";
};
vendorSha256 = "17zpiyvf22skfcisflsp6pn56y6a793jcx89kw976fq2x5br1bz7";
buildFlagsArray = [ "-ldflags=" "-s -w" ];
postInstall = ''
mv $out/bin/{pkg,${pname}}
cp $out/bin/{${pname},nvidia-container-runtime-hook}
'';
};
in
{ stdenv, lib, fetchFromGitHub, callPackage }:
stdenv.mkDerivation rec {
pname = "nvidia-docker";
version = "2.5.0";
src = fetchFromGitHub {
owner = "NVIDIA";
repo = "nvidia-docker";
repo = pname;
rev = "v${version}";
sha256 = "1n1k7fnimky67s12p2ycaq9mgk245fchq62vgd7bl3bzfcbg0z4h";
};
nativeBuildInputs = [ makeWrapper ];
buildPhase = ''
mkdir bin
cp nvidia-docker bin
substituteInPlace bin/nvidia-docker --subst-var-by VERSION ${version}
cp ${libnvidia-container}/bin/nvidia-container-cli bin
cp ${nvidia-container-toolkit}/bin/nvidia-container-{toolkit,runtime-hook} bin
cp ${nvidia-container-runtime}/bin/nvidia-container-runtime bin
'';
installPhase = ''
mkdir -p $out/{bin,etc}
cp -r bin $out
wrapProgram $out/bin/nvidia-container-cli \
--prefix LD_LIBRARY_PATH : /run/opengl-driver/lib:/run/opengl-driver-32/lib
# nvidia-container-runtime invokes docker-runc or runc if that isn't available on PATH
wrapProgram $out/bin/nvidia-container-runtime --prefix PATH : ${isolatedContainerRuntimePath}
cp ${./config.toml} $out/etc/config.toml
substituteInPlace $out/etc/config.toml --subst-var-by glibcbin ${lib.getBin glibc}
cp ${./podman-config.toml} $out/etc/podman-config.toml
substituteInPlace $out/etc/podman-config.toml --subst-var-by glibcbin ${lib.getBin glibc}
mkdir -p $out/bin
cp bin/nvidia-docker $out/bin
'';
meta = {
meta = with lib; {
homepage = "https://github.com/NVIDIA/nvidia-docker";
description = "NVIDIA container runtime for Docker";
license = licenses.bsd3;
platforms = platforms.linux;
maintainers = with lib.maintainers; [ cpcloud ];
maintainers = with maintainers; [ cpcloud ];
};
}

@@ -15715,7 +15715,34 @@ in
nv-codec-headers = callPackage ../development/libraries/nv-codec-headers { };
nvidia-docker = callPackage ../applications/virtualization/nvidia-docker { };
mkNvidiaContainerPkg = { name, containerRuntimePath, configTemplate, additionalPaths ? [] }:
let
nvidia-container-runtime = callPackage ../applications/virtualization/nvidia-container-runtime {
inherit containerRuntimePath configTemplate;
};
in symlinkJoin {
inherit name;
paths = [
(callPackage ../applications/virtualization/libnvidia-container { })
nvidia-container-runtime
(callPackage ../applications/virtualization/nvidia-container-toolkit {
inherit nvidia-container-runtime;
})
] ++ additionalPaths;
};
nvidia-docker = mkNvidiaContainerPkg {
name = "nvidia-docker";
containerRuntimePath = "${docker}/libexec/docker/runc";
configTemplate = ../applications/virtualization/nvidia-docker/config.toml;
additionalPaths = [ (callPackage ../applications/virtualization/nvidia-docker { }) ];
};
nvidia-podman = mkNvidiaContainerPkg {
name = "nvidia-podman";
containerRuntimePath = "${runc}/bin/runc";
configTemplate = ../applications/virtualization/nvidia-podman/config.toml;
};
nvidia-texture-tools = callPackage ../development/libraries/nvidia-texture-tools { };
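
mkNvidiaContainerPkg joins the low-level library, the parameterised runtime wrapper, and the toolkit into one symlinkJoin per high-level engine. A hedged sketch of wiring up a third engine the same way nvidia-docker and nvidia-podman are defined above; the attribute name and config template are hypothetical:

nvidia-containerd = mkNvidiaContainerPkg {
  name = "nvidia-containerd";
  containerRuntimePath = "${runc}/bin/runc";
  # hypothetical template, analogous to the docker/podman ones above
  configTemplate = ../applications/virtualization/nvidia-containerd/config.toml;
};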