mirror of https://github.com/NixOS/nixpkgs.git synced 2024-11-18 19:51:17 +00:00

{pkgs/development/cuda-modules,pkgs/test/cuda,pkgs/top-level/cuda-packages.nix}: reformat all CUDA files with nixfmt-rfc-style

```bash
nix run github:NixOS/nixpkgs/ab6071eb54cc9b66dda436111d4f569e4e56cbf4#nixfmt-rfc-style -L --allow-import-from-derivation -- pkgs/development/cuda-modules pkgs/test/cuda pkgs/top-level/cuda-packages.nix
```
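For readers skimming the diff, here is a minimal sketch (not a file from this commit) of the layout rules nixfmt-rfc-style applies throughout the hunks below: spaces inside `[ ]` and `{ }`, lambda patterns such as `{cudaVersion, lib}:` becoming `{ cudaVersion, lib }:`, and one element per line once a list or attribute set wraps.

```nix
# Hypothetical example showing the post-format (RFC-style) layout.
# Evaluate with: nix-instantiate --eval --strict example.nix
let
  # Pre-format, the same bindings would have read:
  #   names = ["a" "b"]; empty = {};
  names = [
    "a"
    "b"
  ];
  empty = { };
in
{
  inherit names empty;
  count = builtins.length names; # => 2
}
```

To reproduce the formatting locally, the pinned command from the commit message can be rerun against a nixpkgs checkout.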
Connor Baker 2024-03-27 20:16:29 +00:00
parent 7f797a698f
commit 93b08a7061
34 changed files with 974 additions and 994 deletions

View file

@ -21,6 +21,6 @@ let
assertCondition = true;
in
/* TODO: Consider testing whether we in fact use the newer libstdc++ */
# TODO: Consider testing whether we in fact use the newer libstdc++
lib.extendDerivation assertCondition passthruExtra cudaStdenv

View file

@ -1,4 +1,4 @@
{hostPlatform, lib}:
{ hostPlatform, lib }:
let
# Samples are built around the CUDA Toolkit, which is not available for
# aarch64. Check for both CUDA version and platform.
@ -8,7 +8,7 @@ let
extension =
final: _:
lib.attrsets.optionalAttrs platformIsSupported {
cuda-library-samples = final.callPackage ./generic.nix {};
cuda-library-samples = final.callPackage ./generic.nix { };
};
in
extension
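The hunk above is the overlay-extension idiom used throughout cuda-modules: an attribute is added only when a predicate holds, so unsupported platforms simply never see it. A minimal sketch with hypothetical values (and assuming a `<nixpkgs>` channel on `NIX_PATH`):

```nix
# Evaluate with: nix-instantiate --eval --strict example.nix
let
  lib = (import <nixpkgs> { }).lib;
  platformIsSupported = false; # pretend we are on an unsupported platform
  extension =
    final: _:
    lib.attrsets.optionalAttrs platformIsSupported {
      cuda-library-samples = "would-be-a-derivation"; # stand-in value
    };
in
# Apply the extension to an empty fixed point, the way a package set would.
builtins.attrNames (lib.fix (lib.extends extension (_: { }))) # => [ ]
```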

View file

@ -22,7 +22,7 @@ let
cmake
addOpenGLRunpath
];
buildInputs = [cudatoolkit];
buildInputs = [ cudatoolkit ];
postFixup = ''
for exe in $out/bin/*; do
addOpenGLRunpath $exe
@ -36,7 +36,7 @@ let
cuSPARSE, cuSOLVER, cuFFT, cuRAND, NPP and nvJPEG.
'';
license = lib.licenses.bsd3;
maintainers = with lib.maintainers; [obsidian-systems-maintenance] ++ lib.teams.cuda.members;
maintainers = with lib.maintainers; [ obsidian-systems-maintenance ] ++ lib.teams.cuda.members;
};
};
in
@ -69,9 +69,9 @@ in
src = "${src}/cuTENSOR";
buildInputs = [cutensor];
buildInputs = [ cutensor ];
cmakeFlags = ["-DCUTENSOR_EXAMPLE_BINARY_INSTALL_DIR=${builtins.placeholder "out"}/bin"];
cmakeFlags = [ "-DCUTENSOR_EXAMPLE_BINARY_INSTALL_DIR=${builtins.placeholder "out"}/bin" ];
# CUTENSOR_ROOT is double escaped
postPatch = ''

View file

@ -15,65 +15,63 @@
let
inherit (lib) lists strings;
in
backendStdenv.mkDerivation (
finalAttrs: {
strictDeps = true;
backendStdenv.mkDerivation (finalAttrs: {
strictDeps = true;
pname = "cuda-samples";
version = cudaVersion;
pname = "cuda-samples";
version = cudaVersion;
src = fetchFromGitHub {
owner = "NVIDIA";
repo = finalAttrs.pname;
rev = "v${finalAttrs.version}";
inherit hash;
};
src = fetchFromGitHub {
owner = "NVIDIA";
repo = finalAttrs.pname;
rev = "v${finalAttrs.version}";
inherit hash;
};
nativeBuildInputs =
[
autoAddDriverRunpath
pkg-config
]
# CMake has to run as a native, build-time dependency for libNVVM samples.
# However, it's not the primary build tool -- that's still make.
# As such, we disable CMake's build system.
++ lists.optionals (strings.versionAtLeast finalAttrs.version "12.2") [cmake];
nativeBuildInputs =
[
autoAddDriverRunpath
pkg-config
]
# CMake has to run as a native, build-time dependency for libNVVM samples.
# However, it's not the primary build tool -- that's still make.
# As such, we disable CMake's build system.
++ lists.optionals (strings.versionAtLeast finalAttrs.version "12.2") [ cmake ];
dontUseCmakeConfigure = true;
dontUseCmakeConfigure = true;
buildInputs = [
cudatoolkit
freeimage
glfw3
];
buildInputs = [
cudatoolkit
freeimage
glfw3
];
# See https://github.com/NVIDIA/cuda-samples/issues/75.
patches = lib.optionals (finalAttrs.version == "11.3") [
(fetchpatch {
url = "https://github.com/NVIDIA/cuda-samples/commit/5c3ec60faeb7a3c4ad9372c99114d7bb922fda8d.patch";
hash = "sha256-0XxdmNK9MPpHwv8+qECJTvXGlFxc+fIbta4ynYprfpU=";
})
];
# See https://github.com/NVIDIA/cuda-samples/issues/75.
patches = lib.optionals (finalAttrs.version == "11.3") [
(fetchpatch {
url = "https://github.com/NVIDIA/cuda-samples/commit/5c3ec60faeb7a3c4ad9372c99114d7bb922fda8d.patch";
hash = "sha256-0XxdmNK9MPpHwv8+qECJTvXGlFxc+fIbta4ynYprfpU=";
})
];
enableParallelBuilding = true;
enableParallelBuilding = true;
preConfigure = ''
export CUDA_PATH=${cudatoolkit}
'';
preConfigure = ''
export CUDA_PATH=${cudatoolkit}
'';
installPhase = ''
runHook preInstall
installPhase = ''
runHook preInstall
install -Dm755 -t $out/bin bin/${backendStdenv.hostPlatform.parsed.cpu.name}/${backendStdenv.hostPlatform.parsed.kernel.name}/release/*
install -Dm755 -t $out/bin bin/${backendStdenv.hostPlatform.parsed.cpu.name}/${backendStdenv.hostPlatform.parsed.kernel.name}/release/*
runHook postInstall
'';
runHook postInstall
'';
meta = {
description = "Samples for CUDA Developers which demonstrates features in CUDA Toolkit";
# CUDA itself is proprietary, but these sample apps are not.
license = lib.licenses.bsd3;
maintainers = with lib.maintainers; [obsidian-systems-maintenance] ++ lib.teams.cuda.members;
};
}
)
meta = {
description = "Samples for CUDA Developers which demonstrates features in CUDA Toolkit";
# CUDA itself is proprietary, but these sample apps are not.
license = lib.licenses.bsd3;
maintainers = with lib.maintainers; [ obsidian-systems-maintenance ] ++ lib.teams.cuda.members;
};
})
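The nativeBuildInputs logic above, where CMake is appended only for sample releases at or past 12.2, is a common version-gating pattern. A minimal sketch with hypothetical tool names standing in for real packages (assuming a `<nixpkgs>` channel):

```nix
# Evaluate with: nix-instantiate --eval --strict example.nix
let
  lib = (import <nixpkgs> { }).lib;
  inherit (lib) lists strings;
  mkNativeBuildInputs =
    version:
    [
      "autoAddDriverRunpath"
      "pkg-config"
    ]
    # CMake only joins the tool set for releases that ship the libNVVM samples.
    ++ lists.optionals (strings.versionAtLeast version "12.2") [ "cmake" ];
in
{
  cuda11 = mkNativeBuildInputs "11.6"; # => [ "autoAddDriverRunpath" "pkg-config" ]
  cuda12 = mkNativeBuildInputs "12.3"; # => [ ... "cmake" ]
}
```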

View file

@ -1,4 +1,4 @@
{cudaVersion, lib}:
{ cudaVersion, lib }:
let
inherit (lib) attrsets modules trivial;
redistName = "cuda";
@ -63,23 +63,21 @@ let
featureRelease
;
}).overrideAttrs
(
prevAttrs: {
# Add the package-specific license.
meta = prevAttrs.meta // {
license =
let
licensePath =
if redistribRelease.license_path != null then
redistribRelease.license_path
else
"${pname}/LICENSE.txt";
url = "https://developer.download.nvidia.com/compute/cuda/redist/${licensePath}";
in
lib.licenses.nvidiaCudaRedist // {inherit url;};
};
}
);
(prevAttrs: {
# Add the package-specific license.
meta = prevAttrs.meta // {
license =
let
licensePath =
if redistribRelease.license_path != null then
redistribRelease.license_path
else
"${pname}/LICENSE.txt";
url = "https://developer.download.nvidia.com/compute/cuda/redist/${licensePath}";
in
lib.licenses.nvidiaCudaRedist // { inherit url; };
};
});
in
drv;
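The override above attaches a package-specific license URL, falling back to `<pname>/LICENSE.txt` when the manifest carries no `license_path`. A minimal sketch of just that fallback, with hypothetical manifest values:

```nix
# Evaluate with: nix-instantiate --eval --strict example.nix
let
  pname = "libcublas"; # hypothetical package name
  license_path = null; # pretend the redistrib manifest omitted it
  licensePath = if license_path != null then license_path else "${pname}/LICENSE.txt";
in
{
  url = "https://developer.download.nvidia.com/compute/cuda/redist/${licensePath}";
  # => ".../compute/cuda/redist/libcublas/LICENSE.txt"
}
```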

View file

@ -1,4 +1,8 @@
{cudaVersion, lib, addDriverRunpath}:
{
cudaVersion,
lib,
addDriverRunpath,
}:
let
inherit (lib) attrsets lists strings;
# cudaVersionOlder : Version -> Boolean
@ -8,96 +12,92 @@ let
addBuildInputs =
drv: buildInputs:
drv.overrideAttrs (prevAttrs: {buildInputs = prevAttrs.buildInputs ++ buildInputs;});
drv.overrideAttrs (prevAttrs: {
buildInputs = prevAttrs.buildInputs ++ buildInputs;
});
in
# NOTE: Filter out attributes that are not present in the previous version of
# the package set. This is necessary to prevent the appearance of attributes
# like `cuda_nvcc` in `cudaPackages_10_0, which predates redistributables.
final: prev:
attrsets.filterAttrs (attr: _: (builtins.hasAttr attr prev)) {
libcufile = prev.libcufile.overrideAttrs (
prevAttrs: {
buildInputs = prevAttrs.buildInputs ++ [
final.libcublas.lib
final.pkgs.numactl
final.pkgs.rdma-core
];
# Before 11.7 libcufile depends on itself for some reason.
autoPatchelfIgnoreMissingDeps =
prevAttrs.autoPatchelfIgnoreMissingDeps
++ lists.optionals (cudaVersionOlder "11.7") [ "libcufile.so.0" ];
}
);
libcufile = prev.libcufile.overrideAttrs (prevAttrs: {
buildInputs = prevAttrs.buildInputs ++ [
final.libcublas.lib
final.pkgs.numactl
final.pkgs.rdma-core
];
# Before 11.7 libcufile depends on itself for some reason.
autoPatchelfIgnoreMissingDeps =
prevAttrs.autoPatchelfIgnoreMissingDeps
++ lists.optionals (cudaVersionOlder "11.7") [ "libcufile.so.0" ];
});
libcusolver = addBuildInputs prev.libcusolver (
# Always depends on this
[final.libcublas.lib]
[ final.libcublas.lib ]
# Dependency from 12.0 and on
++ lists.optionals (cudaVersionAtLeast "12.0") [final.libnvjitlink.lib]
++ lists.optionals (cudaVersionAtLeast "12.0") [ final.libnvjitlink.lib ]
# Dependency from 12.1 and on
++ lists.optionals (cudaVersionAtLeast "12.1") [final.libcusparse.lib]
++ lists.optionals (cudaVersionAtLeast "12.1") [ final.libcusparse.lib ]
);
libcusparse = addBuildInputs prev.libcusparse (
lists.optionals (cudaVersionAtLeast "12.0") [final.libnvjitlink.lib]
lists.optionals (cudaVersionAtLeast "12.0") [ final.libnvjitlink.lib ]
);
cuda_cudart = prev.cuda_cudart.overrideAttrs (
prevAttrs: {
# Remove once cuda-find-redist-features has a special case for libcuda
outputs =
prevAttrs.outputs
++ lists.optionals (!(builtins.elem "stubs" prevAttrs.outputs)) [ "stubs" ];
cuda_cudart = prev.cuda_cudart.overrideAttrs (prevAttrs: {
# Remove once cuda-find-redist-features has a special case for libcuda
outputs =
prevAttrs.outputs
++ lists.optionals (!(builtins.elem "stubs" prevAttrs.outputs)) [ "stubs" ];
allowFHSReferences = false;
allowFHSReferences = false;
# The libcuda stub's pkg-config doesn't follow the general pattern:
postPatch =
prevAttrs.postPatch or ""
+ ''
while IFS= read -r -d $'\0' path ; do
sed -i \
-e "s|^libdir\s*=.*/lib\$|libdir=''${!outputLib}/lib/stubs|" \
-e "s|^Libs\s*:\(.*\)\$|Libs: \1 -Wl,-rpath,${addDriverRunpath.driverLink}/lib|" \
"$path"
done < <(find -iname 'cuda-*.pc' -print0)
''
+ ''
# Namelink may not be enough, add a soname.
# Cf. https://gitlab.kitware.com/cmake/cmake/-/issues/25536
if [[ -f lib/stubs/libcuda.so && ! -f lib/stubs/libcuda.so.1 ]] ; then
ln -s libcuda.so lib/stubs/libcuda.so.1
fi
'';
# The libcuda stub's pkg-config doesn't follow the general pattern:
postPatch =
prevAttrs.postPatch or ""
+ ''
while IFS= read -r -d $'\0' path ; do
sed -i \
-e "s|^libdir\s*=.*/lib\$|libdir=''${!outputLib}/lib/stubs|" \
-e "s|^Libs\s*:\(.*\)\$|Libs: \1 -Wl,-rpath,${addDriverRunpath.driverLink}/lib|" \
"$path"
done < <(find -iname 'cuda-*.pc' -print0)
''
+ ''
# Namelink may not be enough, add a soname.
# Cf. https://gitlab.kitware.com/cmake/cmake/-/issues/25536
if [[ -f lib/stubs/libcuda.so && ! -f lib/stubs/libcuda.so.1 ]] ; then
ln -s libcuda.so lib/stubs/libcuda.so.1
fi
'';
postFixup =
prevAttrs.postFixup or ""
+ ''
moveToOutput lib/stubs "$stubs"
ln -s "$stubs"/lib/stubs/* "$stubs"/lib/
ln -s "$stubs"/lib/stubs "''${!outputLib}/lib/stubs"
'';
}
);
postFixup =
prevAttrs.postFixup or ""
+ ''
moveToOutput lib/stubs "$stubs"
ln -s "$stubs"/lib/stubs/* "$stubs"/lib/
ln -s "$stubs"/lib/stubs "''${!outputLib}/lib/stubs"
'';
});
cuda_compat = prev.cuda_compat.overrideAttrs (
prevAttrs: {
autoPatchelfIgnoreMissingDeps = prevAttrs.autoPatchelfIgnoreMissingDeps ++ [
"libnvrm_gpu.so"
"libnvrm_mem.so"
"libnvdla_runtime.so"
];
# `cuda_compat` only works on aarch64-linux, and only when building for Jetson devices.
badPlatformsConditions = prevAttrs.badPlatformsConditions // {
"Trying to use cuda_compat on aarch64-linux targeting non-Jetson devices" =
!final.flags.isJetsonBuild;
};
}
);
cuda_compat = prev.cuda_compat.overrideAttrs (prevAttrs: {
autoPatchelfIgnoreMissingDeps = prevAttrs.autoPatchelfIgnoreMissingDeps ++ [
"libnvrm_gpu.so"
"libnvrm_mem.so"
"libnvdla_runtime.so"
];
# `cuda_compat` only works on aarch64-linux, and only when building for Jetson devices.
badPlatformsConditions = prevAttrs.badPlatformsConditions // {
"Trying to use cuda_compat on aarch64-linux targeting non-Jetson devices" =
!final.flags.isJetsonBuild;
};
});
cuda_gdb = addBuildInputs prev.cuda_gdb (
# x86_64 only needs gmp from 12.0 and on
lists.optionals (cudaVersionAtLeast "12.0") [final.pkgs.gmp]
lists.optionals (cudaVersionAtLeast "12.0") [ final.pkgs.gmp ]
);
cuda_nvcc = prev.cuda_nvcc.overrideAttrs (
@ -176,9 +176,9 @@ attrsets.filterAttrs (attr: _: (builtins.hasAttr attr prev)) {
}
);
cuda_nvprof = prev.cuda_nvprof.overrideAttrs (
prevAttrs: {buildInputs = prevAttrs.buildInputs ++ [final.cuda_cupti.lib];}
);
cuda_nvprof = prev.cuda_nvprof.overrideAttrs (prevAttrs: {
buildInputs = prevAttrs.buildInputs ++ [ final.cuda_cupti.lib ];
});
cuda_demo_suite = addBuildInputs prev.cuda_demo_suite [
final.pkgs.freeglut
@ -189,26 +189,24 @@ attrsets.filterAttrs (attr: _: (builtins.hasAttr attr prev)) {
final.libcurand.lib
];
nsight_compute = prev.nsight_compute.overrideAttrs (
prevAttrs: {
nativeBuildInputs =
prevAttrs.nativeBuildInputs
++ (
if (strings.versionOlder prev.nsight_compute.version "2022.2.0") then
[final.pkgs.qt5.wrapQtAppsHook]
else
[final.pkgs.qt6.wrapQtAppsHook]
);
buildInputs =
prevAttrs.buildInputs
++ (
if (strings.versionOlder prev.nsight_compute.version "2022.2.0") then
[final.pkgs.qt5.qtwebview]
else
[final.pkgs.qt6.qtwebview]
);
}
);
nsight_compute = prev.nsight_compute.overrideAttrs (prevAttrs: {
nativeBuildInputs =
prevAttrs.nativeBuildInputs
++ (
if (strings.versionOlder prev.nsight_compute.version "2022.2.0") then
[ final.pkgs.qt5.wrapQtAppsHook ]
else
[ final.pkgs.qt6.wrapQtAppsHook ]
);
buildInputs =
prevAttrs.buildInputs
++ (
if (strings.versionOlder prev.nsight_compute.version "2022.2.0") then
[ final.pkgs.qt5.qtwebview ]
else
[ final.pkgs.qt6.qtwebview ]
);
});
nsight_systems = prev.nsight_systems.overrideAttrs (
prevAttrs:
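The whole fixup set in this file sits behind the `filterAttrs` guard noted near its top, so the overrides never introduce attributes that the underlying package set lacks. A minimal sketch with hypothetical attribute values (assuming a `<nixpkgs>` channel):

```nix
# Evaluate with: nix-instantiate --eval --strict example.nix
let
  lib = (import <nixpkgs> { }).lib;
  # An old package set that predates redistributables and has no libcufile.
  prev = {
    cuda_cudart = "old-cudart";
  };
  fixups = {
    cuda_cudart = "patched-cudart";
    libcufile = "patched-cufile"; # must be dropped, not introduced
  };
in
builtins.attrNames (lib.attrsets.filterAttrs (attr: _: builtins.hasAttr attr prev) fixups)
# => [ "cuda_cudart" ]
```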

View file

@ -1,6 +1,6 @@
{
cudaVersion,
runPatches ? [],
runPatches ? [ ],
autoPatchelfHook,
autoAddDriverRunpath,
addOpenGLRunpath,
@ -61,7 +61,7 @@ backendStdenv.mkDerivation rec {
dontPatchELF = true;
dontStrip = true;
src = fetchurl {inherit (release) url sha256;};
src = fetchurl { inherit (release) url sha256; };
outputs = [
"out"
@ -79,9 +79,9 @@ backendStdenv.mkDerivation rec {
autoAddDriverRunpath
markForCudatoolkitRootHook
]
++ lib.optionals (lib.versionOlder version "11") [libsForQt5.wrapQtAppsHook]
++ lib.optionals (lib.versionAtLeast version "11.8") [qt6Packages.wrapQtAppsHook];
propagatedBuildInputs = [setupCudaHook];
++ lib.optionals (lib.versionOlder version "11") [ libsForQt5.wrapQtAppsHook ]
++ lib.optionals (lib.versionAtLeast version "11.8") [ qt6Packages.wrapQtAppsHook ];
propagatedBuildInputs = [ setupCudaHook ];
buildInputs =
lib.optionals (lib.versionOlder version "11") [
libsForQt5.qt5.qtwebengine
@ -130,7 +130,7 @@ backendStdenv.mkDerivation rec {
(lib.getLib libtiff)
qt6Packages.qtwayland
rdma-core
(ucx.override {enableCuda = false;}) # Avoid infinite recursion
(ucx.override { enableCuda = false; }) # Avoid infinite recursion
xorg.libxshmfence
xorg.libxkbfile
]
@ -144,17 +144,15 @@ backendStdenv.mkDerivation rec {
gst_all_1.gstreamer
gst_all_1.gst-plugins-base
])
++ (
with qt6; [
qtmultimedia
qttools
qtpositioning
qtscxml
qtsvg
qtwebchannel
qtwebengine
]
)
++ (with qt6; [
qtmultimedia
qttools
qtpositioning
qtscxml
qtsvg
qtwebchannel
qtwebengine
])
));
# Prepended to runpaths by autoPatchelf.
@ -170,26 +168,28 @@ backendStdenv.mkDerivation rec {
"${placeholder "out"}/nvvm/lib64"
];
autoPatchelfIgnoreMissingDeps = [
# This is the hardware-dependent userspace driver that comes from
# nvidia_x11 package. It must be deployed at runtime in
# /run/opengl-driver/lib or pointed at by LD_LIBRARY_PATH variable, rather
# than pinned in runpath
"libcuda.so.1"
autoPatchelfIgnoreMissingDeps =
[
# This is the hardware-dependent userspace driver that comes from
# nvidia_x11 package. It must be deployed at runtime in
# /run/opengl-driver/lib or pointed at by LD_LIBRARY_PATH variable, rather
# than pinned in runpath
"libcuda.so.1"
# The krb5 expression ships libcom_err.so.3 but cudatoolkit asks for the
# older
# This dependency is asked for by target-linux-x64/CollectX/RedHat/x86_64/libssl.so.10
# - do we even want to use nvidia-shipped libssl?
"libcom_err.so.2"
] ++ lib.optionals (lib.versionOlder version "10.1") [
# For Cuda 10.0, nVidia also shipped a jre implementation which needed
# two old versions of ffmpeg which are not available in nixpkgs
"libavcodec.so.54"
"libavcodec.so.53"
"libavformat.so.54"
"libavformat.so.53"
];
# The krb5 expression ships libcom_err.so.3 but cudatoolkit asks for the
# older
# This dependency is asked for by target-linux-x64/CollectX/RedHat/x86_64/libssl.so.10
# - do we even want to use nvidia-shipped libssl?
"libcom_err.so.2"
]
++ lib.optionals (lib.versionOlder version "10.1") [
# For Cuda 10.0, nVidia also shipped a jre implementation which needed
# two old versions of ffmpeg which are not available in nixpkgs
"libavcodec.so.54"
"libavcodec.so.53"
"libavformat.so.54"
"libavformat.so.53"
];
preFixup =
if (lib.versionAtLeast version "10.1" && lib.versionOlder version "11") then
@ -282,7 +282,14 @@ backendStdenv.mkDerivation rec {
for qtlib in $out/host-linux-x64/Plugins/*/libq*.so; do
qtdir=$(basename $(dirname $qtlib))
filename=$(basename $qtlib)
for qtpkgdir in ${lib.concatMapStringsSep " " (x: qt6Packages.${x}) ["qtbase" "qtimageformats" "qtsvg" "qtwayland"]}; do
for qtpkgdir in ${
lib.concatMapStringsSep " " (x: qt6Packages.${x}) [
"qtbase"
"qtimageformats"
"qtsvg"
"qtwayland"
]
}; do
if [ -e $qtpkgdir/lib/qt-6/plugins/$qtdir/$filename ]; then
ln -snf $qtpkgdir/lib/qt-6/plugins/$qtdir/$filename $qtlib
fi
@ -303,8 +310,9 @@ backendStdenv.mkDerivation rec {
''}
# Remove some cruft.
${lib.optionalString ((lib.versionAtLeast version "7.0") && (lib.versionOlder version "10.1"))
"rm $out/bin/uninstall*"}
${lib.optionalString (
(lib.versionAtLeast version "7.0") && (lib.versionOlder version "10.1")
) "rm $out/bin/uninstall*"}
# Fixup path to samples (needed for cuda 6.5 or else nsight will not find them)
if [ -d "$out"/cuda-samples ]; then
@ -360,19 +368,18 @@ backendStdenv.mkDerivation rec {
wrapProgram "$out/bin/$b" \
--set GDK_PIXBUF_MODULE_FILE "$GDK_PIXBUF_MODULE_FILE"
done
${
lib.optionalString (lib.versionAtLeast version "12")
# Check we don't have any lurking vendored qt libraries that weren't
# replaced during installPhase
''
qtlibfiles=$(find $out -name "libq*.so" -type f)
if [ ! -z "$qtlibfiles" ]; then
echo "Found unexpected vendored Qt library files in $out" >&2
echo $qtlibfiles >&2
echo "These should be replaced with symlinks in installPhase" >&2
exit 1
fi
''
${lib.optionalString (lib.versionAtLeast version "12")
# Check we don't have any lurking vendored qt libraries that weren't
# replaced during installPhase
''
qtlibfiles=$(find $out -name "libq*.so" -type f)
if [ ! -z "$qtlibfiles" ]; then
echo "Found unexpected vendored Qt library files in $out" >&2
echo $qtlibfiles >&2
echo "These should be replaced with symlinks in installPhase" >&2
exit 1
fi
''
}
'';
@ -405,7 +412,7 @@ backendStdenv.mkDerivation rec {
meta = with lib; {
description = "A compiler for NVIDIA GPUs, math libraries, and tools";
homepage = "https://developer.nvidia.com/cuda-toolkit";
platforms = ["x86_64-linux"];
platforms = [ "x86_64-linux" ];
license = licenses.nvidiaCuda;
maintainers = teams.cuda.members;
};

View file

@ -17,7 +17,7 @@ let
;
in
finalAttrs: prevAttrs: {
src = fetchurl {inherit (package) url hash;};
src = fetchurl { inherit (package) url hash; };
# Useful for inspecting why something went wrong.
brokenConditions =
@ -34,9 +34,9 @@ finalAttrs: prevAttrs: {
buildInputs =
prevAttrs.buildInputs
++ [zlib]
++ lists.optionals finalAttrs.passthru.useCudatoolkitRunfile [final.cudatoolkit]
++ lists.optionals (!finalAttrs.passthru.useCudatoolkitRunfile) [final.libcublas.lib];
++ [ zlib ]
++ lists.optionals finalAttrs.passthru.useCudatoolkitRunfile [ final.cudatoolkit ]
++ lists.optionals (!finalAttrs.passthru.useCudatoolkitRunfile) [ final.libcublas.lib ];
# Tell autoPatchelf about runtime dependencies.
# NOTE: Versions from CUDNN releases have four components.
@ -51,13 +51,11 @@ finalAttrs: prevAttrs: {
homepage = "https://developer.nvidia.com/cudnn";
maintainers =
prevAttrs.meta.maintainers
++ (
with maintainers; [
mdaiter
samuela
connorbaker
]
);
++ (with maintainers; [
mdaiter
samuela
connorbaker
]);
license = {
shortName = "cuDNN EULA";
fullName = "NVIDIA cuDNN Software License Agreement (EULA)";

View file

@ -13,7 +13,7 @@
}
];
# powerpc
linux-ppc64le = [];
linux-ppc64le = [ ];
# server-grade arm
linux-sbsa = [
{

View file

@ -65,12 +65,10 @@ let
# Un-nest the manifests attribute set.
releaseGrabber = evaluatedModules: evaluatedModules.config.cutensor.manifests;
in
lists.map
(trivial.flip trivial.pipe [
configEvaluator
releaseGrabber
])
cutensorVersions;
lists.map (trivial.flip trivial.pipe [
configEvaluator
releaseGrabber
]) cutensorVersions;
# Our cudaVersion tells us which version of CUDA we're building against.
# The subdirectories in lib/ tell us which versions of CUDA are supported.
@ -96,15 +94,11 @@ let
redistArch = flags.getRedistArch hostPlatform.system;
# platformIsSupported :: Manifests -> Boolean
platformIsSupported =
{feature, ...}:
(attrsets.attrByPath
[
pname
redistArch
]
null
feature
) != null;
{ feature, ... }:
(attrsets.attrByPath [
pname
redistArch
] null feature) != null;
# TODO(@connorbaker): With an auxilliary file keeping track of the CUDA versions each release supports,
# we could filter out releases that don't support our CUDA version.
@ -116,41 +110,39 @@ let
# Compute versioned attribute name to be used in this package set
# Patch version changes should not break the build, so we only use major and minor
# computeName :: RedistribRelease -> String
computeName = {version, ...}: mkVersionedPackageName redistName version;
computeName = { version, ... }: mkVersionedPackageName redistName version;
in
final: _:
let
# buildCutensorPackage :: Manifests -> AttrSet Derivation
buildCutensorPackage =
{redistrib, feature}:
{ redistrib, feature }:
let
drv = final.callPackage ../generic-builders/manifest.nix {
inherit pname redistName libPath;
redistribRelease = redistrib.${pname};
featureRelease = feature.${pname};
};
fixedDrv = drv.overrideAttrs (
prevAttrs: {
buildInputs =
prevAttrs.buildInputs
++ lists.optionals (strings.versionOlder cudaVersion "11.4") [final.cudatoolkit]
++ lists.optionals (strings.versionAtLeast cudaVersion "11.4") (
[final.libcublas.lib]
# For some reason, the 1.4.x release of cuTENSOR requires the cudart library.
++ lists.optionals (strings.hasPrefix "1.4" redistrib.${pname}.version) [final.cuda_cudart.lib]
);
meta = prevAttrs.meta // {
description = "cuTENSOR: A High-Performance CUDA Library For Tensor Primitives";
homepage = "https://developer.nvidia.com/cutensor";
maintainers = prevAttrs.meta.maintainers ++ [lib.maintainers.obsidian-systems-maintenance];
license = lib.licenses.unfreeRedistributable // {
shortName = "cuTENSOR EULA";
name = "cuTENSOR SUPPLEMENT TO SOFTWARE LICENSE AGREEMENT FOR NVIDIA SOFTWARE DEVELOPMENT KITS";
url = "https://docs.nvidia.com/cuda/cutensor/license.html";
};
fixedDrv = drv.overrideAttrs (prevAttrs: {
buildInputs =
prevAttrs.buildInputs
++ lists.optionals (strings.versionOlder cudaVersion "11.4") [ final.cudatoolkit ]
++ lists.optionals (strings.versionAtLeast cudaVersion "11.4") (
[ final.libcublas.lib ]
# For some reason, the 1.4.x release of cuTENSOR requires the cudart library.
++ lists.optionals (strings.hasPrefix "1.4" redistrib.${pname}.version) [ final.cuda_cudart.lib ]
);
meta = prevAttrs.meta // {
description = "cuTENSOR: A High-Performance CUDA Library For Tensor Primitives";
homepage = "https://developer.nvidia.com/cutensor";
maintainers = prevAttrs.meta.maintainers ++ [ lib.maintainers.obsidian-systems-maintenance ];
license = lib.licenses.unfreeRedistributable // {
shortName = "cuTENSOR EULA";
name = "cuTENSOR SUPPLEMENT TO SOFTWARE LICENSE AGREEMENT FOR NVIDIA SOFTWARE DEVELOPMENT KITS";
url = "https://docs.nvidia.com/cuda/cutensor/license.html";
};
}
);
};
});
in
attrsets.nameValuePair (computeName redistrib.${pname}) fixedDrv;
@ -158,7 +150,7 @@ let
let
nameOfNewest = computeName (lists.last supportedManifests).redistrib.${pname};
drvs = builtins.listToAttrs (lists.map buildCutensorPackage supportedManifests);
containsDefault = attrsets.optionalAttrs (drvs != {}) {cutensor = drvs.${nameOfNewest};};
containsDefault = attrsets.optionalAttrs (drvs != { }) { cutensor = drvs.${nameOfNewest}; };
in
drvs // containsDefault;
in
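The `platformIsSupported` check in this file probes the feature manifest with `attrByPath`, returning the supplied default (`null`) when the platform key is missing. A minimal sketch with a hypothetical manifest:

```nix
# Evaluate with: nix-instantiate --eval --strict example.nix
let
  lib = (import <nixpkgs> { }).lib;
  pname = "cutensor";
  feature = {
    cutensor.linux-x86_64.outputs.lib = true; # hypothetical feature data
  };
  isSupported = redistArch: lib.attrsets.attrByPath [ pname redistArch ] null feature != null;
in
{
  x86_64 = isSupported "linux-x86_64"; # => true
  ppc64le = isSupported "linux-ppc64le"; # => false
}
```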

View file

@ -3,7 +3,7 @@
# - See the documentation in ./gpus.nix.
{
config,
cudaCapabilities ? (config.cudaCapabilities or []),
cudaCapabilities ? (config.cudaCapabilities or [ ]),
cudaForwardCompat ? (config.cudaForwardCompat or true),
lib,
cudaVersion,
@ -77,9 +77,9 @@ let
# cudaArchNameToVersions :: AttrSet String (List String)
# Maps the name of a GPU architecture to different versions of that architecture.
# For example, "Ampere" maps to [ "8.0" "8.6" "8.7" ].
cudaArchNameToVersions =
lists.groupBy' (versions: gpu: versions ++ [gpu.computeCapability]) [] (gpu: gpu.archName)
supportedGpus;
cudaArchNameToVersions = lists.groupBy' (versions: gpu: versions ++ [ gpu.computeCapability ]) [ ] (
gpu: gpu.archName
) supportedGpus;
# cudaComputeCapabilityToName :: AttrSet String String
# Maps the version of a GPU architecture to the name of that architecture.
@ -108,7 +108,7 @@ let
jetsonTargets = lists.intersectLists jetsonComputeCapabilities cudaCapabilities;
# dropDot :: String -> String
dropDot = ver: builtins.replaceStrings ["."] [""] ver;
dropDot = ver: builtins.replaceStrings [ "." ] [ "" ] ver;
# archMapper :: String -> List String -> List String
# Maps a feature across a list of architecture versions to produce a list of architectures.
@ -135,25 +135,29 @@ let
# `all-packages.nix`, which is evaluated on all systems. As such, we need to handle unsupported
# systems gracefully.
# getRedistArch :: String -> String
getRedistArch = nixSystem: attrsets.attrByPath [ nixSystem ] "unsupported" {
aarch64-linux = if jetsonTargets != [] then "linux-aarch64" else "linux-sbsa";
x86_64-linux = "linux-x86_64";
ppc64le-linux = "linux-ppc64le";
x86_64-windows = "windows-x86_64";
};
getRedistArch =
nixSystem:
attrsets.attrByPath [ nixSystem ] "unsupported" {
aarch64-linux = if jetsonTargets != [ ] then "linux-aarch64" else "linux-sbsa";
x86_64-linux = "linux-x86_64";
ppc64le-linux = "linux-ppc64le";
x86_64-windows = "windows-x86_64";
};
# Maps NVIDIA redist arch to Nix system.
# NOTE: This function *will* be called by unsupported systems because `cudaPackages` is part of
# `all-packages.nix`, which is evaluated on all systems. As such, we need to handle unsupported
# systems gracefully.
# getNixSystem :: String -> String
getNixSystem = redistArch: attrsets.attrByPath [ redistArch ] "unsupported-${redistArch}" {
linux-sbsa = "aarch64-linux";
linux-aarch64 = "aarch64-linux";
linux-x86_64 = "x86_64-linux";
linux-ppc64le = "ppc64le-linux";
windows-x86_64 = "x86_64-windows";
};
getNixSystem =
redistArch:
attrsets.attrByPath [ redistArch ] "unsupported-${redistArch}" {
linux-sbsa = "aarch64-linux";
linux-aarch64 = "aarch64-linux";
linux-x86_64 = "x86_64-linux";
linux-ppc64le = "ppc64le-linux";
windows-x86_64 = "x86_64-windows";
};
formatCapabilities =
{
@ -194,7 +198,7 @@ let
gencode =
let
base = gencodeMapper "sm" cudaCapabilities;
forward = gencodeMapper "compute" [(lists.last cudaCapabilities)];
forward = gencodeMapper "compute" [ (lists.last cudaCapabilities) ];
in
base ++ lib.optionals enableForwardCompat forward;
@ -209,150 +213,151 @@ let
# isJetsonBuild :: Boolean
isJetsonBuild =
let
requestedJetsonDevices =
lists.filter (cap: cudaComputeCapabilityToIsJetson.${cap} or false)
cudaCapabilities;
requestedNonJetsonDevices =
lists.filter (cap: !(builtins.elem cap requestedJetsonDevices))
cudaCapabilities;
jetsonBuildSufficientCondition = requestedJetsonDevices != [];
jetsonBuildNecessaryCondition = requestedNonJetsonDevices == [] && hostPlatform.isAarch64;
requestedJetsonDevices = lists.filter (
cap: cudaComputeCapabilityToIsJetson.${cap} or false
) cudaCapabilities;
requestedNonJetsonDevices = lists.filter (
cap: !(builtins.elem cap requestedJetsonDevices)
) cudaCapabilities;
jetsonBuildSufficientCondition = requestedJetsonDevices != [ ];
jetsonBuildNecessaryCondition = requestedNonJetsonDevices == [ ] && hostPlatform.isAarch64;
in
trivial.throwIf (jetsonBuildSufficientCondition && !jetsonBuildNecessaryCondition)
''
Jetson devices cannot be targeted with non-Jetson devices. Additionally, they require hostPlatform to be aarch64.
You requested ${builtins.toJSON cudaCapabilities} for host platform ${hostPlatform.system}.
Requested Jetson devices: ${builtins.toJSON requestedJetsonDevices}.
Requested non-Jetson devices: ${builtins.toJSON requestedNonJetsonDevices}.
Exactly one of the following must be true:
- All CUDA capabilities belong to Jetson devices and hostPlatform is aarch64.
- No CUDA capabilities belong to Jetson devices.
See ${./gpus.nix} for a list of architectures supported by this version of Nixpkgs.
''
jetsonBuildSufficientCondition
trivial.throwIf (jetsonBuildSufficientCondition && !jetsonBuildNecessaryCondition) ''
Jetson devices cannot be targeted with non-Jetson devices. Additionally, they require hostPlatform to be aarch64.
You requested ${builtins.toJSON cudaCapabilities} for host platform ${hostPlatform.system}.
Requested Jetson devices: ${builtins.toJSON requestedJetsonDevices}.
Requested non-Jetson devices: ${builtins.toJSON requestedNonJetsonDevices}.
Exactly one of the following must be true:
- All CUDA capabilities belong to Jetson devices and hostPlatform is aarch64.
- No CUDA capabilities belong to Jetson devices.
See ${./gpus.nix} for a list of architectures supported by this version of Nixpkgs.
'' jetsonBuildSufficientCondition
&& jetsonBuildNecessaryCondition;
};
in
# When changing names or formats: pause, validate, and update the assert
assert let
expected = {
cudaCapabilities = [
"7.5"
"8.6"
];
enableForwardCompat = true;
assert
let
expected = {
cudaCapabilities = [
"7.5"
"8.6"
];
enableForwardCompat = true;
archNames = [
"Turing"
"Ampere"
];
realArches = [
"sm_75"
"sm_86"
];
virtualArches = [
"compute_75"
"compute_86"
];
arches = [
"sm_75"
"sm_86"
"compute_86"
];
archNames = [
"Turing"
"Ampere"
];
realArches = [
"sm_75"
"sm_86"
];
virtualArches = [
"compute_75"
"compute_86"
];
arches = [
"sm_75"
"sm_86"
"compute_86"
];
gencode = [
"-gencode=arch=compute_75,code=sm_75"
"-gencode=arch=compute_86,code=sm_86"
"-gencode=arch=compute_86,code=compute_86"
];
gencodeString = "-gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_86,code=compute_86";
gencode = [
"-gencode=arch=compute_75,code=sm_75"
"-gencode=arch=compute_86,code=sm_86"
"-gencode=arch=compute_86,code=compute_86"
];
gencodeString = "-gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_86,code=sm_86 -gencode=arch=compute_86,code=compute_86";
isJetsonBuild = false;
};
actual = formatCapabilities {
cudaCapabilities = [
"7.5"
"8.6"
];
};
actualWrapped = (builtins.tryEval (builtins.deepSeq actual actual)).value;
in
asserts.assertMsg ((strings.versionAtLeast cudaVersion "11.2") -> (expected == actualWrapped)) ''
This test should only fail when using a version of CUDA older than 11.2, the first to support
8.6.
Expected: ${builtins.toJSON expected}
Actual: ${builtins.toJSON actualWrapped}
'';
# Check mixed Jetson and non-Jetson devices
assert let
expected = false;
actual = formatCapabilities {
cudaCapabilities = [
"7.2"
"7.5"
];
};
actualWrapped = (builtins.tryEval (builtins.deepSeq actual actual)).value;
in
asserts.assertMsg (expected == actualWrapped) ''
Jetson devices capabilities cannot be mixed with non-jetson devices.
Capability 7.5 is non-Jetson and should not be allowed with Jetson 7.2.
Expected: ${builtins.toJSON expected}
Actual: ${builtins.toJSON actualWrapped}
'';
# Check Jetson-only
assert let
expected = {
cudaCapabilities = [
"6.2"
"7.2"
];
enableForwardCompat = true;
archNames = [
"Pascal"
"Volta"
];
realArches = [
"sm_62"
"sm_72"
];
virtualArches = [
"compute_62"
"compute_72"
];
arches = [
"sm_62"
"sm_72"
"compute_72"
];
gencode = [
"-gencode=arch=compute_62,code=sm_62"
"-gencode=arch=compute_72,code=sm_72"
"-gencode=arch=compute_72,code=compute_72"
];
gencodeString = "-gencode=arch=compute_62,code=sm_62 -gencode=arch=compute_72,code=sm_72 -gencode=arch=compute_72,code=compute_72";
isJetsonBuild = true;
};
actual = formatCapabilities {
cudaCapabilities = [
"6.2"
"7.2"
];
};
actualWrapped = (builtins.tryEval (builtins.deepSeq actual actual)).value;
in
asserts.assertMsg
# We can't do this test unless we're targeting aarch64
(hostPlatform.isAarch64 -> (expected == actualWrapped))
''
Jetson devices can only be built with other Jetson devices.
Both 6.2 and 7.2 are Jetson devices.
isJetsonBuild = false;
};
actual = formatCapabilities {
cudaCapabilities = [
"7.5"
"8.6"
];
};
actualWrapped = (builtins.tryEval (builtins.deepSeq actual actual)).value;
in
asserts.assertMsg ((strings.versionAtLeast cudaVersion "11.2") -> (expected == actualWrapped)) ''
This test should only fail when using a version of CUDA older than 11.2, the first to support
8.6.
Expected: ${builtins.toJSON expected}
Actual: ${builtins.toJSON actualWrapped}
'';
# Check mixed Jetson and non-Jetson devices
assert
let
expected = false;
actual = formatCapabilities {
cudaCapabilities = [
"7.2"
"7.5"
];
};
actualWrapped = (builtins.tryEval (builtins.deepSeq actual actual)).value;
in
asserts.assertMsg (expected == actualWrapped) ''
Jetson devices capabilities cannot be mixed with non-jetson devices.
Capability 7.5 is non-Jetson and should not be allowed with Jetson 7.2.
Expected: ${builtins.toJSON expected}
Actual: ${builtins.toJSON actualWrapped}
'';
# Check Jetson-only
assert
let
expected = {
cudaCapabilities = [
"6.2"
"7.2"
];
enableForwardCompat = true;
archNames = [
"Pascal"
"Volta"
];
realArches = [
"sm_62"
"sm_72"
];
virtualArches = [
"compute_62"
"compute_72"
];
arches = [
"sm_62"
"sm_72"
"compute_72"
];
gencode = [
"-gencode=arch=compute_62,code=sm_62"
"-gencode=arch=compute_72,code=sm_72"
"-gencode=arch=compute_72,code=compute_72"
];
gencodeString = "-gencode=arch=compute_62,code=sm_62 -gencode=arch=compute_72,code=sm_72 -gencode=arch=compute_72,code=compute_72";
isJetsonBuild = true;
};
actual = formatCapabilities {
cudaCapabilities = [
"6.2"
"7.2"
];
};
actualWrapped = (builtins.tryEval (builtins.deepSeq actual actual)).value;
in
asserts.assertMsg
# We can't do this test unless we're targeting aarch64
(hostPlatform.isAarch64 -> (expected == actualWrapped))
''
Jetson devices can only be built with other Jetson devices.
Both 6.2 and 7.2 are Jetson devices.
Expected: ${builtins.toJSON expected}
Actual: ${builtins.toJSON actualWrapped}
'';
{
# formatCapabilities :: { cudaCapabilities: List Capability, enableForwardCompat: Boolean } -> { ... }
inherit formatCapabilities;
@ -376,6 +381,6 @@ asserts.assertMsg
;
}
// formatCapabilities {
cudaCapabilities = if cudaCapabilities == [] then defaultCapabilities else cudaCapabilities;
cudaCapabilities = if cudaCapabilities == [ ] then defaultCapabilities else cudaCapabilities;
enableForwardCompat = cudaForwardCompat;
}
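The asserts in this file pin down what `formatCapabilities` is expected to produce; at its core is the `dropDot`/`gencodeMapper` pair, which emits one real (`sm_*`) architecture per capability plus a single virtual (`compute_*`) one for forward compatibility. A minimal sketch with a simplified mapper (assuming a `<nixpkgs>` channel):

```nix
# Evaluate with: nix-instantiate --eval --strict example.nix
let
  lib = (import <nixpkgs> { }).lib;
  inherit (lib) lists;
  cudaCapabilities = [
    "7.5"
    "8.6"
  ];
  dropDot = ver: builtins.replaceStrings [ "." ] [ "" ] ver;
  gencodeMapper =
    feat: map (cap: "-gencode=arch=compute_${dropDot cap},code=${feat}_${dropDot cap}");
  base = gencodeMapper "sm" cudaCapabilities;
  forward = gencodeMapper "compute" [ (lists.last cudaCapabilities) ];
in
base ++ forward
# => [ "-gencode=arch=compute_75,code=sm_75"
#      "-gencode=arch=compute_86,code=sm_86"
#      "-gencode=arch=compute_86,code=compute_86" ]
```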

View file

@ -50,144 +50,139 @@ let
sourceMatchesHost = flags.getNixSystem redistArch == stdenv.hostPlatform.system;
in
backendStdenv.mkDerivation (
finalAttrs: {
# NOTE: Even though there's no actual buildPhase going on here, the derivations of the
# redistributables are sensitive to the compiler flags provided to stdenv. The patchelf package
# is sensitive to the compiler flags provided to stdenv, and we depend on it. As such, we are
# also sensitive to the compiler flags provided to stdenv.
inherit pname;
inherit (redistribRelease) version;
backendStdenv.mkDerivation (finalAttrs: {
# NOTE: Even though there's no actual buildPhase going on here, the derivations of the
# redistributables are sensitive to the compiler flags provided to stdenv. The patchelf package
# is sensitive to the compiler flags provided to stdenv, and we depend on it. As such, we are
# also sensitive to the compiler flags provided to stdenv.
inherit pname;
inherit (redistribRelease) version;
# Don't force serialization to string for structured attributes, like outputToPatterns
# and brokenConditions.
# Avoids "set cannot be coerced to string" errors.
__structuredAttrs = true;
# Don't force serialization to string for structured attributes, like outputToPatterns
# and brokenConditions.
# Avoids "set cannot be coerced to string" errors.
__structuredAttrs = true;
# Keep better track of dependencies.
strictDeps = true;
# Keep better track of dependencies.
strictDeps = true;
# NOTE: Outputs are evaluated jointly with meta, so in the case that this is an unsupported platform,
# we still need to provide a list of outputs.
outputs =
let
# Checks whether the redistributable provides an output.
hasOutput =
output:
attrsets.attrByPath
[
redistArch
"outputs"
output
]
false
featureRelease;
# Order is important here so we use a list.
possibleOutputs = [
"bin"
"lib"
"static"
"dev"
"doc"
"sample"
"python"
];
# Filter out outputs that don't exist in the redistributable.
# NOTE: In the case the redistributable isn't supported on the target platform,
# we will have `outputs = [ "out" ] ++ possibleOutputs`. This is of note because platforms which
# aren't supported would otherwise have evaluation errors when trying to access outputs other than `out`.
# The alternative would be to have `outputs = [ "out" ]` when`redistArch = "unsupported"`, but that would
# require adding guards throughout the entirety of the CUDA package set to ensure `cudaSupport` is true --
# recall that OfBorg will evaluate packages marked as broken and that `cudaPackages` will be evaluated with
# `cudaSupport = false`!
additionalOutputs =
if redistArch == "unsupported"
then possibleOutputs
else builtins.filter hasOutput possibleOutputs;
# The out output is special -- it's the default output and we always include it.
outputs = [ "out" ] ++ additionalOutputs;
in
outputs;
# Traversed in the order of the outputs speficied in outputs;
# entries are skipped if they don't exist in outputs.
outputToPatterns = {
bin = [ "bin" ];
dev = [
"share/pkgconfig"
"**/*.pc"
"**/*.cmake"
];
lib = [
# NOTE: Outputs are evaluated jointly with meta, so in the case that this is an unsupported platform,
# we still need to provide a list of outputs.
outputs =
let
# Checks whether the redistributable provides an output.
hasOutput =
output:
attrsets.attrByPath [
redistArch
"outputs"
output
] false featureRelease;
# Order is important here so we use a list.
possibleOutputs = [
"bin"
"lib"
"lib64"
"static"
"dev"
"doc"
"sample"
"python"
];
static = ["**/*.a"];
sample = ["samples"];
python = ["**/*.whl"];
};
# Filter out outputs that don't exist in the redistributable.
# NOTE: In the case the redistributable isn't supported on the target platform,
# we will have `outputs = [ "out" ] ++ possibleOutputs`. This is of note because platforms which
# aren't supported would otherwise have evaluation errors when trying to access outputs other than `out`.
# The alternative would be to have `outputs = [ "out" ]` when`redistArch = "unsupported"`, but that would
# require adding guards throughout the entirety of the CUDA package set to ensure `cudaSupport` is true --
# recall that OfBorg will evaluate packages marked as broken and that `cudaPackages` will be evaluated with
# `cudaSupport = false`!
additionalOutputs =
if redistArch == "unsupported" then possibleOutputs else builtins.filter hasOutput possibleOutputs;
# The out output is special -- it's the default output and we always include it.
outputs = [ "out" ] ++ additionalOutputs;
in
outputs;
# Useful for introspecting why something went wrong. Maps descriptions of why the derivation would be marked as
# broken on have badPlatforms include the current platform.
# brokenConditions :: AttrSet Bool
# Sets `meta.broken = true` if any of the conditions are true.
# Example: Broken on a specific version of CUDA or when a dependency has a specific version.
brokenConditions = { };
# badPlatformsConditions :: AttrSet Bool
# Sets `meta.badPlatforms = meta.platforms` if any of the conditions are true.
# Example: Broken on a specific architecture when some condition is met (like targeting Jetson).
badPlatformsConditions = {
"No source" = !sourceMatchesHost;
};
# src :: Optional Derivation
src = trivial.pipe redistArch [
# If redistArch doesn't exist in redistribRelease, return null.
(redistArch: redistribRelease.${redistArch} or null)
# If the release is non-null, fetch the source; otherwise, return null.
(trivial.mapNullable (
{ relative_path, sha256, ... }:
fetchurl {
url = "https://developer.download.nvidia.com/compute/${redistName}/redist/${relative_path}";
inherit sha256;
}
))
# Traversed in the order of the outputs speficied in outputs;
# entries are skipped if they don't exist in outputs.
outputToPatterns = {
bin = [ "bin" ];
dev = [
"share/pkgconfig"
"**/*.pc"
"**/*.cmake"
];
lib = [
"lib"
"lib64"
];
static = [ "**/*.a" ];
sample = [ "samples" ];
python = [ "**/*.whl" ];
};
# Handle the pkg-config files:
# 1. No FHS
# 2. Location expected by the pkg-config wrapper
# 3. Generate unversioned names too
postPatch = ''
for path in pkg-config pkgconfig ; do
[[ -d "$path" ]] || continue
mkdir -p share/pkgconfig
mv "$path"/* share/pkgconfig/
rmdir "$path"
done
# Useful for introspecting why something went wrong. Maps descriptions of why the derivation would be marked as
# broken on have badPlatforms include the current platform.
for pc in share/pkgconfig/*.pc ; do
sed -i \
-e "s|^cudaroot\s*=.*\$|cudaroot=''${!outputDev}|" \
-e "s|^libdir\s*=.*/lib\$|libdir=''${!outputLib}/lib|" \
-e "s|^includedir\s*=.*/include\$|includedir=''${!outputDev}/include|" \
"$pc"
done
# brokenConditions :: AttrSet Bool
# Sets `meta.broken = true` if any of the conditions are true.
# Example: Broken on a specific version of CUDA or when a dependency has a specific version.
brokenConditions = { };
# E.g. cuda-11.8.pc -> cuda.pc
for pc in share/pkgconfig/*-"$majorMinorVersion.pc" ; do
ln -s "$(basename "$pc")" "''${pc%-$majorMinorVersion.pc}".pc
done
'';
# badPlatformsConditions :: AttrSet Bool
# Sets `meta.badPlatforms = meta.platforms` if any of the conditions are true.
# Example: Broken on a specific architecture when some condition is met (like targeting Jetson).
badPlatformsConditions = {
"No source" = !sourceMatchesHost;
};
env.majorMinorVersion = cudaMajorMinorVersion;
# src :: Optional Derivation
src = trivial.pipe redistArch [
# If redistArch doesn't exist in redistribRelease, return null.
(redistArch: redistribRelease.${redistArch} or null)
# If the release is non-null, fetch the source; otherwise, return null.
(trivial.mapNullable (
{ relative_path, sha256, ... }:
fetchurl {
url = "https://developer.download.nvidia.com/compute/${redistName}/redist/${relative_path}";
inherit sha256;
}
))
];
# We do need some other phases, like configurePhase, so the multiple-output setup hook works.
dontBuild = true;
# Handle the pkg-config files:
# 1. No FHS
# 2. Location expected by the pkg-config wrapper
# 3. Generate unversioned names too
postPatch = ''
for path in pkg-config pkgconfig ; do
[[ -d "$path" ]] || continue
mkdir -p share/pkgconfig
mv "$path"/* share/pkgconfig/
rmdir "$path"
done
nativeBuildInputs = [
for pc in share/pkgconfig/*.pc ; do
sed -i \
-e "s|^cudaroot\s*=.*\$|cudaroot=''${!outputDev}|" \
-e "s|^libdir\s*=.*/lib\$|libdir=''${!outputLib}/lib|" \
-e "s|^includedir\s*=.*/include\$|includedir=''${!outputDev}/include|" \
"$pc"
done
# E.g. cuda-11.8.pc -> cuda.pc
for pc in share/pkgconfig/*-"$majorMinorVersion.pc" ; do
ln -s "$(basename "$pc")" "''${pc%-$majorMinorVersion.pc}".pc
done
'';
env.majorMinorVersion = cudaMajorMinorVersion;
# We do need some other phases, like configurePhase, so the multiple-output setup hook works.
dontBuild = true;
nativeBuildInputs =
[
autoPatchelfHook
# This hook will make sure libcuda can be found
# in typically /lib/opengl-driver by adding that
@ -205,142 +200,140 @@ backendStdenv.mkDerivation (
autoAddCudaCompatRunpath
];
buildInputs =
[
# autoPatchelfHook will search for a libstdc++ and we're giving it
# one that is compatible with the rest of nixpkgs, even when
# nvcc forces us to use an older gcc
# NB: We don't actually know if this is the right thing to do
stdenv.cc.cc.lib
];
buildInputs = [
# autoPatchelfHook will search for a libstdc++ and we're giving it
# one that is compatible with the rest of nixpkgs, even when
# nvcc forces us to use an older gcc
# NB: We don't actually know if this is the right thing to do
stdenv.cc.cc.lib
];
# Picked up by autoPatchelf
# Needed e.g. for libnvrtc to locate (dlopen) libnvrtc-builtins
appendRunpaths = ["$ORIGIN"];
# Picked up by autoPatchelf
# Needed e.g. for libnvrtc to locate (dlopen) libnvrtc-builtins
appendRunpaths = [ "$ORIGIN" ];
# NOTE: We don't need to check for dev or doc, because those outputs are handled by
# the multiple-outputs setup hook.
# NOTE: moveToOutput operates on all outputs:
# https://github.com/NixOS/nixpkgs/blob/2920b6fc16a9ed5d51429e94238b28306ceda79e/pkgs/build-support/setup-hooks/multiple-outputs.sh#L105-L107
installPhase =
let
mkMoveToOutputCommand =
output:
let
template = pattern: ''moveToOutput "${pattern}" "${"$" + output}"'';
patterns = finalAttrs.outputToPatterns.${output} or [];
in
strings.concatMapStringsSep "\n" template patterns;
in
# Pre-install hook
''
runHook preInstall
''
# Handle the existence of libPath, which requires us to re-arrange the lib directory
+ strings.optionalString (libPath != null) ''
full_lib_path="lib/${libPath}"
if [[ ! -d "$full_lib_path" ]] ; then
echo "${finalAttrs.pname}: '$full_lib_path' does not exist, only found:" >&2
find lib/ -mindepth 1 -maxdepth 1 >&2
echo "This release might not support your CUDA version" >&2
exit 1
fi
echo "Making libPath '$full_lib_path' the root of lib" >&2
mv "$full_lib_path" lib_new
rm -r lib
mv lib_new lib
''
# Create the primary output, out, and move the other outputs into it.
+ ''
mkdir -p "$out"
mv * "$out"
''
# Move the outputs into their respective outputs.
+ strings.concatMapStringsSep "\n" mkMoveToOutputCommand (builtins.tail finalAttrs.outputs)
# Add a newline to the end of the installPhase, so that the post-install hook doesn't
# get concatenated with the last moveToOutput command.
+ "\n"
# Post-install hook
+ ''
runHook postInstall
'';
doInstallCheck = true;
allowFHSReferences = true; # TODO: Default to `false`
postInstallCheck = ''
echo "Executing postInstallCheck"
if [[ -z "''${allowFHSReferences-}" ]] ; then
mapfile -t outputPaths < <(for o in $(getAllOutputNames); do echo "''${!o}"; done)
if grep --max-count=5 --recursive --exclude=LICENSE /usr/ "''${outputPaths[@]}" ; then
echo "Detected references to /usr" >&2
exit 1
fi
fi
'';
# libcuda needs to be resolved during runtime
autoPatchelfIgnoreMissingDeps = [
"libcuda.so"
"libcuda.so.*"
];
# The out output leverages the same functionality which backs the `symlinkJoin` function in
# Nixpkgs:
# https://github.com/NixOS/nixpkgs/blob/d8b2a92df48f9b08d68b0132ce7adfbdbc1fbfac/pkgs/build-support/trivial-builders/default.nix#L510
#
# That should allow us to emulate "fat" default outputs without having to actually create them.
#
# It is important that this run after the autoPatchelfHook, otherwise the symlinks in out will reference libraries in lib, creating a circular dependency.
postPhases = ["postPatchelf"];
# For each output, create a symlink to it in the out output.
# NOTE: We must recreate the out output here, because the setup hook will have deleted it if it was empty.
postPatchelf = ''
mkdir -p "$out"
for output in $(getAllOutputNames); do
if [[ "$output" != "out" ]]; then
${meta.getExe lndir} "''${!output}" "$out"
fi
done
'';
# Make the CUDA-patched stdenv available
passthru.stdenv = backendStdenv;
# Setting propagatedBuildInputs to false will prevent outputs known to the multiple-outputs
# from depending on `out` by default.
# https://github.com/NixOS/nixpkgs/blob/2920b6fc16a9ed5d51429e94238b28306ceda79e/pkgs/build-support/setup-hooks/multiple-outputs.sh#L196
# Indeed, we want to do the opposite -- fat "out" outputs that contain all the other outputs.
propagatedBuildOutputs = false;
# By default, if the dev output exists it just uses that.
# However, because we disabled propagatedBuildOutputs, dev doesn't contain libraries or
# anything of the sort. To remedy this, we set outputSpecified to true, and use
# outputsToInstall, which tells Nix which outputs to use when the package name is used
# unqualified (that is, without an explicit output).
outputSpecified = true;
meta = {
description = "${redistribRelease.name}. By downloading and using the packages you accept the terms and conditions of the ${finalAttrs.meta.license.shortName}";
sourceProvenance = [sourceTypes.binaryNativeCode];
broken = lists.any trivial.id (attrsets.attrValues finalAttrs.brokenConditions);
platforms = trivial.pipe supportedRedistArchs [
# Map each redist arch to the equivalent nix system or null if there is no equivalent.
(builtins.map flags.getNixSystem)
# Filter out unsupported systems
(builtins.filter (nixSystem: !(strings.hasPrefix "unsupported-" nixSystem)))
];
badPlatforms =
# NOTE: We don't need to check for dev or doc, because those outputs are handled by
# the multiple-outputs setup hook.
# NOTE: moveToOutput operates on all outputs:
# https://github.com/NixOS/nixpkgs/blob/2920b6fc16a9ed5d51429e94238b28306ceda79e/pkgs/build-support/setup-hooks/multiple-outputs.sh#L105-L107
installPhase =
let
mkMoveToOutputCommand =
output:
let
isBadPlatform = lists.any trivial.id (attrsets.attrValues finalAttrs.badPlatformsConditions);
template = pattern: ''moveToOutput "${pattern}" "${"$" + output}"'';
patterns = finalAttrs.outputToPatterns.${output} or [ ];
in
lists.optionals isBadPlatform finalAttrs.meta.platforms;
license = licenses.unfree;
maintainers = teams.cuda.members;
# Force the use of the default, fat output by default (even though `dev` exists, which
# causes Nix to prefer that output over the others if outputSpecified isn't set).
outputsToInstall = ["out"];
};
}
)
strings.concatMapStringsSep "\n" template patterns;
in
# Pre-install hook
''
runHook preInstall
''
# Handle the existence of libPath, which requires us to re-arrange the lib directory
+ strings.optionalString (libPath != null) ''
full_lib_path="lib/${libPath}"
if [[ ! -d "$full_lib_path" ]] ; then
echo "${finalAttrs.pname}: '$full_lib_path' does not exist, only found:" >&2
find lib/ -mindepth 1 -maxdepth 1 >&2
echo "This release might not support your CUDA version" >&2
exit 1
fi
echo "Making libPath '$full_lib_path' the root of lib" >&2
mv "$full_lib_path" lib_new
rm -r lib
mv lib_new lib
''
# Create the primary output, out, and move the other outputs into it.
+ ''
mkdir -p "$out"
mv * "$out"
''
# Move the outputs into their respective outputs.
+ strings.concatMapStringsSep "\n" mkMoveToOutputCommand (builtins.tail finalAttrs.outputs)
# Add a newline to the end of the installPhase, so that the post-install hook doesn't
# get concatenated with the last moveToOutput command.
+ "\n"
# Post-install hook
+ ''
runHook postInstall
'';
doInstallCheck = true;
allowFHSReferences = true; # TODO: Default to `false`
postInstallCheck = ''
echo "Executing postInstallCheck"
if [[ -z "''${allowFHSReferences-}" ]] ; then
mapfile -t outputPaths < <(for o in $(getAllOutputNames); do echo "''${!o}"; done)
if grep --max-count=5 --recursive --exclude=LICENSE /usr/ "''${outputPaths[@]}" ; then
echo "Detected references to /usr" >&2
exit 1
fi
fi
'';
# libcuda needs to be resolved during runtime
autoPatchelfIgnoreMissingDeps = [
"libcuda.so"
"libcuda.so.*"
];
# The out output leverages the same functionality which backs the `symlinkJoin` function in
# Nixpkgs:
# https://github.com/NixOS/nixpkgs/blob/d8b2a92df48f9b08d68b0132ce7adfbdbc1fbfac/pkgs/build-support/trivial-builders/default.nix#L510
#
# That should allow us to emulate "fat" default outputs without having to actually create them.
#
# It is important that this run after the autoPatchelfHook, otherwise the symlinks in out will reference libraries in lib, creating a circular dependency.
postPhases = [ "postPatchelf" ];
# For each output, create a symlink to it in the out output.
# NOTE: We must recreate the out output here, because the setup hook will have deleted it if it was empty.
postPatchelf = ''
mkdir -p "$out"
for output in $(getAllOutputNames); do
if [[ "$output" != "out" ]]; then
${meta.getExe lndir} "''${!output}" "$out"
fi
done
'';
# Make the CUDA-patched stdenv available
passthru.stdenv = backendStdenv;
# Setting propagatedBuildInputs to false will prevent outputs known to the multiple-outputs
# from depending on `out` by default.
# https://github.com/NixOS/nixpkgs/blob/2920b6fc16a9ed5d51429e94238b28306ceda79e/pkgs/build-support/setup-hooks/multiple-outputs.sh#L196
# Indeed, we want to do the opposite -- fat "out" outputs that contain all the other outputs.
propagatedBuildOutputs = false;
# By default, if the dev output exists it just uses that.
# However, because we disabled propagatedBuildOutputs, dev doesn't contain libraries or
# anything of the sort. To remedy this, we set outputSpecified to true, and use
# outputsToInstall, which tells Nix which outputs to use when the package name is used
# unqualified (that is, without an explicit output).
outputSpecified = true;
meta = {
description = "${redistribRelease.name}. By downloading and using the packages you accept the terms and conditions of the ${finalAttrs.meta.license.shortName}";
sourceProvenance = [ sourceTypes.binaryNativeCode ];
broken = lists.any trivial.id (attrsets.attrValues finalAttrs.brokenConditions);
platforms = trivial.pipe supportedRedistArchs [
# Map each redist arch to the equivalent nix system or null if there is no equivalent.
(builtins.map flags.getNixSystem)
# Filter out unsupported systems
(builtins.filter (nixSystem: !(strings.hasPrefix "unsupported-" nixSystem)))
];
badPlatforms =
let
isBadPlatform = lists.any trivial.id (attrsets.attrValues finalAttrs.badPlatformsConditions);
in
lists.optionals isBadPlatform finalAttrs.meta.platforms;
license = licenses.unfree;
maintainers = teams.cuda.members;
# Force the use of the default, fat output by default (even though `dev` exists, which
# causes Nix to prefer that output over the others if outputSpecified isn't set).
outputsToInstall = [ "out" ];
};
})

View file

@ -52,7 +52,9 @@ let
# - Package: ../modules/${pname}/releases/package.nix
# FIXME: do this at the module system level
propagatePlatforms = lib.mapAttrs (redistArch: packages: map (p: { inherit redistArch; } // p) packages);
propagatePlatforms = lib.mapAttrs (
redistArch: packages: map (p: { inherit redistArch; } // p) packages
);
# All releases across all platforms
# See ../modules/${pname}/releases/releases.nix
@ -61,7 +63,7 @@ let
# Compute versioned attribute name to be used in this package set
# Patch version changes should not break the build, so we only use major and minor
# computeName :: Package -> String
computeName = {version, ...}: mkVersionedPackageName pname version;
computeName = { version, ... }: mkVersionedPackageName pname version;
# Check whether a package supports our CUDA version and platform.
# isSupported :: Package -> Bool
@ -81,16 +83,15 @@ let
# All the supported packages we can build for our platform.
# perSystemReleases :: List Package
allReleases = lib.pipe releaseSets
[
(lib.attrValues)
(lists.flatten)
(lib.groupBy (p: lib.versions.majorMinor p.version))
(lib.mapAttrs (_: builtins.sort preferable))
(lib.mapAttrs (_: lib.take 1))
(lib.attrValues)
(lib.concatMap lib.trivial.id)
];
allReleases = lib.pipe releaseSets [
(lib.attrValues)
(lists.flatten)
(lib.groupBy (p: lib.versions.majorMinor p.version))
(lib.mapAttrs (_: builtins.sort preferable))
(lib.mapAttrs (_: lib.take 1))
(lib.attrValues)
(lib.concatMap lib.trivial.id)
];
newest = builtins.head (builtins.sort preferable allReleases);
@ -115,7 +116,10 @@ let
buildPackage =
package:
let
shims = final.callPackage shimsFn {inherit package; inherit (package) redistArch; };
shims = final.callPackage shimsFn {
inherit package;
inherit (package) redistArch;
};
name = computeName package;
drv = final.callPackage ./manifest.nix {
inherit pname;
@ -129,7 +133,9 @@ let
# versionedDerivations :: AttrSet Derivation
versionedDerivations = builtins.listToAttrs (lists.map buildPackage allReleases);
defaultDerivation = { ${pname} = (buildPackage newest).value; };
defaultDerivation = {
${pname} = (buildPackage newest).value;
};
in
versionedDerivations // defaultDerivation;
in
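The `allReleases` pipeline above flattens every platform's release list, groups the releases by major.minor, and keeps only the most preferable release from each group. A minimal sketch with hypothetical versions and a simplified comparator (the real `preferable` also weighs platform support; a `<nixpkgs>` channel is assumed):

```nix
# Evaluate with: nix-instantiate --eval --strict example.nix
let
  lib = (import <nixpkgs> { }).lib;
  inherit (lib) lists;
  releaseSets = {
    linux-x86_64 = [
      { version = "8.9.2"; }
      { version = "8.9.7"; }
      { version = "8.8.0"; }
    ];
  };
  # Stand-in comparator: within a group, prefer the newer version.
  preferable = p1: p2: lib.versionOlder p2.version p1.version;
  allReleases = lib.pipe releaseSets [
    lib.attrValues
    lists.flatten
    (lib.groupBy (p: lib.versions.majorMinor p.version))
    (lib.mapAttrs (_: builtins.sort preferable))
    (lib.mapAttrs (_: lib.take 1))
    lib.attrValues
    (lib.concatMap lib.trivial.id)
  ];
in
map (p: p.version) allReleases # => [ "8.8.0" "8.9.7" ]
```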

View file

@ -1 +1,4 @@
{options, ...}: {options.cuda.manifests = options.generic.manifests;}
{ options, ... }:
{
options.cuda.manifests = options.generic.manifests;
}

View file

@ -1,4 +1,4 @@
{options, ...}:
{ options, ... }:
{
options.cudnn.releases = options.generic.releases;
# TODO(@connorbaker): Figure out how to add additional options to the

View file

@ -1 +1,4 @@
{options, ...}: {options.cutensor.manifests = options.generic.manifests;}
{ options, ... }:
{
options.cutensor.manifests = options.generic.manifests;
}

View file

@ -1,7 +1,7 @@
{lib, config, ...}:
{ lib, config, ... }:
{
options.generic.manifests = {
feature = import ./feature/manifest.nix {inherit lib config;};
redistrib = import ./redistrib/manifest.nix {inherit lib;};
feature = import ./feature/manifest.nix { inherit lib config; };
redistrib = import ./redistrib/manifest.nix { inherit lib; };
};
}

View file

@ -1,7 +1,7 @@
{lib, config, ...}:
{ lib, config, ... }:
let
inherit (lib) options trivial types;
Release = import ./release.nix {inherit lib config;};
Release = import ./release.nix { inherit lib config; };
in
options.mkOption {
description = "A feature manifest is an attribute set which includes a mapping from package name to release";

View file

@ -1,4 +1,4 @@
{lib, ...}:
{ lib, ... }:
let
inherit (lib) options types;
in

View file

@ -1,10 +1,10 @@
{lib, ...}:
{ lib, ... }:
let
inherit (lib) options types;
Outputs = import ./outputs.nix {inherit lib;};
Outputs = import ./outputs.nix { inherit lib; };
in
options.mkOption {
description = "A package in the manifest";
example = (import ./release.nix {inherit lib;}).linux-x86_64;
type = types.submodule {options.outputs = Outputs;};
example = (import ./release.nix { inherit lib; }).linux-x86_64;
type = types.submodule { options.outputs = Outputs; };
}

View file

@ -1,10 +1,10 @@
{lib, config, ...}:
{ lib, config, ... }:
let
inherit (lib) options types;
Package = import ./package.nix {inherit lib config;};
Package = import ./package.nix { inherit lib config; };
in
options.mkOption {
description = "A release is an attribute set which includes a mapping from platform to package";
example = (import ./manifest.nix {inherit lib;}).cuda_cccl;
example = (import ./manifest.nix { inherit lib; }).cuda_cccl;
type = types.attrsOf Package.type;
}

View file

@ -1,7 +1,7 @@
{lib, ...}:
{ lib, ... }:
let
inherit (lib) options trivial types;
Release = import ./release.nix {inherit lib;};
Release = import ./release.nix { inherit lib; };
in
options.mkOption {
description = "A redistributable manifest is an attribute set which includes a mapping from package name to release";

View file

@ -1,10 +1,10 @@
{lib, ...}:
{ lib, ... }:
let
inherit (lib) options types;
in
options.mkOption {
description = "A package in the manifest";
example = (import ./release.nix {inherit lib;}).linux-x86_64;
example = (import ./release.nix { inherit lib; }).linux-x86_64;
type = types.submodule {
options = {
relative_path = options.mkOption {

View file

@ -1,11 +1,11 @@
{lib, ...}:
{ lib, ... }:
let
inherit (lib) options types;
Package = import ./package.nix {inherit lib;};
Package = import ./package.nix { inherit lib; };
in
options.mkOption {
description = "A release is an attribute set which includes a mapping from platform to package";
example = (import ./manifest.nix {inherit lib;}).cuda_cccl;
example = (import ./manifest.nix { inherit lib; }).cuda_cccl;
type = types.submodule {
# Allow any attribute name as these will be the platform names
freeformType = types.attrsOf Package.type;

View file

@ -1,4 +1,4 @@
{lib, config, ...}:
{ lib, config, ... }:
let
inherit (config.generic.types) majorMinorVersion majorMinorPatchBuildVersion;
inherit (lib) options types;

View file

@ -1,11 +1,11 @@
{lib, ...}:
{ lib, ... }:
let
inherit (lib) options types;
in
{
options.generic.types = options.mkOption {
type = types.attrsOf types.optionType;
default = {};
default = { };
description = "A set of generic types.";
};
config.generic.types = {

View file

@ -1,4 +1,4 @@
{options, ...}:
{ options, ... }:
{
options.tensorrt.releases = options.generic.releases;
# TODO(@connorbaker): Figure out how to add additional options to the

View file

@ -22,63 +22,61 @@ let
nccl
;
in
backendStdenv.mkDerivation (
finalAttrs: {
backendStdenv.mkDerivation (finalAttrs: {
pname = "nccl-tests";
version = "2.13.9";
pname = "nccl-tests";
version = "2.13.9";
src = fetchFromGitHub {
owner = "NVIDIA";
repo = finalAttrs.pname;
rev = "v${finalAttrs.version}";
hash = "sha256-QYuMBPhvHHVo2ku14jD1CVINLPW0cyiXJkXxb77IxbE=";
};
src = fetchFromGitHub {
owner = "NVIDIA";
repo = finalAttrs.pname;
rev = "v${finalAttrs.version}";
hash = "sha256-QYuMBPhvHHVo2ku14jD1CVINLPW0cyiXJkXxb77IxbE=";
};
strictDeps = true;
strictDeps = true;
nativeBuildInputs =
[which]
++ lib.optionals (lib.versionOlder cudaVersion "11.4") [cudatoolkit]
++ lib.optionals (lib.versionAtLeast cudaVersion "11.4") [cuda_nvcc];
nativeBuildInputs =
[ which ]
++ lib.optionals (lib.versionOlder cudaVersion "11.4") [ cudatoolkit ]
++ lib.optionals (lib.versionAtLeast cudaVersion "11.4") [ cuda_nvcc ];
buildInputs =
[nccl]
++ lib.optionals (lib.versionOlder cudaVersion "11.4") [cudatoolkit]
++ lib.optionals (lib.versionAtLeast cudaVersion "11.4") [
cuda_nvcc.dev # crt/host_config.h
cuda_cudart
]
++ lib.optionals (lib.versionAtLeast cudaVersion "12.0") [
cuda_cccl.dev # <nv/target>
]
++ lib.optionals mpiSupport [mpi];
buildInputs =
[ nccl ]
++ lib.optionals (lib.versionOlder cudaVersion "11.4") [ cudatoolkit ]
++ lib.optionals (lib.versionAtLeast cudaVersion "11.4") [
cuda_nvcc.dev # crt/host_config.h
cuda_cudart
]
++ lib.optionals (lib.versionAtLeast cudaVersion "12.0") [
cuda_cccl.dev # <nv/target>
]
++ lib.optionals mpiSupport [ mpi ];
makeFlags =
["NCCL_HOME=${nccl}"]
++ lib.optionals (lib.versionOlder cudaVersion "11.4") ["CUDA_HOME=${cudatoolkit}"]
++ lib.optionals (lib.versionAtLeast cudaVersion "11.4") ["CUDA_HOME=${cuda_nvcc}"]
++ lib.optionals mpiSupport ["MPI=1"];
makeFlags =
[ "NCCL_HOME=${nccl}" ]
++ lib.optionals (lib.versionOlder cudaVersion "11.4") [ "CUDA_HOME=${cudatoolkit}" ]
++ lib.optionals (lib.versionAtLeast cudaVersion "11.4") [ "CUDA_HOME=${cuda_nvcc}" ]
++ lib.optionals mpiSupport [ "MPI=1" ];
enableParallelBuilding = true;
enableParallelBuilding = true;
installPhase = ''
mkdir -p $out/bin
cp -r build/* $out/bin/
'';
installPhase = ''
mkdir -p $out/bin
cp -r build/* $out/bin/
'';
passthru.updateScript = gitUpdater {
inherit (finalAttrs) pname version;
rev-prefix = "v";
};
passthru.updateScript = gitUpdater {
inherit (finalAttrs) pname version;
rev-prefix = "v";
};
meta = with lib; {
description = "Tests to check both the performance and the correctness of NVIDIA NCCL operations";
homepage = "https://github.com/NVIDIA/nccl-tests";
platforms = platforms.linux;
license = licenses.bsd3;
broken = !config.cudaSupport || (mpiSupport && mpi == null);
maintainers = with maintainers; [jmillerpdt] ++ teams.cuda.members;
};
}
)
meta = with lib; {
description = "Tests to check both the performance and the correctness of NVIDIA NCCL operations";
homepage = "https://github.com/NVIDIA/nccl-tests";
platforms = platforms.linux;
license = licenses.bsd3;
broken = !config.cudaSupport || (mpiSupport && mpi == null);
maintainers = with maintainers; [ jmillerpdt ] ++ teams.cuda.members;
};
})

View file

@ -22,94 +22,92 @@ let
cudaVersion
;
in
backendStdenv.mkDerivation (
finalAttrs: {
pname = "nccl";
version = "2.20.5-1";
backendStdenv.mkDerivation (finalAttrs: {
pname = "nccl";
version = "2.20.5-1";
src = fetchFromGitHub {
owner = "NVIDIA";
repo = finalAttrs.pname;
rev = "v${finalAttrs.version}";
hash = "sha256-ModIjD6RaRD/57a/PA1oTgYhZsAQPrrvhl5sNVXnO6c=";
};
src = fetchFromGitHub {
owner = "NVIDIA";
repo = finalAttrs.pname;
rev = "v${finalAttrs.version}";
hash = "sha256-ModIjD6RaRD/57a/PA1oTgYhZsAQPrrvhl5sNVXnO6c=";
};
strictDeps = true;
strictDeps = true;
outputs = [
"out"
"dev"
outputs = [
"out"
"dev"
];
nativeBuildInputs =
[
which
autoAddDriverRunpath
python3
]
++ lib.optionals (lib.versionOlder cudaVersion "11.4") [ cudatoolkit ]
++ lib.optionals (lib.versionAtLeast cudaVersion "11.4") [ cuda_nvcc ];
buildInputs =
lib.optionals (lib.versionOlder cudaVersion "11.4") [ cudatoolkit ]
++ lib.optionals (lib.versionAtLeast cudaVersion "11.4") [
cuda_nvcc.dev # crt/host_config.h
cuda_cudart
]
# NOTE: CUDA versions in Nixpkgs only use a major and minor version. When we do comparisons
# against other version, like below, it's important that we use the same format. Otherwise,
# we'll get incorrect results.
# For example, lib.versionAtLeast "12.0" "12.0.0" == false.
++ lib.optionals (lib.versionAtLeast cudaVersion "12.0") [ cuda_cccl ];
env.NIX_CFLAGS_COMPILE = toString [ "-Wno-unused-function" ];
preConfigure = ''
patchShebangs ./src/device/generate.py
makeFlagsArray+=(
"NVCC_GENCODE=${lib.concatStringsSep " " cudaFlags.gencode}"
)
'';
makeFlags =
[ "PREFIX=$(out)" ]
++ lib.optionals (lib.versionOlder cudaVersion "11.4") [
"CUDA_HOME=${cudatoolkit}"
"CUDA_LIB=${lib.getLib cudatoolkit}/lib"
"CUDA_INC=${lib.getDev cudatoolkit}/include"
]
++ lib.optionals (lib.versionAtLeast cudaVersion "11.4") [
"CUDA_HOME=${cuda_nvcc}"
"CUDA_LIB=${lib.getLib cuda_cudart}/lib"
"CUDA_INC=${lib.getDev cuda_cudart}/include"
];
nativeBuildInputs =
[
which
autoAddDriverRunpath
python3
]
++ lib.optionals (lib.versionOlder cudaVersion "11.4") [cudatoolkit]
++ lib.optionals (lib.versionAtLeast cudaVersion "11.4") [cuda_nvcc];
buildInputs =
lib.optionals (lib.versionOlder cudaVersion "11.4") [cudatoolkit]
++ lib.optionals (lib.versionAtLeast cudaVersion "11.4") [
cuda_nvcc.dev # crt/host_config.h
cuda_cudart
]
# NOTE: CUDA versions in Nixpkgs only use a major and minor version. When we do comparisons
# against other version, like below, it's important that we use the same format. Otherwise,
# we'll get incorrect results.
# For example, lib.versionAtLeast "12.0" "12.0.0" == false.
++ lib.optionals (lib.versionAtLeast cudaVersion "12.0") [cuda_cccl];
env.NIX_CFLAGS_COMPILE = toString ["-Wno-unused-function"];
preConfigure = ''
patchShebangs ./src/device/generate.py
makeFlagsArray+=(
"NVCC_GENCODE=${lib.concatStringsSep " " cudaFlags.gencode}"
)
'';
makeFlags =
["PREFIX=$(out)"]
++ lib.optionals (lib.versionOlder cudaVersion "11.4") [
"CUDA_HOME=${cudatoolkit}"
"CUDA_LIB=${lib.getLib cudatoolkit}/lib"
"CUDA_INC=${lib.getDev cudatoolkit}/include"
]
++ lib.optionals (lib.versionAtLeast cudaVersion "11.4") [
"CUDA_HOME=${cuda_nvcc}"
"CUDA_LIB=${lib.getLib cuda_cudart}/lib"
"CUDA_INC=${lib.getDev cuda_cudart}/include"
];
enableParallelBuilding = true;
postFixup = ''
moveToOutput lib/libnccl_static.a $dev
'';
passthru.updateScript = gitUpdater {
inherit (finalAttrs) pname version;
rev-prefix = "v";
};
meta = with lib; {
description = "Multi-GPU and multi-node collective communication primitives for NVIDIA GPUs";
homepage = "https://developer.nvidia.com/nccl";
license = licenses.bsd3;
platforms = platforms.linux;
# NCCL is not supported on Jetson, because it does not use NVLink or PCI-e for inter-GPU communication.
# https://forums.developer.nvidia.com/t/can-jetson-orin-support-nccl/232845/9
badPlatforms = lib.optionals cudaFlags.isJetsonBuild [ "aarch64-linux" ];
maintainers =
with maintainers;
[
mdaiter
orivej
]
++ teams.cuda.members;
};
}
)
enableParallelBuilding = true;
postFixup = ''
moveToOutput lib/libnccl_static.a $dev
'';
passthru.updateScript = gitUpdater {
inherit (finalAttrs) pname version;
rev-prefix = "v";
};
meta = with lib; {
description = "Multi-GPU and multi-node collective communication primitives for NVIDIA GPUs";
homepage = "https://developer.nvidia.com/nccl";
license = licenses.bsd3;
platforms = platforms.linux;
# NCCL is not supported on Jetson, because it does not use NVLink or PCI-e for inter-GPU communication.
# https://forums.developer.nvidia.com/t/can-jetson-orin-support-nccl/232845/9
badPlatforms = lib.optionals cudaFlags.isJetsonBuild [ "aarch64-linux" ];
maintainers =
with maintainers;
[
mdaiter
orivej
]
++ teams.cuda.members;
};
})
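The NOTE above about version comparisons is easy to check; a small sketch, assuming only nixpkgs' `lib`, shows why both sides have to use the same major.minor form:

```nix
let
  lib = (import <nixpkgs> { }).lib;
in
{
  # "12.0" sorts *before* "12.0.0", so mixing the two formats gives a surprising answer:
  mixed = lib.versionAtLeast "12.0" "12.0.0"; # => false
  # normalising both sides to major.minor behaves as expected:
  normalised = lib.versionAtLeast (lib.versions.majorMinor "12.0.1") "12.0"; # => true
}
```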

View file

@ -31,18 +31,18 @@ backendStdenv.mkDerivation {
cmake
autoAddDriverRunpath
]
++ lib.optionals (lib.versionOlder cudaVersion "11.4") [cudatoolkit]
++ lib.optionals (lib.versionAtLeast cudaVersion "11.4") [cuda_nvcc];
++ lib.optionals (lib.versionOlder cudaVersion "11.4") [ cudatoolkit ]
++ lib.optionals (lib.versionAtLeast cudaVersion "11.4") [ cuda_nvcc ];
buildInputs =
lib.optionals (lib.versionOlder cudaVersion "11.4") [cudatoolkit]
lib.optionals (lib.versionOlder cudaVersion "11.4") [ cudatoolkit ]
++ lib.optionals (lib.versionAtLeast cudaVersion "11.4") [
(getDev libcublas)
(getLib libcublas)
(getOutput "static" libcublas)
cuda_cudart
]
++ lib.optionals (lib.versionAtLeast cudaVersion "12.0") [cuda_cccl];
++ lib.optionals (lib.versionAtLeast cudaVersion "12.0") [ cuda_cccl ];
cmakeFlags = [
(lib.cmakeBool "CMAKE_VERBOSE_MAKEFILE" true)

View file

@ -2,63 +2,50 @@ final: _: {
# Helper hook used in both autoAddCudaCompatRunpath and
# autoAddDriverRunpath that applies a generic patching action to all elf
# files with a dynamic linking section.
autoFixElfFiles =
final.callPackage
(
{makeSetupHook}:
makeSetupHook
{
name = "auto-fix-elf-files";
}
./auto-fix-elf-files.sh
)
{};
autoFixElfFiles = final.callPackage (
{ makeSetupHook }: makeSetupHook { name = "auto-fix-elf-files"; } ./auto-fix-elf-files.sh
) { };
# Internal hook, used by cudatoolkit and cuda redist packages
# to accommodate automatic CUDAToolkit_ROOT construction
markForCudatoolkitRootHook =
final.callPackage
(
{makeSetupHook}:
makeSetupHook {name = "mark-for-cudatoolkit-root-hook";} ./mark-for-cudatoolkit-root-hook.sh
)
{};
markForCudatoolkitRootHook = final.callPackage (
{ makeSetupHook }:
makeSetupHook { name = "mark-for-cudatoolkit-root-hook"; } ./mark-for-cudatoolkit-root-hook.sh
) { };
# Currently propagated by cuda_nvcc or cudatoolkit, rather than used directly
setupCudaHook =
(final.callPackage
(
{makeSetupHook, backendStdenv}:
makeSetupHook
{
name = "setup-cuda-hook";
setupCudaHook = (
final.callPackage (
{ makeSetupHook, backendStdenv }:
makeSetupHook {
name = "setup-cuda-hook";
substitutions.setupCudaHook = placeholder "out";
substitutions.setupCudaHook = placeholder "out";
# Point NVCC at a compatible compiler
substitutions.ccRoot = "${backendStdenv.cc}";
# Point NVCC at a compatible compiler
substitutions.ccRoot = "${backendStdenv.cc}";
# Required in addition to ccRoot as otherwise bin/gcc is looked up
# when building CMakeCUDACompilerId.cu
substitutions.ccFullPath = "${backendStdenv.cc}/bin/${backendStdenv.cc.targetPrefix}c++";
}
./setup-cuda-hook.sh
)
{}
);
# Required in addition to ccRoot as otherwise bin/gcc is looked up
# when building CMakeCUDACompilerId.cu
substitutions.ccFullPath = "${backendStdenv.cc}/bin/${backendStdenv.cc.targetPrefix}c++";
} ./setup-cuda-hook.sh
) { }
);
autoAddDriverRunpath =
final.callPackage
(
{addDriverRunpath, autoFixElfFiles, makeSetupHook}:
makeSetupHook
{
name = "auto-add-opengl-runpath-hook";
propagatedBuildInputs = [addDriverRunpath autoFixElfFiles];
}
./auto-add-driver-runpath-hook.sh
)
{};
autoAddDriverRunpath = final.callPackage (
{
addDriverRunpath,
autoFixElfFiles,
makeSetupHook,
}:
makeSetupHook {
name = "auto-add-opengl-runpath-hook";
propagatedBuildInputs = [
addDriverRunpath
autoFixElfFiles
];
} ./auto-add-driver-runpath-hook.sh
) { };
# Deprecated: an alias kept for compatibility. Consider removing after 24.11
autoAddOpenGLRunpathHook = final.autoAddDriverRunpath;
@ -68,27 +55,26 @@ final: _: {
# patched elf files, but `cuda_compat` path must take precedence (otherwise,
# it doesn't have any effect) and thus appear first. Meaning this hook must be
# executed last.
autoAddCudaCompatRunpath =
final.callPackage
(
{makeSetupHook, autoFixElfFiles, cuda_compat ? null }:
makeSetupHook
{
name = "auto-add-cuda-compat-runpath-hook";
propagatedBuildInputs = [autoFixElfFiles];
autoAddCudaCompatRunpath = final.callPackage (
{
makeSetupHook,
autoFixElfFiles,
cuda_compat ? null,
}:
makeSetupHook {
name = "auto-add-cuda-compat-runpath-hook";
propagatedBuildInputs = [ autoFixElfFiles ];
substitutions = {
# Hotfix Ofborg evaluation
libcudaPath = if final.flags.isJetsonBuild then "${cuda_compat}/compat" else null;
};
substitutions = {
# Hotfix Ofborg evaluation
libcudaPath = if final.flags.isJetsonBuild then "${cuda_compat}/compat" else null;
};
meta.broken = !final.flags.isJetsonBuild;
meta.broken = !final.flags.isJetsonBuild;
# Pre-cuda_compat CUDA release:
meta.badPlatforms = final.lib.optionals (cuda_compat == null) final.lib.platforms.all;
meta.platforms = cuda_compat.meta.platforms or [ ];
}
./auto-add-cuda-compat-runpath.sh
)
{};
# Pre-cuda_compat CUDA release:
meta.badPlatforms = final.lib.optionals (cuda_compat == null) final.lib.platforms.all;
meta.platforms = cuda_compat.meta.platforms or [ ];
} ./auto-add-cuda-compat-runpath.sh
) { };
}
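As a hedged sketch of how these hooks are consumed downstream (the package name and source path are hypothetical), a derivation only needs to list a hook in `nativeBuildInputs` for its fixup to run; the file would be called with `cudaPackages.callPackage`:

```nix
{ cudaPackages }:
cudaPackages.backendStdenv.mkDerivation {
  pname = "my-cuda-tool"; # hypothetical package
  version = "0.1.0";
  src = ./.; # assume the sources live next to this file
  nativeBuildInputs = [
    cudaPackages.cuda_nvcc # propagates setupCudaHook, pointing nvcc at a supported host compiler
    cudaPackages.autoAddDriverRunpath # patches the runpath of installed ELF files
  ];
  buildInputs = [ cudaPackages.cuda_cudart ];
}
```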

View file

@ -108,6 +108,6 @@ finalAttrs: prevAttrs: {
prevAttrs.meta.badPlatforms or [ ]
++ lib.optionals (targetArch == "unsupported") [ hostPlatform.system ];
homepage = "https://developer.nvidia.com/tensorrt";
maintainers = prevAttrs.meta.maintainers ++ [maintainers.aidalgol];
maintainers = prevAttrs.meta.maintainers ++ [ maintainers.aidalgol ];
};
}

View file

@ -3,9 +3,9 @@
{
tensorrt.releases = {
# jetson
linux-aarch64 = [];
linux-aarch64 = [ ];
# powerpc
linux-ppc64le = [];
linux-ppc64le = [ ];
# server-grade arm
linux-sbsa = [
{

View file

@ -40,78 +40,75 @@ let
# Backbone
gpus = builtins.import ../development/cuda-modules/gpus.nix;
nvccCompatibilities = builtins.import ../development/cuda-modules/nvcc-compatibilities.nix;
flags = callPackage ../development/cuda-modules/flags.nix {inherit cudaVersion gpus;};
passthruFunction =
final:
(
{
inherit cudaVersion lib pkgs;
inherit gpus nvccCompatibilities flags;
cudaMajorVersion = versions.major cudaVersion;
cudaMajorMinorVersion = versions.majorMinor cudaVersion;
cudaOlder = strings.versionOlder cudaVersion;
cudaAtLeast = strings.versionAtLeast cudaVersion;
flags = callPackage ../development/cuda-modules/flags.nix { inherit cudaVersion gpus; };
passthruFunction = final: ({
inherit cudaVersion lib pkgs;
inherit gpus nvccCompatibilities flags;
cudaMajorVersion = versions.major cudaVersion;
cudaMajorMinorVersion = versions.majorMinor cudaVersion;
cudaOlder = strings.versionOlder cudaVersion;
cudaAtLeast = strings.versionAtLeast cudaVersion;
# Maintain a reference to the final cudaPackages.
# Without this, if we use `final.callPackage` and a package accepts `cudaPackages` as an argument,
# it's provided with `cudaPackages` from the top-level scope, which is not what we want. We want to
# provide the `cudaPackages` from the final scope -- that is, the *current* scope.
cudaPackages = final;
# Maintain a reference to the final cudaPackages.
# Without this, if we use `final.callPackage` and a package accepts `cudaPackages` as an argument,
# it's provided with `cudaPackages` from the top-level scope, which is not what we want. We want to
# provide the `cudaPackages` from the final scope -- that is, the *current* scope.
cudaPackages = final;
# TODO(@connorbaker): `cudaFlags` is an alias for `flags` which should be removed in the future.
cudaFlags = flags;
# TODO(@connorbaker): `cudaFlags` is an alias for `flags` which should be removed in the future.
cudaFlags = flags;
# Exposed as cudaPackages.backendStdenv.
# This is what nvcc uses as a backend,
# and it has to be an officially supported one (e.g. gcc11 for cuda11).
#
# It, however, propagates current stdenv's libstdc++ to avoid "GLIBCXX_* not found errors"
# when linked with other C++ libraries.
# E.g. for cudaPackages_11_8 we use gcc11 with gcc12's libstdc++
# Cf. https://github.com/NixOS/nixpkgs/pull/218265 for context
backendStdenv = final.callPackage ../development/cuda-modules/backend-stdenv.nix {};
# Exposed as cudaPackages.backendStdenv.
# This is what nvcc uses as a backend,
# and it has to be an officially supported one (e.g. gcc11 for cuda11).
#
# It, however, propagates current stdenv's libstdc++ to avoid "GLIBCXX_* not found errors"
# when linked with other C++ libraries.
# E.g. for cudaPackages_11_8 we use gcc11 with gcc12's libstdc++
# Cf. https://github.com/NixOS/nixpkgs/pull/218265 for context
backendStdenv = final.callPackage ../development/cuda-modules/backend-stdenv.nix { };
# Loose packages
cudatoolkit = final.callPackage ../development/cuda-modules/cudatoolkit {};
saxpy = final.callPackage ../development/cuda-modules/saxpy {};
nccl = final.callPackage ../development/cuda-modules/nccl {};
nccl-tests = final.callPackage ../development/cuda-modules/nccl-tests {};
}
);
# Loose packages
cudatoolkit = final.callPackage ../development/cuda-modules/cudatoolkit { };
saxpy = final.callPackage ../development/cuda-modules/saxpy { };
nccl = final.callPackage ../development/cuda-modules/nccl { };
nccl-tests = final.callPackage ../development/cuda-modules/nccl-tests { };
});
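Because the set above pins `cudaPackages = final;`, packages that accept `cudaPackages` as an argument see the current scope rather than the top-level one, so an override of the finished scope propagates to its dependents; a hedged sketch (the override itself is made up):

```nix
let
  pkgs = import <nixpkgs> { config.allowUnfree = true; };
  myCudaPackages = pkgs.cudaPackages.overrideScope (
    final: prev: {
      nccl = prev.nccl.overrideAttrs (old: {
        # hypothetical tweak, purely to show that everything else in this scope
        # (e.g. nccl-tests below) now picks up the overridden derivation
        pname = old.pname + "-patched";
      });
    }
  );
in
myCudaPackages.nccl-tests
```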
mkVersionedPackageName =
name: version:
strings.concatStringsSep "_" [
name
(strings.replaceStrings ["."] ["_"] (versions.majorMinor version))
(strings.replaceStrings [ "." ] [ "_" ] (versions.majorMinor version))
];
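So, for example, a release version collapses to its major.minor pair in the attribute name (the version below is made up):

```nix
let
  lib = (import <nixpkgs> { }).lib;
  inherit (lib) strings versions;
  mkVersionedPackageName =
    name: version:
    strings.concatStringsSep "_" [
      name
      (strings.replaceStrings [ "." ] [ "_" ] (versions.majorMinor version))
    ];
in
mkVersionedPackageName "cudnn" "8.9.7.29"
# => "cudnn_8_9", which the multiplex builder exposes alongside the unversioned "cudnn" default
```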
composedExtension = fixedPoints.composeManyExtensions ([
(import ../development/cuda-modules/setup-hooks/extension.nix)
(callPackage ../development/cuda-modules/cuda/extension.nix {inherit cudaVersion;})
(callPackage ../development/cuda-modules/cuda/overrides.nix {inherit cudaVersion;})
(callPackage ../development/cuda-modules/generic-builders/multiplex.nix {
inherit cudaVersion flags mkVersionedPackageName;
pname = "cudnn";
releasesModule = ../development/cuda-modules/cudnn/releases.nix;
shimsFn = ../development/cuda-modules/cudnn/shims.nix;
fixupFn = ../development/cuda-modules/cudnn/fixup.nix;
})
(callPackage ../development/cuda-modules/cutensor/extension.nix {
inherit cudaVersion flags mkVersionedPackageName;
})
(callPackage ../development/cuda-modules/generic-builders/multiplex.nix {
inherit cudaVersion flags mkVersionedPackageName;
pname = "tensorrt";
releasesModule = ../development/cuda-modules/tensorrt/releases.nix;
shimsFn = ../development/cuda-modules/tensorrt/shims.nix;
fixupFn = ../development/cuda-modules/tensorrt/fixup.nix;
})
(callPackage ../development/cuda-modules/cuda-samples/extension.nix {inherit cudaVersion;})
(callPackage ../development/cuda-modules/cuda-library-samples/extension.nix {})
] ++ lib.optionals config.allowAliases [
(import ../development/cuda-modules/aliases.nix)
]);
composedExtension = fixedPoints.composeManyExtensions (
[
(import ../development/cuda-modules/setup-hooks/extension.nix)
(callPackage ../development/cuda-modules/cuda/extension.nix { inherit cudaVersion; })
(callPackage ../development/cuda-modules/cuda/overrides.nix { inherit cudaVersion; })
(callPackage ../development/cuda-modules/generic-builders/multiplex.nix {
inherit cudaVersion flags mkVersionedPackageName;
pname = "cudnn";
releasesModule = ../development/cuda-modules/cudnn/releases.nix;
shimsFn = ../development/cuda-modules/cudnn/shims.nix;
fixupFn = ../development/cuda-modules/cudnn/fixup.nix;
})
(callPackage ../development/cuda-modules/cutensor/extension.nix {
inherit cudaVersion flags mkVersionedPackageName;
})
(callPackage ../development/cuda-modules/generic-builders/multiplex.nix {
inherit cudaVersion flags mkVersionedPackageName;
pname = "tensorrt";
releasesModule = ../development/cuda-modules/tensorrt/releases.nix;
shimsFn = ../development/cuda-modules/tensorrt/shims.nix;
fixupFn = ../development/cuda-modules/tensorrt/fixup.nix;
})
(callPackage ../development/cuda-modules/cuda-samples/extension.nix { inherit cudaVersion; })
(callPackage ../development/cuda-modules/cuda-library-samples/extension.nix { })
]
++ lib.optionals config.allowAliases [ (import ../development/cuda-modules/aliases.nix) ]
);
cudaPackages = customisation.makeScope newScope (
fixedPoints.extends composedExtension passthruFunction