nixpkgs/pkgs/development/python-modules/pytorch/default.nix
Anders Kaseorg ce00943916 pytorch: 0.2.0 → 0.3.1 with CUDA and cuDNN (#38530)
* pytorch-0.3 with optional cuda and cudnn

* pytorch tests reenabled if compiling without cuda

* pytorch: Conditionalize cudnn dependency on cudaSupport

Signed-off-by: Anders Kaseorg <andersk@mit.edu>

* pytorch: Compile with the same GCC version used by CUDA if cudaSupport

Fixes this error:

In file included from /nix/store/gv7w3c71jg627cpcff04yi6kwzpzjyap-cudatoolkit-9.1.85.1/include/host_config.h:50:0,
                 from /nix/store/gv7w3c71jg627cpcff04yi6kwzpzjyap-cudatoolkit-9.1.85.1/include/cuda_runtime.h:78,
                 from <command-line>:0:
/nix/store/gv7w3c71jg627cpcff04yi6kwzpzjyap-cudatoolkit-9.1.85.1/include/crt/host_config.h:121:2: error: #error -- unsupported GNU version! gcc versions later than 6 are not supported!
 #error -- unsupported GNU version! gcc versions later than 6 are not supported!
  ^~~~~

Signed-off-by: Anders Kaseorg <andersk@mit.edu>

* pytorch: Build with joined cudatoolkit

Similar to #30058 for TensorFlow.

Signed-off-by: Anders Kaseorg <andersk@mit.edu>

* pytorch: 0.3.0 -> 0.3.1

Signed-off-by: Anders Kaseorg <andersk@mit.edu>

* pytorch: Patch for “refcounted file mapping not supported” failure

Signed-off-by: Anders Kaseorg <andersk@mit.edu>

* pytorch: Skip distributed tests

Signed-off-by: Anders Kaseorg <andersk@mit.edu>

* pytorch: Use the stub libcuda.so from cudatoolkit for running tests

Signed-off-by: Anders Kaseorg <andersk@mit.edu>
2018-05-04 16:19:31 +02:00


{ buildPythonPackage,
  cudaSupport ? false, cudatoolkit ? null, cudnn ? null,
  fetchFromGitHub, fetchpatch, lib, numpy, pyyaml, cffi, cmake,
  git, stdenv, linkFarm, symlinkJoin,
  utillinux, which }:

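# Using cuDNN or enabling cudaSupport requires a cudatoolkit to be provided.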
assert cudnn == null || cudatoolkit != null;
assert !cudaSupport || cudatoolkit != null;
let
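  # cudatoolkit is split into several outputs; rejoin out and lib into a single
  # tree for the build, as was done for TensorFlow in #30058.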
  cudatoolkit_joined = symlinkJoin {
    name = "${cudatoolkit.name}-unsplit";
    paths = [ cudatoolkit.out cudatoolkit.lib ];
  };

  # Normally libcuda.so.1 is provided at runtime by nvidia-x11 via
  # LD_LIBRARY_PATH=/run/opengl-driver/lib. We only use the stub
  # libcuda.so from cudatoolkit for running tests, so that we don't have
  # to recompile pytorch on every update to nvidia-x11 or the kernel.
  cudaStub = linkFarm "cuda-stub" [{
    name = "libcuda.so.1";
    path = "${cudatoolkit}/lib/stubs/libcuda.so";
  }];
  cudaStubEnv = lib.optionalString cudaSupport
    "LD_LIBRARY_PATH=${cudaStub}\${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} ";

in buildPythonPackage rec {
  version = "0.3.1";
  pname = "pytorch";
  name = "${pname}-${version}";

  src = fetchFromGitHub {
    owner = "pytorch";
    repo = "pytorch";
    rev = "v${version}";
    fetchSubmodules = true;
    sha256 = "1k8fr97v5pf7rni5cr2pi21ixc3pdj3h3lkz28njbjbgkndh7mr3";
  };

  patches = [
    (fetchpatch {
      # make sure stdatomic.h is included when checking for ATOMIC_INT_LOCK_FREE
      # Fixes this test failure:
      # RuntimeError: refcounted file mapping not supported on your system at /tmp/nix-build-python3.6-pytorch-0.3.0.drv-0/source/torch/lib/TH/THAllocator.c:525
      url = "https://github.com/pytorch/pytorch/commit/502aaf39cf4a878f9e4f849e5f409573aa598aa9.patch";
      stripLen = 3;
      extraPrefix = "torch/lib/";
      sha256 = "1miz4lhy3razjwcmhxqa4xmlcmhm65lqyin1czqczj8g16d3f62f";
    })
  ];

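  # The distributed tests (test_distributed.py) are skipped for the Nix package.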
  postPatch = ''
    substituteInPlace test/run_test.sh --replace \
      "INIT_METHOD='file://'\$TEMP_DIR'/shared_init_file' \$PYCMD ./test_distributed.py" \
      "echo Skipped for Nix package"
  '';

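  # cudatoolkit 9.1 rejects GCC versions later than 6, so compile with the same
  # GCC that CUDA itself uses.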
  preConfigure = lib.optionalString cudaSupport ''
    export CC=${cudatoolkit.cc}/bin/gcc
  '' + lib.optionalString (cudaSupport && cudnn != null) ''
    export CUDNN_INCLUDE_DIR=${cudnn}/include
  '';

  buildInputs = [
    cmake
    git
    numpy.blas
    utillinux
    which
  ] ++ lib.optionals cudaSupport [ cudatoolkit_joined cudnn ];

  propagatedBuildInputs = [
    cffi
    numpy
    pyyaml
  ];

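  # Run the upstream test suite; with cudaSupport the stub libcuda.so is put on
  # LD_LIBRARY_PATH via cudaStubEnv (see above).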
  checkPhase = ''
    ${cudaStubEnv}${stdenv.shell} test/run_test.sh
  '';

  meta = {
    description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration.";
    homepage = http://pytorch.org/;
    license = lib.licenses.bsd3;
    platforms = lib.platforms.linux;
    maintainers = with lib.maintainers; [ teh ];
  };
}
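
For reference, a caller could enable the optional CUDA support by overriding the arguments above. A minimal sketch, assuming the package is exposed as python3Packages.pytorch and that the callPackage-provided .override is available (attribute names outside this file are assumptions):

  with import <nixpkgs> {};

  python3Packages.pytorch.override {
    cudaSupport = true;
    cudatoolkit = cudatoolkit;  # required whenever cudaSupport is true (see the asserts above)
    cudnn = cudnn;              # optional, but needs cudatoolkit
  }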