
Merge pull request #52936 from teh/pyro

Pyro
Silvan Mosberger 2018-12-31 03:47:51 +01:00 committed by GitHub
commit a02b6bc318
3 changed files with 73 additions and 0 deletions

@@ -0,0 +1,31 @@
{ buildPythonPackage, fetchPypi, lib, numpy, pytest, pytestpep8, pytestcov }:

buildPythonPackage rec {
  version = "2.3.2";
  pname = "opt_einsum";

  src = fetchPypi {
    inherit version pname;
    sha256 = "0ny3v8x83mzpwmqjdzqhzy2pzwyy4wx01r1h9i29xw3yvas69m6k";
  };

  checkInputs = [
    pytest
    pytestpep8
    pytestcov
  ];

  checkPhase = ''
    pytest
  '';

  propagatedBuildInputs = [
    numpy
  ];

  meta = {
    description = "Optimizing NumPy's einsum function with order optimization and GPU support";
    homepage = http://optimized-einsum.readthedocs.io;
    license = lib.licenses.mit;
    maintainers = with lib.maintainers; [ teh ];
  };
}

@@ -0,0 +1,38 @@
{ buildPythonPackage, fetchPypi, lib, pytorch, contextlib2
, graphviz, networkx, six, opt-einsum, tqdm }:

buildPythonPackage rec {
  version = "0.3.0";
  pname = "pyro-ppl";

  src = fetchPypi {
    inherit version pname;
    sha256 = "0shsnc5bia9k1fzmqnwwbm1x5qvac3zrq4lvyhg27rjgpcamvb9l";
  };

  propagatedBuildInputs = [
    pytorch
    contextlib2
    # TODO(tom): graphviz pulls in a lot of dependencies; make it
    # optional once there is time to figure out how.
    graphviz
    networkx
    six
    opt-einsum
    tqdm
  ];

  # pyro does not ship its tests, so run a simple import smoke test instead.
  checkPhase = ''
    python -c "import pyro"
    python -c "import pyro.distributions"
    python -c "import pyro.infer"
    python -c "import pyro.optim"
  '';

  meta = {
    description = "A Python library for probabilistic modeling and inference";
    homepage = http://pyro.ai;
    license = lib.licenses.mit;
    maintainers = with lib.maintainers; [ teh ];
  };
}
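Not part of the change, but relevant to the TODO above: one way the graphviz dependency could later be made optional is an explicit feature flag on the derivation. A minimal sketch, assuming a hypothetical graphvizSupport argument; the flag name and the trimmed-down body (checkPhase and meta omitted) are illustrative only:

{ buildPythonPackage, fetchPypi, lib, pytorch, contextlib2
, networkx, six, opt-einsum, tqdm
, graphvizSupport ? false, graphviz ? null }:

buildPythonPackage rec {
  version = "0.3.0";
  pname = "pyro-ppl";

  src = fetchPypi {
    inherit version pname;
    sha256 = "0shsnc5bia9k1fzmqnwwbm1x5qvac3zrq4lvyhg27rjgpcamvb9l";
  };

  # graphviz is only propagated when explicitly requested, which keeps the
  # default closure small; everything else stays as in the derivation above.
  propagatedBuildInputs = [
    pytorch contextlib2 networkx six opt-einsum tqdm
  ] ++ lib.optional graphvizSupport graphviz;
}

Callers could then opt in with something like pyro-ppl.override { graphvizSupport = true; }.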

@@ -2423,6 +2423,10 @@ in {
    cudaSupport = pkgs.config.cudaSupport or false;
  };

  pyro-ppl = callPackage ../development/python-modules/pyro-ppl {};

  opt-einsum = callPackage ../development/python-modules/opt-einsum {};

  pytorchWithCuda = self.pytorch.override {
    cudaSupport = true;
  };
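With these attributes wired into the Python package set, the new modules can be pulled into an ad-hoc environment. A minimal sketch, assuming a <nixpkgs> channel that already contains this change and the standard python3 attribute:

with import <nixpkgs> { };

# Building this expression (e.g. with nix-build) yields a Python interpreter
# that has both new packages on its path.
python3.withPackages (ps: with ps; [ pyro-ppl opt-einsum ])

The resulting bin/python can run the same import smoke test used in the checkPhase above.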