forked from mirrors/nixpkgs
Merge branch 'master' of https://github.com/nixos/nixpkgs into tarball-closureinfo
This commit is contained in: commit ceececbd04

.github/CODEOWNERS (31 changes, vendored)

@@ -12,7 +12,7 @@
# Libraries
/lib @edolstra @nbp
/lib/systems @nbp @ericson2314
/lib/systems @nbp @ericson2314 @matthewbauer
/lib/generators.nix @edolstra @nbp @Profpatsch
/lib/debug.nix @edolstra @nbp @Profpatsch

@@ -20,9 +20,11 @@
/default.nix @nbp
/pkgs/top-level/default.nix @nbp @Ericson2314
/pkgs/top-level/impure.nix @nbp @Ericson2314
/pkgs/top-level/stage.nix @nbp @Ericson2314
/pkgs/stdenv/generic @Ericson2314
/pkgs/stdenv/cross @Ericson2314
/pkgs/top-level/stage.nix @nbp @Ericson2314 @matthewbauer
/pkgs/top-level/splice.nix @Ericson2314 @matthewbauer
/pkgs/top-level/release-cross.nix @Ericson2314 @matthewbauer
/pkgs/stdenv/generic @Ericson2314 @matthewbauer
/pkgs/stdenv/cross @Ericson2314 @matthewbauer
/pkgs/build-support/cc-wrapper @Ericson2314 @orivej
/pkgs/build-support/bintools-wrapper @Ericson2314 @orivej
/pkgs/build-support/setup-hooks @Ericson2314

@@ -45,6 +47,9 @@
/nixos/doc/manual/man-nixos-option.xml @nbp
/nixos/modules/installer/tools/nixos-option.sh @nbp

# NixOS modules
/nixos/modules @Infinisil

# Python-related code and docs
/maintainers/scripts/update-python-libraries @FRidh
/pkgs/top-level/python-packages.nix @FRidh

@@ -74,6 +79,14 @@
/pkgs/stdenv/darwin @NixOS/darwin-maintainers
/pkgs/os-specific/darwin @NixOS/darwin-maintainers

# C compilers
/pkgs/development/compilers/gcc @matthewbauer
/pkgs/development/compilers/llvm @matthewbauer

# Compatibility stuff
/pkgs/top-level/unix-tools.nix @matthewbauer
/pkgs/development/tools/xcbuild @matthewbauer

# Beam-related (Erlang, Elixir, LFE, etc)
/pkgs/development/beam-modules @gleber
/pkgs/development/interpreters/erlang @gleber

@@ -97,3 +110,13 @@
/pkgs/desktops/plasma-5 @ttuegel
/pkgs/development/libraries/kde-frameworks @ttuegel
/pkgs/development/libraries/qt-5 @ttuegel

# PostgreSQL and related stuff
/pkgs/servers/sql/postgresql @thoughtpolice
/nixos/modules/services/databases/postgresql.xml @thoughtpolice
/nixos/modules/services/databases/postgresql.nix @thoughtpolice
/nixos/tests/postgresql.nix @thoughtpolice

# Dhall
/pkgs/development/dhall-modules @Gabriel439 @Profpatsch
/pkgs/development/interpreters/dhall @Gabriel439 @Profpatsch
@@ -842,9 +842,12 @@ src = fetchFromGitHub {
  owner = "NixOS";
  repo = "nix";
  rev = "1f795f9f44607cc5bec70d1300150bfefcef2aae";
  sha256 = "04yri911rj9j19qqqn6m82266fl05pz98inasni0vxr1cf1gdgv9";
  sha256 = "1i2yxndxb6yc9l6c99pypbd92lfq5aac4klq7y2v93c9qvx2cgpc";
}
</programlisting>
Find the value to put as <literal>sha256</literal> by running
<literal>nix run -f '<nixpkgs>' nix-prefetch-github -c nix-prefetch-github --rev 1f795f9f44607cc5bec70d1300150bfefcef2aae NixOS nix</literal>
or <literal>nix-prefetch-url --unpack https://github.com/NixOS/nix/archive/1f795f9f44607cc5bec70d1300150bfefcef2aae.tar.gz</literal>.
</para>
</listitem>
</itemizedlist>
@@ -19,6 +19,7 @@
 <xi:include href="java.xml" />
 <xi:include href="lua.xml" />
 <xi:include href="node.section.xml" />
 <xi:include href="ocaml.xml" />
 <xi:include href="perl.xml" />
 <xi:include href="python.section.xml" />
 <xi:include href="qt.xml" />

doc/languages-frameworks/ocaml.xml (new file, 99 lines)
@@ -0,0 +1,99 @@
<section xmlns="http://docbook.org/ns/docbook"
         xmlns:xlink="http://www.w3.org/1999/xlink"
         xml:id="sec-language-ocaml">
 <title>OCaml</title>

 <para>
  OCaml libraries should be installed in
  <literal>$(out)/lib/ocaml/${ocaml.version}/site-lib/</literal>. Such
  directories are automatically added to the <literal>$OCAMLPATH</literal>
  environment variable when building another package that depends on them
  or when opening a <literal>nix-shell</literal>.
 </para>

 <para>
  Given that most of the OCaml ecosystem is now built with dune,
  nixpkgs includes a convenience build support function called
  <literal>buildDunePackage</literal> that will build an OCaml package
  using dune, OCaml and findlib and any additional dependencies provided
  as <literal>buildInputs</literal> or <literal>propagatedBuildInputs</literal>.
 </para>

 <para>
  Here is a simple package example. It defines an (optional) attribute
  <literal>minimumOCamlVersion</literal> that will be used to throw a
  descriptive evaluation error if building with an older OCaml is attempted.
  It uses the <literal>fetchFromGitHub</literal> fetcher to get its source.
  It sets the <literal>doCheck</literal> (optional) attribute to
  <literal>true</literal> which means that tests will be run with
  <literal>dune runtest -p angstrom</literal> after the build
  (<literal>dune build -p angstrom</literal>) is complete.
  It uses <literal>alcotest</literal> as a build input (because it is needed
  to run the tests) and <literal>bigstringaf</literal> and
  <literal>result</literal> as propagated build inputs (thus they will also
  be available to libraries depending on this library).
  The library will be installed using the <literal>angstrom.install</literal>
  file that dune generates.
 </para>

 <programlisting>
{ stdenv, fetchFromGitHub, buildDunePackage, alcotest, result, bigstringaf }:

buildDunePackage rec {
  pname = "angstrom";
  version = "0.10.0";

  minimumOCamlVersion = "4.03";

  src = fetchFromGitHub {
    owner = "inhabitedtype";
    repo = pname;
    rev = version;
    sha256 = "0lh6024yf9ds0nh9i93r9m6p5psi8nvrqxl5x7jwl13zb0r9xfpw";
  };

  buildInputs = [ alcotest ];
  propagatedBuildInputs = [ bigstringaf result ];
  doCheck = true;

  meta = {
    homepage = https://github.com/inhabitedtype/angstrom;
    description = "OCaml parser combinators built for speed and memory efficiency";
    license = stdenv.lib.licenses.bsd3;
    maintainers = with stdenv.lib.maintainers; [ sternenseemann ];
  };
}
 </programlisting>

 <para>
  Here is a second example, this time using a source archive generated with
  <literal>dune-release</literal>. It is a good idea to use this archive when
  it is available as it will usually contain substituted variables such as a
  <literal>%%VERSION%%</literal> field. This library does not depend
  on any other OCaml library and no tests are run after building it.
 </para>

 <programlisting>
{ stdenv, fetchurl, buildDunePackage }:

buildDunePackage rec {
  pname = "wtf8";
  version = "1.0.1";

  minimumOCamlVersion = "4.01";

  src = fetchurl {
    url = "https://github.com/flowtype/ocaml-${pname}/releases/download/v${version}/${pname}-${version}.tbz";
    sha256 = "1msg3vycd3k8qqj61sc23qks541cxpb97vrnrvrhjnqxsqnh6ygq";
  };

  meta = with stdenv.lib; {
    homepage = https://github.com/flowtype/ocaml-wtf8;
    description = "WTF-8 is a superset of UTF-8 that allows unpaired surrogates.";
    license = licenses.mit;
    maintainers = [ maintainers.eqyiel ];
  };
}
 </programlisting>
</section>
@@ -483,12 +483,12 @@ and in this case the `python35` interpreter is automatically used.

### Interpreters

Versions 2.7, 3.4, 3.5, 3.6 and 3.7 of the CPython interpreter are available as
respectively `python27`, `python34`, `python35` and `python36`. The PyPy interpreter
is available as `pypy`. The aliases `python2` and `python3` correspond to respectively `python27` and
`python35`. The default interpreter, `python`, maps to `python2`.
The Nix expressions for the interpreters can be found in
`pkgs/development/interpreters/python`.
Versions 2.7, 3.5, 3.6 and 3.7 of the CPython interpreter are available as
respectively `python27`, `python35`, `python36`, and `python37`. The PyPy
interpreter is available as `pypy`. The aliases `python2` and `python3`
correspond to respectively `python27` and `python36`. The default interpreter,
`python`, maps to `python2`. The Nix expressions for the interpreters can be
found in `pkgs/development/interpreters/python`.

All packages depending on any Python interpreter get appended
`out/{python.sitePackages}` to `$PYTHONPATH` if such directory
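As a quick illustration of how these interpreter attributes are used (the chosen interpreter and package names are examples only, not part of this diff), an environment containing one specific interpreter plus libraries can be built with the `withPackages` function described further below:

    with import <nixpkgs> {};
    python36.withPackages (ps: [ ps.numpy ps.requests ])

Saved as `shell.nix`, running `nix-shell` on it gives a shell whose `python3` can import both modules, with the interpreter's site-packages directory appended to `$PYTHONPATH` as described above.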
@@ -507,7 +507,7 @@ Each interpreter has the following attributes:

- `buildEnv`. Function to build python interpreter environments with extra packages bundled together. See section *python.buildEnv function* for usage and documentation.
- `withPackages`. Simpler interface to `buildEnv`. See section *python.withPackages function* for usage and documentation.
- `sitePackages`. Alias for `lib/${libPrefix}/site-packages`.
- `executable`. Name of the interpreter executable, e.g. `python3.4`.
- `executable`. Name of the interpreter executable, e.g. `python3.7`.
- `pkgs`. Set of Python packages for that specific interpreter. The package set can be modified by overriding the interpreter and passing `packageOverrides`.

### Building packages and applications

@@ -529,7 +529,6 @@ attribute set is created for each available Python interpreter. The available
sets are

* `pkgs.python27Packages`
* `pkgs.python34Packages`
* `pkgs.python35Packages`
* `pkgs.python36Packages`
* `pkgs.python37Packages`

@@ -670,7 +669,7 @@ python3Packages.buildPythonApplication rec {
    sha256 = "035w8gqql36zlan0xjrzz9j4lh9hs0qrsgnbyw07qs7lnkvbdv9x";
  };

  propagatedBuildInputs = with python3Packages; [ tornado_4 pythondaemon ];
  propagatedBuildInputs = with python3Packages; [ tornado_4 python-daemon ];

  meta = with lib; {
    ...

@@ -837,7 +836,7 @@ community to help save time. No tool is preferred at the moment.

### Deterministic builds

Python 2.7, 3.5 and 3.6 are now built deterministically and 3.4 mostly.
The Python interpreters are now built deterministically.
Minor modifications had to be made to the interpreters in order to generate
deterministic bytecode. This has security implications and is relevant for
those using Python in a `nix-shell`.
doc/meta.xml (54 changes)

@@ -250,6 +250,60 @@ meta.platforms = stdenv.lib.platforms.linux;
   </para>
  </listitem>
 </varlistentry>
 <varlistentry>
  <term>
   <varname>tests</varname>
  </term>
  <listitem>
   <warning>
    <para>
     This attribute is special in that it is not actually under the
     <literal>meta</literal> attribute set but rather under the
     <literal>passthru</literal> attribute set. This is due to a current
     limitation of Nix, and will change as soon as Nixpkgs is able to
     depend on a new enough version of Nix. See
     <link xlink:href="https://github.com/NixOS/nix/issues/2532">the relevant
     issue</link> for more details.
    </para>
   </warning>
   <para>
    An attribute set whose values are tests. A test is a derivation that
    builds successfully when the test passes, and fails to build otherwise. A
    derivation that is a test needs to have <literal>meta.timeout</literal>
    defined.
   </para>
   <para>
    The NixOS tests are available as <literal>nixosTests</literal> in
    parameters of derivations. For instance, the OpenSMTPD derivation
    includes lines similar to:
<programlisting>
{ /* ... */, nixosTests }:
{
  # ...
  passthru.tests = {
    basic-functionality-and-dovecot-integration = nixosTests.opensmtpd;
  };
}
</programlisting>
   </para>
  </listitem>
 </varlistentry>
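Because `passthru` attributes are merged into the resulting derivation, such a test can be built straight from the package's attribute path. The command below is an illustration based on the example above, not something added by this change:

    $ nix-build '<nixpkgs>' -A opensmtpd.tests.basic-functionality-and-dovecot-integration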
 <varlistentry>
  <term>
   <varname>timeout</varname>
  </term>
  <listitem>
   <para>
    A timeout (in seconds) for building the derivation. If the derivation
    takes longer than this time to build, it can fail due to breaking the
    timeout. However, not all machines have the same computing power, so
    some builders may apply a multiplicative factor to this value. When
    filling this value in, try to keep it approximately consistent with
    other values already present in <literal>nixpkgs</literal>.
   </para>
  </listitem>
 </varlistentry>
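For illustration, a test derivation satisfying the requirement described under `tests` above could set (the number is a made-up value):

    meta.timeout = 600; # seconds; individual builders may scale this value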
 <varlistentry>
  <term>
   <varname>hydraPlatforms</varname>
@@ -147,8 +147,8 @@ $ git add pkgs/development/libraries/libfoo/default.nix</screen>
 </listitem>
 <listitem>
  <para>
   You can use <command>nix-prefetch-url</command> (or similar
   nix-prefetch-git, etc) <replaceable>url</replaceable> to get the
   You can use <command>nix-prefetch-url</command>
   <replaceable>url</replaceable> to get the
   SHA-256 hash of source distributions. There are similar commands such as
   <command>nix-prefetch-git</command> and
   <command>nix-prefetch-hg</command> available in
@@ -618,7 +618,7 @@ let f(h, h + 1, i) = i + h
 </variablelist>

 <variablelist>
  <title>Variables affecting build properties</title>
  <title>Attributes affecting build properties</title>
  <varlistentry>
   <term>
    <varname>enableParallelBuilding</varname>

@@ -637,21 +637,6 @@ let f(h, h + 1, i) = i + h
   </para>
  </listitem>
 </varlistentry>
 <varlistentry>
  <term>
   <varname>preferLocalBuild</varname>
  </term>
  <listitem>
   <para>
    If set, specifies that the package is so lightweight in terms of build
    operations (e.g. write a text file from a Nix string to the store) that
    there's no need to look for it in binary caches -- it's faster to just
    build it locally. It also tells Hydra and other facilities that this
    package doesn't need to be exported in binary caches (noone would use it,
    after all).
   </para>
  </listitem>
 </varlistentry>
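As a sketch of what the attribute documented in this (removed) entry is for — the derivation name and contents are invented for illustration — a trivially cheap derivation would opt out of binary caches like this:

    pkgs.runCommand "motd" { preferLocalBuild = true; } ''
      echo "Welcome to this machine." > $out
    ''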
 </variablelist>

 <variablelist>
@ -13,6 +13,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
|
|||
* add it to this list. The URL mentioned above is a good source for inspiration.
|
||||
*/
|
||||
|
||||
abstyles = spdx {
|
||||
spdxId = "Abstyles";
|
||||
fullName = "Abstyles License";
|
||||
};
|
||||
|
||||
afl21 = spdx {
|
||||
spdxId = "AFL-2.1";
|
||||
fullName = "Academic Free License v2.1";
|
||||
|
@ -400,6 +405,10 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
|
|||
free = false;
|
||||
};
|
||||
|
||||
jasper = spdx {
|
||||
spdxId = "JasPer-2.0";
|
||||
fullName = "JasPer License";
|
||||
};
|
||||
|
||||
lgpl2 = spdx {
|
||||
spdxId = "LGPL-2.0";
|
||||
|
|
|
@ -73,7 +73,7 @@ rec {
|
|||
# Get the commit id of a git repo
|
||||
# Example: commitIdFromGitRepo <nixpkgs/.git>
|
||||
commitIdFromGitRepo =
|
||||
let readCommitFromFile = path: file:
|
||||
let readCommitFromFile = file: path:
|
||||
with builtins;
|
||||
let fileName = toString path + "/" + file;
|
||||
packedRefsName = toString path + "/packed-refs";
|
||||
|
@ -85,7 +85,7 @@ rec {
|
|||
matchRef = match "^ref: (.*)$" fileContent;
|
||||
in if isNull matchRef
|
||||
then fileContent
|
||||
else readCommitFromFile path (lib.head matchRef)
|
||||
else readCommitFromFile (lib.head matchRef) path
|
||||
# Sometimes, the file isn't there at all and has been packed away in the
|
||||
# packed-refs file, so we have to grep through it:
|
||||
else if lib.pathExists packedRefsName
|
||||
|
@ -96,7 +96,7 @@ rec {
|
|||
then throw ("Could not find " + file + " in " + packedRefsName)
|
||||
else lib.head matchRef
|
||||
else throw ("Not a .git directory: " + path);
|
||||
in lib.flip readCommitFromFile "HEAD";
|
||||
in readCommitFromFile "HEAD";
|
||||
|
||||
pathHasContext = builtins.hasContext or (lib.hasPrefix builtins.storeDir);
|
||||
|
||||
|
|
|
@ -32,6 +32,7 @@ rec {
|
|||
else if final.isUClibc then "uclibc"
|
||||
else if final.isAndroid then "bionic"
|
||||
else if final.isLinux /* default */ then "glibc"
|
||||
else if final.isAvr then "avrlibc"
|
||||
# TODO(@Ericson2314) think more about other operating systems
|
||||
else "native/impure";
|
||||
extensions = {
|
||||
|
|
|
@ -99,6 +99,49 @@ rec {
|
|||
riscv64 = riscv "64";
|
||||
riscv32 = riscv "32";
|
||||
|
||||
avr = {
|
||||
config = "avr";
|
||||
};
|
||||
|
||||
arm-embedded = {
|
||||
config = "arm-none-eabi";
|
||||
libc = "newlib";
|
||||
};
|
||||
|
||||
aarch64-embedded = {
|
||||
config = "aarch64-none-elf";
|
||||
libc = "newlib";
|
||||
};
|
||||
|
||||
aarch64be-embedded = {
|
||||
config = "aarch64_be-none-elf";
|
||||
libc = "newlib";
|
||||
};
|
||||
|
||||
ppc-embedded = {
|
||||
config = "powerpc-none-eabi";
|
||||
libc = "newlib";
|
||||
};
|
||||
|
||||
ppcle-embedded = {
|
||||
config = "powerpcle-none-eabi";
|
||||
libc = "newlib";
|
||||
};
|
||||
|
||||
alpha-embedded = {
|
||||
config = "alpha-elf";
|
||||
libc = "newlib";
|
||||
};
|
||||
|
||||
i686-embedded = {
|
||||
config = "i686-elf";
|
||||
libc = "newlib";
|
||||
};
|
||||
|
||||
x86_64-embedded = {
|
||||
config = "x86_64-elf";
|
||||
libc = "newlib";
|
||||
};
|
||||
|
||||
#
|
||||
# Darwin
|
||||
|
|
|
@ -19,6 +19,7 @@ rec {
|
|||
isRiscV = { cpu = { family = "riscv"; }; };
|
||||
isSparc = { cpu = { family = "sparc"; }; };
|
||||
isWasm = { cpu = { family = "wasm"; }; };
|
||||
isAvr = { cpu = { family = "avr"; }; };
|
||||
|
||||
is32bit = { cpu = { bits = 32; }; };
|
||||
is64bit = { cpu = { bits = 64; }; };
|
||||
|
|
|
@ -80,6 +80,7 @@ rec {
|
|||
armv8r = { bits = 32; significantByte = littleEndian; family = "arm"; version = "8"; };
|
||||
armv8m = { bits = 32; significantByte = littleEndian; family = "arm"; version = "8"; };
|
||||
aarch64 = { bits = 64; significantByte = littleEndian; family = "arm"; version = "8"; };
|
||||
aarch64_be = { bits = 64; significantByte = bigEndian; family = "arm"; version = "8"; };
|
||||
|
||||
i686 = { bits = 32; significantByte = littleEndian; family = "x86"; };
|
||||
x86_64 = { bits = 64; significantByte = littleEndian; family = "x86"; };
|
||||
|
@ -92,6 +93,7 @@ rec {
|
|||
powerpc = { bits = 32; significantByte = bigEndian; family = "power"; };
|
||||
powerpc64 = { bits = 64; significantByte = bigEndian; family = "power"; };
|
||||
powerpc64le = { bits = 64; significantByte = littleEndian; family = "power"; };
|
||||
powerpcle = { bits = 32; significantByte = littleEndian; family = "power"; };
|
||||
|
||||
riscv32 = { bits = 32; significantByte = littleEndian; family = "riscv"; };
|
||||
riscv64 = { bits = 64; significantByte = littleEndian; family = "riscv"; };
|
||||
|
@ -101,6 +103,10 @@ rec {
|
|||
|
||||
wasm32 = { bits = 32; significantByte = littleEndian; family = "wasm"; };
|
||||
wasm64 = { bits = 64; significantByte = littleEndian; family = "wasm"; };
|
||||
|
||||
alpha = { bits = 64; significantByte = littleEndian; family = "alpha"; };
|
||||
|
||||
avr = { bits = 8; family = "avr"; };
|
||||
};
|
||||
|
||||
################################################################################
|
||||
|
@ -117,6 +123,7 @@ rec {
|
|||
apple = {};
|
||||
pc = {};
|
||||
|
||||
none = {};
|
||||
unknown = {};
|
||||
};
|
||||
|
||||
|
@ -200,6 +207,7 @@ rec {
|
|||
cygnus = {};
|
||||
msvc = {};
|
||||
eabi = {};
|
||||
elf = {};
|
||||
|
||||
androideabi = {};
|
||||
android = {
|
||||
|
@ -255,9 +263,16 @@ rec {
|
|||
setType "system" components;
|
||||
|
||||
mkSkeletonFromList = l: {
|
||||
"1" = if elemAt l 0 == "avr"
|
||||
then { cpu = elemAt l 0; kernel = "none"; abi = "unknown"; }
|
||||
else throw "Target specification with 1 components is ambiguous";
|
||||
"2" = # We only do 2-part hacks for things Nix already supports
|
||||
if elemAt l 1 == "cygwin"
|
||||
then { cpu = elemAt l 0; kernel = "windows"; abi = "cygnus"; }
|
||||
else if (elemAt l 1 == "eabi")
|
||||
then { cpu = elemAt l 0; vendor = "none"; kernel = "none"; abi = elemAt l 1; }
|
||||
else if (elemAt l 1 == "elf")
|
||||
then { cpu = elemAt l 0; vendor = "none"; kernel = "none"; abi = elemAt l 1; }
|
||||
else { cpu = elemAt l 0; kernel = elemAt l 1; };
|
||||
"3" = # Awkwards hacks, beware!
|
||||
if elemAt l 1 == "apple"
|
||||
|
@ -268,6 +283,10 @@ rec {
|
|||
then { cpu = elemAt l 0; vendor = elemAt l 1; kernel = "windows"; abi = "gnu"; }
|
||||
else if hasPrefix "netbsd" (elemAt l 2)
|
||||
then { cpu = elemAt l 0; vendor = elemAt l 1; kernel = elemAt l 2; }
|
||||
else if (elemAt l 2 == "eabi")
|
||||
then { cpu = elemAt l 0; vendor = elemAt l 1; kernel = "none"; abi = elemAt l 2; }
|
||||
else if (elemAt l 2 == "elf")
|
||||
then { cpu = elemAt l 0; vendor = elemAt l 1; kernel = "none"; abi = elemAt l 2; }
|
||||
else throw "Target specification with 3 components is ambiguous";
|
||||
"4" = { cpu = elemAt l 0; vendor = elemAt l 1; kernel = elemAt l 2; abi = elemAt l 3; };
|
||||
}.${toString (length l)}
|
||||
|
|
|
@ -471,6 +471,7 @@ rec {
|
|||
"x86_64-linux" = pc64;
|
||||
"armv5tel-linux" = sheevaplug;
|
||||
"armv6l-linux" = raspberrypi;
|
||||
"armv7a-linux" = armv7l-hf-multiplatform;
|
||||
"armv7l-linux" = armv7l-hf-multiplatform;
|
||||
"aarch64-linux" = aarch64-multiplatform;
|
||||
"mipsel-linux" = fuloong2f_n32;
|
||||
|
|
|
@ -169,6 +169,9 @@ rec {
|
|||
# s32 = sign 32 4294967296;
|
||||
};
|
||||
|
||||
# Alias of u16 for a port number
|
||||
port = ints.u16;
|
||||
|
||||
float = mkOptionType rec {
|
||||
name = "float";
|
||||
description = "floating point number";
|
||||
|
|
|
@ -143,6 +143,11 @@
|
|||
github = "ahmedtd";
|
||||
name = "Taahir Ahmed";
|
||||
};
|
||||
ahuzik = {
|
||||
email = "ales.guzik@gmail.com";
|
||||
github = "alesguzik";
|
||||
name = "Ales Huzik";
|
||||
};
|
||||
aij = {
|
||||
email = "aij+git@mrph.org";
|
||||
github = "aij";
|
||||
|
@ -211,6 +216,11 @@
|
|||
github = "alunduil";
|
||||
name = "Alex Brandt";
|
||||
};
|
||||
amar1729 = {
|
||||
email = "amar.paul16@gmail.com";
|
||||
github = "amar1729";
|
||||
name = "Amar Paul";
|
||||
};
|
||||
ambrop72 = {
|
||||
email = "ambrop7@gmail.com";
|
||||
github = "ambrop72";
|
||||
|
@ -401,6 +411,11 @@
|
|||
github = "AveryLychee";
|
||||
name = "Avery Lychee";
|
||||
};
|
||||
averelld = {
|
||||
email = "averell+nixos@rxd4.com";
|
||||
github = "averelld";
|
||||
name = "averelld";
|
||||
};
|
||||
avnik = {
|
||||
email = "avn@avnik.info";
|
||||
github = "avnik";
|
||||
|
@ -614,6 +629,11 @@
|
|||
github = "bramd";
|
||||
name = "Bram Duvigneau";
|
||||
};
|
||||
braydenjw = {
|
||||
email = "nixpkgs@willenborg.ca";
|
||||
github = "braydenjw";
|
||||
name = "Brayden Willenborg";
|
||||
};
|
||||
brian-dawn = {
|
||||
email = "brian.t.dawn@gmail.com";
|
||||
github = "brian-dawn";
|
||||
|
@ -952,6 +972,11 @@
|
|||
github = "danielfullmer";
|
||||
name = "Daniel Fullmer";
|
||||
};
|
||||
das-g = {
|
||||
email = "nixpkgs@raphael.dasgupta.ch";
|
||||
github = "das-g";
|
||||
name = "Raphael Das Gupta";
|
||||
};
|
||||
das_j = {
|
||||
email = "janne@hess.ooo";
|
||||
github = "dasJ";
|
||||
|
@ -1507,6 +1532,11 @@
|
|||
github = "ftrvxmtrx";
|
||||
name = "Siarhei Zirukin";
|
||||
};
|
||||
fuerbringer = {
|
||||
email = "severin@fuerbringer.info";
|
||||
github = "fuerbringer";
|
||||
name = "Severin Fürbringer";
|
||||
};
|
||||
funfunctor = {
|
||||
email = "eocallaghan@alterapraxis.com";
|
||||
name = "Edward O'Callaghan";
|
||||
|
@ -1516,6 +1546,11 @@
|
|||
github = "fuuzetsu";
|
||||
name = "Mateusz Kowalczyk";
|
||||
};
|
||||
fuwa = {
|
||||
email = "echowss@gmail.com";
|
||||
github = "fuwa0529";
|
||||
name = "Haruka Akiyama";
|
||||
};
|
||||
fuzzy-id = {
|
||||
email = "hacking+nixos@babibo.de";
|
||||
name = "Thomas Bach";
|
||||
|
@ -1734,6 +1769,11 @@
|
|||
email = "t@larkery.com";
|
||||
name = "Tom Hinton";
|
||||
};
|
||||
hlolli = {
|
||||
email = "hlolli@gmail.com";
|
||||
github = "hlolli";
|
||||
name = "Hlodver Sigurdsson";
|
||||
};
|
||||
hodapp = {
|
||||
email = "hodapp87@gmail.com";
|
||||
github = "Hodapp87";
|
||||
|
@ -2214,6 +2254,11 @@
|
|||
github = "knedlsepp";
|
||||
name = "Josef Kemetmüller";
|
||||
};
|
||||
knl = {
|
||||
email = "nikola@knezevic.co";
|
||||
github = "knl";
|
||||
name = "Nikola Knežević";
|
||||
};
|
||||
konimex = {
|
||||
email = "herdiansyah@netc.eu";
|
||||
github = "konimex";
|
||||
|
@ -2661,6 +2706,11 @@
|
|||
github = "melsigl";
|
||||
name = "Melanie B. Sigl";
|
||||
};
|
||||
melkor333 = {
|
||||
email = "samuel@ton-kunst.ch";
|
||||
github = "melkor333";
|
||||
name = "Samuel Ruprecht";
|
||||
};
|
||||
metabar = {
|
||||
email = "softs@metabarcoding.org";
|
||||
name = "Celine Mercier";
|
||||
|
@ -2670,6 +2720,11 @@
|
|||
github = "mgdelacroix";
|
||||
name = "Miguel de la Cruz";
|
||||
};
|
||||
mgregoire = {
|
||||
email = "gregoire@martinache.net";
|
||||
github = "M-Gregoire";
|
||||
name = "Gregoire Martinache";
|
||||
};
|
||||
mgttlinger = {
|
||||
email = "megoettlinger@gmail.com";
|
||||
github = "mgttlinger";
|
||||
|
@ -3795,6 +3850,11 @@
|
|||
github = "scolobb";
|
||||
name = "Sergiu Ivanov";
|
||||
};
|
||||
screendriver = {
|
||||
email = "nix@echooff.de";
|
||||
github = "screendriver";
|
||||
name = "Christian Rackerseder";
|
||||
};
|
||||
Scriptkiddi = {
|
||||
email = "nixos@scriptkiddi.de";
|
||||
github = "scriptkiddi";
|
||||
|
@ -3905,6 +3965,11 @@
|
|||
github = "sjagoe";
|
||||
name = "Simon Jagoe";
|
||||
};
|
||||
sjau = {
|
||||
email = "nixos@sjau.ch";
|
||||
github = "sjau";
|
||||
name = "Stephan Jau";
|
||||
};
|
||||
sjmackenzie = {
|
||||
email = "setori88@gmail.com";
|
||||
github = "sjmackenzie";
|
||||
|
@ -3973,6 +4038,11 @@
|
|||
github = "spacefrogg";
|
||||
name = "Michael Raitza";
|
||||
};
|
||||
spacekookie = {
|
||||
email = "kookie@spacekookie.de";
|
||||
github = "spacekookie";
|
||||
name = "Katharina Fey";
|
||||
};
|
||||
spencerjanssen = {
|
||||
email = "spencerjanssen@gmail.com";
|
||||
github = "spencerjanssen";
|
||||
|
@ -4153,6 +4223,11 @@
|
|||
github = "taku0";
|
||||
name = "Takuo Yonezawa";
|
||||
};
|
||||
talyz = {
|
||||
email = "kim.lindberger@gmail.com";
|
||||
github = "talyz";
|
||||
name = "Kim Lindberger";
|
||||
};
|
||||
tari = {
|
||||
email = "peter@taricorp.net";
|
||||
github = "tari";
|
||||
|
@ -4193,6 +4268,11 @@
|
|||
github = "tex";
|
||||
name = "Milan Svoboda";
|
||||
};
|
||||
tg-x = {
|
||||
email = "*@tg-x.net";
|
||||
github = "tg-x";
|
||||
name = "TG ⊗ Θ";
|
||||
};
|
||||
thall = {
|
||||
email = "niclas.thall@gmail.com";
|
||||
github = "thall";
|
||||
|
|
|
@@ -15,7 +15,7 @@ containers.database =
  { config =
      { config, pkgs, ... }:
      { <xref linkend="opt-services.postgresql.enable"/> = true;
        <xref linkend="opt-services.postgresql.package"/> = pkgs.postgresql96;
        <xref linkend="opt-services.postgresql.package"/> = pkgs.postgresql_9_6;
      };
  };
</programlisting>
@@ -197,10 +197,10 @@ swapDevices = [ { device = "/dev/disk/by-label/swap"; } ];
    pkgs.emacs
  ];

  <xref linkend="opt-services.postgresql.package"/> = pkgs.postgresql90;
  <xref linkend="opt-services.postgresql.package"/> = pkgs.postgresql_10;
</programlisting>
The latter option definition changes the default PostgreSQL package used
by NixOS’s PostgreSQL service to 9.0. For more information on packages,
by NixOS’s PostgreSQL service to 10.x. For more information on packages,
including how to add new ones, see <xref linkend="sec-custom-packages"/>.
</para>
</listitem>
@@ -34,13 +34,4 @@
  Similarly, UDP port ranges can be opened through
  <xref linkend="opt-networking.firewall.allowedUDPPortRanges"/>.
 </para>

 <para>
  Also of interest is
<programlisting>
<xref linkend="opt-networking.firewall.allowPing"/> = true;
</programlisting>
  to allow the machine to respond to ping requests. (ICMPv6 pings are always
  allowed.)
 </para>
</section>
@@ -106,7 +106,7 @@
   </para>
  </listitem>
 </varlistentry>
 <varlistentry>
 <varlistentry xml:id='types.ints.ux'>
  <term>
   <varname>types.ints.{u8, u16, u32}</varname>
  </term>

@@ -131,6 +131,17 @@
   </para>
  </listitem>
 </varlistentry>
 <varlistentry>
  <term>
   <varname>types.port</varname>
  </term>
  <listitem>
   <para>
    A port number. This type is an alias to
    <link linkend='types.ints.ux'><varname>types.ints.u16</varname></link>.
   </para>
  </listitem>
 </varlistentry>
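For illustration only (the option path `services.example.port` is made up, and `mkOption`/`types` are assumed to come from `lib` as usual in modules), a module would use the new type like this:

    options.services.example.port = mkOption {
      type = types.port;   # alias of types.ints.u16, i.e. 0-65535
      default = 8080;
      description = "TCP port the example service listens on.";
    };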
</variablelist>

<para>
|
|||
anyways for clarity.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
Groups <literal>kvm</literal> and <literal>render</literal> are introduced now, as systemd requires them.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</section>
|
||||
|
||||
|
|
|
@ -99,6 +99,16 @@
|
|||
</listitem>
|
||||
</itemizedlist>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The Syncthing state and configuration data has been moved from
|
||||
<varname>services.syncthing.dataDir</varname> to the newly defined
|
||||
<varname>services.syncthing.configDir</varname>, which default to
|
||||
<literal>/var/lib/syncthing/.config/syncthing</literal>.
|
||||
This change makes possible to share synced directories using ACLs
|
||||
without Syncthing resetting the permission on every start.
|
||||
</para>
|
||||
</listitem>
|
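An illustrative configuration reflecting the split described above (the dataDir path is an example, not mandated by this change):

    services.syncthing = {
      enable = true;
      dataDir = "/data/syncthing";                        # synced folders
      configDir = "/var/lib/syncthing/.config/syncthing"; # state and keys (the new default)
    };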
  <listitem>
   <para>
    Package <varname>rabbitmq_server</varname> is renamed to

@@ -137,6 +147,14 @@
    make sure to update your configuration if you want to keep <literal>proglodyte-wasm</literal>
   </para>
  </listitem>
  <listitem>
   <para>
    When the <literal>nixpkgs.pkgs</literal> option is set, NixOS will no
    longer ignore the <literal>nixpkgs.overlays</literal> option. The old
    behavior can be recovered by setting <literal>nixpkgs.overlays =
    lib.mkForce [];</literal>.
   </para>
  </listitem>
  <listitem>
   <para>
    OpenSMTPD has been upgraded to version 6.4.0p1. This release makes

@@ -145,6 +163,63 @@
    format.
   </para>
  </listitem>
  <listitem>
   <para>
    The versioned <varname>postgresql</varname> packages have been renamed to use
    underscore number separators. For example, <varname>postgresql96</varname>
    has been renamed to <varname>postgresql_9_6</varname>.
   </para>
  </listitem>
  <listitem>
   <para>
    Package <literal>consul-ui</literal> and passthrough <literal>consul.ui</literal> have been removed.
    The package <literal>consul</literal> now uses upstream releases that vendor the UI into the binary.
    See <link xlink:href="https://github.com/NixOS/nixpkgs/pull/48714#issuecomment-433454834">#48714</link>
    for details.
   </para>
  </listitem>
  <listitem>
   <para>
    Slurm introduces the new option
    <literal>services.slurm.stateSaveLocation</literal>,
    which is now set to <literal>/var/spool/slurm</literal> by default
    (instead of <literal>/var/spool</literal>).
    Make sure to move all files to the new directory or to set the option accordingly.
   </para>
   <para>
    The slurmctld now runs as user <literal>slurm</literal> instead of <literal>root</literal>.
    If you want to keep slurmctld running as <literal>root</literal>, set
    <literal>services.slurm.user = root</literal>.
   </para>
   <para>
    The options <literal>services.slurm.nodeName</literal> and
    <literal>services.slurm.partitionName</literal> are now sets of
    strings to correctly reflect the fact that each of these
    options can occur more than once in the configuration.
   </para>
  </listitem>
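A sketch of keeping the pre-upgrade Slurm behaviour, using only the options named in the note above (values shown are the previous defaults):

    services.slurm.stateSaveLocation = "/var/spool"; # the previous location
    services.slurm.user = "root";                    # keep slurmctld running as root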
  <listitem>
   <para>
    The <literal>solr</literal> package has been upgraded from 4.10.3 to 7.5.0 and has undergone
    some major changes. The <literal>services.solr</literal> module has been updated to reflect
    these changes. Please review http://lucene.apache.org/solr/ carefully before upgrading.
   </para>
  </listitem>
  <listitem>
   <para>
    Package <literal>ckb</literal> is renamed to <literal>ckb-next</literal>,
    and options <literal>hardware.ckb.*</literal> are renamed to
    <literal>hardware.ckb-next.*</literal>.
   </para>
  </listitem>
  <listitem>
   <para>
    The option <literal>services.xserver.displayManager.job.logToFile</literal>, which was
    previously set to <literal>true</literal> when using the display managers
    <literal>lightdm</literal>, <literal>sddm</literal> or <literal>xpra</literal>, has been
    reset to the default value (<literal>false</literal>).
   </para>
  </listitem>
 </itemizedlist>
</section>

@@ -172,6 +247,19 @@
    supports loading TrueCrypt volumes.
   </para>
  </listitem>
  <listitem>
   <para>
    The Kubernetes DNS addon, kube-dns, has been replaced with CoreDNS.
    This change is made in accordance with Kubernetes making CoreDNS the official default
    starting from
    <link xlink:href="https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.11.md#sig-cluster-lifecycle">Kubernetes v1.11</link>.
    Please be aware that upgrading the DNS addon on existing clusters might induce
    minor downtime while the DNS addon terminates and re-initializes.
    Also note that the DNS service now runs with 2 pod replicas by default.
    The desired number of replicas can be configured using
    <option>services.kubernetes.addons.dns.replicas</option>.
   </para>
  </listitem>
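For example, scaling the DNS addon up is a single option (the value is illustrative, not a recommendation from this change):

    services.kubernetes.addons.dns.replicas = 3;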
 </itemizedlist>
</section>
</section>
@ -1,6 +1,4 @@
|
|||
{ system, minimal ? false, config ? {} }:
|
||||
|
||||
let pkgs = import ../.. { inherit system config; }; in
|
||||
{ system, pkgs, minimal ? false, config ? {} }:
|
||||
|
||||
with pkgs.lib;
|
||||
with import ../lib/qemu-flags.nix { inherit pkgs; };
|
||||
|
|
|
@ -1,3 +1,7 @@
|
|||
/* Build a channel tarball. These contain, in addition to the nixpkgs
|
||||
* expressions themselves, files that indicate the version of nixpkgs
|
||||
* that they represent.
|
||||
*/
|
||||
{ pkgs, nixpkgs, version, versionSuffix }:
|
||||
|
||||
pkgs.releaseTools.makeSourceTarball {
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
{ system, minimal ? false, config ? {} }:
|
||||
{ system, pkgs, minimal ? false, config ? {} }:
|
||||
|
||||
with import ./build-vms.nix { inherit system minimal config; };
|
||||
with import ./build-vms.nix { inherit system pkgs minimal config; };
|
||||
with pkgs;
|
||||
|
||||
let
|
||||
|
@ -69,7 +69,7 @@ in rec {
|
|||
mkdir -p $out/coverage-data
|
||||
mv $i $out/coverage-data/$(dirname $(dirname $i))
|
||||
done
|
||||
''; # */
|
||||
'';
|
||||
};
|
||||
|
||||
|
||||
|
|
nixos/modules/config/gtk/gtk-icon-cache.nix (new file, 86 lines)
@ -0,0 +1,86 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
{
|
||||
options = {
|
||||
gtk.iconCache.enable = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Whether to build icon theme caches for GTK+ applications.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf config.gtk.iconCache.enable {
|
||||
|
||||
# (Re)build icon theme caches
|
||||
# ---------------------------
|
||||
# Each icon theme has its own cache. The difficulty is that many
# packages may contribute icons to the same theme by installing
# some icons.
|
||||
#
|
||||
# For instance, on my current NixOS system, the following packages
|
||||
# (among many others) have icons installed into the hicolor icon
|
||||
# theme: hicolor-icon-theme, psensor, wpa_gui, caja, etc.
|
||||
#
|
||||
# As another example, the mate icon theme has icons installed by the
|
||||
# packages mate-icon-theme, mate-settings-daemon, and libmateweather.
|
||||
#
|
||||
# The HighContrast icon theme also has icons from different packages,
|
||||
# like gnome-theme-extras and meld.
|
||||
|
||||
# When the cache is built, all of its icons have to be known. How to
|
||||
# implement this?
|
||||
#
|
||||
# I think that most themes have all icons installed by only one
|
||||
# package. On my system there are 71 themes installed. Only 3 of them
|
||||
# have icons installed from more than one package.
|
||||
#
|
||||
# If the main package of the theme provides a cache, presumably most
|
||||
# of its icons will be available to applications without running this
|
||||
# module. But additional icons offered by other packages will not be
|
||||
# available. Therefore I think that it is good that the main theme
|
||||
# package installs a cache (although it does not completely fix the
# situation for packages installed with nix-env).
|
||||
#
|
||||
# The module solution presented here keeps the cache when there is
|
||||
# only one package contributing with icons to the theme. Otherwise it
|
||||
# rebuilds the cache taking into account the icons provided all
|
||||
# packages.
|
||||
|
||||
environment.extraSetup = ''
|
||||
# For each icon theme directory ...
|
||||
|
||||
find $out/share/icons -mindepth 1 -maxdepth 1 -print0 | while read -d $'\0' themedir
|
||||
do
|
||||
|
||||
# In order to build the cache, the theme dir should be
|
||||
# writable. When the theme dir is a symbolic link to somewhere
|
||||
# in the nix store it is not writable and it means that only
|
||||
# one package is contributing to the theme. If it already has
|
||||
# a cache, no rebuild is needed. Otherwise a cache has to be
|
||||
# built, and to be able to do that we first remove the
|
||||
# symbolic link and make a directory, and then make symbolic
|
||||
# links from the original directory into the new one.
|
||||
|
||||
if [ ! -w "$themedir" -a -L "$themedir" -a ! -r "$themedir"/icon-theme.cache ]; then
|
||||
name=$(basename "$themedir")
|
||||
path=$(readlink -f "$themedir")
|
||||
rm "$themedir"
|
||||
mkdir -p "$themedir"
|
||||
ln -s "$path"/* "$themedir"/
|
||||
fi
|
||||
|
||||
# (Re)build the cache if the theme dir is writable, replacing any
|
||||
# existing cache for the theme
|
||||
|
||||
if [ -w "$themedir" ]; then
|
||||
rm -f "$themedir"/icon-theme.cache
|
||||
${pkgs.gtk3.out}/bin/gtk-update-icon-cache --ignore-theme-index "$themedir"
|
||||
fi
|
||||
done
|
||||
'';
|
||||
};
|
||||
|
||||
}
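The option introduced by this module defaults to true, so disabling it is a one-liner (shown only for illustration):

    gtk.iconCache.enable = false; # skip icon-cache rebuilds, e.g. on a headless system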
|
|
@ -228,9 +228,6 @@ in
|
|||
# /etc/protocols: IP protocol numbers.
|
||||
"protocols".source = pkgs.iana-etc + "/etc/protocols";
|
||||
|
||||
# /etc/rpc: RPC program numbers.
|
||||
"rpc".source = pkgs.glibc.out + "/etc/rpc";
|
||||
|
||||
# /etc/hosts: Hostname-to-IP mappings.
|
||||
"hosts".text = let
|
||||
oneToString = set: ip: ip + " " + concatStringsSep " " set.${ip};
|
||||
|
@ -263,11 +260,14 @@ in
|
|||
'';
|
||||
|
||||
} // optionalAttrs config.services.resolved.enable {
|
||||
# symlink the static version of resolv.conf as recommended by upstream:
|
||||
# symlink the dynamic stub resolver of resolv.conf as recommended by upstream:
|
||||
# https://www.freedesktop.org/software/systemd/man/systemd-resolved.html#/etc/resolv.conf
|
||||
"resolv.conf".source = "${pkgs.systemd}/lib/systemd/resolv.conf";
|
||||
"resolv.conf".source = "/run/systemd/resolve/stub-resolv.conf";
|
||||
} // optionalAttrs (config.services.resolved.enable && dnsmasqResolve) {
|
||||
"dnsmasq-resolv.conf".source = "/run/systemd/resolve/resolv.conf";
|
||||
} // optionalAttrs (pkgs.stdenv.hostPlatform.libc == "glibc") {
|
||||
# /etc/rpc: RPC program numbers.
|
||||
"rpc".source = pkgs.glibc.out + "/etc/rpc";
|
||||
};
|
||||
|
||||
networking.proxy.envVars =
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
{ config, lib, pkgs, pkgs_i686, ... }:
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with pkgs;
|
||||
with lib;
|
||||
|
@ -19,7 +19,7 @@ let
|
|||
|
||||
# Forces 32bit pulseaudio and alsaPlugins to be built/supported for apps
|
||||
# using 32bit alsa on 64bit linux.
|
||||
enable32BitAlsaPlugins = cfg.support32Bit && stdenv.isx86_64 && (pkgs_i686.alsaLib != null && pkgs_i686.libpulseaudio != null);
|
||||
enable32BitAlsaPlugins = cfg.support32Bit && stdenv.isx86_64 && (pkgs.pkgsi686Linux.alsaLib != null && pkgs.pkgsi686Linux.libpulseaudio != null);
|
||||
|
||||
|
||||
myConfigFile =
|
||||
|
@ -63,7 +63,7 @@ let
|
|||
pcm_type.pulse {
|
||||
libs.native = ${pkgs.alsaPlugins}/lib/alsa-lib/libasound_module_pcm_pulse.so ;
|
||||
${lib.optionalString enable32BitAlsaPlugins
|
||||
"libs.32Bit = ${pkgs_i686.alsaPlugins}/lib/alsa-lib/libasound_module_pcm_pulse.so ;"}
|
||||
"libs.32Bit = ${pkgs.pkgsi686Linux.alsaPlugins}/lib/alsa-lib/libasound_module_pcm_pulse.so ;"}
|
||||
}
|
||||
pcm.!default {
|
||||
type pulse
|
||||
|
@ -72,7 +72,7 @@ let
|
|||
ctl_type.pulse {
|
||||
libs.native = ${pkgs.alsaPlugins}/lib/alsa-lib/libasound_module_ctl_pulse.so ;
|
||||
${lib.optionalString enable32BitAlsaPlugins
|
||||
"libs.32Bit = ${pkgs_i686.alsaPlugins}/lib/alsa-lib/libasound_module_ctl_pulse.so ;"}
|
||||
"libs.32Bit = ${pkgs.pkgsi686Linux.alsaPlugins}/lib/alsa-lib/libasound_module_ctl_pulse.so ;"}
|
||||
}
|
||||
ctl.!default {
|
||||
type pulse
|
||||
|
|
|
@ -19,7 +19,9 @@ let
|
|||
pkgs.diffutils
|
||||
pkgs.findutils
|
||||
pkgs.gawk
|
||||
pkgs.glibc # for ldd, getent
|
||||
pkgs.stdenv.cc.libc
|
||||
pkgs.getent
|
||||
pkgs.getconf
|
||||
pkgs.gnugrep
|
||||
pkgs.gnupatch
|
||||
pkgs.gnused
|
||||
|
@ -133,10 +135,6 @@ in
|
|||
# outputs TODO: note that the tools will often not be linked by default
|
||||
postBuild =
|
||||
''
|
||||
if [ -x $out/bin/gtk-update-icon-cache -a -f $out/share/icons/hicolor/index.theme ]; then
|
||||
$out/bin/gtk-update-icon-cache $out/share/icons/hicolor
|
||||
fi
|
||||
|
||||
if [ -x $out/bin/glib-compile-schemas -a -w $out/share/glib-2.0/schemas ]; then
|
||||
$out/bin/glib-compile-schemas $out/share/glib-2.0/schemas
|
||||
fi
|
||||
|
|
|
@ -3,17 +3,17 @@
|
|||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.hardware.ckb;
|
||||
cfg = config.hardware.ckb-next;
|
||||
|
||||
in
|
||||
{
|
||||
options.hardware.ckb = {
|
||||
options.hardware.ckb-next = {
|
||||
enable = mkEnableOption "the Corsair keyboard/mouse driver";
|
||||
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
default = pkgs.ckb;
|
||||
defaultText = "pkgs.ckb";
|
||||
default = pkgs.ckb-next;
|
||||
defaultText = "pkgs.ckb-next";
|
||||
description = ''
|
||||
The package implementing the Corsair keyboard/mouse driver.
|
||||
'';
|
||||
|
@ -23,12 +23,12 @@ in
|
|||
config = mkIf cfg.enable {
|
||||
environment.systemPackages = [ cfg.package ];
|
||||
|
||||
systemd.services.ckb = {
|
||||
description = "Corsair Keyboard Daemon";
|
||||
systemd.services.ckb-next = {
|
||||
description = "Corsair Keyboards and Mice Daemon";
|
||||
wantedBy = ["multi-user.target"];
|
||||
script = "${cfg.package}/bin/ckb-daemon";
|
||||
script = "exec ${cfg.package}/bin/ckb-next-daemon";
|
||||
serviceConfig = {
|
||||
Restart = "always";
|
||||
Restart = "on-failure";
|
||||
StandardOutput = "syslog";
|
||||
};
|
||||
};
|
|
@ -1,4 +1,4 @@
|
|||
{ config, lib, pkgs, pkgs_i686, ... }:
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
|
@ -148,7 +148,7 @@ in
|
|||
[ "/run/opengl-driver/share" ] ++ optional cfg.driSupport32Bit "/run/opengl-driver-32/share";
|
||||
|
||||
hardware.opengl.package = mkDefault (makePackage pkgs);
|
||||
hardware.opengl.package32 = mkDefault (makePackage pkgs_i686);
|
||||
hardware.opengl.package32 = mkDefault (makePackage pkgs.pkgsi686Linux);
|
||||
|
||||
boot.extraModulePackages = optional (elem "virtualbox" videoDrivers) kernelPackages.virtualboxGuestAdditions;
|
||||
};
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
# This module provides the proprietary AMDGPU-PRO drivers.
|
||||
|
||||
{ config, lib, pkgs, pkgs_i686, ... }:
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
|
@ -11,7 +11,7 @@ let
|
|||
enabled = elem "amdgpu-pro" drivers;
|
||||
|
||||
package = config.boot.kernelPackages.amdgpu-pro;
|
||||
package32 = pkgs_i686.linuxPackages.amdgpu-pro.override { libsOnly = true; kernel = null; };
|
||||
package32 = pkgs.pkgsi686Linux.linuxPackages.amdgpu-pro.override { libsOnly = true; kernel = null; };
|
||||
|
||||
opengl = config.hardware.opengl;
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
# This module provides the proprietary ATI X11 / OpenGL drivers.
|
||||
|
||||
{ config, lib, pkgs_i686, ... }:
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
|
@ -24,7 +24,7 @@ in
|
|||
{ name = "fglrx"; modules = [ ati_x11 ]; libPath = [ "${ati_x11}/lib" ]; };
|
||||
|
||||
hardware.opengl.package = ati_x11;
|
||||
hardware.opengl.package32 = pkgs_i686.linuxPackages.ati_drivers_x11.override { libsOnly = true; kernel = null; };
|
||||
hardware.opengl.package32 = pkgs.pkgsi686Linux.linuxPackages.ati_drivers_x11.override { libsOnly = true; kernel = null; };
|
||||
|
||||
environment.systemPackages = [ ati_x11 ];
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
# This module provides the proprietary NVIDIA X11 / OpenGL drivers.
|
||||
|
||||
{ stdenv, config, lib, pkgs, pkgs_i686, ... }:
|
||||
{ stdenv, config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
|
@ -25,7 +25,7 @@ let
|
|||
nvidia_x11 = nvidiaForKernel config.boot.kernelPackages;
|
||||
nvidia_libs32 =
|
||||
if versionOlder nvidia_x11.version "391" then
|
||||
((nvidiaForKernel pkgs_i686.linuxPackages).override { libsOnly = true; kernel = null; }).out
|
||||
((nvidiaForKernel pkgs.pkgsi686Linux.linuxPackages).override { libsOnly = true; kernel = null; }).out
|
||||
else
|
||||
(nvidiaForKernel config.boot.kernelPackages).lib32;
|
||||
|
||||
|
|
|
@ -1,9 +1,13 @@
|
|||
{ system ? builtins.currentSystem
|
||||
, config ? {}
|
||||
, networkExpr
|
||||
}:
|
||||
|
||||
let nodes = import networkExpr; in
|
||||
|
||||
with import ../../../../lib/testing.nix { inherit system; };
|
||||
with import ../../../../lib/testing.nix {
|
||||
inherit system;
|
||||
pkgs = import ../.. { inherit system config; };
|
||||
};
|
||||
|
||||
(makeTest { inherit nodes; testScript = ""; }).driver
|
||||
|
|
|
@ -331,6 +331,11 @@
|
|||
zeronet = 304;
|
||||
lirc = 305;
|
||||
lidarr = 306;
|
||||
slurm = 307;
|
||||
kapacitor = 308;
|
||||
solr = 309;
|
||||
alerta = 310;
|
||||
minetest = 311;
|
||||
|
||||
# When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
|
||||
|
||||
|
@ -622,6 +627,11 @@
|
|||
zeronet = 304;
|
||||
lirc = 305;
|
||||
lidarr = 306;
|
||||
slurm = 307;
|
||||
kapacitor = 308;
|
||||
solr = 309;
|
||||
alerta = 310;
|
||||
minetest = 311;
|
||||
|
||||
# When adding a gid, make sure it doesn't match an existing
|
||||
# uid. Users and groups with the same name should have equal
|
||||
|
|
|
@ -1,9 +1,10 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
{ config, options, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.nixpkgs;
|
||||
opt = options.nixpkgs;
|
||||
|
||||
isConfig = x:
|
||||
builtins.isAttrs x || lib.isFunction x;
|
||||
|
@ -54,6 +55,12 @@ let
|
|||
check = builtins.isAttrs;
|
||||
};
|
||||
|
||||
defaultPkgs = import ../../../pkgs/top-level/default.nix {
|
||||
inherit (cfg) config overlays localSystem crossSystem;
|
||||
};
|
||||
|
||||
finalPkgs = if opt.pkgs.isDefined then cfg.pkgs.appendOverlays cfg.overlays else defaultPkgs;
|
||||
|
||||
in
|
||||
|
||||
{
|
||||
|
@ -61,21 +68,25 @@ in
|
|||
|
||||
pkgs = mkOption {
|
||||
defaultText = literalExample
|
||||
''import "''${nixos}/.." {
|
||||
''import "''${nixos}/../pkgs/top-level" {
|
||||
inherit (cfg) config overlays localSystem crossSystem;
|
||||
}
|
||||
'';
|
||||
default = import ../../.. {
|
||||
inherit (cfg) config overlays localSystem crossSystem;
|
||||
};
|
||||
type = pkgsType;
|
||||
example = literalExample ''import <nixpkgs> {}'';
|
||||
description = ''
|
||||
This is the evaluation of Nixpkgs that will be provided to
|
||||
all NixOS modules. Defining this option has the effect of
|
||||
ignoring the other options that would otherwise be used to
|
||||
evaluate Nixpkgs, because those are arguments to the default
|
||||
value. The default value imports the Nixpkgs source files
|
||||
If set, the pkgs argument to all NixOS modules is the value of
|
||||
this option, extended with <code>nixpkgs.overlays</code>, if
|
||||
that is also set. Either <code>nixpkgs.crossSystem</code> or
|
||||
<code>nixpkgs.localSystem</code> will be used in an assertion
|
||||
to check that the NixOS and Nixpkgs architectures match. Any
|
||||
other options in <code>nixpkgs.*</code>, notably <code>config</code>,
|
||||
will be ignored.
|
||||
|
||||
If unset, the pkgs argument to all NixOS modules is determined
|
||||
as shown in the default value for this option.
|
||||
|
||||
The default value imports the Nixpkgs source files
|
||||
relative to the location of this NixOS module, because
|
||||
NixOS and Nixpkgs are distributed together for consistency,
|
||||
so the <code>nixos</code> in the default value is in fact a
|
||||
|
@ -128,12 +139,14 @@ in
|
|||
description = ''
|
||||
List of overlays to use with the Nix Packages collection.
|
||||
(For details, see the Nixpkgs documentation.) It allows
|
||||
you to override packages globally. This is a function that
|
||||
you to override packages globally. Each function in the list
|
||||
takes as an argument the <emphasis>original</emphasis> Nixpkgs.
|
||||
The first argument should be used for finding dependencies, and
|
||||
the second should be used for overriding recipes.
|
||||
|
||||
Ignored when <code>nixpkgs.pkgs</code> is set.
|
||||
If <code>nixpkgs.pkgs</code> is set, overlays specified here
|
||||
will be applied after the overlays that were already present
|
||||
in <code>nixpkgs.pkgs</code>.
|
||||
'';
|
||||
};
|
||||
|
||||
|
@ -207,8 +220,26 @@ in
|
|||
|
||||
config = {
|
||||
_module.args = {
|
||||
pkgs = cfg.pkgs;
|
||||
pkgs_i686 = cfg.pkgs.pkgsi686Linux;
|
||||
pkgs = finalPkgs;
|
||||
};
|
||||
|
||||
assertions = [
|
||||
(
|
||||
let
|
||||
nixosExpectedSystem =
|
||||
if config.nixpkgs.crossSystem != null
|
||||
then config.nixpkgs.crossSystem.system
|
||||
else config.nixpkgs.localSystem.system;
|
||||
nixosOption =
|
||||
if config.nixpkgs.crossSystem != null
|
||||
then "nixpkgs.crossSystem"
|
||||
else "nixpkgs.localSystem";
|
||||
pkgsSystem = finalPkgs.stdenv.targetPlatform.system;
|
||||
in {
|
||||
assertion = nixosExpectedSystem == pkgsSystem;
|
||||
message = "The NixOS nixpkgs.pkgs option was set to a Nixpkgs invocation that compiles to target system ${pkgsSystem} but NixOS was configured for system ${nixosExpectedSystem} via NixOS option ${nixosOption}. The NixOS system settings must match the Nixpkgs target system.";
|
||||
}
|
||||
)
|
||||
];
|
||||
};
|
||||
}
|
||||
|
|
|
@ -11,6 +11,7 @@
|
|||
./config/xdg/icons.nix
|
||||
./config/xdg/menus.nix
|
||||
./config/xdg/mime.nix
|
||||
./config/gtk/gtk-icon-cache.nix
|
||||
./config/gnu.nix
|
||||
./config/i18n.nix
|
||||
./config/iproute2.nix
|
||||
|
@ -34,7 +35,7 @@
|
|||
./config/zram.nix
|
||||
./hardware/all-firmware.nix
|
||||
./hardware/brightnessctl.nix
|
||||
./hardware/ckb.nix
|
||||
./hardware/ckb-next.nix
|
||||
./hardware/cpu/amd-microcode.nix
|
||||
./hardware/cpu/intel-microcode.nix
|
||||
./hardware/digitalbitbox.nix
|
||||
|
@ -90,6 +91,7 @@
|
|||
./programs/criu.nix
|
||||
./programs/dconf.nix
|
||||
./programs/digitalbitbox/default.nix
|
||||
./programs/dmrconfig.nix
|
||||
./programs/environment.nix
|
||||
./programs/firejail.nix
|
||||
./programs/fish.nix
|
||||
|
@ -419,6 +421,7 @@
|
|||
./services/misc/weechat.nix
|
||||
./services/misc/xmr-stak.nix
|
||||
./services/misc/zookeeper.nix
|
||||
./services/monitoring/alerta.nix
|
||||
./services/monitoring/apcupsd.nix
|
||||
./services/monitoring/arbtt.nix
|
||||
./services/monitoring/bosun.nix
|
||||
|
@ -429,10 +432,12 @@
|
|||
./services/monitoring/dd-agent/dd-agent.nix
|
||||
./services/monitoring/fusion-inventory.nix
|
||||
./services/monitoring/grafana.nix
|
||||
./services/monitoring/grafana-reporter.nix
|
||||
./services/monitoring/graphite.nix
|
||||
./services/monitoring/hdaps.nix
|
||||
./services/monitoring/heapster.nix
|
||||
./services/monitoring/incron.nix
|
||||
./services/monitoring/kapacitor.nix
|
||||
./services/monitoring/longview.nix
|
||||
./services/monitoring/monit.nix
|
||||
./services/monitoring/munin.nix
|
||||
|
|
|
@ -63,7 +63,7 @@ with lib;
|
|||
# Tell the Nix evaluator to garbage collect more aggressively.
|
||||
# This is desirable in memory-constrained environments that don't
|
||||
# (yet) have swap set up.
|
||||
environment.variables.GC_INITIAL_HEAP_SIZE = "100000";
|
||||
environment.variables.GC_INITIAL_HEAP_SIZE = "1M";
|
||||
|
||||
# Make the installer more likely to succeed in low memory
|
||||
# environments. The kernel's overcommit heuristics bite us
|
||||
|
|
|
@ -16,7 +16,7 @@ let
|
|||
# programmable completion. If we do, enable all modules installed in
|
||||
# the system and user profile in obsolete /etc/bash_completion.d/
|
||||
# directories. Bash loads completions in all
|
||||
# $XDG_DATA_DIRS/share/bash-completion/completions/
|
||||
# $XDG_DATA_DIRS/bash-completion/completions/
|
||||
# on demand, so they do not need to be sourced here.
|
||||
if shopt -q progcomp &>/dev/null; then
|
||||
. "${pkgs.bash-completion}/etc/profile.d/bash_completion.sh"
|
||||
|
|
nixos/modules/programs/dmrconfig.nix (new file, 38 lines)
@ -0,0 +1,38 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.programs.dmrconfig;
|
||||
|
||||
in {
|
||||
meta.maintainers = [ maintainers.etu ];
|
||||
|
||||
###### interface
|
||||
options = {
|
||||
programs.dmrconfig = {
|
||||
enable = mkOption {
|
||||
default = false;
|
||||
type = types.bool;
|
||||
description = ''
|
||||
Whether to configure the system to enable use of dmrconfig. This
|
||||
enables the required udev rules and installs the program.
|
||||
'';
|
||||
relatedPackages = [ "dmrconfig" ];
|
||||
};
|
||||
|
||||
package = mkOption {
|
||||
default = pkgs.dmrconfig;
|
||||
type = types.package;
|
||||
defaultText = "pkgs.dmrconfig";
|
||||
description = "dmrconfig derivation to use";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
###### implementation
|
||||
config = mkIf cfg.enable {
|
||||
environment.systemPackages = [ cfg.package ];
|
||||
services.udev.packages = [ cfg.package ];
|
||||
};
|
||||
}
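Enabling the new module from a system configuration is then a single line (illustrative):

    programs.dmrconfig.enable = true; # installs dmrconfig and its udev rules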
|
|
@ -13,7 +13,7 @@ with lib;
|
|||
# Set up the per-user profile.
|
||||
mkdir -m 0755 -p "$NIX_USER_PROFILE_DIR"
|
||||
if [ "$(stat --printf '%u' "$NIX_USER_PROFILE_DIR")" != "$(id -u)" ]; then
|
||||
echo "WARNING: bad ownership on $NIX_USER_PROFILE_DIR, should be $(id -u)" >&2
|
||||
echo "WARNING: the per-user profile dir $NIX_USER_PROFILE_DIR should belong to user id $(id -u)" >&2
|
||||
fi
|
||||
|
||||
if [ -w "$HOME" ]; then
|
||||
|
@@ -35,7 +35,7 @@ with lib;
  NIX_USER_GCROOTS_DIR="/nix/var/nix/gcroots/per-user/$USER"
  mkdir -m 0755 -p "$NIX_USER_GCROOTS_DIR"
  if [ "$(stat --printf '%u' "$NIX_USER_GCROOTS_DIR")" != "$(id -u)" ]; then
    echo "WARNING: bad ownership on $NIX_USER_GCROOTS_DIR, should be $(id -u)" >&2
    echo "WARNING: the per-user gcroots dir $NIX_USER_GCROOTS_DIR should belong to user id $(id -u)" >&2
  fi

  # Set up a default Nix expression from which to install stuff.
@@ -5,6 +5,15 @@ with lib;
let
  cfg = config.programs.sway-beta;
  swayPackage = cfg.package;

  swayWrapped = pkgs.writeShellScriptBin "sway" ''
    ${cfg.extraSessionCommands}
    exec ${pkgs.dbus.dbus-launch} --exit-with-session ${swayPackage}/bin/sway
  '';
  swayJoined = pkgs.symlinkJoin {
    name = "sway-joined";
    paths = [ swayWrapped swayPackage ];
  };
in {
  options.programs.sway-beta = {
    enable = mkEnableOption ''
@@ -20,13 +29,30 @@ in {
      '';
    };

    extraSessionCommands = mkOption {
      type = types.lines;
      default = "";
      example = ''
        export SDL_VIDEODRIVER=wayland
        # needs qt5.qtwayland in systemPackages
        export QT_QPA_PLATFORM=wayland
        export QT_WAYLAND_DISABLE_WINDOWDECORATION="1"
        # Fix for some Java AWT applications (e.g. Android Studio),
        # use this if they aren't displayed properly:
        export _JAVA_AWT_WM_NONREPARENTING=1
      '';
      description = ''
        Shell commands executed just before Sway is started.
      '';
    };

    extraPackages = mkOption {
      type = with types; listOf package;
      default = with pkgs; [
        xwayland dmenu
        xwayland rxvt_unicode dmenu
      ];
      defaultText = literalExample ''
        with pkgs; [ xwayland dmenu ];
        with pkgs; [ xwayland rxvt_unicode dmenu ];
      '';
      example = literalExample ''
        with pkgs; [
@@ -42,7 +68,7 @@ in {
    };

  config = mkIf cfg.enable {
    environment.systemPackages = [ swayPackage ] ++ cfg.extraPackages;
    environment.systemPackages = [ swayJoined ] ++ cfg.extraPackages;
    security.pam.services.swaylock = {};
    hardware.opengl.enable = mkDefault true;
    fonts.enableDefaultFonts = mkDefault true;

@@ -51,4 +77,3 @@ in {

  meta.maintainers = with lib.maintainers; [ gnidorah primeos colemickens ];
}
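A minimal usage sketch of the options touched by this hunk (values are illustrative, not part of the diff): the wrapper built with writeShellScriptBin runs extraSessionCommands and then execs sway, and the symlinkJoin result is what lands in environment.systemPackages.

  { pkgs, ... }:
  {
    programs.sway-beta = {
      enable = true;
      # Sourced by the generated wrapper before sway starts.
      extraSessionCommands = ''
        export SDL_VIDEODRIVER=wayland
        export QT_QPA_PLATFORM=wayland
      '';
      # Extra tools installed next to sway (matches the new default above).
      extraPackages = with pkgs; [ xwayland rxvt_unicode dmenu ];
    };
  }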
@@ -28,7 +28,10 @@ with lib;
    (config:
      let enabled = getAttrFromPath [ "services" "printing" "gutenprint" ] config;
      in if enabled then [ pkgs.gutenprint ] else [ ]))
  (mkRenamedOptionModule [ "services" "ddclient" "domain" ] [ "services" "ddclient" "domains" ])
  (mkChangedOptionModule [ "services" "ddclient" "domain" ] [ "services" "ddclient" "domains" ]
    (config:
      let value = getAttrFromPath [ "services" "ddclient" "domain" ] config;
      in if value != "" then [ value ] else []))
  (mkRemovedOptionModule [ "services" "ddclient" "homeDir" ] "")
  (mkRenamedOptionModule [ "services" "elasticsearch" "host" ] [ "services" "elasticsearch" "listenAddress" ])
  (mkRenamedOptionModule [ "services" "graphite" "api" "host" ] [ "services" "graphite" "api" "listenAddress" ])
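For readers unfamiliar with the helper used here: mkChangedOptionModule renames an option while mapping the old value onto the new option's type. A hedged sketch with made-up option names (the services.example paths are hypothetical, not part of this diff):

  (mkChangedOptionModule
    [ "services" "example" "address" ]      # hypothetical old option (a string)
    [ "services" "example" "addresses" ]    # hypothetical new option (a list)
    (config:
      let value = getAttrFromPath [ "services" "example" "address" ] config;
      # Wrap the old single value in a list; drop it when it was left empty.
      in if value != "" then [ value ] else []))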
@@ -279,6 +282,10 @@ with lib;
  (mkRenamedOptionModule [ "programs" "man" "enable" ] [ "documentation" "man" "enable" ])
  (mkRenamedOptionModule [ "services" "nixosManual" "enable" ] [ "documentation" "nixos" "enable" ])

  # ckb
  (mkRenamedOptionModule [ "hardware" "ckb" "enable" ] [ "hardware" "ckb-next" "enable" ])
  (mkRenamedOptionModule [ "hardware" "ckb" "package" ] [ "hardware" "ckb-next" "package" ])

] ++ (flip map [ "blackboxExporter" "collectdExporter" "fritzboxExporter"
      "jsonExporter" "minioExporter" "nginxExporter" "nodeExporter"
      "snmpExporter" "unifiExporter" "varnishExporter" ]
@@ -28,7 +28,7 @@ with lib;
  capability setuid,
  network inet raw,

  ${pkgs.glibc.out}/lib/*.so mr,
  ${pkgs.stdenv.cc.libc.out}/lib/*.so mr,
  ${pkgs.libcap.lib}/lib/libcap.so* mr,
  ${pkgs.attr.out}/lib/libattr.so* mr,
@ -170,4 +170,6 @@ in {
|
|||
'';
|
||||
}) cfg.params;
|
||||
};
|
||||
|
||||
meta.maintainers = with lib.maintainers; [ ekleog ];
|
||||
}
|
||||
|
|
|
@@ -29,7 +29,7 @@ with lib;

      description = "Hardware RNG Entropy Gatherer Daemon";

      serviceConfig.ExecStart = "${pkgs.rng-tools}/sbin/rngd -f -v";
      serviceConfig.ExecStart = "${pkgs.rng-tools}/sbin/rngd -f";
    };
  };
}
@@ -53,6 +53,9 @@ in
        Type = "notify";
        NotifyAccess = "all";
      };
      restartTriggers = [
        config.environment.etc."salt/master".source
      ];
    };
  };
@@ -15,7 +15,6 @@ let
    # Default is in /etc/salt/pki/minion
    pki_dir = "/var/lib/salt/pki/minion";
  } cfg.configuration;
  configDir = pkgs.writeTextDir "minion" (builtins.toJSON fullConfig);

in

@@ -28,15 +27,24 @@ in
        default = {};
        description = ''
          Salt minion configuration as Nix attribute set.
          See <link xlink:href="https://docs.saltstack.com/en/latest/ref/configuration/minion.html"/>
          for details.
        '';
      };
    };
  };

  config = mkIf cfg.enable {
    environment.systemPackages = with pkgs; [ salt ];
    environment = {
      # Set this up in /etc/salt/minion so `salt-call`, etc. work.
      # The alternatives are
      # - passing --config-dir to all salt commands, not just the minion unit,
      # - setting a global environment variable.
      etc."salt/minion".source = pkgs.writeText "minion" (
        builtins.toJSON fullConfig
      );
      systemPackages = with pkgs; [ salt ];
    };
    systemd.services.salt-minion = {
      description = "Salt Minion";
      wantedBy = [ "multi-user.target" ];

@@ -45,11 +53,14 @@ in
        utillinux
      ];
      serviceConfig = {
        ExecStart = "${pkgs.salt}/bin/salt-minion --config-dir=${configDir}";
        ExecStart = "${pkgs.salt}/bin/salt-minion";
        LimitNOFILE = 8192;
        Type = "notify";
        NotifyAccess = "all";
      };
      restartTriggers = [
        config.environment.etc."salt/minion".source
      ];
    };
  };
}
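A short sketch of how the reworked module is consumed (the master address is an illustrative assumption); because the configuration now also lands in /etc/salt/minion, plain salt-call works without --config-dir:

  services.salt.minion = {
    enable = true;
    configuration = {
      # Assumption: the address of your salt master.
      master = "salt.example.com";
      # Merged with the module's pki_dir default shown above and
      # serialized with builtins.toJSON (JSON is valid YAML).
    };
  };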
@@ -87,9 +87,19 @@ in {
        }
      '';
      description = ''
        New style config options.
        Configuration options in RabbitMQ's new config file format,
        which is a simple key-value format that can not express nested
        data structures. This is known as the <literal>rabbitmq.conf</literal> file,
        although outside NixOS that filename may have Erlang syntax, particularly
        prior to RabbitMQ 3.7.0.

        See http://www.rabbitmq.com/configure.html
        If you do need to express nested data structures, you can use the
        <literal>config</literal> option. Configuration from <literal>config</literal>
        will be merged into these options by RabbitMQ at runtime to
        form the final configuration.

        See http://www.rabbitmq.com/configure.html#config-items
        For the distinct formats, see http://www.rabbitmq.com/configure.html#config-file-formats
      '';
    };
@@ -97,10 +107,17 @@ in {
      default = "";
      type = types.str;
      description = ''
        Verbatim advanced configuration file contents.
        Preferred way is to use configItems.
        Verbatim advanced configuration file contents using the Erlang syntax.
        This is also known as the <literal>advanced.config</literal> file or the old config format.

        See http://www.rabbitmq.com/configure.html
        <literal>configItems</literal> is preferred whenever possible. However, nested
        data structures can only be expressed properly using the <literal>config</literal> option.

        The contents of this option will be merged into the <literal>configItems</literal>
        by RabbitMQ at runtime to form the final configuration.

        See the second table on http://www.rabbitmq.com/configure.html#config-items
        For the distinct formats, see http://www.rabbitmq.com/configure.html#config-file-formats
      '';
    };
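To make the configItems/config split concrete, a hedged sketch (the key names follow RabbitMQ's documented rabbitmq.conf format and the values are illustrative, not taken from this diff):

  services.rabbitmq = {
    enable = true;
    # Flat key/value pairs -> rabbitmq.conf (new-style format).
    configItems = {
      "listeners.tcp.default" = "5672";
      "vm_memory_high_watermark.relative" = "0.7";
    };
    # Erlang-syntax advanced.config, only needed for nested structures.
    config = ''
      [ {rabbit, [ {log_levels, [{connection, info}]} ]} ].
    '';
  };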
|
|
@ -346,8 +346,12 @@ in {
|
|||
description = "Bacula File Daemon";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
path = [ pkgs.bacula ];
|
||||
serviceConfig.ExecStart = "${pkgs.bacula}/sbin/bacula-fd -f -u root -g bacula -c ${fd_conf}";
|
||||
serviceConfig.ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.bacula}/sbin/bacula-fd -f -u root -g bacula -c ${fd_conf}";
|
||||
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
|
||||
LogsDirectory = "bacula";
|
||||
StateDirectory = "bacula";
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.bacula-sd = mkIf sd_cfg.enable {
|
||||
|
@ -355,8 +359,12 @@ in {
|
|||
description = "Bacula Storage Daemon";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
path = [ pkgs.bacula ];
|
||||
serviceConfig.ExecStart = "${pkgs.bacula}/sbin/bacula-sd -f -u bacula -g bacula -c ${sd_conf}";
|
||||
serviceConfig.ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.bacula}/sbin/bacula-sd -f -u bacula -g bacula -c ${sd_conf}";
|
||||
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
|
||||
LogsDirectory = "bacula";
|
||||
StateDirectory = "bacula";
|
||||
};
|
||||
};
|
||||
|
||||
services.postgresql.enable = dir_cfg.enable == true;
|
||||
|
@ -366,8 +374,12 @@ in {
|
|||
description = "Bacula Director Daemon";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
path = [ pkgs.bacula ];
|
||||
serviceConfig.ExecStart = "${pkgs.bacula}/sbin/bacula-dir -f -u bacula -g bacula -c ${dir_conf}";
|
||||
serviceConfig.ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.bacula}/sbin/bacula-dir -f -u bacula -g bacula -c ${dir_conf}";
|
||||
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
|
||||
LogsDirectory = "bacula";
|
||||
StateDirectory = "bacula";
|
||||
};
|
||||
preStart = ''
|
||||
if ! test -e "${libDir}/db-created"; then
|
||||
${pkgs.postgresql}/bin/createuser --no-superuser --no-createdb --no-createrole bacula
|
||||
|
|
|
@ -20,6 +20,8 @@ let
|
|||
'';
|
||||
|
||||
script = ''
|
||||
umask 0077 # ensure backup is only readable by postgres user
|
||||
|
||||
if [ -e ${cfg.location}/${db}.sql.gz ]; then
|
||||
${pkgs.coreutils}/bin/mv ${cfg.location}/${db}.sql.gz ${cfg.location}/${db}.prev.sql.gz
|
||||
fi
|
||||
|
|
|
@@ -3,8 +3,13 @@
with lib;

let
  version = "1.14.10";
  version = "1.2.5";
  cfg = config.services.kubernetes.addons.dns;
  ports = {
    dns = 10053;
    health = 10054;
    metrics = 10055;
  };
in {
  options.services.kubernetes.addons.dns = {
    enable = mkEnableOption "kubernetes dns addon";
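The image options in this module feed pkgs.dockerTools.pullImage when the kubelet's seed images are built; a sketch of that call using the coredns values this diff sets further down (digest and hash copied from below, not re-verified):

  # Fetches the image at build time and produces a docker-loadable tarball.
  pkgs.dockerTools.pullImage {
    imageName = "coredns/coredns";
    imageDigest = "sha256:33c8da20b887ae12433ec5c40bfddefbbfa233d5ce11fb067122e68af30291d6";
    finalImageTag = "1.2.5";
    sha256 = "13q19rgwapv27xcs664dw502254yw4zw63insf6g2danidv2mg6i";
  }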
@ -27,49 +32,130 @@ in {
|
|||
type = types.str;
|
||||
};
|
||||
|
||||
kube-dns = mkOption {
|
||||
description = "Docker image to seed for the kube-dns main container.";
|
||||
type = types.attrs;
|
||||
default = {
|
||||
imageName = "k8s.gcr.io/k8s-dns-kube-dns-amd64";
|
||||
imageDigest = "sha256:b99fc3eee2a9f052f7eb4cc00f15eb12fc405fa41019baa2d6b79847ae7284a8";
|
||||
finalImageTag = version;
|
||||
sha256 = "0x583znk9smqn0fix7ld8sm5jgaxhqhx3fq97b1wkqm7iwhvl3pj";
|
||||
};
|
||||
replicas = mkOption {
|
||||
description = "Number of DNS pod replicas to deploy in the cluster.";
|
||||
default = 2;
|
||||
type = types.int;
|
||||
};
|
||||
|
||||
dnsmasq-nanny = mkOption {
|
||||
description = "Docker image to seed for the kube-dns dnsmasq container.";
|
||||
coredns = mkOption {
|
||||
description = "Docker image to seed for the CoreDNS container.";
|
||||
type = types.attrs;
|
||||
default = {
|
||||
imageName = "k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64";
|
||||
imageDigest = "sha256:bbb2a290a568125b3b996028958eb773f33b5b87a6b37bf38a28f8b62dddb3c8";
|
||||
imageName = "coredns/coredns";
|
||||
imageDigest = "sha256:33c8da20b887ae12433ec5c40bfddefbbfa233d5ce11fb067122e68af30291d6";
|
||||
finalImageTag = version;
|
||||
sha256 = "1fihml7s2mfwgac51cbqpylkwbivc8nyhgi4vb820s83zvl8a6y1";
|
||||
};
|
||||
};
|
||||
|
||||
sidecar = mkOption {
|
||||
description = "Docker image to seed for the kube-dns sidecar container.";
|
||||
type = types.attrs;
|
||||
default = {
|
||||
imageName = "k8s.gcr.io/k8s-dns-sidecar-amd64";
|
||||
imageDigest = "sha256:4f1ab957f87b94a5ec1edc26fae50da2175461f00afecf68940c4aa079bd08a4";
|
||||
finalImageTag = version;
|
||||
sha256 = "08l1bv5jgrhvjzpqpbinrkgvv52snc4fzyd8ya9v18ns2klyz7m0";
|
||||
sha256 = "13q19rgwapv27xcs664dw502254yw4zw63insf6g2danidv2mg6i";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
services.kubernetes.kubelet.seedDockerImages = with pkgs.dockerTools; [
|
||||
(pullImage cfg.kube-dns)
|
||||
(pullImage cfg.dnsmasq-nanny)
|
||||
(pullImage cfg.sidecar)
|
||||
];
|
||||
services.kubernetes.kubelet.seedDockerImages =
|
||||
singleton (pkgs.dockerTools.pullImage cfg.coredns);
|
||||
|
||||
services.kubernetes.addonManager.addons = {
|
||||
kubedns-deployment = {
|
||||
coredns-sa = {
|
||||
apiVersion = "v1";
|
||||
kind = "ServiceAccount";
|
||||
metadata = {
|
||||
labels = {
|
||||
"addonmanager.kubernetes.io/mode" = "Reconcile";
|
||||
"k8s-app" = "kube-dns";
|
||||
"kubernetes.io/cluster-service" = "true";
|
||||
};
|
||||
name = "coredns";
|
||||
namespace = "kube-system";
|
||||
};
|
||||
};
|
||||
|
||||
coredns-cr = {
|
||||
apiVersion = "rbac.authorization.k8s.io/v1beta1";
|
||||
kind = "ClusterRole";
|
||||
metadata = {
|
||||
labels = {
|
||||
"addonmanager.kubernetes.io/mode" = "Reconcile";
|
||||
"k8s-app" = "kube-dns";
|
||||
"kubernetes.io/cluster-service" = "true";
|
||||
"kubernetes.io/bootstrapping" = "rbac-defaults";
|
||||
};
|
||||
name = "system:coredns";
|
||||
};
|
||||
rules = [
|
||||
{
|
||||
apiGroups = [ "" ];
|
||||
resources = [ "endpoints" "services" "pods" "namespaces" ];
|
||||
verbs = [ "list" "watch" ];
|
||||
}
|
||||
{
|
||||
apiGroups = [ "" ];
|
||||
resources = [ "nodes" ];
|
||||
verbs = [ "get" ];
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
coredns-crb = {
|
||||
apiVersion = "rbac.authorization.k8s.io/v1beta1";
|
||||
kind = "ClusterRoleBinding";
|
||||
metadata = {
|
||||
annotations = {
|
||||
"rbac.authorization.kubernetes.io/autoupdate" = "true";
|
||||
};
|
||||
labels = {
|
||||
"addonmanager.kubernetes.io/mode" = "Reconcile";
|
||||
"k8s-app" = "kube-dns";
|
||||
"kubernetes.io/cluster-service" = "true";
|
||||
"kubernetes.io/bootstrapping" = "rbac-defaults";
|
||||
};
|
||||
name = "system:coredns";
|
||||
};
|
||||
roleRef = {
|
||||
apiGroup = "rbac.authorization.k8s.io";
|
||||
kind = "ClusterRole";
|
||||
name = "system:coredns";
|
||||
};
|
||||
subjects = [
|
||||
{
|
||||
kind = "ServiceAccount";
|
||||
name = "coredns";
|
||||
namespace = "kube-system";
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
coredns-cm = {
|
||||
apiVersion = "v1";
|
||||
kind = "ConfigMap";
|
||||
metadata = {
|
||||
labels = {
|
||||
"addonmanager.kubernetes.io/mode" = "Reconcile";
|
||||
"k8s-app" = "kube-dns";
|
||||
"kubernetes.io/cluster-service" = "true";
|
||||
};
|
||||
name = "coredns";
|
||||
namespace = "kube-system";
|
||||
};
|
||||
data = {
|
||||
Corefile = ".:${toString ports.dns} {
|
||||
errors
|
||||
health :${toString ports.health}
|
||||
kubernetes ${cfg.clusterDomain} in-addr.arpa ip6.arpa {
|
||||
pods insecure
|
||||
upstream
|
||||
fallthrough in-addr.arpa ip6.arpa
|
||||
}
|
||||
prometheus :${toString ports.metrics}
|
||||
proxy . /etc/resolv.conf
|
||||
cache 30
|
||||
loop
|
||||
reload
|
||||
loadbalance
|
||||
}";
|
||||
};
|
||||
};
|
||||
|
||||
coredns-deploy = {
|
||||
apiVersion = "extensions/v1beta1";
|
||||
kind = "Deployment";
|
||||
metadata = {
|
||||
|
@ -77,182 +163,96 @@ in {
|
|||
"addonmanager.kubernetes.io/mode" = "Reconcile";
|
||||
"k8s-app" = "kube-dns";
|
||||
"kubernetes.io/cluster-service" = "true";
|
||||
"kubernetes.io/name" = "CoreDNS";
|
||||
};
|
||||
name = "kube-dns";
|
||||
name = "coredns";
|
||||
namespace = "kube-system";
|
||||
};
|
||||
spec = {
|
||||
selector.matchLabels."k8s-app" = "kube-dns";
|
||||
replicas = cfg.replicas;
|
||||
selector = {
|
||||
matchLabels = { k8s-app = "kube-dns"; };
|
||||
};
|
||||
strategy = {
|
||||
rollingUpdate = {
|
||||
maxSurge = "10%";
|
||||
maxUnavailable = 0;
|
||||
};
|
||||
rollingUpdate = { maxUnavailable = 1; };
|
||||
type = "RollingUpdate";
|
||||
};
|
||||
template = {
|
||||
metadata = {
|
||||
annotations."scheduler.alpha.kubernetes.io/critical-pod" = "";
|
||||
labels.k8s-app = "kube-dns";
|
||||
labels = {
|
||||
k8s-app = "kube-dns";
|
||||
};
|
||||
};
|
||||
spec = {
|
||||
priorityClassName = "system-cluster-critical";
|
||||
containers = [
|
||||
{
|
||||
name = "kubedns";
|
||||
image = with cfg.kube-dns; "${imageName}:${finalImageTag}";
|
||||
args = [ "-conf" "/etc/coredns/Corefile" ];
|
||||
image = with cfg.coredns; "${imageName}:${finalImageTag}";
|
||||
imagePullPolicy = "Never";
|
||||
livenessProbe = {
|
||||
failureThreshold = 5;
|
||||
httpGet = {
|
||||
path = "/health";
|
||||
port = ports.health;
|
||||
scheme = "HTTP";
|
||||
};
|
||||
initialDelaySeconds = 60;
|
||||
successThreshold = 1;
|
||||
timeoutSeconds = 5;
|
||||
};
|
||||
name = "coredns";
|
||||
ports = [
|
||||
{
|
||||
containerPort = ports.dns;
|
||||
name = "dns";
|
||||
protocol = "UDP";
|
||||
}
|
||||
{
|
||||
containerPort = ports.dns;
|
||||
name = "dns-tcp";
|
||||
protocol = "TCP";
|
||||
}
|
||||
{
|
||||
containerPort = ports.metrics;
|
||||
name = "metrics";
|
||||
protocol = "TCP";
|
||||
}
|
||||
];
|
||||
resources = {
|
||||
limits.memory = "170Mi";
|
||||
limits = {
|
||||
memory = "170Mi";
|
||||
};
|
||||
requests = {
|
||||
cpu = "100m";
|
||||
memory = "70Mi";
|
||||
};
|
||||
};
|
||||
livenessProbe = {
|
||||
failureThreshold = 5;
|
||||
httpGet = {
|
||||
path = "/healthcheck/kubedns";
|
||||
port = 10054;
|
||||
scheme = "HTTP";
|
||||
};
|
||||
initialDelaySeconds = 60;
|
||||
successThreshold = 1;
|
||||
timeoutSeconds = 5;
|
||||
};
|
||||
readinessProbe = {
|
||||
httpGet = {
|
||||
path = "/readiness";
|
||||
port = 8081;
|
||||
scheme = "HTTP";
|
||||
};
|
||||
initialDelaySeconds = 3;
|
||||
timeoutSeconds = 5;
|
||||
};
|
||||
args = [
|
||||
"--domain=${cfg.clusterDomain}"
|
||||
"--dns-port=10053"
|
||||
"--config-dir=/kube-dns-config"
|
||||
"--v=2"
|
||||
];
|
||||
env = [
|
||||
{
|
||||
name = "PROMETHEUS_PORT";
|
||||
value = "10055";
|
||||
}
|
||||
];
|
||||
ports = [
|
||||
{
|
||||
containerPort = 10053;
|
||||
name = "dns-local";
|
||||
protocol = "UDP";
|
||||
}
|
||||
{
|
||||
containerPort = 10053;
|
||||
name = "dns-tcp-local";
|
||||
protocol = "TCP";
|
||||
}
|
||||
{
|
||||
containerPort = 10055;
|
||||
name = "metrics";
|
||||
protocol = "TCP";
|
||||
}
|
||||
];
|
||||
volumeMounts = [
|
||||
{
|
||||
mountPath = "/kube-dns-config";
|
||||
name = "kube-dns-config";
|
||||
}
|
||||
];
|
||||
}
|
||||
{
|
||||
name = "dnsmasq";
|
||||
image = with cfg.dnsmasq-nanny; "${imageName}:${finalImageTag}";
|
||||
livenessProbe = {
|
||||
httpGet = {
|
||||
path = "/healthcheck/dnsmasq";
|
||||
port = 10054;
|
||||
scheme = "HTTP";
|
||||
};
|
||||
initialDelaySeconds = 60;
|
||||
timeoutSeconds = 5;
|
||||
successThreshold = 1;
|
||||
failureThreshold = 5;
|
||||
};
|
||||
args = [
|
||||
"-v=2"
|
||||
"-logtostderr"
|
||||
"-configDir=/etc/k8s/dns/dnsmasq-nanny"
|
||||
"-restartDnsmasq=true"
|
||||
"--"
|
||||
"-k"
|
||||
"--cache-size=1000"
|
||||
"--log-facility=-"
|
||||
"--server=/${cfg.clusterDomain}/127.0.0.1#10053"
|
||||
"--server=/in-addr.arpa/127.0.0.1#10053"
|
||||
"--server=/ip6.arpa/127.0.0.1#10053"
|
||||
];
|
||||
ports = [
|
||||
{
|
||||
containerPort = 53;
|
||||
name = "dns";
|
||||
protocol = "UDP";
|
||||
}
|
||||
{
|
||||
containerPort = 53;
|
||||
name = "dns-tcp";
|
||||
protocol = "TCP";
|
||||
}
|
||||
];
|
||||
resources = {
|
||||
requests = {
|
||||
cpu = "150m";
|
||||
memory = "20Mi";
|
||||
securityContext = {
|
||||
allowPrivilegeEscalation = false;
|
||||
capabilities = {
|
||||
drop = [ "all" ];
|
||||
};
|
||||
readOnlyRootFilesystem = true;
|
||||
};
|
||||
volumeMounts = [
|
||||
{
|
||||
mountPath = "/etc/k8s/dns/dnsmasq-nanny";
|
||||
name = "kube-dns-config";
|
||||
mountPath = "/etc/coredns";
|
||||
name = "config-volume";
|
||||
readOnly = true;
|
||||
}
|
||||
];
|
||||
}
|
||||
{
|
||||
name = "sidecar";
|
||||
image = with cfg.sidecar; "${imageName}:${finalImageTag}";
|
||||
livenessProbe = {
|
||||
httpGet = {
|
||||
path = "/metrics";
|
||||
port = 10054;
|
||||
scheme = "HTTP";
|
||||
};
|
||||
initialDelaySeconds = 60;
|
||||
timeoutSeconds = 5;
|
||||
successThreshold = 1;
|
||||
failureThreshold = 5;
|
||||
};
|
||||
args = [
|
||||
"--v=2"
|
||||
"--logtostderr"
|
||||
"--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.${cfg.clusterDomain},5,A"
|
||||
"--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.${cfg.clusterDomain},5,A"
|
||||
];
|
||||
ports = [
|
||||
{
|
||||
containerPort = 10054;
|
||||
name = "metrics";
|
||||
protocol = "TCP";
|
||||
}
|
||||
];
|
||||
resources = {
|
||||
requests = {
|
||||
cpu = "10m";
|
||||
memory = "20Mi";
|
||||
};
|
||||
};
|
||||
}
|
||||
];
|
||||
dnsPolicy = "Default";
|
||||
serviceAccountName = "kube-dns";
|
||||
nodeSelector = {
|
||||
"beta.kubernetes.io/os" = "linux";
|
||||
};
|
||||
serviceAccountName = "coredns";
|
||||
tolerations = [
|
||||
{
|
||||
effect = "NoSchedule";
|
||||
key = "node-role.kubernetes.io/master";
|
||||
}
|
||||
{
|
||||
key = "CriticalAddonsOnly";
|
||||
operator = "Exists";
|
||||
|
@ -261,10 +261,15 @@ in {
|
|||
volumes = [
|
||||
{
|
||||
configMap = {
|
||||
name = "kube-dns";
|
||||
optional = true;
|
||||
items = [
|
||||
{
|
||||
key = "Corefile";
|
||||
path = "Corefile";
|
||||
}
|
||||
];
|
||||
name = "coredns";
|
||||
};
|
||||
name = "kube-dns-config";
|
||||
name = "config-volume";
|
||||
}
|
||||
];
|
||||
};
|
||||
|
@ -272,51 +277,40 @@ in {
|
|||
};
|
||||
};
|
||||
|
||||
kubedns-svc = {
|
||||
coredns-svc = {
|
||||
apiVersion = "v1";
|
||||
kind = "Service";
|
||||
metadata = {
|
||||
annotations = {
|
||||
"prometheus.io/port" = toString ports.metrics;
|
||||
"prometheus.io/scrape" = "true";
|
||||
};
|
||||
labels = {
|
||||
"addonmanager.kubernetes.io/mode" = "Reconcile";
|
||||
"k8s-app" = "kube-dns";
|
||||
"kubernetes.io/cluster-service" = "true";
|
||||
"kubernetes.io/name" = "KubeDNS";
|
||||
"kubernetes.io/name" = "CoreDNS";
|
||||
};
|
||||
name = "kube-dns";
|
||||
namespace = "kube-system";
|
||||
namespace = "kube-system";
|
||||
};
|
||||
spec = {
|
||||
clusterIP = cfg.clusterIp;
|
||||
ports = [
|
||||
{name = "dns"; port = 53; protocol = "UDP";}
|
||||
{name = "dns-tcp"; port = 53; protocol = "TCP";}
|
||||
{
|
||||
name = "dns";
|
||||
port = 53;
|
||||
targetPort = ports.dns;
|
||||
protocol = "UDP";
|
||||
}
|
||||
{
|
||||
name = "dns-tcp";
|
||||
port = 53;
|
||||
targetPort = ports.dns;
|
||||
protocol = "TCP";
|
||||
}
|
||||
];
|
||||
selector.k8s-app = "kube-dns";
|
||||
};
|
||||
};
|
||||
|
||||
kubedns-sa = {
|
||||
apiVersion = "v1";
|
||||
kind = "ServiceAccount";
|
||||
metadata = {
|
||||
name = "kube-dns";
|
||||
namespace = "kube-system";
|
||||
labels = {
|
||||
"kubernetes.io/cluster-service" = "true";
|
||||
"addonmanager.kubernetes.io/mode" = "Reconcile";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
kubedns-cm = {
|
||||
apiVersion = "v1";
|
||||
kind = "ConfigMap";
|
||||
metadata = {
|
||||
name = "kube-dns";
|
||||
namespace = "kube-system";
|
||||
labels = {
|
||||
"addonmanager.kubernetes.io/mode" = "EnsureExists";
|
||||
};
|
||||
selector = { k8s-app = "kube-dns"; };
|
||||
};
|
||||
};
|
||||
};
|
||||
|
|
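Taken together, the CoreDNS-based addon is switched on from the cluster configuration roughly like this (a sketch; clusterDomain and replicas are the options visible in this diff, with typical values assumed):

  services.kubernetes.addons.dns = {
    enable = true;
    clusterDomain = "cluster.local";   # assumption: the common default domain
    replicas = 2;                      # option introduced by this change
    # coredns = { imageName = ...; };  # override the seeded image if needed
  };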
|
@ -6,13 +6,18 @@ let
|
|||
|
||||
cfg = config.services.slurm;
|
||||
# configuration file can be generated by http://slurm.schedmd.com/configurator.html
|
||||
|
||||
defaultUser = "slurm";
|
||||
|
||||
configFile = pkgs.writeTextDir "slurm.conf"
|
||||
''
|
||||
ClusterName=${cfg.clusterName}
|
||||
StateSaveLocation=${cfg.stateSaveLocation}
|
||||
SlurmUser=${cfg.user}
|
||||
${optionalString (cfg.controlMachine != null) ''controlMachine=${cfg.controlMachine}''}
|
||||
${optionalString (cfg.controlAddr != null) ''controlAddr=${cfg.controlAddr}''}
|
||||
${optionalString (cfg.nodeName != null) ''nodeName=${cfg.nodeName}''}
|
||||
${optionalString (cfg.partitionName != null) ''partitionName=${cfg.partitionName}''}
|
||||
${toString (map (x: "NodeName=${x}\n") cfg.nodeName)}
|
||||
${toString (map (x: "PartitionName=${x}\n") cfg.partitionName)}
|
||||
PlugStackConfig=${plugStackConfig}
|
||||
ProctrackType=${cfg.procTrackType}
|
||||
${cfg.extraConfig}
|
||||
|
@ -24,12 +29,19 @@ let
|
|||
${cfg.extraPlugstackConfig}
|
||||
'';
|
||||
|
||||
|
||||
cgroupConfig = pkgs.writeTextDir "cgroup.conf"
|
||||
''
|
||||
${cfg.extraCgroupConfig}
|
||||
'';
|
||||
|
||||
slurmdbdConf = pkgs.writeTextDir "slurmdbd.conf"
|
||||
''
|
||||
DbdHost=${cfg.dbdserver.dbdHost}
|
||||
SlurmUser=${cfg.user}
|
||||
StorageType=accounting_storage/mysql
|
||||
${cfg.dbdserver.extraConfig}
|
||||
'';
|
||||
|
||||
# slurm expects some additional config files to be
|
||||
# in the same directory as slurm.conf
|
||||
etcSlurm = pkgs.symlinkJoin {
|
||||
|
@ -43,6 +55,8 @@ in
|
|||
|
||||
###### interface
|
||||
|
||||
meta.maintainers = [ maintainers.markuskowa ];
|
||||
|
||||
options = {
|
||||
|
||||
services.slurm = {
|
||||
|
@ -60,6 +74,27 @@ in
|
|||
};
|
||||
};
|
||||
|
||||
dbdserver = {
|
||||
enable = mkEnableOption "SlurmDBD service";
|
||||
|
||||
dbdHost = mkOption {
|
||||
type = types.str;
|
||||
default = config.networking.hostName;
|
||||
description = ''
|
||||
Hostname of the machine where <literal>slurmdbd</literal>
|
||||
is running (i.e. name returned by <literal>hostname -s</literal>).
|
||||
'';
|
||||
};
|
||||
|
||||
extraConfig = mkOption {
|
||||
type = types.lines;
|
||||
default = "";
|
||||
description = ''
|
||||
Extra configuration for <literal>slurmdbd.conf</literal>
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
client = {
|
||||
enable = mkEnableOption "slurm client daemon";
|
||||
};
|
||||
|
@ -116,9 +151,9 @@ in
|
|||
};
|
||||
|
||||
nodeName = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
example = "linux[1-32] CPUs=1 State=UNKNOWN";
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
example = literalExample ''[ "linux[1-32] CPUs=1 State=UNKNOWN" ];'';
|
||||
description = ''
|
||||
Name that SLURM uses to refer to a node (or base partition for BlueGene
|
||||
systems). Typically this would be the string that "/bin/hostname -s"
|
||||
|
@ -127,9 +162,9 @@ in
|
|||
};
|
||||
|
||||
partitionName = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
example = "debug Nodes=linux[1-32] Default=YES MaxTime=INFINITE State=UP";
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
example = literalExample ''[ "debug Nodes=linux[1-32] Default=YES MaxTime=INFINITE State=UP" ];'';
|
||||
description = ''
|
||||
Name by which the partition may be referenced. Note that now you have
|
||||
to write the partition's parameters after the name.
|
||||
|
@ -150,7 +185,7 @@ in
|
|||
};
|
||||
|
||||
procTrackType = mkOption {
|
||||
type = types.string;
|
||||
type = types.str;
|
||||
default = "proctrack/linuxproc";
|
||||
description = ''
|
||||
Plugin to be used for process tracking on a job step basis.
|
||||
|
@ -159,6 +194,25 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
stateSaveLocation = mkOption {
|
||||
type = types.str;
|
||||
default = "/var/spool/slurmctld";
|
||||
description = ''
|
||||
Directory into which the Slurm controller, slurmctld, saves its state.
|
||||
'';
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
default = defaultUser;
|
||||
description = ''
|
||||
Set this option when you want to run the slurmctld daemon
|
||||
as something else than the default slurm user "slurm".
|
||||
Note that the UID of this user needs to be the same
|
||||
on all nodes.
|
||||
'';
|
||||
};
|
||||
|
||||
extraConfig = mkOption {
|
||||
default = "";
|
||||
type = types.lines;
|
||||
|
@ -184,6 +238,8 @@ in
|
|||
used when <literal>procTrackType=proctrack/cgroup</literal>.
|
||||
'';
|
||||
};
|
||||
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
@ -220,12 +276,24 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
in mkIf (cfg.enableStools || cfg.client.enable || cfg.server.enable) {
|
||||
in mkIf ( cfg.enableStools ||
|
||||
cfg.client.enable ||
|
||||
cfg.server.enable ||
|
||||
cfg.dbdserver.enable ) {
|
||||
|
||||
environment.systemPackages = [ wrappedSlurm ];
|
||||
|
||||
services.munge.enable = mkDefault true;
|
||||
|
||||
# use a static uid as default to ensure it is the same on all nodes
|
||||
users.users.slurm = mkIf (cfg.user == defaultUser) {
|
||||
name = defaultUser;
|
||||
group = "slurm";
|
||||
uid = config.ids.uids.slurm;
|
||||
};
|
||||
|
||||
users.groups.slurm.gid = config.ids.uids.slurm;
|
||||
|
||||
systemd.services.slurmd = mkIf (cfg.client.enable) {
|
||||
path = with pkgs; [ wrappedSlurm coreutils ]
|
||||
++ lib.optional cfg.enableSrunX11 slurm-spank-x11;
|
||||
|
@ -261,6 +329,29 @@ in
|
|||
PIDFile = "/run/slurmctld.pid";
|
||||
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
|
||||
};
|
||||
|
||||
preStart = ''
|
||||
mkdir -p ${cfg.stateSaveLocation}
|
||||
chown -R ${cfg.user}:slurm ${cfg.stateSaveLocation}
|
||||
'';
|
||||
};
|
||||
|
||||
systemd.services.slurmdbd = mkIf (cfg.dbdserver.enable) {
|
||||
path = with pkgs; [ wrappedSlurm munge coreutils ];
|
||||
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" "munged.service" "mysql.service" ];
|
||||
requires = [ "munged.service" "mysql.service" ];
|
||||
|
||||
# slurm strips the last component off the path
|
||||
environment.SLURM_CONF = "${slurmdbdConf}/slurm.conf";
|
||||
|
||||
serviceConfig = {
|
||||
Type = "forking";
|
||||
ExecStart = "${cfg.package}/bin/slurmdbd";
|
||||
PIDFile = "/run/slurmdbd.pid";
|
||||
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
|
||||
};
|
||||
};
|
||||
|
||||
};
|
||||
|
|
|
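Pulling the new Slurm options together, a hedged configuration sketch (cluster, node, and partition strings are illustrative; note that nodeName and partitionName are now lists of strings):

  services.slurm = {
    server.enable = true;
    dbdserver.enable = true;                     # new slurmdbd sub-module
    clusterName = "mycluster";                   # assumption
    controlMachine = "control01";                # assumption
    nodeName = [ "linux[1-32] CPUs=1 State=UNKNOWN" ];
    partitionName = [ "debug Nodes=linux[1-32] Default=YES MaxTime=INFINITE State=UP" ];
    stateSaveLocation = "/var/spool/slurmctld";  # the new option's default
  };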
@@ -55,7 +55,7 @@ in

    package = mkOption {
      type = types.package;
      example = literalExample "pkgs.postgresql96";
      example = literalExample "pkgs.postgresql_9_6";
      description = ''
        PostgreSQL package to use.
      '';

@@ -118,7 +118,7 @@ in
    extraPlugins = mkOption {
      type = types.listOf types.path;
      default = [];
      example = literalExample "[ (pkgs.postgis.override { postgresql = pkgs.postgresql94; }) ]";
      example = literalExample "[ (pkgs.postgis.override { postgresql = pkgs.postgresql_9_4; }) ]";
      description = ''
        When this list contains elements a new store path is created.
        PostgreSQL and the elements are symlinked into it. Then pg_config,

@@ -167,9 +167,9 @@ in
      # Note: when changing the default, make it conditional on
      # ‘system.stateVersion’ to maintain compatibility with existing
      # systems!
      mkDefault (if versionAtLeast config.system.stateVersion "17.09" then pkgs.postgresql96
                 else if versionAtLeast config.system.stateVersion "16.03" then pkgs.postgresql95
                 else pkgs.postgresql94);
      mkDefault (if versionAtLeast config.system.stateVersion "17.09" then pkgs.postgresql_9_6
                 else if versionAtLeast config.system.stateVersion "16.03" then pkgs.postgresql_9_5
                 else pkgs.postgresql_9_4);

    services.postgresql.dataDir =
      mkDefault (if versionAtLeast config.system.stateVersion "17.09" then "/var/lib/postgresql/${config.services.postgresql.package.psqlSchema}"

@@ -271,5 +271,5 @@ in
  };

  meta.doc = ./postgresql.xml;

  meta.maintainers = with lib.maintainers; [ thoughtpolice ];
}
@@ -27,12 +27,12 @@
  <filename>configuration.nix</filename>:
<programlisting>
<xref linkend="opt-services.postgresql.enable"/> = true;
<xref linkend="opt-services.postgresql.package"/> = pkgs.postgresql94;
<xref linkend="opt-services.postgresql.package"/> = pkgs.postgresql_9_4;
</programlisting>
  Note that you are required to specify the desired version of PostgreSQL
  (e.g. <literal>pkgs.postgresql94</literal>). Since upgrading your PostgreSQL
  version requires a database dump and reload (see below), NixOS cannot
  provide a default value for
  (e.g. <literal>pkgs.postgresql_9_4</literal>). Since upgrading your
  PostgreSQL version requires a database dump and reload (see below), NixOS
  cannot provide a default value for
  <xref linkend="opt-services.postgresql.package"/> such as the most recent
  release of PostgreSQL.
</para>
@ -145,6 +145,7 @@ in {
|
|||
systemd.services.jupyter = {
|
||||
description = "Jupyter development server";
|
||||
|
||||
after = [ "network.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
# TODO: Patch notebook so we can explicitly pass in a shell
|
||||
|
|
|
@ -84,7 +84,9 @@ in
|
|||
home = "/var/lib/minetest";
|
||||
createHome = true;
|
||||
uid = config.ids.uids.minetest;
|
||||
group = "minetest";
|
||||
};
|
||||
users.groups.minetest.gid = config.ids.gids.minetest;
|
||||
|
||||
systemd.services.minetest-server = {
|
||||
description = "Minetest Server Service";
|
||||
|
@ -93,6 +95,7 @@ in
|
|||
|
||||
serviceConfig.Restart = "always";
|
||||
serviceConfig.User = "minetest";
|
||||
serviceConfig.Group = "minetest";
|
||||
|
||||
script = ''
|
||||
cd /var/lib/minetest
|
||||
|
|
|
@ -56,6 +56,32 @@ in
|
|||
{ Type = "dbus";
|
||||
BusName = "org.freedesktop.UPower";
|
||||
ExecStart = "@${cfg.package}/libexec/upowerd upowerd";
|
||||
Restart = "on-failure";
|
||||
# Upstream lockdown:
|
||||
# Filesystem lockdown
|
||||
ProtectSystem = "strict";
|
||||
# Needed by keyboard backlight support
|
||||
ProtectKernelTunables = false;
|
||||
ProtectControlGroups = true;
|
||||
ReadWritePaths = "/var/lib/upower";
|
||||
ProtectHome = true;
|
||||
PrivateTmp = true;
|
||||
|
||||
# Network
|
||||
# PrivateNetwork=true would block udev's netlink socket
|
||||
RestrictAddressFamilies = "AF_UNIX AF_NETLINK";
|
||||
|
||||
# Execute Mappings
|
||||
MemoryDenyWriteExecute = true;
|
||||
|
||||
# Modules
|
||||
ProtectKernelModules = true;
|
||||
|
||||
# Real-time
|
||||
RestrictRealtime = true;
|
||||
|
||||
# Privilege escalation
|
||||
NoNewPrivileges = true;
|
||||
};
|
||||
};
|
||||
|
||||
|
|
|
@ -176,4 +176,6 @@ in
|
|||
}
|
||||
) cfg.instances);
|
||||
};
|
||||
|
||||
meta.maintainers = with lib.maintainers; [ ekleog ];
|
||||
}
|
||||
|
|
|
@ -115,4 +115,6 @@ in
|
|||
};
|
||||
};
|
||||
};
|
||||
|
||||
meta.maintainers = with lib.maintainers; [ ekleog ];
|
||||
}
|
||||
|
|
|
@ -6,6 +6,7 @@ let
|
|||
|
||||
cfg = config.services.rspamd;
|
||||
opts = options.services.rspamd;
|
||||
postfixCfg = config.services.postfix;
|
||||
|
||||
bindSocketOpts = {options, config, ... }: {
|
||||
options = {
|
||||
|
@ -58,7 +59,7 @@ let
|
|||
};
|
||||
type = mkOption {
|
||||
type = types.nullOr (types.enum [
|
||||
"normal" "controller" "fuzzy_storage" "proxy" "lua"
|
||||
"normal" "controller" "fuzzy_storage" "rspamd_proxy" "lua"
|
||||
]);
|
||||
description = "The type of this worker";
|
||||
};
|
||||
|
@ -99,19 +100,21 @@ let
|
|||
description = "Additional entries to put verbatim into worker section of rspamd config file.";
|
||||
};
|
||||
};
|
||||
config = mkIf (name == "normal" || name == "controller" || name == "fuzzy") {
|
||||
config = mkIf (name == "normal" || name == "controller" || name == "fuzzy" || name == "rspamd_proxy") {
|
||||
type = mkDefault name;
|
||||
includes = mkDefault [ "$CONFDIR/worker-${name}.inc" ];
|
||||
bindSockets = mkDefault (if name == "normal"
|
||||
then [{
|
||||
socket = "/run/rspamd/rspamd.sock";
|
||||
mode = "0660";
|
||||
owner = cfg.user;
|
||||
group = cfg.group;
|
||||
}]
|
||||
else if name == "controller"
|
||||
then [ "localhost:11334" ]
|
||||
else [] );
|
||||
includes = mkDefault [ "$CONFDIR/worker-${if name == "rspamd_proxy" then "proxy" else name}.inc" ];
|
||||
bindSockets =
|
||||
let
|
||||
unixSocket = name: {
|
||||
mode = "0660";
|
||||
socket = "/run/rspamd/${name}.sock";
|
||||
owner = cfg.user;
|
||||
group = cfg.group;
|
||||
};
|
||||
in mkDefault (if name == "normal" then [(unixSocket "rspamd")]
|
||||
else if name == "controller" then [ "localhost:11334" ]
|
||||
else if name == "rspamd_proxy" then [ (unixSocket "proxy") ]
|
||||
else [] );
|
||||
};
|
||||
};
|
||||
|
||||
|
@ -127,28 +130,83 @@ let
|
|||
options {
|
||||
pidfile = "$RUNDIR/rspamd.pid";
|
||||
.include "$CONFDIR/options.inc"
|
||||
.include(try=true; priority=1,duplicate=merge) "$LOCAL_CONFDIR/local.d/options.inc"
|
||||
.include(try=true; priority=10) "$LOCAL_CONFDIR/override.d/options.inc"
|
||||
}
|
||||
|
||||
logging {
|
||||
type = "syslog";
|
||||
.include "$CONFDIR/logging.inc"
|
||||
.include(try=true; priority=1,duplicate=merge) "$LOCAL_CONFDIR/local.d/logging.inc"
|
||||
.include(try=true; priority=10) "$LOCAL_CONFDIR/override.d/logging.inc"
|
||||
}
|
||||
|
||||
${concatStringsSep "\n" (mapAttrsToList (name: value: ''
|
||||
worker ${optionalString (value.name != "normal" && value.name != "controller") "${value.name}"} {
|
||||
${concatStringsSep "\n" (mapAttrsToList (name: value: let
|
||||
includeName = if name == "rspamd_proxy" then "proxy" else name;
|
||||
tryOverride = if value.extraConfig == "" then "true" else "false";
|
||||
in ''
|
||||
worker "${value.type}" {
|
||||
type = "${value.type}";
|
||||
${optionalString (value.enable != null)
|
||||
"enabled = ${if value.enable != false then "yes" else "no"};"}
|
||||
${mkBindSockets value.enable value.bindSockets}
|
||||
${optionalString (value.count != null) "count = ${toString value.count};"}
|
||||
${concatStringsSep "\n " (map (each: ".include \"${each}\"") value.includes)}
|
||||
${value.extraConfig}
|
||||
.include(try=true; priority=1,duplicate=merge) "$LOCAL_CONFDIR/local.d/worker-${includeName}.inc"
|
||||
.include(try=${tryOverride}; priority=10) "$LOCAL_CONFDIR/override.d/worker-${includeName}.inc"
|
||||
}
|
||||
'') cfg.workers)}
|
||||
|
||||
${cfg.extraConfig}
|
||||
${optionalString (cfg.extraConfig != "") ''
|
||||
.include(priority=10) "$LOCAL_CONFDIR/override.d/extra-config.inc"
|
||||
''}
|
||||
'';
|
||||
|
||||
filterFiles = files: filterAttrs (n: v: v.enable) files;
|
||||
rspamdDir = pkgs.linkFarm "etc-rspamd-dir" (
|
||||
(mapAttrsToList (name: file: { name = "local.d/${name}"; path = file.source; }) (filterFiles cfg.locals)) ++
|
||||
(mapAttrsToList (name: file: { name = "override.d/${name}"; path = file.source; }) (filterFiles cfg.overrides)) ++
|
||||
(optional (cfg.localLuaRules != null) { name = "rspamd.local.lua"; path = cfg.localLuaRules; }) ++
|
||||
[ { name = "rspamd.conf"; path = rspamdConfFile; } ]
|
||||
);
|
||||
|
||||
configFileModule = prefix: { name, config, ... }: {
|
||||
options = {
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Whether this file ${prefix} should be generated. This
|
||||
option allows specific ${prefix} files to be disabled.
|
||||
'';
|
||||
};
|
||||
|
||||
text = mkOption {
|
||||
default = null;
|
||||
type = types.nullOr types.lines;
|
||||
description = "Text of the file.";
|
||||
};
|
||||
|
||||
source = mkOption {
|
||||
type = types.path;
|
||||
description = "Path of the source file.";
|
||||
};
|
||||
};
|
||||
config = {
|
||||
source = mkIf (config.text != null) (
|
||||
let name' = "rspamd-${prefix}-" + baseNameOf name;
|
||||
in mkDefault (pkgs.writeText name' config.text));
|
||||
};
|
||||
};
|
||||
|
||||
configOverrides =
|
||||
(mapAttrs' (n: v: nameValuePair "worker-${if n == "rspamd_proxy" then "proxy" else n}.inc" {
|
||||
text = v.extraConfig;
|
||||
})
|
||||
(filterAttrs (n: v: v.extraConfig != "") cfg.workers))
|
||||
// (if cfg.extraConfig == "" then {} else {
|
||||
"extra-config.inc".text = cfg.extraConfig;
|
||||
});
|
||||
in
|
||||
|
||||
{
|
||||
|
@ -167,6 +225,41 @@ in
|
|||
description = "Whether to run the rspamd daemon in debug mode.";
|
||||
};
|
||||
|
||||
locals = mkOption {
|
||||
type = with types; attrsOf (submodule (configFileModule "locals"));
|
||||
default = {};
|
||||
description = ''
|
||||
Local configuration files, written into <filename>/etc/rspamd/local.d/{name}</filename>.
|
||||
'';
|
||||
example = literalExample ''
|
||||
{ "redis.conf".source = "/nix/store/.../etc/dir/redis.conf";
|
||||
"arc.conf".text = "allow_envfrom_empty = true;";
|
||||
}
|
||||
'';
|
||||
};
|
||||
|
||||
overrides = mkOption {
|
||||
type = with types; attrsOf (submodule (configFileModule "overrides"));
|
||||
default = {};
|
||||
description = ''
|
||||
Overridden configuration files, written into <filename>/etc/rspamd/override.d/{name}</filename>.
|
||||
'';
|
||||
example = literalExample ''
|
||||
{ "redis.conf".source = "/nix/store/.../etc/dir/redis.conf";
|
||||
"arc.conf".text = "allow_envfrom_empty = true;";
|
||||
}
|
||||
'';
|
||||
};
|
||||
|
||||
localLuaRules = mkOption {
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
description = ''
|
||||
Path of file to link to <filename>/etc/rspamd/rspamd.local.lua</filename> for local
|
||||
rules written in Lua
|
||||
'';
|
||||
};
|
||||
|
||||
workers = mkOption {
|
||||
type = with types; attrsOf (submodule workerOpts);
|
||||
description = ''
|
||||
|
@ -210,7 +303,7 @@ in
|
|||
description = ''
|
||||
User to use when no root privileges are required.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = types.string;
|
||||
|
@ -218,7 +311,30 @@ in
|
|||
description = ''
|
||||
Group to use when no root privileges are required.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
postfix = {
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = "Add rspamd milter to postfix main.conf";
|
||||
};
|
||||
|
||||
config = mkOption {
|
||||
type = with types; attrsOf (either bool (either str (listOf str)));
|
||||
description = ''
|
||||
Addon to postfix configuration
|
||||
'';
|
||||
default = {
|
||||
smtpd_milters = ["unix:/run/rspamd/rspamd-milter.sock"];
|
||||
non_smtpd_milters = ["unix:/run/rspamd/rspamd-milter.sock"];
|
||||
};
|
||||
example = {
|
||||
smtpd_milters = ["unix:/run/rspamd/rspamd-milter.sock"];
|
||||
non_smtpd_milters = ["unix:/run/rspamd/rspamd-milter.sock"];
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
|
@ -226,6 +342,25 @@ in
|
|||
###### implementation
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
services.rspamd.overrides = configOverrides;
|
||||
services.rspamd.workers = mkIf cfg.postfix.enable {
|
||||
controller = {};
|
||||
rspamd_proxy = {
|
||||
bindSockets = [ {
|
||||
mode = "0660";
|
||||
socket = "/run/rspamd/rspamd-milter.sock";
|
||||
owner = cfg.user;
|
||||
group = postfixCfg.group;
|
||||
} ];
|
||||
extraConfig = ''
|
||||
upstream "local" {
|
||||
default = yes; # Self-scan upstreams are always default
|
||||
self_scan = yes; # Enable self-scan
|
||||
}
|
||||
'';
|
||||
};
|
||||
};
|
||||
services.postfix.config = mkIf cfg.postfix.enable cfg.postfix.config;
|
||||
|
||||
# Allow users to run 'rspamc' and 'rspamadm'.
|
||||
environment.systemPackages = [ pkgs.rspamd ];
|
||||
|
@ -242,16 +377,17 @@ in
|
|||
gid = config.ids.gids.rspamd;
|
||||
};
|
||||
|
||||
environment.etc."rspamd.conf".source = rspamdConfFile;
|
||||
environment.etc."rspamd".source = rspamdDir;
|
||||
|
||||
systemd.services.rspamd = {
|
||||
description = "Rspamd Service";
|
||||
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
restartTriggers = [ rspamdDir ];
|
||||
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.rspamd}/bin/rspamd ${optionalString cfg.debug "-d"} --user=${cfg.user} --group=${cfg.group} --pid=/run/rspamd.pid -c ${rspamdConfFile} -f";
|
||||
ExecStart = "${pkgs.rspamd}/bin/rspamd ${optionalString cfg.debug "-d"} --user=${cfg.user} --group=${cfg.group} --pid=/run/rspamd.pid -c /etc/rspamd/rspamd.conf -f";
|
||||
Restart = "always";
|
||||
RuntimeDirectory = "rspamd";
|
||||
PrivateTmp = true;
|
||||
|
|
|
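How the reworked rspamd module is typically driven after this change, as a hedged sketch: postfix.enable wires the new rspamd_proxy milter socket into Postfix, while locals and overrides populate /etc/rspamd/local.d and override.d (the arc.conf line is the example from this diff; the milter_headers entry is an illustrative assumption):

  services.rspamd = {
    enable = true;
    postfix.enable = true;   # adds smtpd_milters/non_smtpd_milters automatically
    locals."arc.conf".text = ''
      allow_envfrom_empty = true;
    '';
    overrides."milter_headers.conf".text = ''
      extended_spam_headers = yes;   # assumption: example local policy
    '';
  };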
@ -6,6 +6,7 @@ let
|
|||
cfg = config.services.gitea;
|
||||
gitea = cfg.package;
|
||||
pg = config.services.postgresql;
|
||||
useMysql = cfg.database.type == "mysql";
|
||||
usePostgresql = cfg.database.type == "postgres";
|
||||
configFile = pkgs.writeText "app.ini" ''
|
||||
APP_NAME = ${cfg.appName}
|
||||
|
@ -14,7 +15,7 @@ let
|
|||
|
||||
[database]
|
||||
DB_TYPE = ${cfg.database.type}
|
||||
HOST = ${cfg.database.host}:${toString cfg.database.port}
|
||||
HOST = ${if cfg.database.socket != null then cfg.database.socket else cfg.database.host + ":" + toString cfg.database.port}
|
||||
NAME = ${cfg.database.name}
|
||||
USER = ${cfg.database.user}
|
||||
PASSWD = #dbpass#
|
||||
|
@ -148,6 +149,13 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
socket = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
default = null;
|
||||
example = "/run/mysqld/mysqld.sock";
|
||||
description = "Path to the unix socket file to use for authentication.";
|
||||
};
|
||||
|
||||
path = mkOption {
|
||||
type = types.str;
|
||||
default = "${cfg.stateDir}/data/gitea.db";
|
||||
|
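A brief sketch of the new database.socket option in use (database name and user are assumptions); when socket is set, the generated app.ini uses it as HOST instead of host:port:

  services.gitea = {
    enable = true;
    database = {
      type = "mysql";
      socket = "/run/mysqld/mysqld.sock";  # the option's documented example
      name = "gitea";                      # assumption
      user = "gitea";                      # assumption
    };
  };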
@ -253,7 +261,7 @@ in
|
|||
|
||||
systemd.services.gitea = {
|
||||
description = "gitea";
|
||||
after = [ "network.target" "postgresql.service" ];
|
||||
after = [ "network.target" ] ++ lib.optional usePostgresql "postgresql.service" ++ lib.optional useMysql "mysql.service";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
path = [ gitea.bin ];
|
||||
|
||||
|
|
|
@ -14,15 +14,16 @@ let
|
|||
pathUrlQuote = url: replaceStrings ["/"] ["%2F"] url;
|
||||
pgSuperUser = config.services.postgresql.superUser;
|
||||
|
||||
databaseYml = ''
|
||||
production:
|
||||
adapter: postgresql
|
||||
database: ${cfg.databaseName}
|
||||
host: ${cfg.databaseHost}
|
||||
password: ${cfg.databasePassword}
|
||||
username: ${cfg.databaseUsername}
|
||||
encoding: utf8
|
||||
'';
|
||||
databaseConfig = {
|
||||
production = {
|
||||
adapter = "postgresql";
|
||||
database = cfg.databaseName;
|
||||
host = cfg.databaseHost;
|
||||
password = cfg.databasePassword;
|
||||
username = cfg.databaseUsername;
|
||||
encoding = "utf8";
|
||||
};
|
||||
};
|
||||
|
||||
gitalyToml = pkgs.writeText "gitaly.toml" ''
|
||||
socket_path = "${lib.escape ["\""] gitalySocket}"
|
||||
|
@ -45,35 +46,31 @@ let
|
|||
'') gitlabConfig.production.repositories.storages))}
|
||||
'';
|
||||
|
||||
gitlabShellYml = ''
|
||||
user: ${cfg.user}
|
||||
gitlab_url: "http+unix://${pathUrlQuote gitlabSocket}"
|
||||
http_settings:
|
||||
self_signed_cert: false
|
||||
repos_path: "${cfg.statePath}/repositories"
|
||||
secret_file: "${cfg.statePath}/config/gitlab_shell_secret"
|
||||
log_file: "${cfg.statePath}/log/gitlab-shell.log"
|
||||
custom_hooks_dir: "${cfg.statePath}/custom_hooks"
|
||||
redis:
|
||||
bin: ${pkgs.redis}/bin/redis-cli
|
||||
host: 127.0.0.1
|
||||
port: 6379
|
||||
database: 0
|
||||
namespace: resque:gitlab
|
||||
'';
|
||||
gitlabShellConfig = {
|
||||
user = cfg.user;
|
||||
gitlab_url = "http+unix://${pathUrlQuote gitlabSocket}";
|
||||
http_settings.self_signed_cert = false;
|
||||
repos_path = "${cfg.statePath}/repositories";
|
||||
secret_file = "${cfg.statePath}/config/gitlab_shell_secret";
|
||||
log_file = "${cfg.statePath}/log/gitlab-shell.log";
|
||||
custom_hooks_dir = "${cfg.statePath}/custom_hooks";
|
||||
redis = {
|
||||
bin = "${pkgs.redis}/bin/redis-cli";
|
||||
host = "127.0.0.1";
|
||||
port = 6379;
|
||||
database = 0;
|
||||
namespace = "resque:gitlab";
|
||||
};
|
||||
};
|
||||
|
||||
redisYml = ''
|
||||
production:
|
||||
url: redis://localhost:6379/
|
||||
'';
|
||||
redisConfig.production.url = "redis://localhost:6379/";
|
||||
|
||||
secretsYml = ''
|
||||
production:
|
||||
secret_key_base: ${cfg.secrets.secret}
|
||||
otp_key_base: ${cfg.secrets.otp}
|
||||
db_key_base: ${cfg.secrets.db}
|
||||
openid_connect_signing_key: ${builtins.toJSON cfg.secrets.jws}
|
||||
'';
|
||||
secretsConfig.production = {
|
||||
secret_key_base = cfg.secrets.secret;
|
||||
otp_key_base = cfg.secrets.otp;
|
||||
db_key_base = cfg.secrets.db;
|
||||
openid_connect_signing_key = cfg.secrets.jws;
|
||||
};
|
||||
|
||||
gitlabConfig = {
|
||||
# These are the default settings from config/gitlab.example.yml
|
||||
|
@ -115,12 +112,8 @@ let
|
|||
upload_pack = true;
|
||||
receive_pack = true;
|
||||
};
|
||||
workhorse = {
|
||||
secret_file = "${cfg.statePath}/.gitlab_workhorse_secret";
|
||||
};
|
||||
git = {
|
||||
bin_path = "git";
|
||||
};
|
||||
workhorse.secret_file = "${cfg.statePath}/.gitlab_workhorse_secret";
|
||||
git.bin_path = "git";
|
||||
monitoring = {
|
||||
ip_whitelist = [ "127.0.0.0/8" "::1/128" ];
|
||||
sidekiq_exporter = {
|
||||
|
@ -138,7 +131,7 @@ let
|
|||
HOME = "${cfg.statePath}/home";
|
||||
UNICORN_PATH = "${cfg.statePath}/";
|
||||
GITLAB_PATH = "${cfg.packages.gitlab}/share/gitlab/";
|
||||
GITLAB_STATE_PATH = "${cfg.statePath}";
|
||||
GITLAB_STATE_PATH = cfg.statePath;
|
||||
GITLAB_UPLOADS_PATH = "${cfg.statePath}/uploads";
|
||||
SCHEMA = "${cfg.statePath}/db/schema.rb";
|
||||
GITLAB_LOG_PATH = "${cfg.statePath}/log";
|
||||
|
@ -146,13 +139,11 @@ let
|
|||
GITLAB_SHELL_CONFIG_PATH = "${cfg.statePath}/shell/config.yml";
|
||||
GITLAB_SHELL_SECRET_PATH = "${cfg.statePath}/config/gitlab_shell_secret";
|
||||
GITLAB_SHELL_HOOKS_PATH = "${cfg.statePath}/shell/hooks";
|
||||
GITLAB_REDIS_CONFIG_FILE = pkgs.writeText "gitlab-redis.yml" redisYml;
|
||||
GITLAB_REDIS_CONFIG_FILE = pkgs.writeText "redis.yml" (builtins.toJSON redisConfig);
|
||||
prometheus_multiproc_dir = "/run/gitlab";
|
||||
RAILS_ENV = "production";
|
||||
};
|
||||
|
||||
unicornConfig = builtins.readFile ./defaultUnicornConfig.rb;
|
||||
|
||||
gitlab-rake = pkgs.stdenv.mkDerivation rec {
|
||||
name = "gitlab-rake";
|
||||
buildInputs = [ pkgs.makeWrapper ];
|
||||
|
@ -162,7 +153,6 @@ let
|
|||
mkdir -p $out/bin
|
||||
makeWrapper ${cfg.packages.gitlab.rubyEnv}/bin/rake $out/bin/gitlab-rake \
|
||||
${concatStrings (mapAttrsToList (name: value: "--set ${name} '${value}' ") gitlabEnv)} \
|
||||
--set GITLAB_CONFIG_PATH '${cfg.statePath}/config' \
|
||||
--set PATH '${lib.makeBinPath [ pkgs.nodejs pkgs.gzip pkgs.git pkgs.gnutar config.services.postgresql.package pkgs.coreutils pkgs.procps ]}:$PATH' \
|
||||
--set RAKEOPT '-f ${cfg.packages.gitlab}/share/gitlab/Rakefile' \
|
||||
--run 'cd ${cfg.packages.gitlab}/share/gitlab'
|
||||
|
@ -306,7 +296,6 @@ in {
|
|||
|
||||
initialRootPassword = mkOption {
|
||||
type = types.str;
|
||||
default = "UseNixOS!";
|
||||
description = ''
|
||||
Initial password of the root account if this is a new install.
|
||||
'';
|
||||
|
@ -461,10 +450,30 @@ in {
|
|||
}
|
||||
];
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"d /run/gitlab 0755 ${cfg.user} ${cfg.group} -"
|
||||
"d ${gitlabEnv.HOME} 0750 ${cfg.user} ${cfg.group} -"
|
||||
"d ${cfg.backupPath} 0750 ${cfg.user} ${cfg.group} -"
|
||||
"d ${cfg.statePath}/builds 0750 ${cfg.user} ${cfg.group} -"
|
||||
"d ${cfg.statePath}/config 0750 ${cfg.user} ${cfg.group} -"
|
||||
"d ${cfg.statePath}/db 0750 ${cfg.user} ${cfg.group} -"
|
||||
"d ${cfg.statePath}/log 0750 ${cfg.user} ${cfg.group} -"
|
||||
"d ${cfg.statePath}/repositories 2770 ${cfg.user} ${cfg.group} -"
|
||||
"d ${cfg.statePath}/shell 0750 ${cfg.user} ${cfg.group} -"
|
||||
"d ${cfg.statePath}/tmp/pids 0750 ${cfg.user} ${cfg.group} -"
|
||||
"d ${cfg.statePath}/tmp/sockets 0750 ${cfg.user} ${cfg.group} -"
|
||||
"d ${cfg.statePath}/uploads 0700 ${cfg.user} ${cfg.group} -"
|
||||
"d ${cfg.statePath}/custom_hooks/pre-receive.d 0700 ${cfg.user} ${cfg.group} -"
|
||||
"d ${cfg.statePath}/custom_hooks/post-receive.d 0700 ${cfg.user} ${cfg.group} -"
|
||||
"d ${cfg.statePath}/custom_hooks/update.d 0700 ${cfg.user} ${cfg.group} -"
|
||||
"d ${gitlabConfig.production.shared.path}/artifacts 0750 ${cfg.user} ${cfg.group} -"
|
||||
"d ${gitlabConfig.production.shared.path}/lfs-objects 0750 ${cfg.user} ${cfg.group} -"
|
||||
"d ${gitlabConfig.production.shared.path}/pages 0750 ${cfg.user} ${cfg.group} -"
|
||||
];
|
||||
|
||||
systemd.services.gitlab-sidekiq = {
|
||||
after = [ "network.target" "redis.service" ];
|
||||
after = [ "network.target" "redis.service" "gitlab.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
partOf = [ "gitlab.service" ];
|
||||
environment = gitlabEnv;
|
||||
path = with pkgs; [
|
||||
config.services.postgresql.package
|
||||
|
@ -486,10 +495,8 @@ in {
|
|||
};
|
||||
|
||||
systemd.services.gitaly = {
|
||||
after = [ "network.target" "gitlab.service" ];
|
||||
after = [ "network.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
environment.HOME = gitlabEnv.HOME;
|
||||
environment.GITLAB_SHELL_CONFIG_PATH = gitlabEnv.GITLAB_SHELL_CONFIG_PATH;
|
||||
path = with pkgs; [ gitAndTools.git cfg.packages.gitaly.rubyEnv cfg.packages.gitaly.rubyEnv.wrappedRuby ];
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
|
@ -505,8 +512,6 @@ in {
|
|||
systemd.services.gitlab-workhorse = {
|
||||
after = [ "network.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
environment.HOME = gitlabEnv.HOME;
|
||||
environment.GITLAB_SHELL_CONFIG_PATH = gitlabEnv.GITLAB_SHELL_CONFIG_PATH;
|
||||
path = with pkgs; [
|
||||
gitAndTools.git
|
||||
gnutar
|
||||
|
@ -514,10 +519,6 @@ in {
|
|||
openssh
|
||||
gitlab-workhorse
|
||||
];
|
||||
preStart = ''
|
||||
mkdir -p /run/gitlab
|
||||
chown ${cfg.user}:${cfg.group} /run/gitlab
|
||||
'';
|
||||
serviceConfig = {
|
||||
PermissionsStartOnly = true; # preStart must be run as root
|
||||
Type = "simple";
|
||||
|
@ -538,7 +539,7 @@ in {
|
|||
};
|
||||
|
||||
systemd.services.gitlab = {
|
||||
after = [ "network.target" "postgresql.service" "redis.service" ];
|
||||
after = [ "gitlab-workhorse.service" "gitaly.service" "network.target" "postgresql.service" "redis.service" ];
|
||||
requires = [ "gitlab-sidekiq.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
environment = gitlabEnv;
|
||||
|
@ -551,102 +552,76 @@ in {
|
|||
gnupg
|
||||
];
|
||||
preStart = ''
|
||||
mkdir -p ${cfg.backupPath}
|
||||
mkdir -p ${cfg.statePath}/builds
|
||||
mkdir -p ${cfg.statePath}/repositories
|
||||
mkdir -p ${gitlabConfig.production.shared.path}/artifacts
|
||||
mkdir -p ${gitlabConfig.production.shared.path}/lfs-objects
|
||||
mkdir -p ${gitlabConfig.production.shared.path}/pages
|
||||
mkdir -p ${cfg.statePath}/log
|
||||
mkdir -p ${cfg.statePath}/tmp/pids
|
||||
mkdir -p ${cfg.statePath}/tmp/sockets
|
||||
mkdir -p ${cfg.statePath}/shell
|
||||
mkdir -p ${cfg.statePath}/db
|
||||
mkdir -p ${cfg.statePath}/uploads
|
||||
mkdir -p ${cfg.statePath}/custom_hooks/pre-receive.d
|
||||
mkdir -p ${cfg.statePath}/custom_hooks/post-receive.d
|
||||
mkdir -p ${cfg.statePath}/custom_hooks/update.d
|
||||
|
||||
rm -rf ${cfg.statePath}/config ${cfg.statePath}/shell/hooks
|
||||
mkdir -p ${cfg.statePath}/config
|
||||
|
||||
${pkgs.openssl}/bin/openssl rand -hex 32 > ${cfg.statePath}/config/gitlab_shell_secret
|
||||
|
||||
mkdir -p /run/gitlab
|
||||
mkdir -p ${cfg.statePath}/log
|
||||
[ -d /run/gitlab/log ] || ln -sf ${cfg.statePath}/log /run/gitlab/log
|
||||
[ -d /run/gitlab/tmp ] || ln -sf ${cfg.statePath}/tmp /run/gitlab/tmp
|
||||
[ -d /run/gitlab/uploads ] || ln -sf ${cfg.statePath}/uploads /run/gitlab/uploads
|
||||
ln -sf $GITLAB_SHELL_CONFIG_PATH /run/gitlab/shell-config.yml
|
||||
chown -R ${cfg.user}:${cfg.group} /run/gitlab
|
||||
|
||||
# Prepare home directory
|
||||
mkdir -p ${gitlabEnv.HOME}/.ssh
|
||||
touch ${gitlabEnv.HOME}/.ssh/authorized_keys
|
||||
chown -R ${cfg.user}:${cfg.group} ${gitlabEnv.HOME}/
|
||||
|
||||
cp -rf ${cfg.packages.gitlab}/share/gitlab/db/* ${cfg.statePath}/db
|
||||
cp -rf ${cfg.packages.gitlab}/share/gitlab/config.dist/* ${cfg.statePath}/config
|
||||
${optionalString cfg.smtp.enable ''
|
||||
ln -sf ${smtpSettings} ${cfg.statePath}/config/initializers/smtp_settings.rb
|
||||
''}
|
||||
ln -sf ${cfg.statePath}/config /run/gitlab/config
|
||||
rm -rf ${cfg.statePath}/config
|
||||
mkdir ${cfg.statePath}/config
|
||||
if [ -e ${cfg.statePath}/lib ]; then
|
||||
rm ${cfg.statePath}/lib
|
||||
fi
|
||||
ln -sf ${pkgs.gitlab}/share/gitlab/lib ${cfg.statePath}/lib
|
||||
|
||||
ln -sf ${cfg.packages.gitlab}/share/gitlab/lib ${cfg.statePath}/lib
|
||||
[ -L /run/gitlab/config ] || ln -sf ${cfg.statePath}/config /run/gitlab/config
|
||||
[ -L /run/gitlab/log ] || ln -sf ${cfg.statePath}/log /run/gitlab/log
|
||||
[ -L /run/gitlab/tmp ] || ln -sf ${cfg.statePath}/tmp /run/gitlab/tmp
|
||||
[ -L /run/gitlab/uploads ] || ln -sf ${cfg.statePath}/uploads /run/gitlab/uploads
|
||||
${optionalString cfg.smtp.enable ''
|
||||
ln -sf ${smtpSettings} ${cfg.statePath}/config/initializers/smtp_settings.rb
|
||||
''}
|
||||
cp ${cfg.packages.gitlab}/share/gitlab/VERSION ${cfg.statePath}/VERSION
|
||||
cp -rf ${cfg.packages.gitlab}/share/gitlab/config.dist/* ${cfg.statePath}/config
|
||||
${pkgs.openssl}/bin/openssl rand -hex 32 > ${cfg.statePath}/config/gitlab_shell_secret
|
||||
|
||||
# JSON is a subset of YAML
|
||||
ln -fs ${pkgs.writeText "gitlab.yml" (builtins.toJSON gitlabConfig)} ${cfg.statePath}/config/gitlab.yml
|
||||
ln -fs ${pkgs.writeText "database.yml" databaseYml} ${cfg.statePath}/config/database.yml
|
||||
ln -fs ${pkgs.writeText "secrets.yml" secretsYml} ${cfg.statePath}/config/secrets.yml
|
||||
ln -fs ${pkgs.writeText "unicorn.rb" unicornConfig} ${cfg.statePath}/config/unicorn.rb
|
||||
ln -sf ${pkgs.writeText "gitlab.yml" (builtins.toJSON gitlabConfig)} ${cfg.statePath}/config/gitlab.yml
|
||||
ln -sf ${pkgs.writeText "database.yml" (builtins.toJSON databaseConfig)} ${cfg.statePath}/config/database.yml
|
||||
ln -sf ${pkgs.writeText "secrets.yml" (builtins.toJSON secretsConfig)} ${cfg.statePath}/config/secrets.yml
|
||||
ln -sf ${./defaultUnicornConfig.rb} ${cfg.statePath}/config/unicorn.rb
|
||||
|
||||
# Install the shell required to push repositories
|
||||
ln -sf ${pkgs.writeText "config.yml" (builtins.toJSON gitlabShellConfig)} /run/gitlab/shell-config.yml
|
||||
[ -L ${cfg.statePath}/shell/hooks ] || ln -sf ${cfg.packages.gitlab-shell}/hooks ${cfg.statePath}/shell/hooks
|
||||
${cfg.packages.gitlab-shell}/bin/install
|
||||
|
||||
chown -R ${cfg.user}:${cfg.group} ${cfg.statePath}/
|
||||
chmod -R ug+rwX,o-rwx+X ${cfg.statePath}/
|
||||
chown -R ${cfg.user}:${cfg.group} /run/gitlab
|
||||
|
||||
# Install the shell required to push repositories
|
||||
ln -fs ${pkgs.writeText "config.yml" gitlabShellYml} "$GITLAB_SHELL_CONFIG_PATH"
|
||||
ln -fs ${cfg.packages.gitlab-shell}/hooks "$GITLAB_SHELL_HOOKS_PATH"
|
||||
${cfg.packages.gitlab-shell}/bin/install
|
||||
|
||||
if [ "${cfg.databaseHost}" = "127.0.0.1" ]; then
|
||||
if ! test -e "${cfg.statePath}/db-created"; then
|
||||
if ! test -e "${cfg.statePath}/db-created"; then
|
||||
if [ "${cfg.databaseHost}" = "127.0.0.1" ]; then
|
||||
${pkgs.sudo}/bin/sudo -u ${pgSuperUser} psql postgres -c "CREATE ROLE ${cfg.databaseUsername} WITH LOGIN NOCREATEDB NOCREATEROLE ENCRYPTED PASSWORD '${cfg.databasePassword}'"
|
||||
${pkgs.sudo}/bin/sudo -u ${pgSuperUser} ${config.services.postgresql.package}/bin/createdb --owner ${cfg.databaseUsername} ${cfg.databaseName}
|
||||
touch "${cfg.statePath}/db-created"
|
||||
|
||||
# enable required pg_trgm extension for gitlab
|
||||
${pkgs.sudo}/bin/sudo -u ${pgSuperUser} psql ${cfg.databaseName} -c "CREATE EXTENSION IF NOT EXISTS pg_trgm"
|
||||
fi
|
||||
|
||||
# enable required pg_trgm extension for gitlab
|
||||
${pkgs.sudo}/bin/sudo -u ${pgSuperUser} psql ${cfg.databaseName} -c "CREATE EXTENSION IF NOT EXISTS pg_trgm"
|
||||
${pkgs.sudo}/bin/sudo -u ${cfg.user} -H ${gitlab-rake}/bin/gitlab-rake db:schema:load
|
||||
|
||||
touch "${cfg.statePath}/db-created"
|
||||
fi
|
||||
|
||||
# Always do the db migrations just to be sure the database is up-to-date
|
||||
${gitlab-rake}/bin/gitlab-rake db:migrate RAILS_ENV=production
|
||||
${pkgs.sudo}/bin/sudo -u ${cfg.user} -H ${gitlab-rake}/bin/gitlab-rake db:migrate
|
||||
|
||||
# The gitlab:setup task is horribly broken somehow, the db:migrate
|
||||
# task above and the db:seed_fu below will do the same for setting
|
||||
# up the initial database
|
||||
if ! test -e "${cfg.statePath}/db-seeded"; then
|
||||
${gitlab-rake}/bin/gitlab-rake db:seed_fu RAILS_ENV=production \
|
||||
${pkgs.sudo}/bin/sudo -u ${cfg.user} ${gitlab-rake}/bin/gitlab-rake db:seed_fu \
|
||||
GITLAB_ROOT_PASSWORD='${cfg.initialRootPassword}' GITLAB_ROOT_EMAIL='${cfg.initialRootEmail}'
|
||||
touch "${cfg.statePath}/db-seeded"
|
||||
fi
|
||||
|
||||
# The gitlab:shell:setup regenerates the authorized_keys file so that
|
||||
# the store path to the gitlab-shell in it gets updated
|
||||
${pkgs.sudo}/bin/sudo -u ${cfg.user} force=yes ${gitlab-rake}/bin/gitlab-rake gitlab:shell:setup RAILS_ENV=production
|
||||
${pkgs.sudo}/bin/sudo -u ${cfg.user} -H force=yes ${gitlab-rake}/bin/gitlab-rake gitlab:shell:setup
|
||||
|
||||
# The gitlab:shell:create_hooks task seems broken for fixing links
|
||||
# so we instead delete all the hooks and create them anew
|
||||
rm -f ${cfg.statePath}/repositories/**/*.git/hooks
|
||||
${gitlab-rake}/bin/gitlab-rake gitlab:shell:create_hooks RAILS_ENV=production
|
||||
${pkgs.sudo}/bin/sudo -u ${cfg.user} -H ${gitlab-rake}/bin/gitlab-rake gitlab:shell:create_hooks
|
||||
|
||||
${pkgs.sudo}/bin/sudo -u ${cfg.user} -H ${pkgs.git}/bin/git config --global core.autocrlf "input"
|
||||
|
||||
# Change permissions in the last step because some of the
|
||||
# intermediary scripts like to create directories as root.
|
||||
chown -R ${cfg.user}:${cfg.group} ${cfg.statePath}
|
||||
chmod -R ug+rwX,o-rwx+X ${cfg.statePath}
|
||||
chmod -R u+rwX,go-rwx+X ${gitlabEnv.HOME}
|
||||
chmod -R ug+rwX,o-rwx ${cfg.statePath}/repositories
|
||||
chmod -R ug-s ${cfg.statePath}/repositories
|
||||
|
@@ -157,6 +157,7 @@ in {
        Restart = "on-failure";
        ProtectSystem = "strict";
        ReadWritePaths = "${cfg.configDir}";
        KillSignal = "SIGINT";
        PrivateTmp = true;
        RemoveIPC = true;
      };
@@ -62,11 +62,15 @@ let
        ''}
        $extraOptions
        END
      '' + optionalString cfg.checkConfig ''
        echo "Checking that Nix can read nix.conf..."
        ln -s $out ./nix.conf
        NIX_CONF_DIR=$PWD ${cfg.package}/bin/nix show-config >/dev/null
      '');
      '' + optionalString cfg.checkConfig (
        if pkgs.stdenv.hostPlatform != pkgs.stdenv.buildPlatform then ''
          echo "Ignore nix.checkConfig when cross-compiling"
        '' else ''
          echo "Checking that Nix can read nix.conf..."
          ln -s $out ./nix.conf
          NIX_CONF_DIR=$PWD ${cfg.package}/bin/nix show-config >/dev/null
        '')
      );

in
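For orientation, a minimal sketch of how this check is driven from a NixOS configuration. This is not part of the diff; it only uses the nix.checkConfig and nix.extraOptions options that the hunk above reads via cfg, and the option values are illustrative:

  {
    # nix.checkConfig runs `nix show-config` against the generated nix.conf;
    # with the change above the check is skipped when cross-compiling.
    nix.checkConfig = true;
    nix.extraOptions = ''
      keep-outputs = true
    '';
  }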
@@ -6,11 +6,8 @@ let

  cfg = config.services.packagekit;

  backend = "nix";

  packagekitConf = ''
    [Daemon]
    DefaultBackend=${backend}
    KeepCache=false
  '';
116
nixos/modules/services/monitoring/alerta.nix
Normal file
@@ -0,0 +1,116 @@
{ options, config, lib, pkgs, ... }:

with lib;

let
  cfg = config.services.alerta;

  alertaConf = pkgs.writeTextFile {
    name = "alertad.conf";
    text = ''
      DATABASE_URL = '${cfg.databaseUrl}'
      DATABASE_NAME = '${cfg.databaseName}'
      LOG_FILE = '${cfg.logDir}/alertad.log'
      LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
      CORS_ORIGINS = [ ${concatMapStringsSep ", " (s: "\"" + s + "\"") cfg.corsOrigins} ];
      AUTH_REQUIRED = ${if cfg.authenticationRequired then "True" else "False"}
      SIGNUP_ENABLED = ${if cfg.signupEnabled then "True" else "False"}
      ${cfg.extraConfig}
    '';
  };
in
{
  options.services.alerta = {
    enable = mkEnableOption "alerta";

    port = mkOption {
      type = types.int;
      default = 5000;
      description = "Port of Alerta";
    };

    bind = mkOption {
      type = types.str;
      default = "0.0.0.0";
      example = literalExample "0.0.0.0";
      description = "Address to bind to. The default is to bind to all addresses";
    };

    logDir = mkOption {
      type = types.path;
      description = "Location where the logfiles are stored";
      default = "/var/log/alerta";
    };

    databaseUrl = mkOption {
      type = types.str;
      description = "URL of the MongoDB or PostgreSQL database to connect to";
      default = "mongodb://localhost";
      example = "mongodb://localhost";
    };

    databaseName = mkOption {
      type = types.str;
      description = "Name of the database instance to connect to";
      default = "monitoring";
      example = "monitoring";
    };

    corsOrigins = mkOption {
      type = types.listOf types.str;
      description = "List of URLs that can access the API for Cross-Origin Resource Sharing (CORS)";
      example = [ "http://localhost" "http://localhost:5000" ];
      default = [ "http://localhost" "http://localhost:5000" ];
    };

    authenticationRequired = mkOption {
      type = types.bool;
      description = "Whether users must authenticate when using the web UI or command-line tool";
      default = false;
    };

    signupEnabled = mkOption {
      type = types.bool;
      description = "Whether to allow sign-up of new users via the web UI";
      default = true;
    };

    extraConfig = mkOption {
      description = "These lines go into alertad.conf verbatim.";
      default = "";
      type = types.lines;
    };
  };

  config = mkIf cfg.enable {
    systemd.services.alerta = {
      description = "Alerta Monitoring System";
      wantedBy = [ "multi-user.target" ];
      after = [ "networking.target" ];
      environment = {
        ALERTA_SVR_CONF_FILE = alertaConf;
      };
      serviceConfig = {
        ExecStart = "${pkgs.python36Packages.alerta-server}/bin/alertad run --port ${toString cfg.port} --host ${cfg.bind}";
        User = "alerta";
        Group = "alerta";
        PermissionsStartOnly = true;
      };
      preStart = ''
        mkdir -p ${cfg.logDir}
        chown alerta:alerta ${cfg.logDir}
      '';
    };

    environment.systemPackages = [ pkgs.python36Packages.alerta ];

    users.users.alerta = {
      uid = config.ids.uids.alerta;
      description = "Alerta user";
    };

    users.groups.alerta = {
      gid = config.ids.gids.alerta;
    };
  };
}
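A minimal sketch of enabling the new module from a NixOS configuration. This is not part of the diff; it only uses options defined in the file above, and the values are illustrative:

  {
    services.alerta = {
      enable = true;
      port = 5000;
      bind = "127.0.0.1";               # illustrative; default binds to all addresses
      databaseUrl = "mongodb://localhost";
      databaseName = "monitoring";
      authenticationRequired = true;
    };
  }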
66
nixos/modules/services/monitoring/grafana-reporter.nix
Normal file
@@ -0,0 +1,66 @@
{ options, config, lib, pkgs, ... }:

with lib;

let
  cfg = config.services.grafana_reporter;

in {
  options.services.grafana_reporter = {
    enable = mkEnableOption "grafana_reporter";

    grafana = {
      protocol = mkOption {
        description = "Grafana protocol.";
        default = "http";
        type = types.enum ["http" "https"];
      };
      addr = mkOption {
        description = "Grafana address.";
        default = "127.0.0.1";
        type = types.str;
      };
      port = mkOption {
        description = "Grafana port.";
        default = 3000;
        type = types.int;
      };

    };
    addr = mkOption {
      description = "Listening address.";
      default = "127.0.0.1";
      type = types.str;
    };

    port = mkOption {
      description = "Listening port.";
      default = 8686;
      type = types.int;
    };

    templateDir = mkOption {
      description = "Optional template directory to use custom TeX templates";
      default = "${pkgs.grafana_reporter}";
      type = types.str;
    };
  };

  config = mkIf cfg.enable {
    systemd.services.grafana_reporter = {
      description = "Grafana Reporter Service Daemon";
      wantedBy = ["multi-user.target"];
      after = ["network.target"];
      serviceConfig = let
        args = lib.concatStringsSep " " [
          "-proto ${cfg.grafana.protocol}://"
          "-ip ${cfg.grafana.addr}:${toString cfg.grafana.port}"
          "-port :${toString cfg.port}"
          "-templates ${cfg.templateDir}"
        ];
      in {
        ExecStart = "${pkgs.grafana_reporter.bin}/bin/grafana-reporter ${args}";
      };
    };
  };
}
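A minimal usage sketch for the module above, not part of the diff; it uses only the options declared in the file, with illustrative values that point the reporter at a Grafana instance on the same host:

  {
    services.grafana_reporter = {
      enable = true;
      grafana = {
        protocol = "http";
        addr = "127.0.0.1";
        port = 3000;
      };
      addr = "127.0.0.1";   # address the reporter itself listens on
      port = 8686;
    };
  }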
192
nixos/modules/services/monitoring/kapacitor.nix
Normal file
192
nixos/modules/services/monitoring/kapacitor.nix
Normal file
|
@ -0,0 +1,192 @@
|
|||
{ options, config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.kapacitor;
|
||||
|
||||
kapacitorConf = pkgs.writeTextFile {
|
||||
name = "kapacitord.conf";
|
||||
text = ''
|
||||
hostname="${config.networking.hostName}"
|
||||
data_dir="${cfg.dataDir}"
|
||||
|
||||
[http]
|
||||
bind-address = "${cfg.bind}:${toString cfg.port}"
|
||||
log-enabled = false
|
||||
auth-enabled = false
|
||||
|
||||
[task]
|
||||
dir = "${cfg.dataDir}/tasks"
|
||||
snapshot-interval = "${cfg.taskSnapshotInterval}"
|
||||
|
||||
[replay]
|
||||
dir = "${cfg.dataDir}/replay"
|
||||
|
||||
[storage]
|
||||
boltdb = "${cfg.dataDir}/kapacitor.db"
|
||||
|
||||
${optionalString (cfg.loadDirectory != null) ''
|
||||
[load]
|
||||
enabled = true
|
||||
dir = "${cfg.loadDirectory}"
|
||||
''}
|
||||
|
||||
${optionalString (cfg.defaultDatabase.enable) ''
|
||||
[[influxdb]]
|
||||
name = "default"
|
||||
enabled = true
|
||||
default = true
|
||||
urls = [ "${cfg.defaultDatabase.url}" ]
|
||||
username = "${cfg.defaultDatabase.username}"
|
||||
password = "${cfg.defaultDatabase.password}"
|
||||
''}
|
||||
|
||||
${optionalString (cfg.alerta.enable) ''
|
||||
[alerta]
|
||||
enabled = true
|
||||
url = "${cfg.alerta.url}"
|
||||
token = "${cfg.alerta.token}"
|
||||
environment = "${cfg.alerta.environment}"
|
||||
origin = "${cfg.alerta.origin}"
|
||||
''}
|
||||
|
||||
${cfg.extraConfig}
|
||||
'';
|
||||
};
|
||||
in
|
||||
{
|
||||
options.services.kapacitor = {
|
||||
enable = mkEnableOption "kapacitor";
|
||||
|
||||
dataDir = mkOption {
|
||||
type = types.path;
|
||||
example = "/var/lib/kapacitor";
|
||||
default = "/var/lib/kapacitor";
|
||||
description = "Location where Kapacitor stores its state";
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.int;
|
||||
default = 9092;
|
||||
description = "Port of Kapacitor";
|
||||
};
|
||||
|
||||
bind = mkOption {
|
||||
type = types.str;
|
||||
default = "";
|
||||
example = literalExample "0.0.0.0";
|
||||
description = "Address to bind to. The default is to bind to all addresses";
|
||||
};
|
||||
|
||||
extraConfig = mkOption {
|
||||
description = "These lines go into kapacitord.conf verbatim.";
|
||||
default = "";
|
||||
type = types.lines;
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
default = "kapacitor";
|
||||
description = "User account under which Kapacitor runs";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = types.str;
|
||||
default = "kapacitor";
|
||||
description = "Group under which Kapacitor runs";
|
||||
};
|
||||
|
||||
taskSnapshotInterval = mkOption {
|
||||
type = types.str;
|
||||
description = "Specifies how often to snapshot the task state (in InfluxDB time units)";
|
||||
default = "1m0s";
|
||||
example = "1m0s";
|
||||
};
|
||||
|
||||
loadDirectory = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
description = "Directory where to load services from, such as tasks, templates and handlers (or null to disable service loading on startup)";
|
||||
default = null;
|
||||
};
|
||||
|
||||
defaultDatabase = {
|
||||
enable = mkEnableOption "kapacitor.defaultDatabase";
|
||||
|
||||
url = mkOption {
|
||||
description = "The URL to an InfluxDB server that serves as the default database";
|
||||
example = "http://localhost:8086";
|
||||
type = types.string;
|
||||
};
|
||||
|
||||
username = mkOption {
|
||||
description = "The username to connect to the remote InfluxDB server";
|
||||
type = types.string;
|
||||
};
|
||||
|
||||
password = mkOption {
|
||||
description = "The password to connect to the remote InfluxDB server";
|
||||
type = types.string;
|
||||
};
|
||||
};
|
||||
|
||||
alerta = {
|
||||
enable = mkEnableOption "kapacitor alerta integration";
|
||||
|
||||
url = mkOption {
|
||||
description = "The URL to the Alerta REST API";
|
||||
default = "http://localhost:5000";
|
||||
example = "http://localhost:5000";
|
||||
type = types.string;
|
||||
};
|
||||
|
||||
token = mkOption {
|
||||
description = "Default Alerta authentication token";
|
||||
type = types.str;
|
||||
default = "";
|
||||
};
|
||||
|
||||
environment = mkOption {
|
||||
description = "Default Alerta environment";
|
||||
type = types.str;
|
||||
default = "Production";
|
||||
};
|
||||
|
||||
origin = mkOption {
|
||||
description = "Default origin of alert";
|
||||
type = types.str;
|
||||
default = "kapacitor";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
environment.systemPackages = [ pkgs.kapacitor ];
|
||||
|
||||
systemd.services.kapacitor = {
|
||||
description = "Kapacitor Real-Time Stream Processing Engine";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "networking.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.kapacitor}/bin/kapacitord -config ${kapacitorConf}";
|
||||
User = "kapacitor";
|
||||
Group = "kapacitor";
|
||||
PermissionsStartOnly = true;
|
||||
};
|
||||
preStart = ''
|
||||
mkdir -p ${cfg.dataDir}
|
||||
chown ${cfg.user}:${cfg.group} ${cfg.dataDir}
|
||||
'';
|
||||
};
|
||||
|
||||
users.users.kapacitor = {
|
||||
uid = config.ids.uids.kapacitor;
|
||||
description = "Kapacitor user";
|
||||
home = cfg.dataDir;
|
||||
};
|
||||
|
||||
users.groups.kapacitor = {
|
||||
gid = config.ids.gids.kapacitor;
|
||||
};
|
||||
};
|
||||
}
|
|
@ -1,33 +1,30 @@
|
|||
# Monit system watcher
|
||||
# http://mmonit.org/monit/
|
||||
|
||||
{config, pkgs, lib, ...}:
|
||||
|
||||
let inherit (lib) mkOption mkIf;
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.monit;
|
||||
in
|
||||
|
||||
{
|
||||
options = {
|
||||
services.monit = {
|
||||
enable = mkOption {
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to run Monit system watcher.
|
||||
'';
|
||||
};
|
||||
config = mkOption {
|
||||
default = "";
|
||||
description = "monitrc content";
|
||||
};
|
||||
options.services.monit = {
|
||||
|
||||
enable = mkEnableOption "Monit";
|
||||
|
||||
config = mkOption {
|
||||
type = types.lines;
|
||||
default = "";
|
||||
description = "monitrc content";
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
config = mkIf config.services.monit.enable {
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
environment.systemPackages = [ pkgs.monit ];
|
||||
|
||||
environment.etc."monitrc" = {
|
||||
text = config.services.monit.config;
|
||||
text = cfg.config;
|
||||
mode = "0400";
|
||||
};
|
||||
|
||||
|
|
|
@ -10,6 +10,13 @@ let
|
|||
# Get a submodule without any embedded metadata:
|
||||
_filter = x: filterAttrs (k: v: k != "_module") x;
|
||||
|
||||
# a wrapper that verifies that the configuration is valid
|
||||
promtoolCheck = what: name: file: pkgs.runCommand "${name}-${what}-checked"
|
||||
{ buildInputs = [ cfg.package ]; } ''
|
||||
ln -s ${file} $out
|
||||
promtool ${what} $out
|
||||
'';
|
||||
|
||||
# Pretty-print JSON to a file
|
||||
writePrettyJSON = name: x:
|
||||
pkgs.runCommand name { } ''
|
||||
|
@ -19,18 +26,19 @@ let
|
|||
# This becomes the main config file
|
||||
promConfig = {
|
||||
global = cfg.globalConfig;
|
||||
rule_files = cfg.ruleFiles ++ [
|
||||
rule_files = map (promtoolCheck "check-rules" "rules") (cfg.ruleFiles ++ [
|
||||
(pkgs.writeText "prometheus.rules" (concatStringsSep "\n" cfg.rules))
|
||||
];
|
||||
]);
|
||||
scrape_configs = cfg.scrapeConfigs;
|
||||
};
|
||||
|
||||
generatedPrometheusYml = writePrettyJSON "prometheus.yml" promConfig;
|
||||
|
||||
prometheusYml =
|
||||
if cfg.configText != null then
|
||||
prometheusYml = let
|
||||
yml = if cfg.configText != null then
|
||||
pkgs.writeText "prometheus.yml" cfg.configText
|
||||
else generatedPrometheusYml;
|
||||
else generatedPrometheusYml;
|
||||
in promtoolCheck "check-config" "prometheus.yml" yml;
|
||||
|
||||
cmdlineArgs = cfg.extraFlags ++ [
|
||||
"-storage.local.path=${cfg.dataDir}/metrics"
|
||||
|
@ -376,6 +384,15 @@ in {
|
|||
'';
|
||||
};
|
||||
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
default = pkgs.prometheus;
|
||||
defaultText = "pkgs.prometheus";
|
||||
description = ''
|
||||
The prometheus package that should be used.
|
||||
'';
|
||||
};
|
||||
|
||||
listenAddress = mkOption {
|
||||
type = types.str;
|
||||
default = "0.0.0.0:9090";
|
||||
|
@ -495,7 +512,7 @@ in {
|
|||
after = [ "network.target" ];
|
||||
script = ''
|
||||
#!/bin/sh
|
||||
exec ${pkgs.prometheus}/bin/prometheus \
|
||||
exec ${cfg.package}/bin/prometheus \
|
||||
${concatStringsSep " \\\n " cmdlineArgs}
|
||||
'';
|
||||
serviceConfig = {
|
||||
|
@@ -198,6 +198,9 @@ in
      install -m 0755 -d /var/log/glusterfs
    '';

    # glustereventsd uses the `gluster` executable
    path = [ glusterfs ];

    serviceConfig = {
      Type="simple";
      Environment="PYTHONPATH=${glusterfs}/usr/lib/python2.7/site-packages";

@@ -33,7 +33,7 @@ let

  purple_plugin_path =
    lib.concatMapStringsSep ":"
      (plugin: "${plugin}/lib/pidgin/")
      (plugin: "${plugin}/lib/pidgin/:${plugin}/lib/purple-2/")
      cfg.libpurple_plugins
    ;

@@ -93,6 +93,8 @@ in

  services.timesyncd.enable = mkForce false;

  systemd.services.systemd-timedated.environment = { SYSTEMD_TIMEDATED_NTP_SERVICES = "chronyd.service"; };

  systemd.services.chronyd =
    { description = "chrony NTP daemon";

@@ -6,9 +6,10 @@ let
  dataDir = "/var/lib/consul";
  cfg = config.services.consul;

  configOptions = { data_dir = dataDir; } //
    (if cfg.webUi then { ui_dir = "${cfg.package.ui}"; } else { }) //
    cfg.extraConfig;
  configOptions = {
    data_dir = dataDir;
    ui = cfg.webUi;
  } // cfg.extraConfig;

  configFiles = [ "/etc/consul.json" "/etc/consul-addrs.json" ]
    ++ cfg.extraConfigFiles;

@@ -67,6 +67,8 @@ in
  environment.systemPackages = [ pkgs.ntp ];
  services.timesyncd.enable = mkForce false;

  systemd.services.systemd-timedated.environment = { SYSTEMD_TIMEDATED_NTP_SERVICES = "ntpd.service"; };

  users.users = singleton
    { name = ntpUser;
      uid = config.ids.uids.ntp;

@@ -267,4 +267,6 @@ in
      "ip46tables -t nat -D OUTPUT -p tcp ${redCond block} -j ${chain} 2>/dev/null || true"
    ) cfg.redsocks;
  };

  meta.maintainers = with lib.maintainers; [ ekleog ];
}

@@ -130,7 +130,7 @@ in
    };

    ports = mkOption {
      type = types.listOf types.int;
      type = types.listOf types.port;
      default = [22];
      description = ''
        Specifies on which ports the SSH daemon listens.
@@ -62,9 +62,21 @@ in {
    dataDir = mkOption {
      type = types.path;
      default = "/var/lib/syncthing";
      description = ''
        Path where synced directories will exist.
      '';
    };

    configDir = mkOption {
      type = types.path;
      description = ''
        Path where the settings and keys will exist.
      '';
      default =
        let
          nixos = config.system.stateVersion;
          cond = versionAtLeast nixos "19.03";
        in cfg.dataDir + (optionalString cond "/.config/syncthing");
    };

    openDefaultPorts = mkOption {
@@ -144,7 +156,7 @@ in {
        ${cfg.package}/bin/syncthing \
          -no-browser \
          -gui-address=${cfg.guiAddress} \
          -home=${cfg.dataDir}
          -home=${cfg.configDir}
      '';
    };
  };
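With the hunk above, configDir defaults to dataDir plus "/.config/syncthing" on systems with stateVersion 19.03 or later, and to dataDir itself on older systems. A sketch, not part of the diff, pinning both paths explicitly so the split does not depend on stateVersion (paths are illustrative):

  {
    services.syncthing = {
      enable = true;
      dataDir = "/var/lib/syncthing";
      configDir = "/var/lib/syncthing/.config/syncthing";
    };
  }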
@@ -39,7 +39,8 @@ in
  systemd.services.zerotierone = {
    description = "ZeroTierOne";
    path = [ cfg.package ];
    after = [ "network.target" ];
    bindsTo = [ "network-online.target" ];
    after = [ "network-online.target" ];
    wantedBy = [ "multi-user.target" ];
    preStart = ''
      mkdir -p /var/lib/zerotier-one/networks.d
@ -6,142 +6,105 @@ let
|
|||
|
||||
cfg = config.services.solr;
|
||||
|
||||
# Assemble all jars needed for solr
|
||||
solrJars = pkgs.stdenv.mkDerivation {
|
||||
name = "solr-jars";
|
||||
|
||||
src = pkgs.fetchurl {
|
||||
url = http://archive.apache.org/dist/tomcat/tomcat-5/v5.5.36/bin/apache-tomcat-5.5.36.tar.gz;
|
||||
sha256 = "01mzvh53wrs1p2ym765jwd00gl6kn8f9k3nhdrnhdqr8dhimfb2p";
|
||||
};
|
||||
|
||||
installPhase = ''
|
||||
mkdir -p $out/lib
|
||||
cp common/lib/*.jar $out/lib/
|
||||
ln -s ${pkgs.ant}/lib/ant/lib/ant.jar $out/lib/
|
||||
ln -s ${cfg.solrPackage}/lib/ext/* $out/lib/
|
||||
ln -s ${pkgs.jdk.home}/lib/tools.jar $out/lib/
|
||||
'' + optionalString (cfg.extraJars != []) ''
|
||||
for f in ${concatStringsSep " " cfg.extraJars}; do
|
||||
cp $f $out/lib
|
||||
done
|
||||
'';
|
||||
};
|
||||
|
||||
in {
|
||||
in
|
||||
|
||||
{
|
||||
options = {
|
||||
services.solr = {
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Enables the solr service.
|
||||
'';
|
||||
};
|
||||
enable = mkEnableOption "Enables the solr service.";
|
||||
|
||||
javaPackage = mkOption {
|
||||
type = types.package;
|
||||
default = pkgs.jre;
|
||||
defaultText = "pkgs.jre";
|
||||
description = ''
|
||||
Which Java derivation to use for running solr.
|
||||
'';
|
||||
};
|
||||
|
||||
solrPackage = mkOption {
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
default = pkgs.solr;
|
||||
defaultText = "pkgs.solr";
|
||||
description = ''
|
||||
Which solr derivation to use for running solr.
|
||||
'';
|
||||
description = "Which Solr package to use.";
|
||||
};
|
||||
|
||||
extraJars = mkOption {
|
||||
type = types.listOf types.path;
|
||||
default = [];
|
||||
description = ''
|
||||
List of paths pointing to jars. Jars are copied to commonLibFolder to be available to java/solr.
|
||||
'';
|
||||
port = mkOption {
|
||||
type = types.int;
|
||||
default = 8983;
|
||||
description = "Port on which Solr is ran.";
|
||||
};
|
||||
|
||||
log4jConfiguration = mkOption {
|
||||
type = types.lines;
|
||||
default = ''
|
||||
log4j.rootLogger=INFO, stdout
|
||||
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
|
||||
log4j.appender.stdout.Target=System.out
|
||||
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
|
||||
'';
|
||||
description = ''
|
||||
Contents of the <literal>log4j.properties</literal> used. By default,
|
||||
everything is logged to stdout (picked up by systemd) with level INFO.
|
||||
'';
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
description = ''
|
||||
The user that should run the solr process and.
|
||||
the working directories.
|
||||
'';
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = types.str;
|
||||
description = ''
|
||||
The group that will own the working directory.
|
||||
'';
|
||||
};
|
||||
|
||||
solrHome = mkOption {
|
||||
type = types.str;
|
||||
description = ''
|
||||
The solr home directory. It is your own responsibility to
|
||||
make sure this directory contains a working solr configuration,
|
||||
and is writeable by the the user running the solr service.
|
||||
Failing to do so, the solr will not start properly.
|
||||
'';
|
||||
stateDir = mkOption {
|
||||
type = types.path;
|
||||
default = "/var/lib/solr";
|
||||
description = "The solr home directory containing config, data, and logging files.";
|
||||
};
|
||||
|
||||
extraJavaOptions = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
description = ''
|
||||
Extra command line options given to the java process running
|
||||
solr.
|
||||
'';
|
||||
description = "Extra command line options given to the java process running Solr.";
|
||||
};
|
||||
|
||||
extraWinstoneOptions = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
description = ''
|
||||
Extra command line options given to the Winstone, which is
|
||||
the servlet container hosting solr.
|
||||
'';
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
default = "solr";
|
||||
description = "User under which Solr is ran.";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = types.str;
|
||||
default = "solr";
|
||||
description = "Group under which Solr is ran.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
services.winstone.solr = {
|
||||
serviceName = "solr";
|
||||
inherit (cfg) user group javaPackage;
|
||||
warFile = "${cfg.solrPackage}/lib/solr.war";
|
||||
extraOptions = [
|
||||
"--commonLibFolder=${solrJars}/lib"
|
||||
"--useJasper"
|
||||
] ++ cfg.extraWinstoneOptions;
|
||||
extraJavaOptions = [
|
||||
"-Dsolr.solr.home=${cfg.solrHome}"
|
||||
"-Dlog4j.configuration=file://${pkgs.writeText "log4j.properties" cfg.log4jConfiguration}"
|
||||
] ++ cfg.extraJavaOptions;
|
||||
environment.systemPackages = [ cfg.package ];
|
||||
|
||||
systemd.services.solr = {
|
||||
after = [ "network.target" "remote-fs.target" "nss-lookup.target" "systemd-journald-dev-log.socket" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
environment = {
|
||||
SOLR_HOME = "${cfg.stateDir}/data";
|
||||
LOG4J_PROPS = "${cfg.stateDir}/log4j2.xml";
|
||||
SOLR_LOGS_DIR = "${cfg.stateDir}/logs";
|
||||
SOLR_PORT = "${toString cfg.port}";
|
||||
};
|
||||
path = with pkgs; [
|
||||
gawk
|
||||
procps
|
||||
];
|
||||
preStart = ''
|
||||
mkdir -p "${cfg.stateDir}/data";
|
||||
mkdir -p "${cfg.stateDir}/logs";
|
||||
|
||||
if ! test -e "${cfg.stateDir}/data/solr.xml"; then
|
||||
install -D -m0640 ${cfg.package}/server/solr/solr.xml "${cfg.stateDir}/data/solr.xml"
|
||||
install -D -m0640 ${cfg.package}/server/solr/zoo.cfg "${cfg.stateDir}/data/zoo.cfg"
|
||||
fi
|
||||
|
||||
if ! test -e "${cfg.stateDir}/log4j2.xml"; then
|
||||
install -D -m0640 ${cfg.package}/server/resources/log4j2.xml "${cfg.stateDir}/log4j2.xml"
|
||||
fi
|
||||
'';
|
||||
|
||||
serviceConfig = {
|
||||
User = cfg.user;
|
||||
Group = cfg.group;
|
||||
ExecStart="${cfg.package}/bin/solr start -f -a \"${concatStringsSep " " cfg.extraJavaOptions}\"";
|
||||
ExecStop="${cfg.package}/bin/solr stop";
|
||||
};
|
||||
};
|
||||
|
||||
users.users = optionalAttrs (cfg.user == "solr") (singleton
|
||||
{ name = "solr";
|
||||
group = cfg.group;
|
||||
home = cfg.stateDir;
|
||||
createHome = true;
|
||||
uid = config.ids.uids.solr;
|
||||
});
|
||||
|
||||
users.groups = optionalAttrs (cfg.group == "solr") (singleton
|
||||
{ name = "solr";
|
||||
gid = config.ids.gids.solr;
|
||||
});
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
|
|
|
@ -3,13 +3,20 @@
|
|||
with lib;
|
||||
|
||||
let cfg = config.services.cloud-init;
|
||||
path = with pkgs; [ cloud-init nettools utillinux e2fsprogs shadow openssh iproute ];
|
||||
path = with pkgs; [
|
||||
cloud-init
|
||||
iproute
|
||||
nettools
|
||||
openssh
|
||||
shadow
|
||||
utillinux
|
||||
] ++ optional cfg.btrfs.enable btrfs-progs
|
||||
++ optional cfg.ext4.enable e2fsprogs
|
||||
;
|
||||
in
|
||||
{
|
||||
options = {
|
||||
|
||||
services.cloud-init = {
|
||||
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
|
@ -29,6 +36,22 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
btrfs.enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Allow the cloud-init service to operate `btrfs` filesystem.
|
||||
'';
|
||||
};
|
||||
|
||||
ext4.enable = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Allow the cloud-init service to operate `ext4` filesystem.
|
||||
'';
|
||||
};
|
||||
|
||||
config = mkOption {
|
||||
type = types.str;
|
||||
default = ''
|
||||
|
|
|
@ -171,7 +171,12 @@ in {
|
|||
dbhost = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = "localhost";
|
||||
description = "Database host.";
|
||||
description = ''
|
||||
Database host.
|
||||
|
||||
Note: for using Unix authentication with PostgreSQL, this should be
|
||||
set to <literal>/tmp</literal>.
|
||||
'';
|
||||
};
|
||||
dbport = mkOption {
|
||||
type = with types; nullOr (either int str);
|
||||
|
|
|
@ -31,10 +31,26 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
purifyOnStart = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
On startup, the `baseDir` directory is populated with various files,
|
||||
subdirectories and symlinks. If this option is enabled, these items
|
||||
(except for the `logs` and `work` subdirectories) are first removed.
|
||||
This prevents interference from remainders of an old configuration
|
||||
(libraries, webapps, etc.), so it's recommended to enable this option.
|
||||
'';
|
||||
};
|
||||
|
||||
baseDir = mkOption {
|
||||
type = lib.types.path;
|
||||
default = "/var/tomcat";
|
||||
description = "Location where Tomcat stores configuration files, webapplications and logfiles";
|
||||
description = ''
|
||||
Location where Tomcat stores configuration files, web applications
|
||||
and logfiles. Note that it is partially cleared on each service startup
|
||||
if `purifyOnStart` is enabled.
|
||||
'';
|
||||
};
|
||||
|
||||
logDirs = mkOption {
|
||||
|
@ -197,6 +213,15 @@ in
|
|||
after = [ "network.target" ];
|
||||
|
||||
preStart = ''
|
||||
${lib.optionalString cfg.purifyOnStart ''
|
||||
# Delete most directories/symlinks we create from the existing base directory,
|
||||
# to get rid of remainders of an old configuration.
|
||||
# The list of directories to delete is taken from the "mkdir" command below,
|
||||
# excluding "logs" (because logs are valuable) and "work" (because normally
|
||||
# session files are there), and additionally including "bin".
|
||||
rm -rf ${cfg.baseDir}/{conf,virtualhosts,temp,lib,shared/lib,webapps,bin}
|
||||
''}
|
||||
|
||||
# Create the base directory
|
||||
mkdir -p \
|
||||
${cfg.baseDir}/{conf,virtualhosts,logs,temp,lib,shared/lib,webapps,work}
|
||||
|
|
|
@ -22,7 +22,7 @@ let
|
|||
# This wrapper ensures that we actually get themes
|
||||
makeWrapper ${pkgs.lightdm_gtk_greeter}/sbin/lightdm-gtk-greeter \
|
||||
$out/greeter \
|
||||
--prefix PATH : "${pkgs.glibc.bin}/bin" \
|
||||
--prefix PATH : "${lib.getBin pkgs.stdenv.cc.libc}/bin" \
|
||||
--set GDK_PIXBUF_MODULE_FILE "${pkgs.librsvg.out}/lib/gdk-pixbuf-2.0/2.10.0/loaders.cache" \
|
||||
--set GTK_PATH "${theme}:${pkgs.gtk3.out}" \
|
||||
--set GTK_EXE_PREFIX "${theme}" \
|
||||
|
|
|
@ -208,15 +208,11 @@ in
|
|||
}
|
||||
];
|
||||
|
||||
services.xserver.displayManager.job = {
|
||||
logToFile = true;
|
||||
|
||||
# lightdm relaunches itself via just `lightdm`, so needs to be on the PATH
|
||||
execCmd = ''
|
||||
export PATH=${lightdm}/sbin:$PATH
|
||||
exec ${lightdm}/sbin/lightdm
|
||||
'';
|
||||
};
|
||||
# lightdm relaunches itself via just `lightdm`, so needs to be on the PATH
|
||||
services.xserver.displayManager.job.execCmd = ''
|
||||
export PATH=${lightdm}/sbin:$PATH
|
||||
exec ${lightdm}/sbin/lightdm
|
||||
'';
|
||||
|
||||
environment.etc."lightdm/lightdm.conf".source = lightdmConf;
|
||||
environment.etc."lightdm/users.conf".source = usersConf;
|
||||
|
|
|
@ -209,8 +209,6 @@ in
|
|||
];
|
||||
|
||||
services.xserver.displayManager.job = {
|
||||
logToFile = true;
|
||||
|
||||
environment = {
|
||||
# Load themes from system environment
|
||||
QT_PLUGIN_PATH = "/run/current-system/sw/" + pkgs.qt5.qtbase.qtPluginPrefix;
|
||||
|
|
|
@ -219,30 +219,26 @@ in
|
|||
VideoRam 192000
|
||||
'';
|
||||
|
||||
services.xserver.displayManager.job = {
|
||||
logToFile = true;
|
||||
|
||||
execCmd = ''
|
||||
${optionalString (cfg.pulseaudio)
|
||||
"export PULSE_COOKIE=/var/run/pulse/.config/pulse/cookie"}
|
||||
exec ${pkgs.xpra}/bin/xpra start \
|
||||
--daemon=off \
|
||||
--log-dir=/var/log \
|
||||
--log-file=xpra.log \
|
||||
--opengl=on \
|
||||
--clipboard=on \
|
||||
--notifications=on \
|
||||
--speaker=yes \
|
||||
--mdns=no \
|
||||
--pulseaudio=no \
|
||||
${optionalString (cfg.pulseaudio) "--sound-source=pulse"} \
|
||||
--socket-dirs=/var/run/xpra \
|
||||
--xvfb="xpra_Xdummy ${concatStringsSep " " dmcfg.xserverArgs}" \
|
||||
${optionalString (cfg.bindTcp != null) "--bind-tcp=${cfg.bindTcp}"} \
|
||||
--auth=${cfg.auth} \
|
||||
${concatStringsSep " " cfg.extraOptions}
|
||||
'';
|
||||
};
|
||||
services.xserver.displayManager.job.execCmd = ''
|
||||
${optionalString (cfg.pulseaudio)
|
||||
"export PULSE_COOKIE=/var/run/pulse/.config/pulse/cookie"}
|
||||
exec ${pkgs.xpra}/bin/xpra start \
|
||||
--daemon=off \
|
||||
--log-dir=/var/log \
|
||||
--log-file=xpra.log \
|
||||
--opengl=on \
|
||||
--clipboard=on \
|
||||
--notifications=on \
|
||||
--speaker=yes \
|
||||
--mdns=no \
|
||||
--pulseaudio=no \
|
||||
${optionalString (cfg.pulseaudio) "--sound-source=pulse"} \
|
||||
--socket-dirs=/var/run/xpra \
|
||||
--xvfb="xpra_Xdummy ${concatStringsSep " " dmcfg.xserverArgs}" \
|
||||
${optionalString (cfg.bindTcp != null) "--bind-tcp=${cfg.bindTcp}"} \
|
||||
--auth=${cfg.auth} \
|
||||
${concatStringsSep " " cfg.extraOptions}
|
||||
'';
|
||||
|
||||
services.xserver.terminateOnReset = false;
|
||||
|
||||
|
|
|
@ -21,7 +21,8 @@ let
|
|||
[ coreutils
|
||||
gnugrep
|
||||
findutils
|
||||
glibc # needed for getent
|
||||
getent
|
||||
stdenv.cc.libc # nscd in update-users-groups.pl
|
||||
shadow
|
||||
nettools # needed for hostname
|
||||
utillinux # needed for mount and mountpoint
|
||||
|
|
|
@ -246,10 +246,7 @@ checkFS() {
|
|||
if [ "$fsType" = iso9660 -o "$fsType" = udf ]; then return 0; fi
|
||||
|
||||
# Don't check resilient COWs as they validate the fs structures at mount time
|
||||
if [ "$fsType" = btrfs -o "$fsType" = zfs ]; then return 0; fi
|
||||
|
||||
# Skip fsck for bcachefs - not implemented yet.
|
||||
if [ "$fsType" = bcachefs ]; then return 0; fi
|
||||
if [ "$fsType" = btrfs -o "$fsType" = zfs -o "$fsType" = bcachefs ]; then return 0; fi
|
||||
|
||||
# Skip fsck for nilfs2 - not needed by design and no fsck tool for this filesystem.
|
||||
if [ "$fsType" = nilfs2 ]; then return 0; fi
|
||||
|
|
|
@ -147,7 +147,7 @@ let
|
|||
${config.boot.initrd.extraUtilsCommands}
|
||||
|
||||
# Copy ld manually since it isn't detected correctly
|
||||
cp -pv ${pkgs.glibc.out}/lib/ld*.so.? $out/lib
|
||||
cp -pv ${pkgs.stdenv.cc.libc.out}/lib/ld*.so.? $out/lib
|
||||
|
||||
# Copy all of the needed libraries
|
||||
find $out/bin $out/lib -type f | while read BIN; do
|
||||
|
|
|
@ -112,6 +112,7 @@ in {
|
|||
|
||||
environment.etc."systemd/nspawn".source = generateUnits "nspawn" units [] [];
|
||||
|
||||
systemd.targets."multi-user".wants = [ "machines.target "];
|
||||
};
|
||||
|
||||
}
|
||||
|
|
@@ -230,6 +230,8 @@ in
        let
          fsToSkipCheck = [ "none" "bindfs" "btrfs" "zfs" "tmpfs" "nfs" "vboxsf" "glusterfs" ];
          skipCheck = fs: fs.noCheck || fs.device == "none" || builtins.elem fs.fsType fsToSkipCheck;
          # https://wiki.archlinux.org/index.php/fstab#Filepath_spaces
          escape = string: builtins.replaceStrings [ " " ] [ "\\040" ] string;
        in ''
          # This is a generated file. Do not edit!
          #
@@ -238,10 +240,10 @@ in

          # Filesystems.
          ${concatMapStrings (fs:
              (if fs.device != null then fs.device
              else if fs.label != null then "/dev/disk/by-label/${fs.label}"
              (if fs.device != null then escape fs.device
              else if fs.label != null then "/dev/disk/by-label/${escape fs.label}"
              else throw "No device specified for mount point ‘${fs.mountPoint}’.")
              + " " + fs.mountPoint
              + " " + escape fs.mountPoint
              + " " + fs.fsType
              + " " + builtins.concatStringsSep "," fs.options
              + " 0"
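The escape helper added above encodes spaces as the octal sequence \040 that fstab expects. A standalone sketch of the same transformation, not part of the diff, with an illustrative path:

  let
    escape = string: builtins.replaceStrings [ " " ] [ "\\040" ] string;
  in
    escape "/mnt/backup disk"   # evaluates to "/mnt/backup\040disk"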
@@ -1,26 +1,65 @@
{ config, lib, pkgs, ... }:
{ config, lib, pkgs, utils, ... }:

with lib;

let

  inInitrd = any (fs: fs == "bcachefs") config.boot.initrd.supportedFilesystems;
  bootFs = filterAttrs (n: fs: (fs.fsType == "bcachefs") && (utils.fsNeededForBoot fs)) config.fileSystems;

  commonFunctions = ''
    prompt() {
      local name="$1"
      printf "enter passphrase for $name: "
    }
    tryUnlock() {
      local name="$1"
      local path="$2"
      if bcachefs unlock -c $path > /dev/null 2> /dev/null; then # test for encryption
        prompt $name
        until bcachefs unlock $path 2> /dev/null; do # repeat until successfully unlocked
          printf "unlocking failed!\n"
          prompt $name
        done
        printf "unlocking successful.\n"
      fi
    }
  '';

  openCommand = name: fs:
    let
      # we need only unlock one device manually, and cannot pass multiple at once
      # remove this adaptation when bcachefs implements mounting by filesystem uuid
      # also, implement automatic waiting for the constituent devices when that happens
      # bcachefs does not support mounting devices with colons in the path, ergo we don't (see #49671)
      firstDevice = head (splitString ":" fs.device);
    in
      ''
        tryUnlock ${name} ${firstDevice}
      '';

in

{
  config = mkIf (any (fs: fs == "bcachefs") config.boot.supportedFilesystems) {
  config = mkIf (elem "bcachefs" config.boot.supportedFilesystems) (mkMerge [
    {
      system.fsPackages = [ pkgs.bcachefs-tools ];

      system.fsPackages = [ pkgs.bcachefs-tools ];
      # use kernel package with bcachefs support until it's in mainline
      boot.kernelPackages = pkgs.linuxPackages_testing_bcachefs;
    }

    # use kernel package with bcachefs support until it's in mainline
    boot.kernelPackages = pkgs.linuxPackages_testing_bcachefs;
    boot.initrd.availableKernelModules = mkIf inInitrd [ "bcachefs" ];
    (mkIf ((elem "bcachefs" config.boot.initrd.supportedFilesystems) || (bootFs != {})) {
      # the cryptographic modules are required only for decryption attempts
      boot.initrd.availableKernelModules = [ "bcachefs" "chacha20" "poly1305" ];

      boot.initrd.extraUtilsCommands = mkIf inInitrd
        ''
          copy_bin_and_libs ${pkgs.bcachefs-tools}/bin/fsck.bcachefs
      boot.initrd.extraUtilsCommands = ''
        copy_bin_and_libs ${pkgs.bcachefs-tools}/bin/bcachefs
      '';
      boot.initrd.extraUtilsCommandsTest = ''
        $out/bin/bcachefs version
      '';

  };
      boot.initrd.postDeviceCommands = commonFunctions + concatStrings (mapAttrsToList openCommand bootFs);
    })
  ]);
}
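The openCommand above splits fs.device on ":" and unlocks only the first constituent device, which implies a multi-device filesystem is declared as a colon-joined device list. A sketch of a matching configuration, not part of the diff, with illustrative device paths:

  {
    boot.supportedFilesystems = [ "bcachefs" ];
    fileSystems."/data" = {
      device = "/dev/sda1:/dev/sdb1";   # colon-joined member devices, illustrative
      fsType = "bcachefs";
    };
  }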
@@ -53,7 +53,7 @@ let cfg = config.ec2; in
    # Mount all formatted ephemeral disks and activate all swap devices.
    # We cannot do this with the ‘fileSystems’ and ‘swapDevices’ options
    # because the set of devices is dependent on the instance type
    # (e.g. "m1.large" has one ephemeral filesystem and one swap device,
    # (e.g. "m1.small" has one ephemeral filesystem and one swap device,
    # while "m1.large" has two ephemeral filesystems and no swap
    # devices). Also, put /tmp and /var on /disk0, since it has a lot
    # more space than the root device. Similarly, "move" /nix to /disk0
@ -243,6 +243,9 @@ let
|
|||
|
||||
Restart = "on-failure";
|
||||
|
||||
Slice = "machine.slice";
|
||||
Delegate = true;
|
||||
|
||||
# Hack: we don't want to kill systemd-nspawn, since we call
|
||||
# "machinectl poweroff" in preStop to shut down the
|
||||
# container cleanly. But systemd requires sending a signal
|
||||
|
@ -606,7 +609,7 @@ in
|
|||
{ config =
|
||||
{ config, pkgs, ... }:
|
||||
{ services.postgresql.enable = true;
|
||||
services.postgresql.package = pkgs.postgresql96;
|
||||
services.postgresql.package = pkgs.postgresql_9_6;
|
||||
|
||||
system.stateVersion = "17.03";
|
||||
};
|
||||
|
@ -657,6 +660,8 @@ in
|
|||
serviceConfig = serviceDirectives dummyConfig;
|
||||
};
|
||||
in {
|
||||
systemd.targets."multi-user".wants = [ "machines.target" ];
|
||||
|
||||
systemd.services = listToAttrs (filter (x: x.value != null) (
|
||||
# The generic container template used by imperative containers
|
||||
[{ name = "container@"; value = unit; }]
|
||||
|
@ -680,7 +685,7 @@ in
|
|||
} // (
|
||||
if config.autoStart then
|
||||
{
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
wantedBy = [ "machines.target" ];
|
||||
wants = [ "network.target" ];
|
||||
after = [ "network.target" ];
|
||||
restartTriggers = [ config.path ];
|
||||
|
|
135
nixos/modules/virtualisation/docker-preloader.nix
Normal file
135
nixos/modules/virtualisation/docker-preloader.nix
Normal file
|
@ -0,0 +1,135 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
with builtins;
|
||||
|
||||
let
|
||||
cfg = config.virtualisation;
|
||||
|
||||
sanitizeImageName = image: replaceStrings ["/"] ["-"] image.imageName;
|
||||
hash = drv: head (split "-" (baseNameOf drv.outPath));
|
||||
# The label of an ext4 FS is limited to 16 bytes
|
||||
labelFromImage = image: substring 0 16 (hash image);
|
||||
|
||||
# The Docker image is loaded and some files from /var/lib/docker/
|
||||
# are written into a qcow image.
|
||||
preload = image: pkgs.vmTools.runInLinuxVM (
|
||||
pkgs.runCommand "docker-preload-image-${sanitizeImageName image}" {
|
||||
buildInputs = with pkgs; [ docker e2fsprogs utillinux curl kmod ];
|
||||
preVM = pkgs.vmTools.createEmptyImage {
|
||||
size = cfg.dockerPreloader.qcowSize;
|
||||
fullName = "docker-deamon-image.qcow2";
|
||||
};
|
||||
}
|
||||
''
|
||||
mkfs.ext4 /dev/vda
|
||||
e2label /dev/vda ${labelFromImage image}
|
||||
mkdir -p /var/lib/docker
|
||||
mount -t ext4 /dev/vda /var/lib/docker
|
||||
|
||||
modprobe overlay
|
||||
|
||||
# from https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
|
||||
mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
|
||||
cd /sys/fs/cgroup
|
||||
for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do
|
||||
mkdir -p $sys
|
||||
if ! mountpoint -q $sys; then
|
||||
if ! mount -n -t cgroup -o $sys cgroup $sys; then
|
||||
rmdir $sys || true
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
dockerd -H tcp://127.0.0.1:5555 -H unix:///var/run/docker.sock &
|
||||
|
||||
until $(curl --output /dev/null --silent --connect-timeout 2 http://127.0.0.1:5555); do
|
||||
printf '.'
|
||||
sleep 1
|
||||
done
|
||||
|
||||
docker load -i ${image}
|
||||
|
||||
kill %1
|
||||
find /var/lib/docker/ -maxdepth 1 -mindepth 1 -not -name "image" -not -name "overlay2" | xargs rm -rf
|
||||
'');
|
||||
|
||||
preloadedImages = map preload cfg.dockerPreloader.images;
|
||||
|
||||
in
|
||||
|
||||
{
|
||||
options.virtualisation.dockerPreloader = {
|
||||
images = mkOption {
|
||||
default = [ ];
|
||||
type = types.listOf types.package;
|
||||
description =
|
||||
''
|
||||
A list of Docker images to preload (in the /var/lib/docker directory).
|
||||
'';
|
||||
};
|
||||
qcowSize = mkOption {
|
||||
default = 1024;
|
||||
type = types.int;
|
||||
description =
|
||||
''
|
||||
The size (MB) of qcow files.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = {
|
||||
assertions = [{
|
||||
# If docker.storageDriver is null, Docker choose the storage
|
||||
# driver. So, in this case, we cannot be sure overlay2 is used.
|
||||
assertion = cfg.dockerPreloader.images == []
|
||||
|| cfg.docker.storageDriver == "overlay2"
|
||||
|| cfg.docker.storageDriver == "overlay"
|
||||
|| cfg.docker.storageDriver == null;
|
||||
message = "The Docker image Preloader only works with overlay2 storage driver!";
|
||||
}];
|
||||
|
||||
virtualisation.qemu.options =
|
||||
map (path: "-drive if=virtio,file=${path}/disk-image.qcow2,readonly,media=cdrom,format=qcow2")
|
||||
preloadedImages;
|
||||
|
||||
|
||||
# All attached QCOW files are mounted and their contents are linked
|
||||
# to /var/lib/docker/ in order to make image available.
|
||||
systemd.services.docker-preloader = {
|
||||
description = "Preloaded Docker images";
|
||||
wantedBy = ["docker.service"];
|
||||
after = ["network.target"];
|
||||
path = with pkgs; [ mount rsync jq ];
|
||||
script = ''
|
||||
mkdir -p /var/lib/docker/overlay2/l /var/lib/docker/image/overlay2
|
||||
echo '{}' > /tmp/repositories.json
|
||||
|
||||
for i in ${concatStringsSep " " (map labelFromImage cfg.dockerPreloader.images)}; do
|
||||
mkdir -p /mnt/docker-images/$i
|
||||
|
||||
# The ext4 label is limited to 16 bytes
|
||||
mount /dev/disk/by-label/$(echo $i | cut -c1-16) -o ro,noload /mnt/docker-images/$i
|
||||
|
||||
find /mnt/docker-images/$i/overlay2/ -maxdepth 1 -mindepth 1 -not -name l\
|
||||
-exec ln -s '{}' /var/lib/docker/overlay2/ \;
|
||||
cp -P /mnt/docker-images/$i/overlay2/l/* /var/lib/docker/overlay2/l/
|
||||
|
||||
rsync -a /mnt/docker-images/$i/image/ /var/lib/docker/image/
|
||||
|
||||
# Accumulate image definitions
|
||||
cp /tmp/repositories.json /tmp/repositories.json.tmp
|
||||
jq -s '.[0] * .[1]' \
|
||||
/tmp/repositories.json.tmp \
|
||||
/mnt/docker-images/$i/image/overlay2/repositories.json \
|
||||
> /tmp/repositories.json
|
||||
done
|
||||
|
||||
mv /tmp/repositories.json /var/lib/docker/image/overlay2/repositories.json
|
||||
'';
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
@ -144,7 +144,6 @@ in
|
|||
path = with pkgs; [ iproute ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${gce}/bin/google_network_daemon --debug";
|
||||
Type = "oneshot";
|
||||
};
|
||||
};
|
||||
|
||||
|
|