
Merge branch 'upstream' into pam_mount

Conflicts:
	pkgs/top-level/all-packages.nix

Author: Thomas Strobel
Date: 2014-09-15 12:15:11 +02:00
Commit: 6366509e54
753 changed files with 23410 additions and 12447 deletions


@ -1,6 +1,6 @@
if ! builtins ? nixVersion || builtins.compareVersions "1.6" builtins.nixVersion == 1 then
if ! builtins ? nixVersion || builtins.compareVersions "1.7" builtins.nixVersion == 1 then
abort "This version of Nixpkgs requires Nix >= 1.6, please upgrade!"
abort "This version of Nixpkgs requires Nix >= 1.7, please upgrade!"
else
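
For reference, builtins.compareVersions returns -1, 0, or 1 depending on whether its first argument sorts before, equal to, or after the second, so the guard above aborts exactly when the required version ("1.7" after this change) is newer than the running builtins.nixVersion. A quick sketch of the semantics (the version strings are only examples):

builtins.compareVersions "1.7" "1.6"    # 1: running Nix is too old, so we abort
builtins.compareVersions "1.7" "1.7"    # 0: check passes
builtins.compareVersions "1.7" "1.11"   # -1: running Nix is newer, check passes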


@ -233,6 +233,17 @@ twisted = buildPythonPackage {
</section>
<section xml:id="ssec-language-java"><title>Ruby</title>
<para>For example, to package yajl-ruby package, use gem-nix:</para>
<screen>
$ nix-env -i gem-nix
$ gem-nix --no-user-install --nix-file=pkgs/development/interpreters/ruby/generated.nix yajl-ruby
$ nix-build -A rubyLibs.yajl-ruby
</screen>
</section>
<section xml:id="ssec-language-java"><title>Java</title>
<para>Ant-based Java packages are typically built from source as follows:


@ -84,8 +84,7 @@ hello-2.3 A program that produces a familiar, friendly greeting
<section><title>Standard meta-attributes</title>
<para>The following meta-attributes have a standard
interpretation:</para>
<para>It is expected that each meta-attribute is one of the following:</para>
<variablelist>
@ -112,12 +111,23 @@ interpretation:</para>
package.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>version</varname></term>
<listitem><para>Package version.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>homepage</varname></term>
<listitem><para>The package's homepage. Example:
<literal>http://www.gnu.org/software/hello/manual/</literal></para></listitem>
</varlistentry>
<varlistentry>
<term><varname>downloadPage</varname></term>
<listitem><para>The page where a link to the current version can be found. Example:
<literal>http://ftp.gnu.org/gnu/hello/</literal></para></listitem>
</varlistentry>
<varlistentry>
<term><varname>license</varname></term>
<listitem><para>The license for the package. One from the
@ -195,6 +205,16 @@ meta.hydraPlatforms = [];
they are fixed.</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>updateWalker</varname></term>
<listitem><para>If set to <literal>true</literal>, the package is
tested to be updated correctly by the <literal>update-walker.sh</literal>
script without additional settings. Such packages have
<varname>meta.version</varname> set and their homepage (or
the page specified by <varname>meta.downloadPage</varname>) contains
a direct link to the package tarball.</para></listitem>
</varlistentry>
</variablelist>
@ -256,9 +276,9 @@ to indicate the specific license:
</variablelist>
</para>
</section>
</chapter>
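
The meta attributes documented above combine in a package's meta section. A minimal sketch inside a package expression that has stdenv in scope; the version number and license are illustrative, and only the homepage and downloadPage values are taken from the examples above:

meta = with stdenv.lib; {
  description = "A program that produces a familiar, friendly greeting";
  version = "2.9";                  # illustrative
  homepage = "http://www.gnu.org/software/hello/manual/";
  downloadPage = "http://ftp.gnu.org/gnu/hello/";
  license = licenses.gpl3Plus;      # illustrative
  updateWalker = true;              # downloadPage links directly to the tarball
};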


@ -92,17 +92,6 @@ $ git add pkgs/development/libraries/libfoo/default.nix</screen>
hackery.</para>
</listitem>
<listitem>
<para>BitTorrent (wxPython-based): <link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/tools/networking/p2p/bittorrent/default.nix"><filename>pkgs/tools/networking/p2p/bittorrent/default.nix</filename></link>.
Uses an external <link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/tools/networking/p2p/bittorrent/builder.sh">build
script</link>, which can be useful if you have lots of code
that you don't want cluttering up the Nix expression. But
external builders are mostly obsolete.
</para>
</listitem>
<listitem>
<para>Thunderbird: <link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/networking/mailreaders/thunderbird/default.nix"><filename>pkgs/applications/networking/mailreaders/thunderbird/default.nix</filename></link>.


@ -72,6 +72,11 @@ rec {
fullName = "Creative Commons Attribution 3.0";
};
cc-by-40 = spdx {
shortName = "CC-BY-4.0";
fullName = "Creative Commons Attribution 4.0";
};
cddl = spdx {
shortName = "CDDL-1.0";
fullName = "Common Development and Distribution License 1.0";
@ -232,6 +237,11 @@ rec {
fullName = "OpenSSL License";
};
postgresql = spdx {
shortName = "PostgreSQL";
fullName = "PostgreSQL License";
};
psfl = spdx {
shortName = "Python-2.0";
fullName = "Python Software Foundation License version 2";


@ -15,6 +15,7 @@
AndersonTorres = "Anderson Torres <torres.anderson.85@gmail.com>";
andres = "Andres Loeh <ksnixos@andres-loeh.de>";
antono = "Antono Vasiljev <self@antono.info>";
aristid = "Aristid Breitkreuz <aristidb@gmail.com>";
arobyn = "Alexei Robyn <shados@shados.net>";
astsmtl = "Alexander Tsamutali <astsmtl@yandex.ru>";
aszlig = "aszlig <aszlig@redmoonstudios.org>";
@ -27,6 +28,7 @@
bjg = "Brian Gough <bjg@gnu.org>";
bjornfor = "Bjørn Forsman <bjorn.forsman@gmail.com>";
bluescreen303 = "Mathijs Kwik <mathijs@bluescreen303.nl>";
bobvanderlinden = "Bob van der Linden <bobvanderlinden@gmail.com>";
bodil = "Bodil Stokke <nix@bodil.org>";
bosu = "Boris Sukholitko <boriss@gmail.com>";
calrama = "Moritz Maxeiner <moritz@ucworks.org>";
@ -38,6 +40,8 @@
coroa = "Jonas Hörsch <jonas@chaoflow.net>";
cstrahan = "Charles Strahan <charles.c.strahan@gmail.com>";
DamienCassou = "Damien Cassou <damien.cassou@gmail.com>";
DerGuteMoritz = "Moritz Heidkamp <moritz@twoticketsplease.de>";
dbohdan = "Danyil Bohdan <danyil.bohdan@gmail.com>";
doublec = "Chris Double <chris.double@double.co.nz>";
ederoyd46 = "Matthew Brown <matt@ederoyd.co.uk>";
edwtjo = "Edward Tjörnhammar <ed@cflags.cc>";
@ -101,6 +105,7 @@
roelof = "Roelof Wobben <rwobben@hotmail.com>";
romildo = "José Romildo Malaquias <malaquias@gmail.com>";
rszibele = "Richard Szibele <richard_szibele@hotmail.com>";
rycee = "Robert Helgesson <robert@rycee.net>";
sander = "Sander van der Burg <s.vanderburg@tudelft.nl>";
shlevy = "Shea Levy <shea@shealevy.com>";
simons = "Peter Simons <simons@cryp.to>";
@ -115,6 +120,7 @@
ttuegel = "Thomas Tuegel <ttuegel@gmail.com>";
tv = "Tomislav Viljetić <tv@shackspace.de>";
urkud = "Yury G. Kudryashov <urkud+nix@ya.ru>";
vandenoever = "Jos van den Oever <jos@vandenoever.info>";
vbmithr = "Vincent Bernardoff <vb@luminar.eu.org>";
vcunat = "Vladimír Čunát <vcunat@gmail.com>";
viric = "Lluís Batlle i Rossell <viric@viric.name>";
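
These entries are plain "Name <email>" strings keyed by handle and are referenced from package meta sections. For instance, a hypothetical package maintained by two of the people added in this commit would declare:

meta.maintainers = with lib.maintainers; [ rycee vandenoever ];
# evaluates to [ "Robert Helgesson <robert@rycee.net>" "Jos van den Oever <jos@vandenoever.info>" ]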


@ -132,20 +132,44 @@ rec {
The exception is the options attribute, which specifies
sub-options. These can be specified multiple times to allow one
module to add sub-options to an option declared somewhere else
(e.g. multiple modules define sub-options for fileSystems). */
(e.g. multiple modules define sub-options for fileSystems).
'loc' is the list of attribute names where the option is located.
'opts' is a list of modules. Each module has an options attribute which
corresponds to the definition of 'loc' in 'opt.file'. */
mergeOptionDecls = loc: opts:
fold (opt: res:
if opt.options ? default && res ? default ||
opt.options ? example && res ? example ||
opt.options ? description && res ? description ||
opt.options ? apply && res ? apply ||
opt.options ? type && res ? type
# Allow merging options that have identical types.
opt.options ? type && res ? type && opt.options.type.name != res.type.name
then
throw "The option `${showOption loc}' in `${opt.file}' is already declared in ${showFiles res.declarations}."
else
opt.options // res //
let
/* Add the modules of the current option to the list of modules
already collected. The options attribute expects either a list of
submodules or a single submodule. For each submodule, we record the file of
the current option declaration as the file used for the submodule. If the
submodule defines its own filename, then we ignore the enclosing option file. */
options' = toList opt.options.options;
addModuleFile = m:
if isFunction m then args: { _file = opt.file; } // (m args)
else { _file = opt.file; } // m;
coerceOption = file: opt:
if isFunction opt then args: { _file = file; } // (opt args)
else { _file = file; options = opt; };
getSubModules = opt.options.type.getSubModules or null;
submodules =
if getSubModules != null then map addModuleFile getSubModules ++ res.options
else if opt.options ? options then map (coerceOption opt.file) options' ++ res.options
else res.options;
in opt.options // res //
{ declarations = [opt.file] ++ res.declarations;
options = if opt.options ? options then [(toList opt.options.options ++ res.options)] else [];
options = submodules;
}
) { inherit loc; declarations = []; options = []; } opts;
@ -273,15 +297,12 @@ rec {
in sort compare defs';
/* Hack for backward compatibility: convert options of type
optionSet to configOf. FIXME: remove eventually. */
optionSet to options of type submodule. FIXME: remove
eventually. */
fixupOptionType = loc: opt:
let
options' = opt.options or
options = opt.options or
(throw "Option `${showOption loc'}' has type optionSet but has no option attribute, in ${showFiles opt.declarations}.");
coerce = x:
if isFunction x then x
else { config, ... }: { options = x; };
options = map coerce (flatten options');
f = tp:
if tp.name == "option set" || tp.name == "submodule" then
throw "The option ${showOption loc} uses submodules without a wrapping type, in ${showFiles opt.declarations}."
@ -290,7 +311,10 @@ rec {
else if tp.name == "list of option sets" then types.listOf (types.submodule options)
else if tp.name == "null or option set" then types.nullOr (types.submodule options)
else tp;
in opt // { type = f (opt.type or types.unspecified); };
in
if opt.type.getSubModules or null == null
then opt // { type = f (opt.type or types.unspecified); }
else opt // { type = opt.type.substSubModules opt.options; options = []; };
/* Properties. */
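
In practice this lets several modules declare the same option with a submodule-based type, each contributing its own sub-options, while the declaring file is tracked per contribution. A rough sketch of the pattern that now merges cleanly (module and option names are invented):

# module-a.nix
{ lib, ... }: {
  options.services.myservice.instances = lib.mkOption {
    default = {};
    type = lib.types.attrsOf (lib.types.submodule {
      options.port = lib.mkOption { type = lib.types.int; default = 8080; };
    });
  };
}

# module-b.nix adds a sub-option to the same declaration
{ lib, ... }: {
  options.services.myservice.instances = lib.mkOption {
    type = lib.types.attrsOf (lib.types.submodule {
      options.enableTLS = lib.mkOption { type = lib.types.bool; default = false; };
    });
  };
}

Both sub-options should end up in the merged option: the identical type names pass the conflict check above, and the collected submodules are substituted back into the type via substSubModules.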


@ -7,7 +7,7 @@ rec {
freebsd = ["i686-freebsd" "x86_64-freebsd"];
openbsd = ["i686-openbsd" "x86_64-openbsd"];
netbsd = ["i686-netbsd" "x86_64-netbsd"];
cygwin = ["i686-cygwin"];
cygwin = ["i686-cygwin" "x86_64-cygwin"];
unix = linux ++ darwin ++ freebsd ++ openbsd;
all = linux ++ darwin ++ cygwin ++ freebsd ++ openbsd;
none = [];


@ -33,9 +33,14 @@ rec {
, # Return a flat list of sub-options. Used to generate
# documentation.
getSubOptions ? prefix: {}
, # List of modules if any, or null if none.
getSubModules ? null
, # Function for building the same option type with a different list of
# modules.
substSubModules ? m: null
}:
{ _type = "option-type";
inherit name check merge getSubOptions;
inherit name check merge getSubOptions getSubModules substSubModules;
};
@ -110,6 +115,8 @@ rec {
elemType.merge (loc ++ ["[${toString n}-${toString m}]"])
[{ inherit (def) file; value = def'; }]) def.value) defs);
getSubOptions = prefix: elemType.getSubOptions (prefix ++ ["*"]);
getSubModules = elemType.getSubModules;
substSubModules = m: listOf (elemType.substSubModules m);
};
attrsOf = elemType: mkOptionType {
@ -121,6 +128,8 @@ rec {
(map (def: listToAttrs (mapAttrsToList (n: def':
{ name = n; value = { inherit (def) file; value = def'; }; }) def.value)) defs);
getSubOptions = prefix: elemType.getSubOptions (prefix ++ ["<name>"]);
getSubModules = elemType.getSubModules;
substSubModules = m: attrsOf (elemType.substSubModules m);
};
# List or attribute set of ...
@ -147,12 +156,16 @@ rec {
else false;
merge = loc: defs: attrOnly.merge loc (imap convertIfList defs);
getSubOptions = prefix: elemType.getSubOptions (prefix ++ ["<name?>"]);
getSubModules = elemType.getSubModules;
substSubModules = m: loaOf (elemType.substSubModules m);
};
uniq = elemType: mkOptionType {
inherit (elemType) name check;
merge = mergeOneOption;
getSubOptions = elemType.getSubOptions;
getSubModules = elemType.getSubModules;
substSubModules = m: uniq (elemType.substSubModules m);
};
nullOr = elemType: mkOptionType {
@ -165,14 +178,8 @@ rec {
throw "The option `${showOption loc}' is defined both null and not null, in ${showFiles (getFiles defs)}."
else elemType.merge loc defs;
getSubOptions = elemType.getSubOptions;
};
functionTo = elemType: mkOptionType {
name = "function that evaluates to a(n) ${elemType.name}";
check = isFunction;
merge = loc: defs:
fnArgs: elemType.merge loc (map (fn: { inherit (fn) file; value = fn.value fnArgs; }) defs);
getSubOptions = elemType.getSubOptions;
getSubModules = elemType.getSubModules;
substSubModules = m: nullOr (elemType.substSubModules m);
};
submodule = opts:
@ -192,6 +199,8 @@ rec {
{ modules = opts'; inherit prefix;
# FIXME: hack to get shit to evaluate.
args = { name = ""; }; }).options;
getSubModules = opts';
substSubModules = m: submodule m;
};
enum = values: mkOptionType {
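
The new getSubModules/substSubModules pair is what lets container types such as listOf, attrsOf, loaOf, uniq and nullOr expose the modules of a wrapped submodule and be rebuilt around a different module list. A tiny sketch of the contract, assuming the Nixpkgs lib is in scope:

with (import <nixpkgs/lib>).types;
let sub = submodule { options = { }; };
    l   = listOf sub;
in {
  mods    = l.getSubModules;        # forwarded from the element type
  rebuilt = l.substSubModules [ ];  # a fresh listOf wrapping an empty submodule
}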


@ -13,11 +13,10 @@ states that a user account named <literal>alice</literal> shall exist:
<programlisting>
users.extraUsers.alice =
{ createHome = true;
{ isNormalUser = true;
home = "/home/alice";
description = "Alice Foobar";
extraGroups = [ "wheel" "networkmanager" ];
useDefaultShell = true;
openssh.authorizedKeys.keys = [ "ssh-dss AAAAB3Nza... alice@foobar" ];
};
</programlisting>
@ -58,11 +57,6 @@ users.extraGroups.students.gid = 1000;
As with users, the group ID (gid) is optional and will be assigned
automatically if it's missing.</para>
<warning><para>Currently declarative user management is not perfect:
<command>nixos-rebuild</command> does not know how to realise certain
configuration changes. This includes removing a user or group, and
removing group membership from a user.</para></warning>
<para>In the imperative style, users and groups are managed by
commands such as <command>useradd</command>,
<command>groupmod</command> and so on. For instance, to create a user


@ -67,7 +67,7 @@ with other kernel modules.</para>
<para>On 64-bit systems, if you want full acceleration for 32-bit
programs such as Wine, you should also set the following:
<programlisting>
services.xserver.driSupport32Bit = true;
hardware.opengl.driSupport32Bit = true;
</programlisting>
</para>


@ -6,12 +6,21 @@ with pkgs.lib;
let
# Remove invisible and internal options.
options' = filter (opt: opt.visible && !opt.internal) (optionAttrSetToDocList options);
optionsList = filter (opt: opt.visible && !opt.internal) (optionAttrSetToDocList options);
# Replace functions by the string <function>
substFunction = x:
if builtins.isAttrs x then mapAttrs (name: substFunction) x
else if builtins.isList x then map substFunction x
else if builtins.isFunction x then "<function>"
else x;
# Clean up declaration sites to not refer to the NixOS source tree.
options'' = flip map options' (opt: opt // {
optionsList' = flip map optionsList (opt: opt // {
declarations = map (fn: stripPrefix fn) opt.declarations;
});
}
// optionalAttrs (opt ? example) { example = substFunction opt.example; }
// optionalAttrs (opt ? default) { default = substFunction opt.default; });
prefix = toString ../../..;
@ -21,10 +30,35 @@ let
else
fn;
optionsXML = builtins.toFile "options.xml" (builtins.unsafeDiscardStringContext (builtins.toXML options''));
# Convert the list of options into an XML file and a JSON file. The builtin
# unsafeDiscardStringContext is used to prevent the realisation of the store
# paths which are used in options definitions.
optionsXML = builtins.toFile "options.xml" (builtins.unsafeDiscardStringContext (builtins.toXML optionsList'));
optionsJSON = builtins.toFile "options.json" (builtins.unsafeDiscardStringContext (builtins.toJSON optionsList'));
# Tools-friendly version of the list of NixOS options.
options' = stdenv.mkDerivation {
name = "options";
buildCommand = ''
# Export the list of options in different formats.
dst=$out/share/doc/nixos
mkdir -p $dst
cp ${optionsJSON} $dst/options.json
cp ${optionsXML} $dst/options.xml
mkdir -p $out/nix-support
echo "file json $dst/options.json" >> $out/nix-support/hydra-build-products
echo "file xml $dst/options.xml" >> $out/nix-support/hydra-build-products
''; # */
meta.description = "List of NixOS options in various formats.";
};
optionsDocBook = runCommand "options-db.xml" {} ''
if grep /nixpkgs/nixos/modules ${optionsXML}; then
optionsXML=${options'}/share/doc/nixos/options.xml
if grep /nixpkgs/nixos/modules $optionsXML; then
echo "The manual appears to depend on the location of Nixpkgs, which is bad"
echo "since this prevents sharing via the NixOS channel. This is typically"
echo "caused by an option default that refers to a relative path (see above"
@ -33,7 +67,7 @@ let
fi
${libxslt}/bin/xsltproc \
--stringparam revision '${revision}' \
-o $out ${./options-to-docbook.xsl} ${optionsXML}
-o $out ${./options-to-docbook.xsl} $optionsXML
'';
sources = sourceFilesBySuffices ./. [".xml"];
@ -49,6 +83,9 @@ let
in rec {
# Tools-friendly version of the list of NixOS options.
options = options';
# Generate the NixOS manual.
manual = stdenv.mkDerivation {
name = "nixos-manual";
@ -90,7 +127,7 @@ in rec {
mkdir -p $out/nix-support
echo "nix-build out $out" >> $out/nix-support/hydra-build-products
echo "doc manual $dst manual.html" >> $out/nix-support/hydra-build-products
echo "doc manual $dst" >> $out/nix-support/hydra-build-products
''; # */
meta.description = "The NixOS manual in HTML format";


@ -17,11 +17,6 @@
<refsynopsisdiv>
<cmdsynopsis>
<command>nixos-option</command>
<group choice="opt">
<option>-v</option>
<option>-d</option>
<option>-l</option>
</group>
<arg choice='plain'><replaceable>option.name</replaceable></arg>
</cmdsynopsis>
</refsynopsisdiv>
@ -31,50 +26,13 @@
<para>This command evaluates the configuration specified in
<filename>/etc/nixos/configuration.nix</filename> and returns the properties
of the option name given as argument. By default, it returns the value of
the option.</para>
of the option name given as argument.</para>
<para>When the given name is not an option but an attribute set, the command
prints the list of attributes contained in that set.</para>
</refsection>
<refsection><title>Options</title>
<para>This command accepts the following options:</para>
<variablelist>
<varlistentry>
<term><option>--value</option>, <option>-v</option></term>
<listitem>
<para>Returns the value of the option. This is the default operation
if no other options are defined.</para>
</listitem>
</varlistentry>
<varlistentry>
<term><option>--description</option>, <option>-d</option></term>
<listitem>
<para>Return the default value, the example and the description of the
option when available.</para>
</listitem>
</varlistentry>
<varlistentry>
<term><option>--lookup</option>, <option>-l</option></term>
<listitem>
<para>Return the locations where the option is declared and where it
is defined. This is extremely useful to find sources of errors in
your configuration.</para>
</listitem>
</varlistentry>
</variablelist>
</refsection>
<refsection><title>Environment</title>
<variablelist>
@ -103,27 +61,21 @@ grub
initScript
$ nixos-option boot.loader.grub.enable
true</screen></para>
Value:
true
<para>Prints option information:
Default:
true
<screen>$ nixos-option -d networking.hostName
Default: "nixos"
Description:
The name of the machine. Leave it empty if you want to obtain
it from a DHCP server (if using DHCP).</screen></para>
Whether to enable the GNU GRUB boot loader.
<para>Find the locations which are declaring and defining an option:
<screen>$ nixos-option -l hardware.firmware
Declared by:
/mnt/data/nix-sources/nixos/modules/services/hardware/udev.nix
"/nix/var/nix/profiles/per-user/root/channels/nixos/nixpkgs/nixos/modules/system/boot/loader/grub/grub.nix"
Defined by:
/path/to/nixpkgs/nixos/modules/system/boot/kernel.nix
/path/to/nixpkgs/nixos/modules/hardware/network/rt73.nix
/path/to/nixpkgs/nixos/modules/hardware/network/intel-3945abg.nix
/path/to/nixpkgs/nixos/modules/hardware/network/intel-2200bg.nix</screen></para>
"/nix/var/nix/profiles/per-user/root/channels/nixos/nixpkgs/nixos/modules/system/boot/loader/grub/grub.nix"
</screen></para>
</refsection>


@ -9,8 +9,8 @@
<xsl:template match="logfile">
<html>
<head>
<script type="text/javascript" src="//ajax.googleapis.com/ajax/libs/jquery/1.8.3/jquery.min.js"></script>
<script type="text/javascript" src="//ajax.googleapis.com/ajax/libs/jqueryui/1.10.3/jquery-ui.min.js"></script>
<script type="text/javascript" src="jquery.min.js"></script>
<script type="text/javascript" src="jquery-ui.min.js"></script>
<script type="text/javascript" src="treebits.js" />
<link rel="stylesheet" href="logfile.css" type="text/css" />
<title>Log File</title>


@ -53,6 +53,8 @@ rec {
xsltproc --output $out/log.html ${./test-driver/log2html.xsl} $out/log.xml
ln -s ${./test-driver/logfile.css} $out/logfile.css
ln -s ${./test-driver/treebits.js} $out/treebits.js
ln -s ${jquery}/js/jquery.min.js $out/
ln -s ${jquery-ui}/js/jquery-ui.min.js $out/
touch $out/nix-support/hydra-build-products
echo "report testlog $out log.html" >> $out/nix-support/hydra-build-products


@ -5,6 +5,7 @@ rec {
# Escape a path according to the systemd rules, e.g. /dev/xyzzy
# becomes dev-xyzzy. FIXME: slow.
escapeSystemdPath = s:
replaceChars ["/" "-" " "] ["-" "\\x2d" "\\x20"] (substring 1 (stringLength s) s);
replaceChars ["/" "-" " "] ["-" "\\x2d" "\\x20"]
(if hasPrefix "/" s then substring 1 (stringLength s) s else s);
}
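
With the added hasPrefix check, escapeSystemdPath also accepts values without a leading slash. Worked examples of the substitution rules above (results written out by hand):

escapeSystemdPath "/dev/xyzzy"           # "dev-xyzzy"
escapeSystemdPath "/dev/mapper/vg-swap"  # "dev-mapper-vg\x2dswap"
escapeSystemdPath "run/user path"        # "run-user\x20path"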


@ -0,0 +1,11 @@
#! /bin/sh -e
BUCKET_NAME=${BUCKET_NAME:-nixos}
export NIX_PATH=nixpkgs=../../../..
export NIXOS_CONFIG=$(dirname $(readlink -f $0))/../../../modules/virtualisation/azure-image.nix
export TIMESTAMP=$(date +%Y%m%d%H%M)
nix-build '<nixpkgs/nixos>' \
-A config.system.build.azureImage --argstr system x86_64-linux -o azure --option extra-binary-caches http://hydra.nixos.org -j 10
azure vm image create nixos-test --location "West Europe" --md5-skip -v --os Linux azure/disk.vhd


@ -10,10 +10,12 @@ let
systemWide = cfg.enable && cfg.systemWide;
nonSystemWide = cfg.enable && !cfg.systemWide;
uid = config.ids.uids.pulseaudio;
gid = config.ids.gids.pulseaudio;
ids = config.ids;
stateDir = "/run/pulse";
uid = ids.uids.pulseaudio;
gid = ids.gids.pulseaudio;
stateDir = "/var/run/pulse";
# Create pulse/client.conf even if PulseAudio is disabled so
# that we can disable the autospawn feature in programs that
@ -138,6 +140,8 @@ in {
group = "pulse";
extraGroups = [ "audio" ];
description = "PulseAudio system service user";
home = stateDir;
createHome = true;
};
users.extraGroups.pulse.gid = gid;
@ -147,10 +151,6 @@ in {
wantedBy = [ "sound.target" ];
before = [ "sound.target" ];
environment.PULSE_RUNTIME_PATH = stateDir;
preStart = ''
mkdir -p --mode 755 ${stateDir}
chown -R pulse:pulse ${stateDir}
'';
serviceConfig = {
ExecStart = "${cfg.package}/bin/pulseaudio -D --log-level=${cfg.daemon.logLevel} --system --use-pid-file -n --file=${cfg.configFile}";
PIDFile = "${stateDir}/pid";


@ -9,6 +9,23 @@ let
cfg = config.environment;
exportedEnvVars =
let
absoluteVariables =
mapAttrs (n: toList) cfg.variables;
suffixedVariables =
flip mapAttrs cfg.profileRelativeEnvVars (envVar: listSuffixes:
concatMap (profile: map (suffix: "${profile}${suffix}") listSuffixes) cfg.profiles
);
allVariables =
zipAttrsWith (n: concatLists) [ absoluteVariables suffixedVariables ];
exportVariables =
mapAttrsToList (n: v: ''export ${n}="${concatStringsSep ":" v}"'') allVariables;
in
concatStringsSep "\n" exportVariables;
in
{
@ -49,22 +66,15 @@ in
type = types.listOf types.string;
};
environment.profileVariables = mkOption {
default = (p: {});
environment.profileRelativeEnvVars = mkOption {
type = types.attrsOf (types.listOf types.str);
example = { PATH = [ "/bin" "/sbin" ]; MANPATH = [ "/man" "/share/man" ]; };
description = ''
A function which given a profile path should give back
a set of environment variables for that profile.
Attribute set of environment variables. Each attribute maps to a list
of relative paths. Each relative path is appended to each profile
of <option>environment.profiles</option> to form the content of the
corresponding environment variable.
'';
# !!! this should be of the following type:
#type = types.functionTo (types.attrsOf (types.optionSet envVar));
# and envVar should be changed to something more like environOpts.
# Having unique `value' _or_ multiple `list' is much more useful
# than just sticking everything together with ':' unconditionally.
# Anyway, to have this type mentioned above
# types.optionSet needs to be transformed into a type constructor
# (it has a !!! mark on that in nixpkgs)
# for now we hack all this to be
type = types.functionTo (types.attrsOf (types.listOf types.string));
};
# !!! isn't there a better way?
@ -165,10 +175,7 @@ in
system.build.setEnvironment = pkgs.writeText "set-environment"
''
${concatStringsSep "\n" (
(mapAttrsToList (n: v: ''export ${n}="${concatStringsSep ":" v}"'')
# This line is a kind of a hack because of !!! note above
(zipAttrsWith (const concatLists) ([ (mapAttrs (n: v: [ v ]) cfg.variables) ] ++ map cfg.profileVariables cfg.profiles))))}
${exportedEnvVars}
${cfg.extraInit}
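
To make the new mechanism concrete, here is a hypothetical configuration together with the export line it should produce (profile order and paths are illustrative, not the real NixOS defaults):

{
  environment.profiles = [ "/run/current-system/sw" "$HOME/.nix-profile" ];
  environment.profileRelativeEnvVars.MANPATH = [ "/man" "/share/man" ];
}

# resulting line in the generated set-environment script, roughly:
# export MANPATH="/run/current-system/sw/man:/run/current-system/sw/share/man:$HOME/.nix-profile/man:$HOME/.nix-profile/share/man"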


@ -0,0 +1,239 @@
use strict;
use File::Path qw(make_path);
use File::Slurp;
use JSON;
make_path("/var/lib/nixos", { mode => 0755 });
# Functions for allocating free GIDs/UIDs. FIXME: respect ID ranges in
# /etc/login.defs.
sub allocId {
my ($used, $idMin, $idMax, $up, $getid) = @_;
my $id = $up ? $idMin : $idMax;
while ($id >= $idMin && $id <= $idMax) {
if (!$used->{$id} && !defined &$getid($id)) {
$used->{$id} = 1;
return $id;
}
$used->{$id} = 1;
if ($up) { $id++; } else { $id--; }
}
die "$0: out of free UIDs or GIDs\n";
}
my (%gidsUsed, %uidsUsed);
sub allocGid {
return allocId(\%gidsUsed, 400, 499, 0, sub { my ($gid) = @_; getgrgid($gid) });
}
sub allocUid {
my ($isSystemUser) = @_;
my ($min, $max, $up) = $isSystemUser ? (400, 499, 0) : (1000, 29999, 1);
return allocId(\%uidsUsed, $min, $max, $up, sub { my ($uid) = @_; getpwuid($uid) });
}
# Read the declared users/groups.
my $spec = decode_json(read_file($ARGV[0]));
# Don't allocate UIDs/GIDs that are already in use.
foreach my $g (@{$spec->{groups}}) {
$gidsUsed{$g->{gid}} = 1 if defined $g->{gid};
}
foreach my $u (@{$spec->{users}}) {
$uidsUsed{$u->{uid}} = 1 if defined $u->{uid};
}
# Read the current /etc/group.
sub parseGroup {
chomp;
my @f = split(':', $_, -4);
my $gid = $f[2] eq "" ? undef : int($f[2]);
$gidsUsed{$gid} = 1 if defined $gid;
return ($f[0], { name => $f[0], password => $f[1], gid => $gid, members => $f[3] });
}
my %groupsCur = -f "/etc/group" ? map { parseGroup } read_file("/etc/group") : ();
# Read the current /etc/passwd.
sub parseUser {
chomp;
my @f = split(':', $_, -7);
my $uid = $f[2] eq "" ? undef : int($f[2]);
$uidsUsed{$uid} = 1 if defined $uid;
return ($f[0], { name => $f[0], fakePassword => $f[1], uid => $uid,
gid => $f[3], description => $f[4], home => $f[5], shell => $f[6] });
}
my %usersCur = -f "/etc/passwd" ? map { parseUser } read_file("/etc/passwd") : ();
# Read the groups that were created declaratively (i.e. not by groups)
# in the past. These must be removed if they are no longer in the
# current spec.
my $declGroupsFile = "/var/lib/nixos/declarative-groups";
my %declGroups;
$declGroups{$_} = 1 foreach split / /, -e $declGroupsFile ? read_file($declGroupsFile) : "";
# Idem for the users.
my $declUsersFile = "/var/lib/nixos/declarative-users";
my %declUsers;
$declUsers{$_} = 1 foreach split / /, -e $declUsersFile ? read_file($declUsersFile) : "";
# Generate a new /etc/group containing the declared groups.
my %groupsOut;
foreach my $g (@{$spec->{groups}}) {
my $name = $g->{name};
my $existing = $groupsCur{$name};
my %members = map { ($_, 1) } @{$g->{members}};
if (defined $existing) {
$g->{gid} = $existing->{gid} if !defined $g->{gid};
if ($g->{gid} != $existing->{gid}) {
warn "warning: not applying GID change of group $name\n";
$g->{gid} = $existing->{gid};
}
$g->{password} = $existing->{password}; # do we want this?
if ($spec->{mutableUsers}) {
# Merge in non-declarative group members.
foreach my $uname (split /,/, $existing->{members} // "") {
$members{$uname} = 1 if !defined $declUsers{$uname};
}
}
} else {
$g->{gid} = allocGid if !defined $g->{gid};
$g->{password} = "x";
}
$g->{members} = join ",", sort(keys(%members));
$groupsOut{$name} = $g;
}
# Update the persistent list of declarative groups.
write_file($declGroupsFile, join(" ", sort(keys %groupsOut)));
# Merge in the existing /etc/group.
foreach my $name (keys %groupsCur) {
my $g = $groupsCur{$name};
next if defined $groupsOut{$name};
if (!$spec->{mutableUsers} || defined $declGroups{$name}) {
print STDERR "removing group $name\n";
} else {
$groupsOut{$name} = $g;
}
}
# Rewrite /etc/group. FIXME: acquire lock.
my @lines = map { join(":", $_->{name}, $_->{password}, $_->{gid}, $_->{members}) . "\n" }
(sort { $a->{gid} <=> $b->{gid} } values(%groupsOut));
write_file("/etc/group.tmp", @lines);
rename("/etc/group.tmp", "/etc/group") or die;
system("nscd --invalidate group");
# Generate a new /etc/passwd containing the declared users.
my %usersOut;
foreach my $u (@{$spec->{users}}) {
my $name = $u->{name};
# Resolve the gid of the user.
if ($u->{group} =~ /^[0-9]+$/) {
$u->{gid} = $u->{group};
} elsif (defined $groupsOut{$u->{group}}) {
$u->{gid} = $groupsOut{$u->{group}}->{gid} // die;
} else {
warn "warning: user $name has unknown group $u->{group}\n";
$u->{gid} = 65534;
}
my $existing = $usersCur{$name};
if (defined $existing) {
$u->{uid} = $existing->{uid} if !defined $u->{uid};
if ($u->{uid} != $existing->{uid}) {
warn "warning: not applying UID change of user $name\n";
$u->{uid} = $existing->{uid};
}
} else {
$u->{uid} = allocUid($u->{isSystemUser}) if !defined $u->{uid};
# Create a home directory.
if ($u->{createHome}) {
make_path($u->{home}, { mode => 0700 }) if ! -e $u->{home};
chown $u->{uid}, $u->{gid}, $u->{home};
}
}
if (defined $u->{passwordFile}) {
if (-e $u->{passwordFile}) {
$u->{hashedPassword} = read_file($u->{passwordFile});
chomp $u->{hashedPassword};
} else {
warn "warning: password file $u->{passwordFile} does not exist\n";
}
}
$u->{fakePassword} = $existing->{fakePassword} // "x";
$usersOut{$name} = $u;
}
# Update the persistent list of declarative users.
write_file($declUsersFile, join(" ", sort(keys %usersOut)));
# Merge in the existing /etc/passwd.
foreach my $name (keys %usersCur) {
my $u = $usersCur{$name};
next if defined $usersOut{$name};
if (!$spec->{mutableUsers} || defined $declUsers{$name}) {
print STDERR "removing user $name\n";
} else {
$usersOut{$name} = $u;
}
}
# Rewrite /etc/passwd. FIXME: acquire lock.
@lines = map { join(":", $_->{name}, $_->{fakePassword}, $_->{uid}, $_->{gid}, $_->{description}, $_->{home}, $_->{shell}) . "\n" }
(sort { $a->{uid} <=> $b->{uid} } (values %usersOut));
write_file("/etc/passwd.tmp", @lines);
rename("/etc/passwd.tmp", "/etc/passwd") or die;
system("nscd --invalidate passwd");
# Rewrite /etc/shadow to add new accounts or remove dead ones.
my @shadowNew;
my %shadowSeen;
foreach my $line (-f "/etc/shadow" ? read_file("/etc/shadow") : ()) {
chomp $line;
my ($name, $password, @rest) = split(':', $line, -9);
my $u = $usersOut{$name};
next if !defined $u;
$password = $u->{hashedPassword} if defined $u->{hashedPassword} && !$spec->{mutableUsers}; # FIXME
push @shadowNew, join(":", $name, $password, @rest) . "\n";
$shadowSeen{$name} = 1;
}
foreach my $u (values %usersOut) {
next if defined $shadowSeen{$u->{name}};
my $password = "!";
$password = $u->{hashedPassword} if defined $u->{hashedPassword};
# FIXME: set correct value for sp_lstchg.
push @shadowNew, join(":", $u->{name}, $password, "1::::::") . "\n";
}
write_file("/etc/shadow.tmp", { perms => 0600 }, @shadowNew);
rename("/etc/shadow.tmp", "/etc/shadow") or die;
# Call chpasswd to apply password. FIXME: generate the hashes directly
# and merge into the /etc/shadow updating above.
foreach my $u (@{$spec->{users}}) {
if (defined $u->{password}) {
my $pid = open(PW, "| chpasswd") or die;
print PW "$u->{name}:$u->{password}\n";
close PW or die "unable to change password of user $u->{name}: $?\n";
}
}


@ -7,9 +7,6 @@ let
ids = config.ids;
cfg = config.users;
nonUidUsers = filterAttrs (n: u: u.createUser && u.uid == null) cfg.extraUsers;
nonGidGroups = filterAttrs (n: g: g.gid == null) cfg.extraGroups;
passwordDescription = ''
The options <literal>hashedPassword</literal>,
<literal>password</literal> and <literal>passwordFile</literal>
@ -55,10 +52,8 @@ let
type = with types; nullOr int;
default = null;
description = ''
The account UID. If the <option>mutableUsers</option> option
is false, the UID cannot be null. Otherwise, the UID might be
null, in which case a free UID is picked on activation (by the
useradd command).
The account UID. If the UID is null, a free UID is picked on
activation.
'';
};
@ -67,8 +62,7 @@ let
default = false;
description = ''
Indicates if the user is a system user or not. This option
only has an effect if <option>mutableUsers</option> is
<literal>true</literal> and <option>uid</option> is
only has an effect if <option>uid</option> is
<option>null</option>, in which case it determines whether
the user's UID is allocated in the range for system users
(below 500) or in the range for normal users (starting at
@ -76,6 +70,21 @@ let
'';
};
isNormalUser = mkOption {
type = types.bool;
default = false;
description = ''
Indicates whether this is an account for a real user. This
automatically sets <option>group</option> to
<literal>users</literal>, <option>createHome</option> to
<literal>true</literal>, <option>home</option> to
<filename>/home/<replaceable>username</replaceable></filename>,
<option>useDefaultShell</option> to <literal>true</literal>,
and <option>isSystemUser</option> to
<literal>false</literal>.
'';
};
group = mkOption {
type = types.str;
default = "nogroup";
@ -182,22 +191,20 @@ let
${passwordDescription}
'';
};
createUser = mkOption {
type = types.bool;
default = true;
description = ''
Indicates if the user should be created automatically as a local user.
Set this to false if the user for instance is an LDAP user. NixOS will
then not modify any of the basic properties for the user account.
'';
};
};
config = {
name = mkDefault name;
shell = mkIf config.useDefaultShell (mkDefault cfg.defaultUserShell);
};
config = mkMerge
[ { name = mkDefault name;
shell = mkIf config.useDefaultShell (mkDefault cfg.defaultUserShell);
}
(mkIf config.isNormalUser {
group = mkDefault "users";
createHome = mkDefault true;
home = mkDefault "/home/${name}";
useDefaultShell = mkDefault true;
isSystemUser = mkDefault false;
})
];
};
@ -217,10 +224,8 @@ let
type = with types; nullOr int;
default = null;
description = ''
The group GID. If the <literal>mutableUsers</literal> option
is false, the GID cannot be null. Otherwise, the GID might be
null, in which case a free GID is picked on activation (by the
groupadd command).
The group GID. If the GID is null, a free GID is picked on
activation.
'';
};
@ -271,97 +276,17 @@ let
};
};
getGroup = gname:
let
groups = mapAttrsToList (n: g: g) (
filterAttrs (n: g: g.name == gname) cfg.extraGroups
);
in
if length groups == 1 then head groups
else if groups == [] then throw "Group ${gname} not defined"
else throw "Group ${gname} has multiple definitions";
getUser = uname:
let
users = mapAttrsToList (n: u: u) (
filterAttrs (n: u: u.name == uname) cfg.extraUsers
);
in
if length users == 1 then head users
else if users == [] then throw "User ${uname} not defined"
else throw "User ${uname} has multiple definitions";
mkGroupEntry = gname:
let
g = getGroup gname;
users = mapAttrsToList (n: u: u.name) (
filterAttrs (n: u: elem g.name u.extraGroups) cfg.extraUsers
);
in concatStringsSep ":" [
g.name "x" (toString g.gid)
(concatStringsSep "," (users ++ (filter (u: !(elem u users)) g.members)))
];
mkPasswdEntry = uname: let u = getUser uname; in
concatStringsSep ":" [
u.name "x" (toString u.uid)
(toString (getGroup u.group).gid)
u.description u.home u.shell
];
filterNull = a: filter (x: hasAttr a x && getAttr a x != null);
sortOn = a: sort (as1: as2: lessThan (getAttr a as1) (getAttr a as2));
groupFile = pkgs.writeText "group" (
concatStringsSep "\n" (map (g: mkGroupEntry g.name) (
sortOn "gid" (filterNull "gid" (attrValues cfg.extraGroups))
))
);
passwdFile = pkgs.writeText "passwd" (
concatStringsSep "\n" (map (u: mkPasswdEntry u.name) (
sortOn "uid" (filterNull "uid" (attrValues cfg.extraUsers))
))
);
mkSubuidEntry = user: concatStrings (
map (range: "${user.name}:${toString range.startUid}:${toString range.count}\n")
user.subUidRanges);
subuidFile = concatStrings (map mkSubuidEntry (
sortOn "uid" (filterNull "uid" (attrValues cfg.extraUsers))));
subuidFile = concatStrings (map mkSubuidEntry (attrValues cfg.extraUsers));
mkSubgidEntry = user: concatStrings (
map (range: "${user.name}:${toString range.startGid}:${toString range.count}\n")
user.subGidRanges);
subgidFile = concatStrings (map mkSubgidEntry (
sortOn "uid" (filterNull "uid" (attrValues cfg.extraUsers))));
# If mutableUsers is true, this script adds all users/groups defined in
# users.extra{Users,Groups} to /etc/{passwd,group} iff there isn't any
# existing user/group with the same name in those files.
# If mutableUsers is false, the /etc/{passwd,group} files will simply be
# replaced with the users/groups defined in the NixOS configuration.
# The merging procedure could certainly be improved, and instead of just
# keeping the lines as-is from /etc/{passwd,group} they could be combined
# in some way with the generated content from the NixOS configuration.
merger = src: pkgs.writeScript "merger" ''
#!${pkgs.bash}/bin/bash
PATH=${pkgs.gawk}/bin:${pkgs.gnugrep}/bin:$PATH
${if !cfg.mutableUsers
then ''cp ${src} $1.tmp''
else ''awk -F: '{ print "^"$1":.*" }' $1 | egrep -vf - ${src} | cat $1 - > $1.tmp''
}
# set mtime to +1, otherwise change might go unnoticed (vipw/vigr only looks at mtime)
touch -m -t $(date -d @$(($(stat -c %Y $1)+1)) +%Y%m%d%H%M.%S) $1.tmp
mv -f $1.tmp $1
'';
subgidFile = concatStrings (map mkSubgidEntry (attrValues cfg.extraUsers));
idsAreUnique = set: idAttr: !(fold (name: args@{ dup, acc }:
let
@ -376,6 +301,21 @@ let
uidsAreUnique = idsAreUnique (filterAttrs (n: u: u.uid != null) cfg.extraUsers) "uid";
gidsAreUnique = idsAreUnique (filterAttrs (n: g: g.gid != null) cfg.extraGroups) "gid";
spec = pkgs.writeText "users-groups.json" (builtins.toJSON {
inherit (cfg) mutableUsers;
users = mapAttrsToList (n: u:
{ inherit (u)
name uid group description home shell createHome isSystemUser
password passwordFile hashedPassword;
}) cfg.extraUsers;
groups = mapAttrsToList (n: g:
{ inherit (g) name gid;
members = mapAttrsToList (n: u: u.name) (
filterAttrs (n: u: elem g.name u.extraGroups) cfg.extraUsers
);
}) cfg.extraGroups;
});
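
The users-groups.json file written here and handed to the Perl script above therefore has roughly this shape (all values invented for illustration):

{
  "mutableUsers": true,
  "users": [
    { "name": "alice", "uid": null, "group": "users",
      "description": "Alice Foobar", "home": "/home/alice",
      "shell": "/run/current-system/sw/bin/bash", "createHome": true,
      "isSystemUser": false, "password": null, "passwordFile": null,
      "hashedPassword": null }
  ],
  "groups": [
    { "name": "users", "gid": 100, "members": [ "alice" ] }
  ]
}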
in {
###### interface
@ -512,67 +452,12 @@ in {
grsecurity.gid = ids.gids.grsecurity;
};
system.activationScripts.users =
let
mkhomeUsers = filterAttrs (n: u: u.createHome) cfg.extraUsers;
setpwUsers = filterAttrs (n: u: u.createUser) cfg.extraUsers;
pwFile = u: if !(isNull u.hashedPassword)
then pkgs.writeTextFile { name = "password-file"; text = u.hashedPassword; }
else if !(isNull u.password)
then pkgs.runCommand "password-file" { pw = u.password; } ''
echo -n "$pw" | ${pkgs.mkpasswd}/bin/mkpasswd -s > $out
'' else u.passwordFile;
setpw = n: u: ''
setpw=yes
${optionalString cfg.mutableUsers ''
test "$(getent shadow '${u.name}' | cut -d: -f2)" != "x" && setpw=no
''}
if [ "$setpw" == "yes" ]; then
${if !(isNull (pwFile u))
then ''
echo -n "${u.name}:" | cat - "${pwFile u}" | \
${pkgs.shadow}/sbin/chpasswd -e
''
else "passwd -l '${u.name}' &>/dev/null"
}
fi
'';
mkhome = n: u: ''
uid="$(id -u ${u.name})"
gid="$(id -g ${u.name})"
h="${u.home}"
test -a "$h" || mkdir -p "$h" || true
test "$(stat -c %u "$h")" = $uid || chown $uid "$h" || true
test "$(stat -c %g "$h")" = $gid || chgrp $gid "$h" || true
'';
groupadd = n: g: ''
if [ -z "$(getent group "${g.name}")" ]; then
${pkgs.shadow}/sbin/groupadd "${g.name}"
fi
'';
useradd = n: u: ''
if ! id "${u.name}" &>/dev/null; then
${pkgs.shadow}/sbin/useradd \
-g "${u.group}" \
-G "${concatStringsSep "," u.extraGroups}" \
-s "${u.shell}" \
-d "${u.home}" \
${optionalString u.isSystemUser "--system"} \
"${u.name}"
echo "${u.name}:x" | ${pkgs.shadow}/sbin/chpasswd -e
fi
'';
in stringAfter [ "etc" ] ''
touch /etc/group
touch /etc/passwd
VISUAL=${merger groupFile} ${pkgs.shadow}/sbin/vigr &>/dev/null
VISUAL=${merger passwdFile} ${pkgs.shadow}/sbin/vipw &>/dev/null
${pkgs.shadow}/sbin/grpconv
${pkgs.shadow}/sbin/pwconv
${concatStrings (mapAttrsToList groupadd nonGidGroups)}
${concatStrings (mapAttrsToList useradd nonUidUsers)}
${concatStrings (mapAttrsToList mkhome mkhomeUsers)}
${concatStrings (mapAttrsToList setpw setpwUsers)}
system.activationScripts.users = stringAfter [ "etc" ]
''
${pkgs.perl}/bin/perl -w \
-I${pkgs.perlPackages.FileSlurp}/lib/perl5/site_perl \
-I${pkgs.perlPackages.JSON}/lib/perl5/site_perl \
${./update-users-groups.pl} ${spec}
'';
# for backwards compatibility
@ -589,13 +474,7 @@ in {
assertions = [
{ assertion = !cfg.enforceIdUniqueness || (uidsAreUnique && gidsAreUnique);
message = "uids and gids must be unique!";
}
{ assertion = cfg.mutableUsers || (nonUidUsers == {});
message = "When mutableUsers is false, no uid can be null: ${toString (attrNames nonUidUsers)}";
}
{ assertion = cfg.mutableUsers || (nonGidGroups == {});
message = "When mutableUsers is false, no gid can be null";
message = "UIDs and GIDs must be unique!";
}
];


@ -11,7 +11,7 @@ let
# The Grub image.
grubImage = pkgs.runCommand "grub_eltorito" {}
''
${pkgs.grub2}/bin/grub-mkimage -O i386-pc -o tmp biosdisk iso9660 help linux linux16 chain png jpeg echo gfxmenu reboot
${pkgs.grub2}/bin/grub-mkimage -p /boot/grub -O i386-pc -o tmp biosdisk iso9660 help linux linux16 chain png jpeg echo gfxmenu reboot
cat ${pkgs.grub2}/lib/grub/*/cdboot.img tmp > $out
''; # */


@ -79,7 +79,6 @@ in
pkgs.jfsrec
# Some compression/archiver tools.
pkgs.unrar
pkgs.unzip
pkgs.zip
pkgs.xz


@ -70,7 +70,6 @@ in
pkgs.btrfsProgs
# Some compression/archiver tools.
pkgs.unrar
pkgs.unzip
pkgs.zip
pkgs.xz


@ -20,6 +20,13 @@ sub uniq {
return @res;
}
sub runCommand {
my ($cmd) = @_;
open FILE, "$cmd 2>&1 |" or die "Failed to execute: $cmd\n";
my @ret = <FILE>;
close FILE;
return ($?, @ret);
}
# Process the command line.
my $outDir = "/etc/nixos";
@ -304,10 +311,13 @@ foreach my $fs (read_file("/proc/self/mountinfo")) {
# Maybe this is a bind-mount of a filesystem we saw earlier?
if (defined $fsByDev{$fields[2]}) {
my $path = $fields[3]; $path = "" if $path eq "/";
my $base = $fsByDev{$fields[2]};
$base = "" if $base eq "/";
$fileSystems .= <<EOF;
# Make sure this isn't a btrfs subvolume
my ($status, @msg) = runCommand("btrfs subvol show $rootDir$mountPoint");
if (join("", @msg) =~ /ERROR:/) {
my $path = $fields[3]; $path = "" if $path eq "/";
my $base = $fsByDev{$fields[2]};
$base = "" if $base eq "/";
$fileSystems .= <<EOF;
fileSystems.\"$mountPoint\" =
{ device = \"$base$path\";
fsType = \"none\";
@ -315,7 +325,8 @@ foreach my $fs (read_file("/proc/self/mountinfo")) {
};
EOF
next;
next;
}
}
$fsByDev{$fields[2]} = $mountPoint;
@ -337,6 +348,30 @@ EOF
}
}
# Is this a btrfs filesystem?
if ($fsType eq "btrfs") {
my ($status, @id_info) = runCommand("btrfs subvol show $rootDir$mountPoint");
if ($status != 0 || join("", @id_info) =~ /ERROR:/) {
die "Failed to retrieve subvolume info for $mountPoint\n";
}
my @ids = join("", @id_info) =~ m/Object ID:[ \t\n]*([^ \t\n]*)/;
if ($#ids > 0) {
die "Btrfs subvol name for $mountPoint listed multiple times in mount\n"
} elsif ($#ids == 0) {
my ($status, @path_info) = runCommand("btrfs subvol list $rootDir$mountPoint");
if ($status != 0) {
die "Failed to find $mountPoint subvolume id from btrfs\n";
}
my @paths = join("", @path_info) =~ m/ID $ids[0] [^\n]* path ([^\n]*)/;
if ($#paths > 0) {
die "Btrfs returned multiple paths for a single subvolume id, mountpoint $mountPoint\n";
} elsif ($#paths != 0) {
die "Btrfs did not return a path for the subvolume at $mountPoint\n";
}
push @extraOptions, "subvol=$paths[0]";
}
}
# Emit the filesystem.
$fileSystems .= <<EOF;
fileSystems.\"$mountPoint\" =
@ -490,12 +525,8 @@ $bootLoaderConfig
# Define a user account. Don't forget to set a password with passwd.
# users.extraUsers.guest = {
# name = "guest";
# group = "users";
# isNormalUser = true;
# uid = 1000;
# createHome = true;
# home = "/home/guest";
# shell = "/run/current-system/sw/bin/bash";
# };
}
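
When the btrfs branch above finds a subvolume, the generated hardware-configuration.nix carries the subvolume in the mount options; the result would look roughly like this (device and subvolume name are invented, and the exact field layout follows whatever the generator emits):

fileSystems."/home" =
  { device = "/dev/disk/by-label/data";
    fsType = "btrfs";
    options = "subvol=home";
  };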


@ -7,6 +7,9 @@
# * nix-env -p /nix/var/nix/profiles/system -i <nix-expr for the configuration>
# * install the boot loader
# Ensure a consistent umask.
umask 0022
# Re-exec ourselves in a private mount namespace so that our bind
# mounts get cleaned up automatically.
if [ "$(id -u)" = 0 ]; then
@ -243,7 +246,7 @@ chroot $mountPoint /nix/var/nix/profiles/system/activate
# Ask the user to set a root password.
if [ -t 0 ] ; then
echo "setting root password..."
chroot $mountPoint passwd
chroot $mountPoint /var/setuid-wrappers/passwd
fi


@ -11,9 +11,6 @@ usage () {
# Process Arguments #
#####################
desc=false
defs=false
value=false
xml=false
verbose=false
@ -24,14 +21,11 @@ for arg; do
if test -z "$argfun"; then
case $arg in
-*)
longarg=""
sarg="$arg"
longarg=""
while test "$sarg" != "-"; do
case $sarg in
--*) longarg=$arg; sarg="--";;
-d*) longarg="$longarg --description";;
-v*) longarg="$longarg --value";;
-l*) longarg="$longarg --lookup";;
-*) usage;;
esac
# remove the first letter option
@ -42,9 +36,6 @@ for arg; do
esac
for larg in $longarg; do
case $larg in
--description) desc=true;;
--value) value=true;;
--lookup) defs=true;;
--xml) xml=true;;
--verbose) verbose=true;;
--help) usage;;
@ -67,16 +58,6 @@ for arg; do
fi
done
if $xml; then
value=true
desc=true
defs=true
fi
if ! $defs && ! $desc; then
value=true
fi
if $verbose; then
set -x
else
@ -95,8 +76,7 @@ evalAttr(){
local prefix="$1"
local strict="$2"
local suffix="$3"
echo "(import <nixos> {}).$prefix${option:+.$option}${suffix:+.$suffix}" |
evalNix ${strict:+--strict}
echo "(import <nixos> {}).$prefix${option:+.$option}${suffix:+.$suffix}" | evalNix ${strict:+--strict}
}
evalOpt(){
@ -189,35 +169,37 @@ EOF
fi
if test "$(evalOpt "_type" 2> /dev/null)" = '"option"'; then
$value && evalCfg 1
echo "Value:"
evalCfg 1
if $desc; then
$value && echo;
echo
if default=$(evalOpt "default" - 2> /dev/null); then
echo "Default: $default"
else
echo "Default: <None>"
fi
if example=$(evalOpt "example" - 2> /dev/null); then
echo "Example: $example"
fi
echo "Description:"
eval printf $(evalOpt "description")
echo "Default:"
if default=$(evalOpt "default" - 2> /dev/null); then
echo "$default"
else
echo "<None>"
fi
if $defs; then
$desc || $value && echo;
printPath () { echo " $1"; }
echo "Declared by:"
nixMap printPath "$(findSources "declarations")"
echo ""
echo "Defined by:"
nixMap printPath "$(findSources "files")"
echo ""
echo
if example=$(evalOpt "example" - 2> /dev/null); then
echo "Example:"
echo "$example"
echo
fi
echo "Description:"
echo
eval printf $(evalOpt "description")
echo $desc;
printPath () { echo " $1"; }
echo "Declared by:"
nixMap printPath "$(findSources "declarations")"
echo
echo "Defined by:"
nixMap printPath "$(findSources "files")"
echo
else
# echo 1>&2 "Warning: This value is not an option."


@ -194,13 +194,13 @@ if [ -z "$rollback" ]; then
nix-env "${extraBuildFlags[@]}" -p "$profile" -f '<nixpkgs/nixos>' --set -A system
pathToConfig="$profile"
elif [ "$action" = test -o "$action" = build -o "$action" = dry-run ]; then
nix-build '<nixpkgs/nixos>' -A system -K -k "${extraBuildFlags[@]}" > /dev/null
nix-build '<nixpkgs/nixos>' -A system -k "${extraBuildFlags[@]}" > /dev/null
pathToConfig=./result
elif [ "$action" = build-vm ]; then
nix-build '<nixpkgs/nixos>' -A vm -K -k "${extraBuildFlags[@]}" > /dev/null
nix-build '<nixpkgs/nixos>' -A vm -k "${extraBuildFlags[@]}" > /dev/null
pathToConfig=./result
elif [ "$action" = build-vm-with-bootloader ]; then
nix-build '<nixpkgs/nixos>' -A vmWithBootLoader -K -k "${extraBuildFlags[@]}" > /dev/null
nix-build '<nixpkgs/nixos>' -A vmWithBootLoader -k "${extraBuildFlags[@]}" > /dev/null
pathToConfig=./result
else
showSyntax
@ -226,7 +226,7 @@ fi
# default and/or activate it now.
if [ "$action" = switch -o "$action" = boot -o "$action" = test ]; then
if ! $pathToConfig/bin/switch-to-configuration "$action"; then
echo "warning: there were error switching to the new configuration" >&2
echo "warning: error(s) occured while switching to the new configuration" >&2
exit 1
fi
fi


@ -38,6 +38,7 @@ let
nixos-generate-config = makeProg {
name = "nixos-generate-config";
src = ./nixos-generate-config.pl;
path = [ pkgs.btrfsProgs ];
perl = "${pkgs.perl}/bin/perl -I${pkgs.perlPackages.FileSlurp}/lib/perl5/site_perl";
};


@ -10,6 +10,9 @@ with lib;
../profiles/clone-config.nix
];
# FIXME: UUID detection is currently broken
boot.loader.grub.fsIdentifier = "provided";
# Allow mounting of shared folders.
users.extraUsers.demo.extraGroups = [ "vboxsf" ];


@ -150,6 +150,7 @@
zookeeper = 140;
dnsmasq = 141;
uhub = 142;
yandexdisk = 143;
# When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!


@ -41,7 +41,7 @@ in {
output = mkOption {
type = types.path;
default = /var/cache/locatedb;
default = "/var/cache/locatedb";
description = ''
The database file to build.
'';
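
The quoting is the whole point of this change: an unquoted /var/cache/locatedb is a Nix path literal, not a string, and interpolating a path literal into a derivation copies it into the store, which is not what you want for a runtime database location. In a nix-repl session the distinction looks like this:

builtins.typeOf /var/cache/locatedb    # "path"
builtins.typeOf "/var/cache/locatedb"  # "string"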

nixos/modules/module-list.nix Normal file → Executable file

@ -85,7 +85,8 @@
./services/amqp/activemq/default.nix
./services/amqp/rabbitmq.nix
./services/audio/alsa.nix
./services/audio/fuppes.nix
# Disabled since fuppes no longer builds.
# ./services/audio/fuppes.nix
./services/audio/mpd.nix
./services/audio/mopidy.nix
./services/backup/almir.nix
@ -95,6 +96,8 @@
./services/backup/rsnapshot.nix
./services/backup/sitecopy-backup.nix
./services/backup/tarsnap.nix
./services/computing/torque/server.nix
./services/computing/torque/mom.nix
./services/continuous-integration/jenkins/default.nix
./services/continuous-integration/jenkins/slave.nix
./services/databases/4store-endpoint.nix
@ -161,10 +164,13 @@
./services/misc/folding-at-home.nix
./services/misc/gitolite.nix
./services/misc/gpsd.nix
./services/misc/mesos-master.nix
./services/misc/mesos-slave.nix
./services/misc/nix-daemon.nix
./services/misc/nix-gc.nix
./services/misc/nixos-manual.nix
./services/misc/nix-ssh-serve.nix
./services/misc/phd.nix
./services/misc/rippled.nix
./services/misc/rogue.nix
./services/misc/siproxd.nix
@ -192,6 +198,8 @@
./services/network-filesystems/openafs-client/default.nix
./services/network-filesystems/rsyncd.nix
./services/network-filesystems/samba.nix
./services/network-filesystems/diod.nix
./services/network-filesystems/yandex-disk.nix
./services/networking/amuled.nix
./services/networking/atftpd.nix
./services/networking/avahi-daemon.nix


@ -37,7 +37,6 @@
#pkgs.jfsrec # disabled because of Boost dependency
# Some compression/archiver tools.
pkgs.unrar
pkgs.unzip
pkgs.zip
pkgs.dar # disk archiver


@ -4,12 +4,9 @@
imports = [ ./graphical.nix ];
users.extraUsers.demo =
{ description = "Demo user account";
group = "users";
{ isNormalUser = true;
description = "Demo user account";
extraGroups = [ "wheel" ];
home = "/home/demo";
createHome = true;
useDefaultShell = true;
password = "demo";
uid = 1000;
};


@ -37,26 +37,24 @@ in
"/run/current-system/sw"
];
# !!! fix environment.profileVariables definition and then move
# most of these elsewhere
environment.profileVariables = (i:
{ PATH = [ "${i}/bin" "${i}/sbin" "${i}/lib/kde4/libexec" ];
MANPATH = [ "${i}/man" "${i}/share/man" ];
INFOPATH = [ "${i}/info" "${i}/share/info" ];
PKG_CONFIG_PATH = [ "${i}/lib/pkgconfig" ];
TERMINFO_DIRS = [ "${i}/share/terminfo" ];
PERL5LIB = [ "${i}/lib/perl5/site_perl" ];
ALSA_PLUGIN_DIRS = [ "${i}/lib/alsa-lib" ];
GST_PLUGIN_SYSTEM_PATH = [ "${i}/lib/gstreamer-0.10" ];
KDEDIRS = [ "${i}" ];
STRIGI_PLUGIN_PATH = [ "${i}/lib/strigi/" ];
QT_PLUGIN_PATH = [ "${i}/lib/qt4/plugins" "${i}/lib/kde4/plugins" ];
QTWEBKIT_PLUGIN_PATH = [ "${i}/lib/mozilla/plugins/" ];
GTK_PATH = [ "${i}/lib/gtk-2.0" "${i}/lib/gtk-3.0" ];
XDG_CONFIG_DIRS = [ "${i}/etc/xdg" ];
XDG_DATA_DIRS = [ "${i}/share" ];
MOZ_PLUGIN_PATH = [ "${i}/lib/mozilla/plugins" ];
});
# TODO: move most of these elsewhere
environment.profileRelativeEnvVars =
{ PATH = [ "/bin" "/sbin" "/lib/kde4/libexec" ];
MANPATH = [ "/man" "/share/man" ];
INFOPATH = [ "/info" "/share/info" ];
PKG_CONFIG_PATH = [ "/lib/pkgconfig" ];
TERMINFO_DIRS = [ "/share/terminfo" ];
PERL5LIB = [ "/lib/perl5/site_perl" ];
ALSA_PLUGIN_DIRS = [ "/lib/alsa-lib" ];
KDEDIRS = [ "" ];
STRIGI_PLUGIN_PATH = [ "/lib/strigi/" ];
QT_PLUGIN_PATH = [ "/lib/qt4/plugins" "/lib/kde4/plugins" ];
QTWEBKIT_PLUGIN_PATH = [ "/lib/mozilla/plugins/" ];
GTK_PATH = [ "/lib/gtk-2.0" "/lib/gtk-3.0" ];
XDG_CONFIG_DIRS = [ "/etc/xdg" ];
XDG_DATA_DIRS = [ "/share" ];
MOZ_PLUGIN_PATH = [ "/lib/mozilla/plugins" ];
};
environment.extraInit =
''


@ -59,6 +59,13 @@ in
'';
};
package = mkOption {
default = pkgs.openssh;
description = ''
The package used for the openssh client and daemon.
'';
};
};
};
@ -92,7 +99,7 @@ in
wantedBy = [ "default.target" ];
serviceConfig =
{ ExecStartPre = "${pkgs.coreutils}/bin/rm -f %t/ssh-agent";
ExecStart = "${pkgs.openssh}/bin/ssh-agent -a %t/ssh-agent";
ExecStart = "${cfg.package}/bin/ssh-agent -a %t/ssh-agent";
StandardOutput = "null";
Type = "forking";
Restart = "on-failure";


@ -15,7 +15,7 @@ let virtualbox = config.boot.kernelPackages.virtualbox; in
''
KERNEL=="vboxdrv", OWNER="root", GROUP="vboxusers", MODE="0660", TAG+="systemd"
KERNEL=="vboxdrvu", OWNER="root", GROUP="root", MODE="0666", TAG+="systemd"
KERNEL=="vboxnetctl", OWNER="root", GROUP="root", MODE="0600", TAG+="systemd"
KERNEL=="vboxnetctl", OWNER="root", GROUP="vboxusers", MODE="0660", TAG+="systemd"
SUBSYSTEM=="usb_device", ACTION=="add", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh $major $minor $attr{bDeviceClass}"
SUBSYSTEM=="usb", ACTION=="add", ENV{DEVTYPE}=="usb_device", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh $major $minor $attr{bDeviceClass}"
SUBSYSTEM=="usb_device", ACTION=="remove", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh --remove $major $minor"


@ -76,10 +76,7 @@ in
config = {
security.setuidPrograms =
[ "mount.nfs" "mount.nfs4" "mount.cifs"
"fusermount" "umount"
"wodim" "cdrdao" "growisofs" ];
security.setuidPrograms = [ "fusermount" ];
system.activationScripts.setuid =
let


@ -89,6 +89,8 @@ in {
wantedBy = [ "multi-user.target" ];
after = [ "network-interfaces.target" ];
path = [ pkgs.rabbitmq_server ];
environment = {
RABBITMQ_MNESIA_BASE = "${cfg.dataDir}/mnesia";
RABBITMQ_NODE_IP_ADDRESS = cfg.listenAddress;
@ -119,6 +121,8 @@ in {
mkdir -p /var/log/rabbitmq && chmod 0700 /var/log/rabbitmq
chown rabbitmq:rabbitmq /var/log/rabbitmq
'';
postStart = mkBefore "until rabbitmqctl status; do sleep 1; done";
};
};


@ -0,0 +1,63 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.services.torque.mom;
torque = pkgs.torque;
momConfig = pkgs.writeText "torque-mom-config" ''
$pbsserver ${cfg.serverNode}
$logevent 225
'';
in
{
options = {
services.torque.mom = {
enable = mkEnableOption "torque computing node";
serverNode = mkOption {
type = types.str;
description = "Hostname running pbs server.";
};
};
};
config = mkIf cfg.enable {
environment.systemPackages = [ pkgs.torque ];
systemd.services.torque-mom-init = {
path = with pkgs; [ torque utillinux procps inetutils ];
script = ''
pbs_mkdirs -v aux
pbs_mkdirs -v mom
hostname > /var/spool/torque/server_name
cp -v ${momConfig} /var/spool/torque/mom_priv/config
'';
serviceConfig.Type = "oneshot";
unitConfig.ConditionPathExists = "!/var/spool/torque";
};
systemd.services.torque-mom = {
path = [ torque ];
wantedBy = [ "multi-user.target" ];
requires = [ "torque-mom-init.service" ];
after = [ "torque-mom-init.service" "network.target" ];
serviceConfig = {
Type = "forking";
ExecStart = "${torque}/bin/pbs_mom";
PIDFile = "/var/spool/torque/mom_priv/mom.lock";
};
};
};
}


@ -0,0 +1,96 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.services.torque.server;
torque = pkgs.torque;
in
{
options = {
services.torque.server = {
enable = mkEnableOption "torque server";
};
};
config = mkIf cfg.enable {
environment.systemPackages = [ pkgs.torque ];
systemd.services.torque-server-init = {
path = with pkgs; [ torque utillinux procps inetutils ];
script = ''
tmpsetup=$(mktemp -t torque-XXXX)
cp -p ${torque}/bin/torque.setup $tmpsetup
sed -i $tmpsetup -e 's/pbs_server -t create/pbs_server -f -t create/'
pbs_mkdirs -v aux
pbs_mkdirs -v server
hostname > /var/spool/torque/server_name
cp -prv ${torque}/var/spool/torque/* /var/spool/torque/
$tmpsetup root
sleep 1
rm -f $tmpsetup
kill $(pgrep pbs_server) 2>/dev/null
kill $(pgrep trqauthd) 2>/dev/null
'';
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
};
unitConfig = {
ConditionPathExists = "!/var/spool/torque";
};
};
systemd.services.trqauthd = {
path = [ torque ];
requires = [ "torque-server-init.service" ];
after = [ "torque-server-init.service" ];
serviceConfig = {
Type = "forking";
ExecStart = "${torque}/bin/trqauthd";
};
};
systemd.services.torque-server = {
path = [ torque ];
wantedBy = [ "multi-user.target" ];
wants = [ "torque-scheduler.service" "trqauthd.service" ];
before = [ "trqauthd.service" ];
requires = [ "torque-server-init.service" ];
after = [ "torque-server-init.service" "network.target" ];
serviceConfig = {
Type = "forking";
ExecStart = "${torque}/bin/pbs_server";
ExecStop = "${torque}/bin/qterm";
PIDFile = "/var/spool/torque/server_priv/server.lock";
};
};
systemd.services.torque-scheduler = {
path = [ torque ];
requires = [ "torque-server-init.service" ];
after = [ "torque-server-init.service" ];
serviceConfig = {
Type = "forking";
ExecStart = "${torque}/bin/pbs_sched";
PIDFile = "/var/spool/torque/sched_priv/sched.lock";
};
};
};
}
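Correspondingly, the head node would enable the server module added above; a minimal sketch:

    services.torque.server.enable = true;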

View file

@ -15,7 +15,7 @@ in {
user = mkOption {
default = "jenkins";
type = with types; string;
type = types.str;
description = ''
User the jenkins server should execute under.
'';
@ -23,16 +23,25 @@ in {
group = mkOption {
default = "jenkins";
type = with types; string;
type = types.str;
description = ''
If the default user "jenkins" is configured then this is the primary
group of that user.
'';
};
extraGroups = mkOption {
type = types.listOf types.str;
default = [ ];
example = [ "wheel" "dialout" ];
description = ''
List of extra groups that the "jenkins" user should be a part of.
'';
};
home = mkOption {
default = "/var/lib/jenkins";
type = with types; string;
type = types.path;
description = ''
The path to use as JENKINS_HOME. If the default user "jenkins" is configured then
this is the home of the "jenkins" user.
@ -57,12 +66,21 @@ in {
environment = mkOption {
default = { NIX_REMOTE = "daemon"; };
type = with types; attrsOf string;
type = with types; attrsOf str;
description = ''
Additional environment variables to be passed to the jenkins process.
The environment will always include JENKINS_HOME.
'';
};
extraOptions = mkOption {
type = types.listOf types.str;
default = [ ];
example = [ "--debug=9" "--httpListenAddress=localhost" ];
description = ''
Additional command line arguments to pass to Jenkins.
'';
};
};
};
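A hedged sketch of the two new Jenkins options in use, reusing the example values from the option declarations above and assuming the module's existing enable option:

    services.jenkins = {
      enable = true;
      extraGroups = [ "wheel" "dialout" ];
      extraOptions = [ "--debug=9" "--httpListenAddress=localhost" ];
    };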
@ -78,6 +96,7 @@ in {
createHome = true;
home = cfg.home;
group = cfg.group;
extraGroups = cfg.extraGroups;
useDefaultShell = true;
uid = config.ids.uids.jenkins;
};
@ -94,7 +113,7 @@ in {
path = cfg.packages;
script = ''
${pkgs.jdk}/bin/java -jar ${pkgs.jenkins} --httpPort=${toString cfg.port}
${pkgs.jdk}/bin/java -jar ${pkgs.jenkins} --httpPort=${toString cfg.port} ${concatStringsSep " " cfg.extraOptions}
'';
postStart = ''

View file

@ -23,7 +23,7 @@ in {
user = mkOption {
default = "jenkins";
type = with types; string;
type = types.str;
description = ''
User the jenkins slave agent should execute under.
'';
@ -31,7 +31,7 @@ in {
group = mkOption {
default = "jenkins";
type = with types; string;
type = types.str;
description = ''
If the default slave agent user "jenkins" is configured then this is
the primary group of that user.
@ -40,7 +40,7 @@ in {
home = mkOption {
default = "/var/lib/jenkins";
type = with types; string;
type = types.path;
description = ''
The path to use as JENKINS_HOME. If the default user "jenkins" is configured then
this is the home of the "jenkins" user.

View file

@ -224,6 +224,11 @@ in
mkdir -m 0770 -p ${cfg.dataDir}
if [ "$(id -u)" = 0 ]; then chown -R ${cfg.user}:${cfg.group} ${cfg.dataDir}; fi
'';
postStart = mkBefore ''
until ${pkgs.curl}/bin/curl -s -o /dev/null 'http://${cfg.bindAddress}:${toString cfg.apiPort}/'; do
sleep 1;
done
'';
};
users.extraUsers = optional (cfg.user == "influxdb") {

View file

@ -11,20 +11,25 @@ in
###### interface
options = {
services.logstash = {
enable = mkOption {
type = types.bool;
default = false;
description = "Enable logstash";
description = "Enable logstash.";
};
enableWeb = mkOption {
type = types.bool;
default = false;
description = "Enable logstash web interface";
description = "Enable the logstash web interface.";
};
inputConfig = mkOption {
type = types.lines;
default = ''stdin { type => "example" }'';
description = "Logstash input configuration";
description = "Logstash input configuration.";
example = ''
# Read from journal
pipe {
@ -35,8 +40,9 @@ in
};
filterConfig = mkOption {
type = types.lines;
default = ''noop {}'';
description = "logstash filter configuration";
description = "logstash filter configuration.";
example = ''
if [type] == "syslog" {
# Keep only relevant systemd fields
@ -52,13 +58,15 @@ in
};
outputConfig = mkOption {
type = types.lines;
default = ''stdout { debug => true debug_format => "json"}'';
description = "Logstash output configuration";
description = "Logstash output configuration.";
example = ''
redis { host => "localhost" data_type => "list" key => "logstash" codec => json }
elasticsearch { embedded => true }
'';
};
};
};

View file

@ -0,0 +1,103 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.mesos.master;
in {
options.services.mesos = {
master = {
enable = mkOption {
description = "Whether to enable the Mesos Master.";
default = false;
type = types.uniq types.bool;
};
port = mkOption {
description = "Mesos Master port";
default = 5050;
type = types.int;
};
zk = mkOption {
description = ''
ZooKeeper URL (used for leader election amongst masters).
May be one of:
zk://host1:port1,host2:port2,.../mesos
zk://username:password@host1:port1,host2:port2,.../mesos
'';
type = types.str;
};
workDir = mkOption {
description = "The Mesos work directory.";
default = "/var/lib/mesos/master";
type = types.str;
};
extraCmdLineOptions = mkOption {
description = ''
Extra command line options for Mesos Master.
See https://mesos.apache.org/documentation/latest/configuration/
'';
default = [ "" ];
type = types.listOf types.string;
example = [ "--credentials=VALUE" ];
};
quorum = mkOption {
description = ''
The size of the quorum of replicas when using 'replicated_log' based
registry. It is imperative to set this value to be a majority of
masters i.e., quorum > (number of masters)/2.
If 0 will fall back to --registry=in_memory.
'';
default = 0;
type = types.int;
};
logLevel = mkOption {
description = ''
The logging level used. Possible values:
'INFO', 'WARNING', 'ERROR'
'';
default = "INFO";
type = types.str;
};
};
};
config = mkIf cfg.enable {
systemd.services.mesos-master = {
description = "Mesos Master";
wantedBy = [ "multi-user.target" ];
after = [ "network-interfaces.target" ];
serviceConfig = {
ExecStart = ''
${pkgs.mesos}/bin/mesos-master \
--port=${toString cfg.port} \
--zk=${cfg.zk} \
${if cfg.quorum == 0 then "--registry=in_memory" else "--registry=replicated_log --quorum=${toString cfg.quorum}"} \
--work_dir=${cfg.workDir} \
--logging_level=${cfg.logLevel} \
${toString cfg.extraCmdLineOptions}
'';
PermissionsStartOnly = true;
};
preStart = ''
mkdir -m 0700 -p ${cfg.workDir}
'';
};
};
}
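A minimal sketch of enabling the new Mesos master module; the ZooKeeper URL is a placeholder, and leaving quorum at its default of 0 keeps the in-memory registry:

    services.mesos.master = {
      enable = true;
      zk = "zk://10.0.0.1:2181/mesos";  # placeholder ZooKeeper URL
    };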

View file

@ -0,0 +1,93 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.mesos.slave;
in {
options.services.mesos = {
slave = {
enable = mkOption {
description = "Whether to enable the Mesos Slave.";
default = false;
type = types.uniq types.bool;
};
port = mkOption {
description = "Mesos Slave port";
default = 5051;
type = types.int;
};
master = mkOption {
description = ''
May be one of:
zk://host1:port1,host2:port2,.../path
zk://username:password@host1:port1,host2:port2,.../path
'';
type = types.str;
};
withHadoop = mkOption {
description = "Add the HADOOP_HOME to the slave.";
default = false;
type = types.bool;
};
workDir = mkOption {
description = "The Mesos work directory.";
default = "/var/lib/mesos/slave";
type = types.str;
};
extraCmdLineOptions = mkOption {
description = ''
Extra command line options for Mesos Slave.
See https://mesos.apache.org/documentation/latest/configuration/
'';
default = [ "" ];
type = types.listOf types.string;
example = [ "--gc_delay=3days" ];
};
logLevel = mkOption {
description = ''
The logging level used. Possible values:
'INFO', 'WARNING', 'ERROR'
'';
default = "INFO";
type = types.str;
};
};
};
config = mkIf cfg.enable {
systemd.services.mesos-slave = {
description = "Mesos Slave";
wantedBy = [ "multi-user.target" ];
after = [ "network-interfaces.target" ];
serviceConfig = {
ExecStart = ''
${pkgs.mesos}/bin/mesos-slave \
--port=${toString cfg.port} \
--master=${cfg.master} \
${optionalString cfg.withHadoop "--hadoop-home=${pkgs.hadoop}"} \
--work_dir=${cfg.workDir} \
--logging_level=${cfg.logLevel} \
${toString cfg.extraCmdLineOptions}
'';
PermissionsStartOnly = true;
};
preStart = ''
mkdir -m 0700 -p ${cfg.workDir}
'';
};
};
}
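And a matching sketch for the slave module, again with a placeholder master URL:

    services.mesos.slave = {
      enable = true;
      master = "zk://10.0.0.1:2181/mesos";  # placeholder; must point at the masters' ZooKeeper
    };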

View file

@ -0,0 +1,52 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.phd;
in
{
###### interface
options = {
services.phd = {
enable = mkOption {
default = false;
description = "
Enable daemons for phabricator.
";
};
};
};
###### implementation
config = mkIf cfg.enable {
systemd.services.phd = {
path = [ pkgs.phabricator pkgs.php pkgs.mercurial pkgs.git pkgs.subversion ];
after = [ "httpd.service" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
ExecStart = "${pkgs.phabricator}/phabricator/bin/phd start";
ExecStop = "${pkgs.phabricator}/phabricator/bin/phd stop";
User = "wwwrun";
RestartSec = "30s";
Restart = "always";
StartLimitInterval = "1m";
};
};
};
}
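A minimal sketch of enabling the new phd module; it assumes the Phabricator Apache configuration elsewhere in this commit is already in place:

    services.phd.enable = true;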

View file

@ -161,6 +161,7 @@ in
# systemd kills it with SIGKILL.
TimeoutStopSec = 5;
};
unitConfig.Documentation = "man:apcupsd(8)";
};
# A special service to tell the UPS to power down/hibernate just before the

View file

@ -8,6 +8,16 @@ let
dataDir = cfg.dataDir;
graphiteApiConfig = pkgs.writeText "graphite-api.yaml" ''
time_zone: ${config.time.timeZone}
search_index: ${dataDir}/index
${optionalString (cfg.api.finders != []) ''finders:''}
${concatMapStringsSep "\n" (f: " - " + f.moduleName) cfg.api.finders}
${optionalString (cfg.api.functions != []) ''functions:''}
${concatMapStringsSep "\n" (f: " - " + f) cfg.api.functions}
${cfg.api.extraConfig}
'';
configDir = pkgs.buildEnv {
name = "graphite-config";
paths = lists.filter (el: el != null) [
@ -65,6 +75,40 @@ in {
};
};
api = {
enable = mkOption {
description = "Whether to enable graphite api.";
default = false;
type = types.uniq types.bool;
};
finders = mkOption {
description = "List of finder plugins load.";
default = [];
example = [ pkgs.python27Packages.graphite_influxdb ];
type = types.listOf types.package;
};
functions = mkOption {
description = "List of functions to load.";
default = [
"graphite_api.functions.SeriesFunctions"
"graphite_api.functions.PieFunctions"
];
type = types.listOf types.str;
};
extraConfig = mkOption {
description = "Extra configuration for graphite api.";
default = ''
whisper:
directories:
- ${dataDir}/whisper
'';
type = types.str;
};
};
carbon = {
config = mkOption {
description = "Content of carbon configuration file.";
@ -176,7 +220,7 @@ in {
###### implementation
config = mkIf (cfg.carbon.enableAggregator || cfg.carbon.enableCache || cfg.carbon.enableRelay || cfg.web.enable) {
config = mkIf (cfg.carbon.enableAggregator || cfg.carbon.enableCache || cfg.carbon.enableRelay || cfg.web.enable || cfg.api.enable) {
systemd.services.carbonCache = {
enable = cfg.carbon.enableCache;
description = "Graphite Data Storage Backend";
@ -189,10 +233,6 @@ in {
Group = "graphite";
PermissionsStartOnly = true;
};
restartTriggers = [
pkgs.pythonPackages.carbon
configDir
];
preStart = ''
mkdir -p ${cfg.dataDir}/whisper
chmod 0700 ${cfg.dataDir}/whisper
@ -211,10 +251,6 @@ in {
User = "graphite";
Group = "graphite";
};
restartTriggers = [
pkgs.pythonPackages.carbon
configDir
];
};
systemd.services.carbonRelay = {
@ -228,10 +264,6 @@ in {
User = "graphite";
Group = "graphite";
};
restartTriggers = [
pkgs.pythonPackages.carbon
configDir
];
};
systemd.services.graphiteWeb = {
@ -243,7 +275,7 @@ in {
environment = {
PYTHONPATH = "${pkgs.python27Packages.graphite_web}/lib/python2.7/site-packages";
DJANGO_SETTINGS_MODULE = "graphite.settings";
GRAPHITE_CONF_DIR = "/etc/graphite/";
GRAPHITE_CONF_DIR = configDir;
GRAPHITE_STORAGE_DIR = dataDir;
};
serviceConfig = {
@ -271,9 +303,40 @@ in {
chown -R graphite:graphite ${cfg.dataDir}
fi
'';
restartTriggers = [
pkgs.python27Packages.graphite_web
];
};
systemd.services.graphiteApi = {
enable = cfg.api.enable;
description = "Graphite Api Interface";
wantedBy = [ "multi-user.target" ];
after = [ "network-interfaces.target" ];
environment = {
PYTHONPATH =
"${pkgs.python27Packages.graphite_api}/lib/python2.7/site-packages:" +
concatMapStringsSep ":" (f: f + "/lib/python2.7/site-packages") cfg.api.finders;
GRAPHITE_API_CONFIG = graphiteApiConfig;
LD_LIBRARY_PATH = "${pkgs.cairo}/lib";
};
serviceConfig = {
ExecStart = ''
${pkgs.python27Packages.waitress}/bin/waitress-serve \
--host=${cfg.web.host} --port=${toString cfg.web.port} \
graphite_api.app:app
'';
User = "graphite";
Group = "graphite";
PermissionsStartOnly = true;
};
preStart = ''
if ! test -e ${dataDir}/db-created; then
mkdir -p ${dataDir}/cache/
chmod 0700 ${dataDir}/cache/
touch ${dataDir}/db-created
chown -R graphite:graphite ${cfg.dataDir}
fi
'';
};
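A minimal sketch of the new graphite-api service, reusing the finder example from the option declaration above:

    services.graphite.api = {
      enable = true;
      finders = [ pkgs.python27Packages.graphite_influxdb ];
    };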
environment.systemPackages = [

View file

@ -37,7 +37,7 @@ let
--set PATH "/run/current-system/sw/bin:/run/current-system/sw/sbin" \
--set MUNIN_LIBDIR "${pkgs.munin}/lib" \
--set MUNIN_PLUGSTATE "/var/run/munin"
# munin uses markers to tell munin-node-configure what a plugin can do
echo "#%# family=$family" >> $file
echo "#%# capabilities=$cap" >> $file
@ -57,7 +57,7 @@ let
rundir /var/run/munin
${cronCfg.extraGlobalConfig}
${cronCfg.hosts}
'';
@ -72,10 +72,10 @@ let
group root
host_name ${config.networking.hostName}
setsid 0
# plugins wrapped by makeWrapper begin with dots
ignore_file ^\.
allow ^127\.0\.0\.1$
${nodeCfg.extraConfig}
@ -97,7 +97,7 @@ in
See <link xlink:href='http://munin-monitoring.org/wiki/munin-node.conf' />.
'';
};
extraConfig = mkOption {
default = "";
description = ''
@ -118,7 +118,7 @@ in
Enable munin-cron. Takes care of all heavy lifting to collect data from
nodes and draws graphs to html. Runs munin-update, munin-limits,
munin-graphs and munin-html in that order.
HTML output is in <filename>/var/www/munin/</filename>, configure your
favourite webserver to serve static files.
'';
@ -138,7 +138,7 @@ in
};
'';
};
extraGlobalConfig = mkOption {
default = "";
description = ''
@ -160,7 +160,7 @@ in
<link xlink:href='http://munin-monitoring.org/wiki/munin.conf' />
'';
};
};
};

View file

@ -8,13 +8,20 @@ let
configFile = pkgs.writeText "statsd.conf" ''
{
host: "${cfg.host}",
address: "${cfg.host}",
port: "${toString cfg.port}",
mgmt_address: "${cfg.mgmt_address}",
mgmt_port: "${toString cfg.mgmt_port}",
backends: [${concatMapStrings (el: ''"./backends/${el}",'') cfg.backends}],
graphiteHost: "${cfg.graphiteHost}",
graphitePort: "${toString cfg.graphitePort}",
backends: [${concatMapStringsSep "," (el: if (nixType el) == "string" then ''"./backends/${el}"'' else ''"${head el.names}"'') cfg.backends}],
${optionalString (cfg.graphiteHost!=null) ''graphiteHost: "${cfg.graphiteHost}",''}
${optionalString (cfg.graphitePort!=null) ''graphitePort: "${toString cfg.graphitePort}",''}
console: {
prettyprint: false
},
log: {
backend: "syslog"
},
automaticConfigReload: false${optionalString (cfg.extraConfig != null) ","}
${cfg.extraConfig}
}
'';
@ -60,24 +67,26 @@ in
backends = mkOption {
description = "List of backends statsd will use for data persistance";
default = ["graphite"];
example = ["graphite" pkgs.nodePackages."statsd-influxdb-backend"];
type = types.listOf (types.either types.str types.package);
};
graphiteHost = mkOption {
description = "Hostname or IP of Graphite server";
default = config.services.graphite.web.host;
type = types.str;
default = null;
type = types.nullOr types.str;
};
graphitePort = mkOption {
description = "Port of Graphite server (i.e. carbon-cache).";
default = 2003;
type = types.uniq types.int;
default = null;
type = types.nullOr types.int;
};
extraConfig = mkOption {
default = "";
description = "Extra configuration options for statsd";
type = types.str;
default = "";
type = types.nullOr types.str;
};
};
@ -95,6 +104,9 @@ in
systemd.services.statsd = {
description = "Statsd Server";
wantedBy = [ "multi-user.target" ];
environment = {
NODE_PATH=concatMapStringsSep ":" (el: "${el}/lib/node_modules") (filter (el: (nixType el) != "string") cfg.backends);
};
serviceConfig = {
ExecStart = "${pkgs.nodePackages.statsd}/bin/statsd ${configFile}";
User = "statsd";

View file

@ -0,0 +1,160 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.diod;
diodBool = b: if b then "1" else "0";
diodConfig = pkgs.writeText "diod.conf" ''
allsquash = ${diodBool cfg.allsquash}
auth_required = ${diodBool cfg.authRequired}
exportall = ${diodBool cfg.exportall}
exportopts = "${concatStringsSep "," cfg.exportopts}"
exports = { ${concatStringsSep ", " (map (s: ''"${s}"'' ) cfg.exports)} }
listen = { ${concatStringsSep ", " (map (s: ''"${s}"'' ) cfg.listen)} }
logdest = "${cfg.logdest}"
nwthreads = ${toString cfg.nwthreads}
squashuser = "${cfg.squashuser}"
statfs_passthru = ${diodBool cfg.statfsPassthru}
userdb = ${diodBool cfg.userdb}
${cfg.extraConfig}
'';
in
{
options = {
services.diod = {
enable = mkOption {
type = types.bool;
default = false;
description = "Whether to enable the diod 9P file server.";
};
listen = mkOption {
type = types.listOf types.str;
default = [ "0.0.0.0:564" ];
description = ''
[ "IP:PORT" [,"IP:PORT",...] ]
List the interfaces and ports that diod should listen on.
'';
};
exports = mkOption {
type = types.listOf types.str;
default = [];
description = ''
List the file systems that clients will be allowed to mount. All paths should
be fully qualified. The exports table can include two types of element:
a string element (as above),
or an alternate table element form { path="/path", opts="ro" }.
In the alternate form, the (optional) opts attribute is a comma-separated list
of export options. The two table element forms can be mixed in the exports
table. Note that although diod will not traverse file system boundaries for a
given mount due to inode uniqueness constraints, subdirectories of a file
system can be separately exported.
'';
};
exportall = mkOption {
type = types.bool;
default = true;
description = ''
Export all file systems listed in /proc/mounts. If new file systems are mounted
after diod has started, they will become immediately mountable. If there is a
duplicate entry for a file system in the exports list, any options listed in
the exports entry will apply.
'';
};
exportopts = mkOption {
type = types.listOf types.str;
default = [];
description = ''
Establish a default set of export options. These are overridden, not appended
to, by opts attributes in an "exports" entry.
'';
};
nwthreads = mkOption {
type = types.int;
default = 16;
description = ''
Sets the (fixed) number of worker threads created to handle 9P
requests for a unique aname.
'';
};
authRequired = mkOption {
type = types.bool;
default = false;
description = ''
Whether clients are required to authenticate with a valid MUNGE credential. If disabled, clients may connect without authentication.
'';
};
userdb = mkOption {
type = types.bool;
default = false;
description = ''
This option disables password/group lookups. It allows any uid to attach and
assumes gid=uid, and supplementary groups contain only the primary gid.
'';
};
allsquash = mkOption {
type = types.bool;
default = true;
description = ''
Remap all users to "nobody". The attaching user need not be present in the
password file.
'';
};
squashuser = mkOption {
type = types.str;
default = "nobody";
description = ''
Change the squash user. The squash user must be present in the password file.
'';
};
logdest = mkOption {
type = types.str;
default = "syslog:daemon:err";
description = ''
Set the destination for logging.
The value has the form of "syslog:facility:level" or "filename".
'';
};
statfsPassthru = mkOption {
type = types.bool;
default = false;
description = ''
This option configures statfs to return the host file system's type
rather than V9FS_MAGIC.
'';
};
extraConfig = mkOption {
type = types.lines;
default = "";
description = "Extra configuration options for diod.conf.";
};
};
};
config = mkIf config.services.diod.enable {
environment.systemPackages = [ pkgs.diod ];
systemd.services.diod = {
description = "diod 9P file server";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
ExecStart = "${pkgs.diod}/sbin/diod -f -c ${diodConfig}";
Capabilities = "cap_net_bind_service+=ep";
};
};
};
}
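A minimal sketch of the new diod module; the exported path is a placeholder:

    services.diod = {
      enable = true;
      exports = [ "/srv/share" ];  # placeholder export path
    };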

View file

@ -0,0 +1,104 @@
{ config, pkgs, ... }:
with pkgs.lib;
let
cfg = config.services.yandex-disk;
dir = "/var/lib/yandex-disk";
u = if cfg.user != null then cfg.user else "yandexdisk";
in
{
###### interface
options = {
services.yandex-disk = {
enable = mkOption {
default = false;
description = "
Whether to enable the Yandex.Disk client. See https://disk.yandex.ru/
";
};
username = mkOption {
default = "";
type = types.string;
description = ''
Your yandex.com login name.
'';
};
password = mkOption {
default = "";
type = types.string;
description = ''
Your yandex.com password. Warning: it will be world-readable in /nix/store.
'';
};
user = mkOption {
default = null;
description = ''
The user the yandex-disk daemon should run as.
'';
};
directory = mkOption {
default = "/home/Yandex.Disk";
description = "The directory to use for Yandex.Disk storage";
};
};
};
###### implementation
config = mkIf cfg.enable {
users.extraUsers = mkIf (cfg.user == null) [ {
name = u;
uid = config.ids.uids.yandexdisk;
group = "nogroup";
home = dir;
} ];
systemd.services.yandex-disk = {
description = "Yandex-disk server";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
# FIXME: have to specify ${directory} here as well
unitConfig.RequiresMountsFor = dir;
script = ''
mkdir -p -m 700 ${dir}
chown ${u} ${dir}
if ! test -d "${cfg.directory}" ; then
mkdir -p -m 755 ${cfg.directory} ||
exit 1
fi
${pkgs.su}/bin/su -s ${pkgs.stdenv.shell} ${u} \
-c '${pkgs.yandex-disk}/bin/yandex-disk token -p ${cfg.password} ${cfg.username} ${dir}/token'
${pkgs.su}/bin/su -s ${pkgs.stdenv.shell} ${u} \
-c '${pkgs.yandex-disk}/bin/yandex-disk start --no-daemon -a ${dir}/token -d ${cfg.directory}'
'';
};
};
}
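A minimal sketch of the new yandex-disk module; the credentials and directory are placeholders and, as the option description warns, the password ends up world-readable in /nix/store:

    services.yandex-disk = {
      enable = true;
      username = "example-login";             # placeholder
      password = "example-pass";              # placeholder; stored world-readable in /nix/store
      directory = "/home/alice/Yandex.Disk";  # placeholder
    };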

View file

@ -0,0 +1,53 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.copy-com;
in
{
options = {
services.copy-com = {
enable = mkOption {
default = false;
description = "
Enable the copy.com client.
The first time copy.com is run it needs to be configured: run copy_console
manually before enabling this service.
";
};
user = mkOption {
description = "The user for which copy should run.";
};
debug = mkOption {
default = false;
description = "Output more.";
};
};
};
config = mkIf cfg.enable {
environment.systemPackages = [ pkgs.copy-com ];
systemd.services."copy-com-${cfg.user}" = {
description = "Copy.com Client";
after = [ "network.target" "local-fs.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
ExecStart = "${pkgs.copy-com}/bin/copy_console ${if cfg.debug then "-consoleOutput -debugToConsole=dirwatch,path-watch,csm_path,csm -debug -console" else ""}";
User = "${cfg.user}";
};
};
};
}
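A minimal sketch of the new copy-com module; the user is a placeholder and must have run copy_console once by hand, as the enable description notes:

    services.copy-com = {
      enable = true;
      user = "alice";  # placeholder user that already completed copy_console setup
    };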

View file

@ -138,6 +138,9 @@ in
{ description = "DHCP Client";
wantedBy = [ "network.target" ];
# Work-around to deal with problems where the kernel would remove &
# re-create Wifi interfaces early during boot.
after = [ "network-interfaces.target" ];
# Stopping dhcpcd during a reconfiguration is undesirable
# because it brings down the network interfaces configured by

View file

@ -11,7 +11,10 @@ let
conf-file=/etc/dnsmasq-conf.conf
resolv-file=/etc/dnsmasq-resolv.conf
''}
${cfg.extraConfig}
${flip concatMapStrings cfg.servers (server: ''
server=${server}
'')}
${cfg.extraConfig}
'';
in
@ -43,12 +46,10 @@ in
default = [];
example = [ "8.8.8.8" "8.8.4.4" ];
description = ''
The parameter to dnsmasq -S.
The DNS servers which dnsmasq should query.
'';
};
extraConfig = mkOption {
type = types.string;
default = "";
@ -67,8 +68,8 @@ in
config = mkIf config.services.dnsmasq.enable {
environment.systemPackages = [ dnsmasq ]
++ (if cfg.resolveLocalQueries then [ pkgs.openresolv ] else []);
networking.nameservers =
optional cfg.resolveLocalQueries "127.0.0.1";
services.dbus.packages = [ dnsmasq ];
@ -83,10 +84,14 @@ in
description = "dnsmasq daemon";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
path = [ dnsmasq ];
preStart = ''
touch /etc/dnsmasq-{conf,resolv}.conf
dnsmasq --test
'';
serviceConfig = {
Type = "dbus";
BusName = "uk.org.thekelleys.dnsmasq";
ExecStartPre = "${dnsmasq}/bin/dnsmasq --test";
ExecStart = "${dnsmasq}/bin/dnsmasq -k --enable-dbus --user=dnsmasq -C ${dnsmasqConf}";
ExecReload = "${dnsmasq}/bin/kill -HUP $MAINPID";
};

View file

@ -75,6 +75,31 @@ in
'';
};
networking.nat.forwardPorts = mkOption {
type = types.listOf types.optionSet;
default = [];
example = [ { sourcePort = 8080; destination = "10.0.0.1:80"; } ];
options = {
sourcePort = mkOption {
type = types.int;
example = 8080;
description = "Source port of the external interface";
};
destination = mkOption {
type = types.str;
example = "10.0.0.1:80";
description = "Forward tcp connection to destination ip:port";
};
};
description =
''
List of forwarded ports from the external interface to
internal destinations by using DNAT.
'';
};
};
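A hedged sketch of the new forwardPorts option together with the pre-existing NAT options it builds on; the interface and addresses are placeholders:

    networking.nat = {
      enable = true;                        # assumed pre-existing option
      externalInterface = "eth0";           # placeholder
      internalIPs = [ "192.168.1.0/24" ];   # placeholder
      forwardPorts = [ { sourcePort = 8080; destination = "10.0.0.1:80"; } ];
    };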
@ -118,6 +143,14 @@ in
-s '${range}' -o ${cfg.externalInterface} ${dest}
'') cfg.internalIPs}
# NAT from external ports to internal ports.
${concatMapStrings (fwd: ''
iptables -w -t nat -A PREROUTING \
-i ${cfg.externalInterface} -p tcp \
--dport ${builtins.toString fwd.sourcePort} \
-j DNAT --to-destination ${fwd.destination}
'') cfg.forwardPorts}
echo 1 > /proc/sys/net/ipv4/ip_forward
'';

View file

@ -10,16 +10,16 @@ let
pidFile = stateDir + "/var/nsd.pid";
zoneFiles = pkgs.stdenv.mkDerivation {
preferLocalBuild = true;
name = "nsd-env";
buildCommand = concatStringsSep "\n"
[ "mkdir -p $out"
(concatStrings (mapAttrsToList (zoneName: zoneOptions: ''
cat > "$out/${zoneName}" <<_EOF_
${zoneOptions.data}
_EOF_
'') zoneConfigs))
];
preferLocalBuild = true;
name = "nsd-env";
buildCommand = concatStringsSep "\n"
[ "mkdir -p $out"
(concatStrings (mapAttrsToList (zoneName: zoneOptions: ''
cat > "$out/${zoneName}" <<_EOF_
${zoneOptions.data}
_EOF_
'') zoneConfigs))
];
};
configFile = pkgs.writeText "nsd.conf" ''
@ -33,7 +33,6 @@ let
# the list of dynamically added zones.
zonelistfile: "${stateDir}/var/zone.list"
database: "${stateDir}/var/nsd.db"
logfile: "${stateDir}/var/nsd.log"
pidfile: "${pidFile}"
xfrdfile: "${stateDir}/var/xfrd.state"
xfrdir: "${stateDir}/tmp"
@ -105,21 +104,20 @@ let
zoneConfigFile = name: zone: ''
zone:
name: "${name}"
zonefile: "${stateDir}/zones/${name}"
${maybeString "outgoing-interface: " zone.outgoingInterface}
${forEach " rrl-whitelist: " zone.rrlWhitelist}
zone:
name: "${name}"
zonefile: "${stateDir}/zones/${name}"
${maybeString "outgoing-interface: " zone.outgoingInterface}
${forEach " rrl-whitelist: " zone.rrlWhitelist}
${forEach " allow-notify: " zone.allowNotify}
${forEach " request-xfr: " zone.requestXFR}
allow-axfr-fallback: ${yesOrNo zone.allowAXFRFallback}
${forEach " allow-notify: " zone.allowNotify}
${forEach " request-xfr: " zone.requestXFR}
allow-axfr-fallback: ${yesOrNo zone.allowAXFRFallback}
${forEach " notify: " zone.notify}
notify-retry: ${toString zone.notifyRetry}
${forEach " provide-xfr: " zone.provideXFR}
'';
${forEach " notify: " zone.notify}
notify-retry: ${toString zone.notifyRetry}
${forEach " provide-xfr: " zone.provideXFR}
'';
zoneConfigs = zoneConfigs' {} "" { children = cfg.zones; };
@ -130,8 +128,8 @@ let
# fork -> pattern
else zipAttrsWith (name: head) (
mapAttrsToList (name: child: zoneConfigs' (parent // zone // { children = {}; }) name child)
zone.children
mapAttrsToList (name: child: zoneConfigs' (parent // zone // { children = {}; }) name child)
zone.children
);
# fighting infinite recursion
@ -145,138 +143,135 @@ let
childConfig = x: v: { options.children = { type = types.attrsOf x; visible = v; }; };
zoneOptionsRaw = types.submodule (
{ options, ... }:
{ options = {
children = mkOption {
default = {};
description = ''
Children zones inherit all options of their parents. Attributes
defined in a child will overwrite the ones of its parent. Only
leaf zones will be actually served. This way it's possible to
define zones which share most attributes without
duplicating everything. This mechanism replaces nsd's patterns
in a safe and functional way.
'';
};
allowNotify = mkOption {
type = types.listOf types.str;
default = [ ];
example = [ "192.0.2.0/24 NOKEY" "10.0.0.1-10.0.0.5 my_tsig_key_name"
"10.0.3.4&255.255.0.0 BLOCKED"
];
description = ''
Listed primary servers are allowed to notify this secondary server.
<screen><![CDATA[
Format: <ip> <key-name | NOKEY | BLOCKED>
<ip> either a plain IPv4/IPv6 address or range. Valid patterns for ranges:
* 10.0.0.0/24 # via subnet size
* 10.0.0.0&255.255.255.0 # via subnet mask
* 10.0.0.1-10.0.0.254 # via range
An optional port number can be added with a '@':
* 2001:1234::1@1234
<key-name | NOKEY | BLOCKED>
* <key-name> will use the specified TSIG key
* NOKEY no TSIG signature is required
* BLOCKED notifies from non-listed or blocked IPs will be ignored
* ]]></screen>
'';
};
requestXFR = mkOption {
type = types.listOf types.str;
default = [];
example = [];
description = ''
Format: <code>[AXFR|UDP] &lt;ip-address&gt; &lt;key-name | NOKEY&gt;</code>
'';
};
allowAXFRFallback = mkOption {
type = types.bool;
default = true;
description = ''
If NSD as secondary server should be allowed to AXFR if the primary
server does not allow IXFR.
'';
};
notify = mkOption {
type = types.listOf types.str;
default = [];
example = [ "10.0.0.1@3721 my_key" "::5 NOKEY" ];
description = ''
This primary server will notify all given secondary servers about
zone changes.
<screen><![CDATA[
Format: <ip> <key-name | NOKEY>
<ip> a plain IPv4/IPv6 address with an optional port number (ip@port)
<key-name | NOKEY>
* <key-name> sign notifies with the specified key
* NOKEY don't sign notifies
]]></screen>
'';
};
notifyRetry = mkOption {
type = types.int;
default = 5;
description = ''
Specifies the number of retries for failed notifies. Set this along with notify.
'';
};
provideXFR = mkOption {
type = types.listOf types.str;
default = [];
example = [ "192.0.2.0/24 NOKEY" "192.0.2.0/24 my_tsig_key_name" ];
description = ''
Allow these IPs and TSIG to transfer zones, addr TSIG|NOKEY|BLOCKED
address range 192.0.2.0/24, 1.2.3.4&amp;255.255.0.0, 3.0.2.20-3.0.2.40
'';
};
outgoingInterface = mkOption {
type = types.nullOr types.str;
default = null;
example = "2000::1@1234";
description = ''
This address will be used for zone-transfer requests if configured
as a secondary server or notifications in case of a primary server.
Supply either a plain IPv4 or IPv6 address with an optional port
number (ip@port).
'';
};
rrlWhitelist = mkOption {
type = types.listOf types.str;
default = [];
description = ''
Whitelists the given rrl-types.
The RRL classification types are: nxdomain, error, referral, any,
rrsig, wildcard, nodata, dnskey, positive, all
'';
};
data = mkOption {
type = types.str;
default = "";
example = "";
description = ''
The actual zone data. This is the content of your zone file.
Use imports or pkgs.lib.readFile if you don't want this data in your config file.
'';
};
zoneOptionsRaw = types.submodule {
options = {
children = mkOption {
default = {};
description = ''
Children zones inherit all options of their parents. Attributes
defined in a child will overwrite the ones of its parent. Only
leaf zones will be actually served. This way it's possible to
define zones which share most attributes without
duplicating everything. This mechanism replaces nsd's patterns
in a safe and functional way.
'';
};
}
);
allowNotify = mkOption {
type = types.listOf types.str;
default = [ ];
example = [ "192.0.2.0/24 NOKEY" "10.0.0.1-10.0.0.5 my_tsig_key_name"
"10.0.3.4&255.255.0.0 BLOCKED"
];
description = ''
Listed primary servers are allowed to notify this secondary server.
<screen><![CDATA[
Format: <ip> <key-name | NOKEY | BLOCKED>
<ip> either a plain IPv4/IPv6 address or range. Valid patterns for ranges:
* 10.0.0.0/24 # via subnet size
* 10.0.0.0&255.255.255.0 # via subnet mask
* 10.0.0.1-10.0.0.254 # via range
An optional port number can be added with a '@':
* 2001:1234::1@1234
<key-name | NOKEY | BLOCKED>
* <key-name> will use the specified TSIG key
* NOKEY no TSIG signature is required
* BLOCKED notifies from non-listed or blocked IPs will be ignored
* ]]></screen>
'';
};
requestXFR = mkOption {
type = types.listOf types.str;
default = [];
example = [];
description = ''
Format: <code>[AXFR|UDP] &lt;ip-address&gt; &lt;key-name | NOKEY&gt;</code>
'';
};
allowAXFRFallback = mkOption {
type = types.bool;
default = true;
description = ''
If NSD as secondary server should be allowed to AXFR if the primary
server does not allow IXFR.
'';
};
notify = mkOption {
type = types.listOf types.str;
default = [];
example = [ "10.0.0.1@3721 my_key" "::5 NOKEY" ];
description = ''
This primary server will notify all given secondary servers about
zone changes.
<screen><![CDATA[
Format: <ip> <key-name | NOKEY>
<ip> a plain IPv4/IPv6 address with an optional port number (ip@port)
<key-name | NOKEY>
* <key-name> sign notifies with the specified key
* NOKEY don't sign notifies
]]></screen>
'';
};
notifyRetry = mkOption {
type = types.int;
default = 5;
description = ''
Specifies the number of retries for failed notifies. Set this along with notify.
'';
};
provideXFR = mkOption {
type = types.listOf types.str;
default = [];
example = [ "192.0.2.0/24 NOKEY" "192.0.2.0/24 my_tsig_key_name" ];
description = ''
Allow these IPs and TSIG to transfer zones, addr TSIG|NOKEY|BLOCKED
address range 192.0.2.0/24, 1.2.3.4&amp;255.255.0.0, 3.0.2.20-3.0.2.40
'';
};
outgoingInterface = mkOption {
type = types.nullOr types.str;
default = null;
example = "2000::1@1234";
description = ''
This address will be used for zone-transfer requests if configured
as a secondary server or notifications in case of a primary server.
Supply either a plain IPv4 or IPv6 address with an optional port
number (ip@port).
'';
};
rrlWhitelist = mkOption {
type = types.listOf types.str;
default = [];
description = ''
Whitelists the given rrl-types.
The RRL classification types are: nxdomain, error, referral, any,
rrsig, wildcard, nodata, dnskey, positive, all
'';
};
data = mkOption {
type = types.str;
default = "";
example = "";
description = ''
The actual zone data. This is the content of your zone file.
Use imports or pkgs.lib.readFile if you don't want this data in your config file.
'';
};
};
};
in
{
@ -585,37 +580,33 @@ in
keys = mkOption {
type = types.attrsOf (types.submodule (
{ options, ... }:
{ options = {
algorithm = mkOption {
type = types.str;
default = "hmac-sha256";
description = ''
Authentication algorithm for this key.
'';
};
keyFile = mkOption {
type = types.path;
description = ''
Path to the file which contains the actual base64 encoded
key. The key will be copied into "${stateDir}/private" before
NSD starts. The copied file is only accessible by the NSD
user.
'';
};
type = types.attrsOf (types.submodule {
options = {
algorithm = mkOption {
type = types.str;
default = "hmac-sha256";
description = ''
Authentication algorithm for this key.
'';
};
}));
default = {
};
keyFile = mkOption {
type = types.path;
description = ''
Path to the file which contains the actual base64 encoded
key. The key will be copied into "${stateDir}/private" before
NSD starts. The copied file is only accessible by the NSD
user.
'';
};
};
});
default = {};
example = {
"tsig.example.org" = {
algorithm = "hmac-md5";
secret = "aaaaaabbbbbbccccccdddddd";
};
"tsig.example.org" = {
algorithm = "hmac-md5";
secret = "aaaaaabbbbbbccccccdddddd";
};
};
description = ''
Define your TSIG keys here.
@ -626,32 +617,32 @@ in
type = types.attrsOf zoneOptions;
default = {};
example = {
"serverGroup1" = {
provideXFR = [ "10.1.2.3 NOKEY" ];
children = {
"example.com." = {
data = ''
$ORIGIN example.com.
$TTL 86400
@ IN SOA a.ns.example.com. admin.example.com. (
...
'';
};
"example.org." = {
data = ''
$ORIGIN example.org.
$TTL 86400
@ IN SOA a.ns.example.com. admin.example.com. (
...
'';
};
};
"serverGroup1" = {
provideXFR = [ "10.1.2.3 NOKEY" ];
children = {
"example.com." = {
data = ''
$ORIGIN example.com.
$TTL 86400
@ IN SOA a.ns.example.com. admin.example.com. (
...
'';
};
"example.org." = {
data = ''
$ORIGIN example.org.
$TTL 86400
@ IN SOA a.ns.example.com. admin.example.com. (
...
'';
};
};
};
"example.net." = {
provideXFR = [ "10.3.2.1 NOKEY" ];
data = ''...'';
};
"example.net." = {
provideXFR = [ "10.3.2.1 NOKEY" ];
data = ''...'';
};
};
description = ''
Define your zones here. Zones can cascade other zones and therefore
@ -670,23 +661,23 @@ in
# this is not working :(
nixpkgs.config.nsd = {
ipv6 = cfg.ipv6;
ratelimit = cfg.ratelimit.enable;
rootServer = cfg.rootServer;
ipv6 = cfg.ipv6;
ratelimit = cfg.ratelimit.enable;
rootServer = cfg.rootServer;
};
users.extraGroups = singleton {
name = username;
gid = config.ids.gids.nsd;
name = username;
gid = config.ids.gids.nsd;
};
users.extraUsers = singleton {
name = username;
description = "NSD service user";
home = stateDir;
createHome = true;
uid = config.ids.uids.nsd;
group = username;
name = username;
description = "NSD service user";
home = stateDir;
createHome = true;
uid = config.ids.uids.nsd;
group = username;
};
systemd.services.nsd = {
@ -695,10 +686,9 @@ in
after = [ "network.target" ];
serviceConfig = {
Type = "forking";
PIDFile = pidFile;
Restart = "always";
ExecStart = "${pkgs.nsd}/sbin/nsd -c ${configFile}";
ExecStart = "${pkgs.nsd}/sbin/nsd -d -c ${configFile}";
};
preStart = ''

View file

@ -144,6 +144,36 @@ in
'';
};
listenAddresses = mkOption {
type = types.listOf types.optionSet;
default = [];
example = [ { addr = "192.168.3.1"; port = 22; } { addr = "0.0.0.0"; port = 64022; } ];
description = ''
List of addresses and ports to listen on (ListenAddress directive
in config). If a port is not specified for an address, sshd will listen
on all ports specified by the <literal>ports</literal> option.
NOTE: this will override default listening on all local addresses and port 22.
NOTE: setting this option won't automatically open the given ports
in the firewall configuration.
'';
options = {
addr = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Host, IPv4 or IPv6 address to listen to.
'';
};
port = mkOption {
type = types.nullOr types.int;
default = null;
description = ''
Port to listen to.
'';
};
};
};
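A sketch of the new listenAddresses option, reusing the example from its declaration and assuming the usual services.openssh.enable:

    services.openssh = {
      enable = true;
      listenAddresses = [
        { addr = "192.168.3.1"; port = 22; }
        { addr = "0.0.0.0"; port = 64022; }
      ];
    };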
passwordAuthentication = mkOption {
type = types.bool;
default = true;
@ -261,7 +291,7 @@ in
};
environment.etc = authKeysFiles ++ [
{ source = "${pkgs.openssh}/etc/ssh/moduli";
{ source = "${cfgc.package}/etc/ssh/moduli";
target = "ssh/moduli";
}
{ source = knownHostsFile;
@ -278,7 +308,7 @@ in
stopIfChanged = false;
path = [ pkgs.openssh pkgs.gawk ];
path = [ cfgc.package pkgs.gawk ];
environment.LD_LIBRARY_PATH = nssModulesPath;
@ -295,7 +325,7 @@ in
serviceConfig =
{ ExecStart =
"${pkgs.openssh}/sbin/sshd " + (optionalString cfg.startWhenNeeded "-i ") +
"${cfgc.package}/sbin/sshd " + (optionalString cfg.startWhenNeeded "-i ") +
"-f ${pkgs.writeText "sshd_config" cfg.extraConfig}";
KillMode = "process";
} // (if cfg.startWhenNeeded then {
@ -349,6 +379,10 @@ in
Port ${toString port}
'') cfg.ports}
${concatMapStrings ({ port, addr }: ''
ListenAddress ${addr}${if port != null then ":" + toString port else ""}
'') cfg.listenAddresses}
${optionalString cfgc.setXAuthLocation ''
XAuthLocation ${pkgs.xorg.xauth}/bin/xauth
''}
@ -360,7 +394,7 @@ in
''}
${optionalString cfg.allowSFTP ''
Subsystem sftp ${pkgs.openssh}/libexec/sftp-server
Subsystem sftp ${cfgc.package}/libexec/sftp-server
''}
PermitRootLogin ${cfg.permitRootLogin}
@ -383,6 +417,10 @@ in
assertion = (data.publicKey == null && data.publicKeyFile != null) ||
(data.publicKey != null && data.publicKeyFile == null);
message = "knownHost ${name} must contain either a publicKey or publicKeyFile";
})
++ flip map cfg.listenAddresses ({ addr, port }: {
assertion = addr != null;
message = "addr must be specified in each listenAddresses entry";
});
};

View file

@ -6,8 +6,6 @@ let
cfg = config.services.unbound;
username = "unbound";
stateDir = "/var/lib/unbound";
access = concatMapStrings (x: " access-control: ${x} allow\n") cfg.allowedAccess;
@ -21,21 +19,13 @@ let
confFile = pkgs.writeText "unbound.conf" ''
server:
directory: "${stateDir}"
username: ${username}
# make sure unbound can access entropy from inside the chroot.
# e.g. on linux the use these commands (on BSD, devfs(8) is used):
# mount --bind -n /dev/random /etc/unbound/dev/random
# and mount --bind -n /dev/log /etc/unbound/dev/log
username: unbound
chroot: "${stateDir}"
# logfile: "${stateDir}/unbound.log" #uncomment to use logfile.
pidfile: "${stateDir}/unbound.pid"
verbosity: 1 # uncomment and increase to get more logging.
pidfile: ""
${interfaces}
${access}
${forward}
${cfg.extraConfig}
${forward}
'';
in
@ -82,7 +72,7 @@ in
environment.systemPackages = [ pkgs.unbound ];
users.extraUsers = singleton {
name = username;
name = "unbound";
uid = config.ids.uids.unbound;
description = "unbound daemon user";
home = stateDir;
@ -96,8 +86,18 @@ in
wants = [" nss-lookup.target" ];
wantedBy = [ "multi-user.target" ];
path = [ pkgs.unbound ];
serviceConfig.ExecStart = "${pkgs.unbound}/sbin/unbound -d -c ${confFile}";
preStart = ''
mkdir -m 0755 -p ${stateDir}/dev/
cp ${confFile} ${stateDir}/unbound.conf
chown unbound ${stateDir}
touch ${stateDir}/dev/random
${pkgs.utillinux}/bin/mount --bind -n /dev/random ${stateDir}/dev/random
'';
serviceConfig = {
ExecStart = "${pkgs.unbound}/sbin/unbound -d -c ${stateDir}/unbound.conf";
ExecStopPost="${pkgs.utillinux}/bin/umount ${stateDir}/dev/random";
};
};
};

View file

@ -305,7 +305,6 @@ in
uid = config.ids.uids.znc;
home = cfg.dataDir;
createHome = true;
createUser = true;
};
users.extraGroups = optional (cfg.user == defaultUser)

View file

@ -35,7 +35,7 @@ let
bindir = pkgs.buildEnv {
name = "cups-progs";
paths = cfg.drivers;
pathsToLink = [ "/lib/cups" "/share/cups" "/bin" ];
pathsToLink = [ "/lib/cups" "/share/cups" "/bin" "/etc/cups" ];
postBuild = cfg.bindirCmds;
};
@ -89,6 +89,20 @@ in
'';
};
clientConf = mkOption {
type = types.lines;
default = "";
example =
''
ServerName server.example.com
Encryption Never
'';
description = ''
The contents of the client configuration.
(<filename>client.conf</filename>)
'';
};
drivers = mkOption {
type = types.listOf types.path;
example = literalExample "[ pkgs.splix ]";
@ -124,6 +138,14 @@ in
environment.systemPackages = [ cups ];
environment.variables.CUPS_SERVERROOT = "/etc/cups";
environment.etc = [
{ source = pkgs.writeText "client.conf" cfg.clientConf;
target = "cups/client.conf";
}
];
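A sketch of the new clientConf option, reusing the example from its declaration (assuming this is the services.printing module):

    services.printing = {
      enable = true;
      clientConf = ''
        ServerName server.example.com
        Encryption Never
      '';
    };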
services.dbus.packages = [ cups ];
# Cups uses libusb to talk to printers, and does not use the

View file

@ -135,6 +135,11 @@ in {
rm ${cfg.dataDir}/plugins || true
ln -s ${esPlugins}/plugins ${cfg.dataDir}/plugins
'';
postStart = mkBefore ''
until ${pkgs.curl}/bin/curl -s -o /dev/null ${cfg.host}:${toString cfg.port}; do
sleep 1
done
'';
};
environment.systemPackages = [ pkgs.elasticsearch ];

View file

@ -40,7 +40,7 @@ in
socket = /run/fail2ban/fail2ban.sock
pidfile = /run/fail2ban/fail2ban.pid
'';
type = types.string;
type = types.lines;
description =
''
The contents of Fail2ban's main configuration file. It's
@ -64,7 +64,7 @@ in
maxretry = 5
'';
};
type = types.attrsOf types.string;
type = types.attrsOf types.lines;
description =
''
The configuration of each Fail2ban jail. A jail

View file

@ -1,22 +0,0 @@
diff --git a/includes/specials/SpecialActiveusers.php b/includes/specials/SpecialActiveusers.php
index f739d3b..fdd8db3 100644
--- a/includes/specials/SpecialActiveusers.php
+++ b/includes/specials/SpecialActiveusers.php
@@ -112,7 +112,7 @@ class ActiveUsersPager extends UsersPager {
return array(
'tables' => array( 'querycachetwo', 'user', 'recentchanges' ),
'fields' => array( 'user_name', 'user_id', 'recentedits' => 'COUNT(*)', 'qcc_title' ),
- 'options' => array( 'GROUP BY' => array( 'qcc_title' ) ),
+ 'options' => array( 'GROUP BY' => array( 'qcc_title', 'user_name', 'user_id' ) ),
'conds' => $conds
);
}
@@ -349,7 +349,7 @@ class SpecialActiveUsers extends SpecialPage {
__METHOD__,
array(
'GROUP BY' => array( 'rc_user_text' ),
- 'ORDER BY' => 'NULL' // avoid filesort
+ 'ORDER BY' => 'lastedittime DESC'
)
);
$names = array();

View file

@ -72,15 +72,13 @@ let
# Unpack Mediawiki and put the config file in its root directory.
mediawikiRoot = pkgs.stdenv.mkDerivation rec {
name= "mediawiki-1.23.1";
name= "mediawiki-1.23.3";
src = pkgs.fetchurl {
url = "http://download.wikimedia.org/mediawiki/1.23/${name}.tar.gz";
sha256 = "07z5j8d988cdg4ml4n0vs9fwmj0p594ibbqdid16faxwqm52dkhl";
sha256 = "0l6798jwjwk2khfnm84mgc65ij53a8pnv30wdnn15ys4ivia4bpf";
};
patches = [ ./mediawiki-postgresql-fixes.patch ];
skins = config.skins;
buildPhase =

View file

@ -1,35 +1,30 @@
{ config, lib, pkgs, ... }:
with lib;
let
phabricatorRoot = pkgs.stdenv.mkDerivation rec {
version = "2014-05-12";
name = "phabricator-${version}";
srcLibphutil = pkgs.fetchgit {
url = git://github.com/facebook/libphutil.git;
rev = "2f3b5a1cf6ea464a0250d4b1c653a795a90d2716";
sha256 = "9598cec400984dc149162f1e648814a54ea0cd34fcd529973dc83f5486fdd9fd";
};
srcArcanist = pkgs.fetchgit {
url = git://github.com/facebook/arcanist.git;
rev = "54c377448db8dbc40f0ca86d43c837d30e493485";
sha256 = "086db3c0d1154fbad23e7c6def31fd913384ee20247b329515838b669c3028e0";
};
srcPhabricator = pkgs.fetchgit {
url = git://github.com/facebook/phabricator.git;
rev = "1644ef185ecf1e9fca3eb6b16351ef46b19d110f";
sha256 = "e1135e4ba76d53f48aad4161563035414ed7e878f39a8a34a875a01b41b2a084";
};
buildCommand = ''
mkdir -p $out
cp -R ${srcLibphutil} $out/libphutil
cp -R ${srcArcanist} $out/arcanist
cp -R ${srcPhabricator} $out/phabricator
'';
};
phabricatorRoot = pkgs.phabricator;
in {
enablePHP = true;
extraApacheModules = [ "mod_rewrite" ];
DocumentRoot = "${phabricatorRoot}/phabricator/webroot";
options = {
git = mkOption {
default = true;
description = "Enable git repositories.";
};
mercurial = mkOption {
default = true;
description = "Enable mercurial repositories.";
};
subversion = mkOption {
default = true;
description = "Enable subversion repositories.";
};
};
extraConfig = ''
DocumentRoot ${phabricatorRoot}/phabricator/webroot
@ -38,4 +33,18 @@ in {
RewriteRule ^/favicon.ico - [L,QSA]
RewriteRule ^(.*)$ /index.php?__path__=$1 [B,L,QSA]
'';
extraServerPath = [
"${pkgs.which}"
"${pkgs.diffutils}"
] ++
(if config.mercurial then ["${pkgs.mercurial}"] else []) ++
(if config.subversion then ["${pkgs.subversion}"] else []) ++
(if config.git then ["${pkgs.git}"] else []);
startupScript = pkgs.writeScript "activatePhabricator" ''
mkdir -p /var/repo
chown wwwrun /var/repo
'';
}

View file

@ -5,7 +5,7 @@ with lib;
let
cfg = config.services.tomcat;
tomcat = pkgs.tomcat6;
tomcat = cfg.package;
in
{
@ -21,6 +21,15 @@ in
description = "Whether to enable Apache Tomcat";
};
package = mkOption {
type = types.package;
default = pkgs.tomcat7;
example = lib.literalExample "pkgs.tomcat8";
description = ''
Which tomcat package to use.
'';
};
baseDir = mkOption {
default = "/var/tomcat";
description = "Location where Tomcat stores configuration files, webapplications and logfiles";

View file

@ -34,10 +34,6 @@ in
# Set GTK_DATA_PREFIX so that GTK+ can find the Xfce themes.
export GTK_DATA_PREFIX=${config.system.path}
# Necessary to get xfce4-mixer to find GST's ALSA plugin.
# Ugly.
export GST_PLUGIN_PATH=${config.system.path}/lib
exec ${pkgs.stdenv.shell} ${pkgs.xfce.xinitrc}
'';
};

View file

@ -6,7 +6,8 @@ let
cfg = config.boot.loader.grub;
realGrub = if cfg.version == 1 then pkgs.grub else pkgs.grub2;
realGrub = if cfg.version == 1 then pkgs.grub
else pkgs.grub2.override { zfsSupport = cfg.zfsSupport; };
grub =
# Don't include GRUB if we're only generating a GRUB menu (e.g.,
@ -25,11 +26,12 @@ let
inherit (cfg)
version extraConfig extraPerEntryConfig extraEntries
extraEntriesBeforeNixOS extraPrepareConfig configurationLimit copyKernels timeout
default devices explicitBootRoot;
default devices fsIdentifier;
path = (makeSearchPath "bin" [
pkgs.coreutils pkgs.gnused pkgs.gnugrep pkgs.findutils pkgs.diffutils
pkgs.coreutils pkgs.gnused pkgs.gnugrep pkgs.findutils pkgs.diffutils pkgs.btrfsProgs
pkgs.utillinux
]) + ":" + (makeSearchPath "sbin" [
pkgs.mdadm
pkgs.mdadm pkgs.utillinux
]);
});
@ -209,12 +211,26 @@ in
'';
};
explicitBootRoot = mkOption {
default = "";
type = types.str;
fsIdentifier = mkOption {
default = "uuid";
type = types.addCheck types.str
(type: type == "uuid" || type == "label" || type == "provided");
description = ''
The relative path of /boot within the parent volume. Leave empty
if /boot is not a btrfs subvolume.
Determines how grub will identify devices when generating the
configuration file. A value of uuid / label signifies that grub
will always resolve the uuid or label of the device before using
it in the configuration. A value of provided means that grub will
use the device name as shown in <command>df</command> or
<command>mount</command>. Note, zfs zpools / datasets are ignored
and will always be mounted using their labels.
'';
};
zfsSupport = mkOption {
default = false;
type = types.bool;
description = ''
Whether grub should be built against libzfs.
'';
};
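A hedged sketch of the two new GRUB options; the device is a placeholder, and zfsSupport requires GRUB 2 as the assertion added below enforces:

    boot.loader.grub = {
      enable = true;            # assumed pre-existing option
      version = 2;
      device = "/dev/sda";      # placeholder boot device
      fsIdentifier = "label";
      zfsSupport = true;
    };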
@ -244,7 +260,7 @@ in
if cfg.devices == [] then
throw "You must set the option boot.loader.grub.device to make the system bootable."
else
"PERL5LIB=${makePerlPath [ pkgs.perlPackages.XMLLibXML pkgs.perlPackages.XMLSAX ]} " +
"PERL5LIB=${makePerlPath (with pkgs.perlPackages; [ FileSlurp XMLLibXML XMLSAX ])} " +
"${pkgs.perl}/bin/perl ${./install-grub.pl} ${grubConfig}";
system.build.grub = grub;
@ -260,6 +276,13 @@ in
${pkgs.coreutils}/bin/cp -pf "${v}" "/boot/${n}"
'') config.boot.loader.grub.extraFiles);
assertions = [{ assertion = !cfg.zfsSupport || cfg.version == 2;
message = "Only grub version 2 provides zfs support";}]
++ flip map cfg.devices (dev: {
assertion = dev == "nodev" || hasPrefix "/" dev;
message = "Grub devices must be absolute paths, not ${dev}";
});
})
];

View file

@ -1,10 +1,12 @@
use strict;
use warnings;
use Class::Struct;
use XML::LibXML;
use File::Basename;
use File::Path;
use File::stat;
use File::Copy;
use File::Slurp;
use POSIX;
use Cwd;
@ -27,6 +29,14 @@ sub writeFile {
close FILE or die;
}
sub runCommand {
my ($cmd) = @_;
open FILE, "$cmd 2>/dev/null |" or die "Failed to execute: $cmd\n";
my @ret = <FILE>;
close FILE;
return ($?, @ret);
}
my $grub = get("grub");
my $grubVersion = int(get("version"));
my $extraConfig = get("extraConfig");
@ -39,7 +49,7 @@ my $configurationLimit = int(get("configurationLimit"));
my $copyKernels = get("copyKernels") eq "true";
my $timeout = int(get("timeout"));
my $defaultEntry = int(get("default"));
my $explicitBootRoot = get("explicitBootRoot");
my $fsIdentifier = get("fsIdentifier");
$ENV{'PATH'} = get("path");
die "unsupported GRUB version\n" if $grubVersion != 1 && $grubVersion != 2;
@ -48,23 +58,148 @@ print STDERR "updating GRUB $grubVersion menu...\n";
mkpath("/boot/grub", 0, 0700);
# Discover whether /boot is on the same filesystem as / and
# /nix/store. If not, then all kernels and initrds must be copied to
# /boot, and all paths in the GRUB config file must be relative to the
# root of the /boot filesystem. `$bootRoot' is the path to be
# prepended to paths under /boot.
my $bootRoot = "/boot";
if (stat("/")->dev != stat("/boot")->dev) {
$bootRoot = "";
$copyKernels = 1;
} elsif (stat("/boot")->dev != stat("/nix/store")->dev) {
# /boot.
if (stat("/boot")->dev != stat("/nix/store")->dev) {
$copyKernels = 1;
}
if ($explicitBootRoot ne "") {
$bootRoot = $explicitBootRoot;
# Discover information about the location of /boot
struct(Fs => {
device => '$',
type => '$',
mount => '$',
});
sub PathInMount {
my ($path, $mount) = @_;
my @splitMount = split /\//, $mount;
my @splitPath = split /\//, $path;
if ($#splitPath < $#splitMount) {
return 0;
}
for (my $i = 0; $i <= $#splitMount; $i++) {
if ($splitMount[$i] ne $splitPath[$i]) {
return 0;
}
}
return 1;
}
sub GetFs {
my ($dir) = @_;
my $bestFs = Fs->new(device => "", type => "", mount => "");
foreach my $fs (read_file("/proc/self/mountinfo")) {
chomp $fs;
my @fields = split / /, $fs;
my $mountPoint = $fields[4];
next unless -d $mountPoint;
my @mountOptions = split /,/, $fields[5];
# Skip the optional fields.
my $n = 6; $n++ while $fields[$n] ne "-"; $n++;
my $fsType = $fields[$n];
my $device = $fields[$n + 1];
my @superOptions = split /,/, $fields[$n + 2];
# Skip the read-only bind-mount on /nix/store.
next if $mountPoint eq "/nix/store" && (grep { $_ eq "rw" } @superOptions) && (grep { $_ eq "ro" } @mountOptions);
# Ensure this matches the intended directory
next unless PathInMount($dir, $mountPoint);
# Is it better than our current match?
if (length($mountPoint) > length($bestFs->mount)) {
$bestFs = Fs->new(device => $device, type => $fsType, mount => $mountPoint);
}
}
return $bestFs;
}
struct (Grub => {
path => '$',
search => '$',
});
my $driveid = 1;
sub GrubFs {
my ($dir) = @_;
my $fs = GetFs($dir);
my $path = "/" . substr($dir, length($fs->mount));
my $search = "";
if ($grubVersion > 1) {
# ZFS is completely separate logic as zpools are always identified by a label
# or custom UUID
if ($fs->type eq 'zfs') {
my $sid = index($fs->device, '/');
if ($sid < 0) {
$search = '--label ' . $fs->device;
$path = '/@' . $path;
} else {
$search = '--label ' . substr($fs->device, 0, $sid);
$path = '/' . substr($fs->device, $sid) . '/@' . $path;
}
} else {
my %types = ('uuid' => '--fs-uuid', 'label' => '--label');
if ($fsIdentifier eq 'provided') {
# If the provided dev is identifying the partition using a label or uuid,
# we should get the label / uuid and do a proper search
my @matches = $fs->device =~ m/\/dev\/disk\/by-(label|uuid)\/(.*)/;
if ($#matches > 1) {
die "Too many matched devices"
} elsif ($#matches == 1) {
$search = "$types{$matches[0]} $matches[1]"
}
} else {
# Determine the identifying type
$search = $types{$fsIdentifier} . ' ';
# Based on the type pull in the identifier from the system
my ($status, @devInfo) = runCommand("blkid -o export @{[$fs->device]}");
if ($status != 0) {
die "Failed to get blkid info for @{[$fs->mount]} on @{[$fs->device]}";
}
my @matches = join("", @devInfo) =~ m/@{[uc $fsIdentifier]}=([^\n]*)/;
if ($#matches != 0) {
die "Couldn't find a $types{$fsIdentifier} for @{[$fs->device]}\n"
}
$search .= $matches[0];
}
# BTRFS is a special case in that we need to fix the referrenced path based on subvolumes
if ($fs->type eq 'btrfs') {
my ($status, @id_info) = runCommand("btrfs subvol show @{[$fs->mount]}");
if ($status != 0) {
die "Failed to retrieve subvolume info for @{[$fs->mount]}\n";
}
my @ids = join("", @id_info) =~ m/Object ID:[ \t\n]*([^ \t\n]*)/;
if ($#ids > 0) {
die "Btrfs subvol name for @{[$fs->device]} listed multiple times in mount\n"
} elsif ($#ids == 0) {
my ($status, @path_info) = runCommand("btrfs subvol list @{[$fs->mount]}");
if ($status != 0) {
die "Failed to find @{[$fs->mount]} subvolume id from btrfs\n";
}
my @paths = join("", @path_info) =~ m/ID $ids[0] [^\n]* path ([^\n]*)/;
if ($#paths > 0) {
die "Btrfs returned multiple paths for a single subvolume id, mountpoint @{[$fs->mount]}\n";
} elsif ($#paths != 0) {
die "Btrfs did not return a path for the subvolume at @{[$fs->mount]}\n";
}
$path = "/$paths[0]$path";
}
}
}
if (not $search eq "") {
$search = "search --set=drive$driveid " . $search;
$path = "(\$drive$driveid)$path";
$driveid += 1;
}
}
return Grub->new(path => $path, search => $search);
}
my $grubBoot = GrubFs("/boot");
my $grubStore = GrubFs("/nix/store");
# Generate the header.
my $conf .= "# Automatically generated. DO NOT EDIT THIS FILE!\n";
@ -76,12 +211,17 @@ if ($grubVersion == 1) {
";
if ($splashImage) {
copy $splashImage, "/boot/background.xpm.gz" or die "cannot copy $splashImage to /boot\n";
$conf .= "splashimage $bootRoot/background.xpm.gz\n";
$conf .= "splashimage " . $grubBoot->path . "/background.xpm.gz\n";
}
}
else {
if ($copyKernels == 0) {
$conf .= "
" . $grubStore->search;
}
$conf .= "
" . $grubBoot->search . "
if [ -s \$prefix/grubenv ]; then
load_env
fi
@ -102,7 +242,7 @@ else {
set timeout=$timeout
fi
if loadfont $bootRoot/grub/fonts/unicode.pf2; then
if loadfont " . $grubBoot->path . "/grub/fonts/unicode.pf2; then
set gfxmode=640x480
insmod gfxterm
insmod vbe
@ -116,7 +256,7 @@ else {
copy $splashImage, "/boot/background.png" or die "cannot copy $splashImage to /boot\n";
$conf .= "
insmod png
if background_image $bootRoot/background.png; then
if background_image " . $grubBoot->path . "/background.png; then
set color_normal=white/black
set color_highlight=black/white
else
@ -138,7 +278,7 @@ mkpath("/boot/kernels", 0, 0755) if $copyKernels;
sub copyToKernelsDir {
my ($path) = @_;
return $path unless $copyKernels;
return $grubStore->path . substr($path, length("/nix/store")) unless $copyKernels;
$path =~ /\/nix\/store\/(.*)/ or die;
my $name = $1; $name =~ s/\//-/g;
my $dst = "/boot/kernels/$name";
@ -151,7 +291,7 @@ sub copyToKernelsDir {
rename $tmp, $dst or die "cannot rename $tmp to $dst\n";
}
$copied{$dst} = 1;
return "$bootRoot/kernels/$name";
return $grubBoot->path . "/kernels/$name";
}
sub addEntry {
@ -178,6 +318,10 @@ sub addEntry {
$conf .= " " . ($xen ? "module" : "initrd") . " $initrd\n\n";
} else {
$conf .= "menuentry \"$name\" {\n";
$conf .= $grubBoot->search . "\n";
if ($copyKernels == 0) {
$conf .= $grubStore->search . "\n";
}
$conf .= " $extraPerEntryConfig\n" if $extraPerEntryConfig;
$conf .= " multiboot $xen $xenParams\n" if $xen;
$conf .= " " . ($xen ? "module" : "linux") . " $kernel $kernelParams\n";
@ -195,7 +339,7 @@ addEntry("NixOS - Default", $defaultConfig);
$conf .= "$extraEntries\n" unless $extraEntriesBeforeNixOS;
# extraEntries could refer to @bootRoot@, which we have to substitute
$conf =~ s/\@bootRoot\@/$bootRoot/g;
$conf =~ s/\@bootRoot\@/$grubBoot->path/g;
# Emit submenus for all system profiles.
sub addProfile {

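The rewritten GrubFs logic above feeds a new boot.loader.grub.fsIdentifier option, exercised by the installer tests further below with the values "uuid", "label" and "provided", and adds ZFS- and btrfs-aware path handling. A minimal configuration sketch, assuming a BIOS GRUB 2 setup: the device value is taken from the test matrix below, the chosen identifier is illustrative, and `enable` is the usual module switch not shown in this diff.

boot.loader.grub = {
  enable = true;
  version = 2;
  device = "/dev/vda";
  # How generated entries locate /boot and /nix/store:
  # "uuid", "label", or "provided" to reuse the device path given in
  # fileSystems (the installer tests default to "uuid").
  fsIdentifier = "label";
};

With "label" or "uuid", grub.cfg gains a `search --set=drive<n>` line per filesystem and paths are rewritten to `($drive<n>)/...`, as built up in the Perl above.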

@ -16,7 +16,7 @@ let
nix = config.nix.package;
inherit (cfg) timeout;
timeout = if cfg.timeout != null then cfg.timeout else "";
inherit (efi) efiSysMountPoint canTouchEfiVariables;
};


@ -77,6 +77,11 @@ with lib;
'')}
${config.boot.extraModprobeConfig}
'';
environment.etc."modprobe.d/usb-load-ehci-first.conf".text =
''
softdep uhci_hcd pre: ehci_hcd
softdep ohci_hcd pre: ehci_hcd
'';
environment.systemPackages = [ config.system.sbin.modprobe pkgs.kmod ];


@ -50,7 +50,7 @@ let
cp -pvd ${pkgs.busybox}/bin/* ${pkgs.busybox}/sbin/* $out/bin/
# Copy some utillinux stuff.
cp -vf ${pkgs.utillinux}/sbin/blkid $out/bin
cp -vf --remove-destination ${pkgs.utillinux}/sbin/blkid $out/bin
cp -pdv ${pkgs.utillinux}/lib/libblkid*.so.* $out/lib
cp -pdv ${pkgs.utillinux}/lib/libuuid*.so.* $out/lib


@ -30,9 +30,7 @@ in
config = mkIf (!config.boot.isContainer && config.powerManagement.cpuFreqGovernor != null) {
boot.kernelModules = [ "acpi-cpufreq" "speedstep-lib" "pcc-cpufreq"
"cpufreq_${cfg.cpuFreqGovernor}"
];
boot.kernelModules = [ "cpufreq_${cfg.cpuFreqGovernor}" ];
environment.systemPackages = [ cpupower ];


@ -133,7 +133,7 @@ in
};
boot.initrd = mkIf inInitrd {
kernelModules = [ "spl" "zfs" ] ;
kernelModules = [ "spl" "zfs" ];
extraUtilsCommands =
''
cp -v ${zfsPkg}/sbin/zfs $out/bin
@ -148,6 +148,10 @@ in
'';
};
boot.loader.grub = mkIf inInitrd {
zfsSupport = true;
};
systemd.services."zpool-import" = {
description = "Import zpools";
after = [ "systemd-udev-settle.service" ];

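For root-on-ZFS setups, the GrubFs branch earlier splits the filesystem device into a pool label and a dataset path, searching by the pool label and prefixing the dataset plus "/@" to the GRUB path; boot.loader.grub.zfsSupport is now switched on automatically whenever ZFS is needed in the initrd. A sketch of such a layout, with pool and dataset names made up:

fileSystems."/" = {
  device = "rpool/ROOT/nixos";  # pool label "rpool", dataset "ROOT/nixos"
  fsType = "zfs";
};
# Set automatically by the mkIf inInitrd block above:
# boot.loader.grub.zfsSupport = true;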

@ -1,6 +1,7 @@
{ config, lib, pkgs, ... }:
{ config, lib, pkgs, utils, ... }:
with lib;
with utils;
let
@ -10,6 +11,10 @@ let
hasSits = cfg.sits != { };
hasBonds = cfg.bonds != { };
# We must escape interfaces due to the systemd interpretation
subsystemDevice = interface:
"sys-subsystem-net-devices-${escapeSystemdPath interface}.device";
addrOpts = v:
assert v == 4 || v == 6;
{
@ -103,7 +108,7 @@ let
'';
};
ipv6prefixLength = mkOption {
ipv6PrefixLength = mkOption {
default = 64;
example = 64;
type = types.int;
@ -138,8 +143,6 @@ let
Whether this interface is virtual and should be created by tunctl.
This is mainly useful for creating bridges between a host and a virtual
network such as a VPN or a virtual machine.
Defaults to tap device, unless interface contains "tun" in its name.
'';
};
@ -151,6 +154,15 @@ let
'';
};
virtualType = mkOption {
default = null;
type = types.nullOr (types.addCheck types.str (v: v == "tun" || v == "tap"));
description = ''
The explicit type of interface to create. Accepts tun or tap strings.
Also accepts null to implicitly detect the type of device.
'';
};
proxyARP = mkOption {
default = false;
type = types.bool;
@ -596,8 +608,8 @@ in
nameValuePair "${i.name}-cfg"
{ description = "Configuration of ${i.name}";
wantedBy = [ "network-interfaces.target" ];
bindsTo = [ "sys-subsystem-net-devices-${i.name}.device" ];
after = [ "sys-subsystem-net-devices-${i.name}.device" ];
bindsTo = [ (subsystemDevice i.name) ];
after = [ (subsystemDevice i.name) ];
serviceConfig.Type = "oneshot";
serviceConfig.RemainAfterExit = true;
path = [ pkgs.iproute pkgs.gawk ];
@ -673,26 +685,32 @@ in
'');
};
createTunDevice = i: nameValuePair "${i.name}"
createTunDevice = i: nameValuePair "${i.name}-netdev"
{ description = "Virtual Network Interface ${i.name}";
requires = [ "dev-net-tun.device" ];
after = [ "dev-net-tun.device" ];
wantedBy = [ "network.target" ];
requiredBy = [ "sys-subsystem-net-devices-${i.name}.device" ];
serviceConfig =
{ Type = "oneshot";
RemainAfterExit = true;
ExecStart = "${pkgs.tunctl}/bin/tunctl -t '${i.name}' -u '${i.virtualOwner}'";
ExecStop = "${pkgs.tunctl}/bin/tunctl -d '${i.name}'";
};
wantedBy = [ "network.target" (subsystemDevice i.name) ];
path = [ pkgs.iproute ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
};
script = ''
ip tuntap add dev "${i.name}" \
${optionalString (i.virtualType != null) "mode ${i.virtualType}"} \
user "${i.virtualOwner}"
'';
postStop = ''
ip link del ${i.name}
'';
};
createBridgeDevice = n: v:
let
deps = map (i: "sys-subsystem-net-devices-${i}.device") v.interfaces;
createBridgeDevice = n: v: nameValuePair "${n}-netdev"
(let
deps = map subsystemDevice v.interfaces;
in
{ description = "Bridge Interface ${n}";
wantedBy = [ "network.target" "sys-subsystem-net-devices-${n}.device" ];
wantedBy = [ "network.target" (subsystemDevice n) ];
bindsTo = deps;
after = deps;
serviceConfig.Type = "oneshot";
@ -725,14 +743,14 @@ in
ip link set "${n}" down
brctl delbr "${n}"
'';
};
});
createBondDevice = n: v:
let
deps = map (i: "sys-subsystem-net-devices-${i}.device") v.interfaces;
createBondDevice = n: v: nameValuePair "${n}-netdev"
(let
deps = map subsystemDevice v.interfaces;
in
{ description = "Bond Interface ${n}";
wantedBy = [ "network.target" "sys-subsystem-net-devices-${n}.device" ];
wantedBy = [ "network.target" (subsystemDevice n) ];
bindsTo = deps;
after = deps;
serviceConfig.Type = "oneshot";
@ -764,14 +782,14 @@ in
ifenslave -d "${n}"
ip link delete "${n}"
'';
};
});
createSitDevice = n: v:
let
deps = optional (v.dev != null) "sys-subsystem-net-devices-${v.dev}.device";
createSitDevice = n: v: nameValuePair "${n}-netdev"
(let
deps = optional (v.dev != null) (subsystemDevice v.dev);
in
{ description = "6-to-4 Tunnel Interface ${n}";
wantedBy = [ "network.target" "sys-subsystem-net-devices-${n}.device" ];
wantedBy = [ "network.target" (subsystemDevice n) ];
bindsTo = deps;
after = deps;
serviceConfig.Type = "oneshot";
@ -790,14 +808,14 @@ in
postStop = ''
ip link delete "${n}"
'';
};
});
createVlanDevice = n: v:
let
deps = [ "sys-subsystem-net-devices-${v.interface}.device" ];
createVlanDevice = n: v: nameValuePair "${n}-netdev"
(let
deps = [ (subsystemDevice v.interface) ];
in
{ description = "Vlan Interface ${n}";
wantedBy = [ "network.target" "sys-subsystem-net-devices-${n}.device" ];
wantedBy = [ "network.target" (subsystemDevice n) ];
bindsTo = deps;
after = deps;
serviceConfig.Type = "oneshot";
@ -812,15 +830,15 @@ in
postStop = ''
ip link delete "${n}"
'';
};
});
in listToAttrs (
map configureInterface interfaces ++
map createTunDevice (filter (i: i.virtual) interfaces))
// mapAttrs createBridgeDevice cfg.bridges
// mapAttrs createBondDevice cfg.bonds
// mapAttrs createSitDevice cfg.sits
// mapAttrs createVlanDevice cfg.vlans
// mapAttrs' createBridgeDevice cfg.bridges
// mapAttrs' createBondDevice cfg.bonds
// mapAttrs' createSitDevice cfg.sits
// mapAttrs' createVlanDevice cfg.vlans
// { "network-setup" = networkSetup; };
# Set the host and domain names in the activation script. Don't

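The per-interface units above now hang off escaped systemd device units via subsystemDevice, and virtual interfaces are created with `ip tuntap` instead of tunctl, with an explicit virtualType. A configuration sketch tying the new options together; interface names, owner and addresses are made up, while ipAddress/prefixLength are the same options the nsd test further below uses.

networking.interfaces.tap0 = {
  virtual = true;           # create with `ip tuntap add`, per createTunDevice above
  virtualType = "tap";      # or "tun"; null keeps the implicit detection
  virtualOwner = "alice";   # user given ownership of the device
  ipAddress = "10.0.0.1";
  prefixLength = 24;
};
# Bridges, bonds, vlans and sits now bind to the escaped device units as well:
networking.bridges.br0.interfaces = [ "eth0" "tap0" ];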

@ -0,0 +1,5 @@
{ config, pkgs, modulesPath, ... }:
{
imports = [ "${modulesPath}/virtualisation/azure-image.nix" ];
}

View file

@ -0,0 +1,125 @@
{ config, lib, pkgs, ... }:
with lib;
let
diskSize = "4096";
in
{
imports = [ ../profiles/headless.nix ];
system.build.azureImage =
pkgs.vmTools.runInLinuxVM (
pkgs.runCommand "azure-image"
{ preVM =
''
mkdir $out
diskImage=$out/$diskImageBase
cyl=$(((${diskSize}*1024*1024)/(512*63*255)))
size=$(($cyl*255*63*512))
roundedsize=$((($size/(1024*1024)+1)*(1024*1024)))
${pkgs.vmTools.qemu}/bin/qemu-img create -f raw $diskImage $roundedsize
mv closure xchg/
'';
postVM =
''
mkdir -p $out
${pkgs.vmTools.qemu}/bin/qemu-img convert -f raw -O vpc $diskImage $out/disk.vhd
rm $diskImage
'';
diskImageBase = "nixos-${config.system.nixosVersion}-${pkgs.stdenv.system}.raw";
buildInputs = [ pkgs.utillinux pkgs.perl ];
exportReferencesGraph =
[ "closure" config.system.build.toplevel ];
}
''
# Create partition table
${pkgs.parted}/sbin/parted /dev/vda mklabel msdos
${pkgs.parted}/sbin/parted /dev/vda mkpart primary ext4 1 ${diskSize}M
${pkgs.parted}/sbin/parted /dev/vda print
. /sys/class/block/vda1/uevent
mknod /dev/vda1 b $MAJOR $MINOR
# Create an empty filesystem and mount it.
${pkgs.e2fsprogs}/sbin/mkfs.ext4 -L nixos /dev/vda1
${pkgs.e2fsprogs}/sbin/tune2fs -c 0 -i 0 /dev/vda1
mkdir /mnt
mount /dev/vda1 /mnt
# The initrd expects these directories to exist.
mkdir /mnt/dev /mnt/proc /mnt/sys
mount --bind /proc /mnt/proc
mount --bind /dev /mnt/dev
mount --bind /sys /mnt/sys
# Copy all paths in the closure to the filesystem.
storePaths=$(perl ${pkgs.pathsFromGraph} /tmp/xchg/closure)
mkdir -p /mnt/nix/store
echo "copying everything (will take a while)..."
cp -prd $storePaths /mnt/nix/store/
# Register the paths in the Nix database.
printRegistration=1 perl ${pkgs.pathsFromGraph} /tmp/xchg/closure | \
chroot /mnt ${config.nix.package}/bin/nix-store --load-db
# Create the system profile to allow nixos-rebuild to work.
chroot /mnt ${config.nix.package}/bin/nix-env \
-p /nix/var/nix/profiles/system --set ${config.system.build.toplevel}
# `nixos-rebuild' requires an /etc/NIXOS.
mkdir -p /mnt/etc
touch /mnt/etc/NIXOS
# `switch-to-configuration' requires a /bin/sh
mkdir -p /mnt/bin
ln -s ${config.system.build.binsh}/bin/sh /mnt/bin/sh
# Install a configuration.nix.
mkdir -p /mnt/etc/nixos /mnt/boot/grub
cp ${./azure-config.nix} /mnt/etc/nixos/configuration.nix
# Generate the GRUB menu.
ln -s vda /dev/sda
chroot /mnt ${config.system.build.toplevel}/bin/switch-to-configuration boot
umount /mnt/proc /mnt/dev /mnt/sys
umount /mnt
''
);
fileSystems."/".device = "/dev/disk/by-label/nixos";
# Azure metadata is available as a CD-ROM drive.
fileSystems."/metadata".device = "/dev/sr0";
boot.kernelParams = [ "console=ttyS0" "earlyprintk=ttyS0" "rootdelay=300" "panic=1" "boot.panic_on_fail" ];
boot.initrd.kernelModules = [ "hv_vmbus" "hv_netvsc" "hv_utils" "hv_storvsc" ];
# Generate a GRUB menu.
boot.loader.grub.device = "/dev/sda";
boot.loader.grub.version = 2;
boot.loader.grub.timeout = 0;
# Don't put old configurations in the GRUB menu. The user has no
# way to select them anyway.
boot.loader.grub.configurationLimit = 0;
# Allow root logins only using the SSH key that the user specified
# at instance creation time.
services.openssh.enable = true;
services.openssh.permitRootLogin = "without-password";
# Force getting the hostname from Azure
networking.hostName = mkDefault "";
# Always include cryptsetup so that NixOps can use it.
environment.systemPackages = [ pkgs.cryptsetup ];
networking.usePredictableInterfaceNames = false;
users.extraUsers.root.openssh.authorizedKeys.keys = [ (builtins.readFile <ssh-pub-key>) ];
}

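How this image definition gets consumed is not shown in the diff; the following is only an evaluation sketch, assuming <nixpkgs/nixos> is on NIX_PATH. The attribute path mirrors system.build.azureImage above, and <ssh-pub-key> must also resolve, since azure-image.nix reads it.

# azure.nix (illustrative); build with `nix-build azure.nix`
(import <nixpkgs/nixos> {
  configuration = ./azure-config.nix;
}).config.system.build.azureImage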

@ -174,6 +174,11 @@ in
# Clean up existing machined registration and interfaces.
machinectl terminate "$INSTANCE" 2> /dev/null || true
if [ "$PRIVATE_NETWORK" = 1 ]; then
ip link del dev "ve-$INSTANCE" 2> /dev/null || true
fi
if [ "$PRIVATE_NETWORK" = 1 ]; then
ip link del dev "ve-$INSTANCE" 2> /dev/null || true
fi


@ -59,6 +59,7 @@ in
config = mkIf cfg.enable (mkMerge [
{ environment.systemPackages = [ pkgs.docker ];
users.extraGroups.docker.gid = config.ids.gids.docker;
}
(mkIf cfg.socketActivation {


@ -201,15 +201,32 @@ sub runInContainer {
die "cannot run nsenter: $!\n";
}
# Remove a directory while recursively unmounting all mounted filesystems within
# that directory and unmounting/removing that directory afterwards as well.
#
# NOTE: If the specified path is a mountpoint, its contents will be removed;
# only mountpoints underneath that path will be unmounted properly.
sub safeRemoveTree {
my ($path) = @_;
system("find", $path, "-mindepth", "1", "-xdev",
"(", "-type", "d", "-exec", "mountpoint", "-q", "{}", ";", ")",
"-exec", "umount", "-fR", "{}", "+");
system("rm", "--one-file-system", "-rf", $path);
if (-e $path) {
system("umount", "-fR", $path);
system("rm", "--one-file-system", "-rf", $path);
}
}
if ($action eq "destroy") {
die "$0: cannot destroy declarative container (remove it from your configuration.nix instead)\n"
unless POSIX::access($confFile, &POSIX::W_OK);
stopContainer if isContainerRunning;
rmtree($profileDir) if -e $profileDir;
rmtree($gcRootsDir) if -e $gcRootsDir;
rmtree($root) if -e $root;
safeRemoveTree($profileDir) if -e $profileDir;
safeRemoveTree($gcRootsDir) if -e $gcRootsDir;
safeRemoveTree($root) if -e $root;
unlink($confFile) or die;
}

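The destroy action now refuses to touch declarative containers (those coming from configuration.nix) and uses safeRemoveTree so that data reachable through bind mounts inside the container root is not deleted; the container test further below checks both behaviours. A sketch of a declarative container like the "webserver" one the test tries and fails to destroy; the service and adminAddr value are illustrative:

containers.webserver = {
  privateNetwork = true;   # the PRIVATE_NETWORK case cleaned up above
  config = { config, pkgs, ... }: {
    services.httpd.enable = true;
    services.httpd.adminAddr = "root@example.org";
  };
};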

@ -278,7 +278,7 @@ in
boot.initrd.extraUtilsCommands =
''
# We need mke2fs in the initrd.
cp -f ${pkgs.e2fsprogs}/sbin/mke2fs $out/bin
cp -vf --remove-destination ${pkgs.e2fsprogs}/sbin/mke2fs $out/bin
'';
boot.initrd.postDeviceCommands =


@ -34,7 +34,7 @@ in rec {
maintainers = [ pkgs.lib.maintainers.eelco pkgs.lib.maintainers.shlevy ];
};
constituents =
let all = x: [ x.x86_64-linux x.i686-linux ]; in
let all = x: map (p: x.${p}) supportedSystems; in
[ nixos.channel
(all nixos.manual)
@ -52,6 +52,11 @@ in rec {
(all nixos.tests.installer.lvm)
(all nixos.tests.installer.separateBoot)
(all nixos.tests.installer.simple)
(all nixos.tests.installer.simpleLabels)
(all nixos.tests.installer.simpleProvided)
(all nixos.tests.installer.btrfsSimple)
(all nixos.tests.installer.btrfsSubvols)
(all nixos.tests.installer.btrfsSubvolDefault)
(all nixos.tests.ipv6)
(all nixos.tests.kde4)
(all nixos.tests.login)

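The `all` helper above now draws from supportedSystems instead of the hard-coded pair of Linux platforms; with the previous value the result is unchanged. A tiny worked example, assuming supportedSystems is still that pair and using a dummy attribute set:

let
  supportedSystems = [ "x86_64-linux" "i686-linux" ];  # assumed, as before
  all = x: map (p: x.${p}) supportedSystems;
in all { x86_64-linux = "manual-x86_64"; i686-linux = "manual-i686"; }
# => [ "manual-x86_64" "manual-i686" ]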

@ -231,14 +231,15 @@ in rec {
tests.installer.simpleLabels = forAllSystems (system: (import tests/installer.nix { inherit system; }).simpleLabels.test);
tests.installer.simpleProvided = forAllSystems (system: (import tests/installer.nix { inherit system; }).simpleProvided.test);
tests.installer.btrfsSimple = forAllSystems (system: (import tests/installer.nix { inherit system; }).btrfsSimple.test);
#tests.installer.btrfsSubvols = forAllSystems (system: (import tests/installer.nix { inherit system; }).btrfsSubvols.test);
tests.installer.btrfsSubvols = forAllSystems (system: (import tests/installer.nix { inherit system; }).btrfsSubvols.test);
tests.installer.btrfsSubvolDefault = forAllSystems (system: (import tests/installer.nix { inherit system; }).btrfsSubvolDefault.test);
tests.influxdb = callTest tests/influxdb.nix {};
tests.ipv6 = callTest tests/ipv6.nix {};
tests.jenkins = callTest tests/jenkins.nix {};
tests.kde4 = callTest tests/kde4.nix {};
tests.latestKernel.login = callTest tests/login.nix { latestKernel = true; };
tests.login = callTest tests/login.nix {};
tests.logstash = callTest tests/logstash.nix {};
#tests.logstash = callTest tests/logstash.nix {};
tests.misc = callTest tests/misc.nix {};
tests.mumble = callTest tests/mumble.nix {};
tests.munin = callTest tests/munin.nix {};
@ -246,6 +247,7 @@ in rec {
tests.mysqlReplication = callTest tests/mysql-replication.nix {};
tests.nat = callTest tests/nat.nix {};
tests.nfs3 = callTest tests/nfs.nix { version = 3; };
tests.nsd = callTest tests/nsd.nix {};
tests.openssh = callTest tests/openssh.nix {};
tests.printing = callTest tests/printing.nix {};
tests.proxy = callTest tests/proxy.nix {};


@ -28,7 +28,7 @@ in
nodes =
{ tracker =
{ config, pkgs, ... }:
{ environment.systemPackages = [ pkgs.transmission pkgs.bittorrent ];
{ environment.systemPackages = [ pkgs.transmission ];
# We need Apache on the tracker to serve the torrents.
services.httpd.enable = true;


@ -1,11 +1,9 @@
{ pkgs, ... }:
{ users.extraUsers = pkgs.lib.singleton
{ name = "alice";
{ isNormalUser = true;
name = "alice";
description = "Alice Foobar";
home = "/home/alice";
createHome = true;
useDefaultShell = true;
password = "foobar";
uid = 1000;
};


@ -56,23 +56,52 @@ import ./make-test.nix {
die if $id1 eq $id2;
# Put the root of $id2 into a bind mount.
$machine->succeed(
"mv /var/lib/containers/$id2 /id2-bindmount",
"mount --bind /id2-bindmount /var/lib/containers/$id1"
);
my $ip1 = $machine->succeed("nixos-container show-ip $id1");
chomp $ip1;
my $ip2 = $machine->succeed("nixos-container show-ip $id2");
chomp $ip2;
die if $ip1 eq $ip2;
# Create a directory and a file so we can later check whether they still exist
# after the containers are destroyed.
$machine->succeed(
"mkdir /nested-bindmount",
"echo important data > /nested-bindmount/dummy",
);
# Create a directory with a dummy file and bind-mount it into both
# containers.
foreach ($id1, $id2) {
my $importantPath = "/var/lib/containers/$_/very/important/data";
$machine->succeed(
"mkdir -p $importantPath",
"mount --bind /nested-bindmount $importantPath"
);
}
# Start one of them.
$machine->succeed("nixos-container start $id1");
# Execute commands via the root shell.
$machine->succeed("nixos-container run $id1 -- uname") =~ /Linux/ or die;
$machine->succeed("nixos-container set-root-password $id1 foobar");
# Destroy the containers.
$machine->succeed("nixos-container destroy $id1");
$machine->succeed("nixos-container destroy $id2");
$machine->succeed(
# Check whether destruction of any container has killed important data
"grep -qF 'important data' /nested-bindmount/dummy",
# Ensure that the container path is gone
"test ! -e /var/lib/containers/$id1"
);
# Destroying a declarative container should fail.
$machine->fail("nixos-container destroy webserver");
'';


@ -23,7 +23,7 @@ import ./make-test.nix {
$machine->succeed("su - alice -c 'DISPLAY=:0.0 gnome-terminal &'");
$machine->waitForWindow(qr/Terminal/);
$machine->sleep(10);
$machine->sleep(20);
$machine->screenshot("screen");
'';


@ -24,7 +24,7 @@ import ./make-test.nix {
$machine->succeed("su - alice -c 'DISPLAY=:0.0 gnome-terminal &'");
$machine->waitForWindow(qr/Terminal/);
$machine->sleep(10);
$machine->sleep(20);
$machine->screenshot("screen");
'';


@ -35,8 +35,9 @@ let
# The configuration to install.
makeConfig = { testChannel, useEFI, grubVersion, grubDevice }: pkgs.writeText "configuration.nix"
''
makeConfig = { testChannel, useEFI, grubVersion, grubDevice, grubIdentifier
, readOnly ? true, forceGrubReinstallCount ? 0 }:
pkgs.writeText "configuration.nix" ''
{ config, pkgs, modulesPath, ... }:
{ imports =
@ -54,8 +55,13 @@ let
''}
boot.loader.grub.device = "${grubDevice}";
boot.loader.grub.extraConfig = "serial; terminal_output.serial";
boot.loader.grub.fsIdentifier = "${grubIdentifier}";
''}
boot.loader.grub.configurationLimit = 100 + ${toString forceGrubReinstallCount};
${optionalString (!readOnly) "nix.readOnlyStore = false;"}
environment.systemPackages = [ ${optionalString testChannel "pkgs.rlwrap"} ];
}
'';
@ -93,7 +99,7 @@ let
# disk, and then reboot from the hard disk. It's parameterized with
# a test script fragment `createPartitions', which must create
# partitions and filesystems.
testScriptFun = { createPartitions, testChannel, useEFI, grubVersion, grubDevice }:
testScriptFun = { createPartitions, testChannel, useEFI, grubVersion, grubDevice, grubIdentifier }:
let
# FIXME: OVMF doesn't boot from virtio http://www.mail-archive.com/edk2-devel@lists.sourceforge.net/msg01501.html
iface = if useEFI || grubVersion == 1 then "scsi" else "virtio";
@ -161,7 +167,7 @@ let
$machine->succeed("cat /mnt/etc/nixos/hardware-configuration.nix >&2");
$machine->copyFileFromHost(
"${ makeConfig { inherit testChannel useEFI grubVersion grubDevice; } }",
"${ makeConfig { inherit testChannel useEFI grubVersion grubDevice grubIdentifier; } }",
"/mnt/etc/nixos/configuration.nix");
# Perform the installation.
@ -197,16 +203,30 @@ let
$machine->succeed("type -tP ls | tee /dev/stderr") =~ /.nix-profile/
or die "nix-env failed";
# We need a writable nix-store on next boot
$machine->copyFileFromHost(
"${ makeConfig { inherit testChannel useEFI grubVersion grubDevice grubIdentifier; readOnly = false; forceGrubReinstallCount = 1; } }",
"/etc/nixos/configuration.nix");
# Check whether nixos-rebuild works.
$machine->succeed("nixos-rebuild switch >&2");
# Test nixos-option.
$machine->succeed("nixos-option boot.initrd.kernelModules | grep virtio_console");
$machine->succeed("nixos-option -d boot.initrd.kernelModules | grep 'List of modules'");
$machine->succeed("nixos-option -l boot.initrd.kernelModules | grep qemu-guest.nix");
$machine->succeed("nixos-option boot.initrd.kernelModules | grep 'List of modules'");
$machine->succeed("nixos-option boot.initrd.kernelModules | grep qemu-guest.nix");
$machine->shutdown;
# Check whether a writable store build works
$machine = createMachine({ ${hdFlags} qemuFlags => "${qemuFlags}" });
$machine->waitForUnit("multi-user.target");
$machine->copyFileFromHost(
"${ makeConfig { inherit testChannel useEFI grubVersion grubDevice grubIdentifier; readOnly = false; forceGrubReinstallCount = 2; } }",
"/etc/nixos/configuration.nix");
$machine->succeed("nixos-rebuild boot >&2");
$machine->shutdown;
# And just to be sure, check that the machine still boots after
# "nixos-rebuild switch".
$machine = createMachine({ ${hdFlags} qemuFlags => "${qemuFlags}" });
@ -216,13 +236,13 @@ let
makeInstallerTest = name:
{ createPartitions, testChannel ? false, useEFI ? false, grubVersion ? 2, grubDevice ? "/dev/vda" }:
{ createPartitions, testChannel ? false, useEFI ? false, grubVersion ? 2, grubDevice ? "/dev/vda", grubIdentifier ? "uuid" }:
makeTest {
inherit iso;
name = "installer-" + name;
nodes = if testChannel then { inherit webserver; } else { };
testScript = testScriptFun {
inherit createPartitions testChannel useEFI grubVersion grubDevice;
inherit createPartitions testChannel useEFI grubVersion grubDevice grubIdentifier;
};
};
@ -461,11 +481,36 @@ in {
"mount LABEL=root /mnt",
"btrfs subvol create /mnt/boot",
"btrfs subvol create /mnt/nixos",
"btrfs subvol create /mnt/nixos/default",
"umount /mnt",
"mount -o defaults,subvol=nixos LABEL=root /mnt",
"mount -o defaults,subvol=nixos/default LABEL=root /mnt",
"mkdir /mnt/boot",
"mount -o defaults,subvol=boot LABEL=root /mnt/boot",
);
'';
};
# Test to see if we can detect default and aux subvolumes correctly
btrfsSubvolDefault = makeInstallerTest "btrfsSubvolDefault" {
createPartitions = ''
$machine->succeed(
"sgdisk -Z /dev/vda",
"sgdisk -n 1:0:+1M -n 2:0:+1G -N 3 -t 1:ef02 -t 2:8200 -t 3:8300 -c 3:root /dev/vda",
"mkswap /dev/vda2 -L swap",
"swapon -L swap",
"mkfs.btrfs -L root /dev/vda3",
"btrfs device scan",
"mount LABEL=root /mnt",
"btrfs subvol create /mnt/badpath",
"btrfs subvol create /mnt/badpath/boot",
"btrfs subvol create /mnt/nixos",
"btrfs subvol set-default \$(btrfs subvol list /mnt | grep 'nixos' | awk '{print \$2}') /mnt",
"umount /mnt",
"mount -o defaults LABEL=root /mnt",
"mkdir -p /mnt/badpath/boot", # Help ensure the detection mechanism is actually looking up subvolumes
"mkdir /mnt/boot",
"mount -o defaults,subvol=badpath/boot LABEL=root /mnt/boot",
);
'';
};
}


@ -16,6 +16,8 @@ import ./make-test.nix {
services.jenkinsSlave.enable = true;
users.extraUsers.jenkins.extraGroups = [ "users" ];
systemd.services.jenkins.unitConfig.TimeoutSec = 240;
};
slave =


@ -18,6 +18,7 @@ import ./make-test.nix {
'';
};
};
systemd.services.munin-node.unitConfig.TimeoutSec = 240;
};
};

nixos/tests/nsd.nix (new file, 83 lines)

@ -0,0 +1,83 @@
let
common = { pkgs, ... }: {
networking.firewall.enable = false;
networking.useDHCP = false;
# for a host utility with IPv6 support
environment.systemPackages = [ pkgs.bind ];
};
in import ./make-test.nix {
name = "nsd";
nodes = {
clientv4 = { lib, nodes, ... }: {
imports = [ common ];
networking.nameservers = lib.mkForce [
nodes.server.config.networking.interfaces.eth1.ipAddress
];
networking.interfaces.eth1.ipAddress = "192.168.0.2";
networking.interfaces.eth1.prefixLength = 24;
};
clientv6 = { lib, nodes, ... }: {
imports = [ common ];
networking.nameservers = lib.mkForce [
nodes.server.config.networking.interfaces.eth1.ipv6Address
];
networking.interfaces.eth1.ipv6Address = "dead:beef::2";
};
server = { lib, ... }: {
imports = [ common ];
networking.interfaces.eth1.ipAddress = "192.168.0.1";
networking.interfaces.eth1.prefixLength = 24;
networking.interfaces.eth1.ipv6Address = "dead:beef::1";
services.nsd.enable = true;
services.nsd.interfaces = lib.mkForce [];
services.nsd.zones."example.com.".data = ''
@ SOA ns.example.com noc.example.com 666 7200 3600 1209600 3600
ipv4 A 1.2.3.4
ipv6 AAAA abcd::eeff
deleg NS ns.example.com
ns A 192.168.0.1
ns AAAA dead:beef::1
'';
services.nsd.zones."deleg.example.com.".data = ''
@ SOA ns.example.com noc.example.com 666 7200 3600 1209600 3600
@ A 9.8.7.6
@ AAAA fedc::bbaa
'';
};
};
testScript = ''
startAll;
$clientv4->waitForUnit("network.target");
$clientv6->waitForUnit("network.target");
$server->waitForUnit("nsd.service");
sub assertHost {
my ($type, $rr, $query, $expected) = @_;
my $self = $type eq 4 ? $clientv4 : $clientv6;
my $out = $self->succeed("host -$type -t $rr $query");
$self->log("output: $out");
chomp $out;
die "DNS IPv$type query on $query gave '$out' instead of '$expected'"
if ($out !~ $expected);
}
foreach (4, 6) {
subtest "ipv$_", sub {
assertHost($_, "a", "example.com", qr/has no [^ ]+ record/);
assertHost($_, "aaaa", "example.com", qr/has no [^ ]+ record/);
assertHost($_, "soa", "example.com", qr/SOA.*?noc\.example\.com/);
assertHost($_, "a", "ipv4.example.com", qr/address 1.2.3.4$/);
assertHost($_, "aaaa", "ipv6.example.com", qr/address abcd::eeff$/);
assertHost($_, "a", "deleg.example.com", qr/address 9.8.7.6$/);
assertHost($_, "aaaa", "deleg.example.com", qr/address fedc::bbaa$/);
};
}
'';
}


@ -32,9 +32,16 @@ import ./make-test.nix ({ pkgs, ... }: {
}];
};
phd = {
enable = true;
};
mysql = {
enable = true;
package = pkgs.mysql;
extraOptions = ''
sql_mode=STRICT_ALL_TABLES
'';
};
};


@ -16,6 +16,6 @@ stdenv.mkDerivation rec {
homepage = http://xiph.org/flac/;
description = "Library and tools for encoding and decoding the FLAC lossless audio file format";
platforms = platforms.all;
maintainers = maintainers.mornfall;
maintainers = [ maintainers.mornfall ];
};
}


@ -2,13 +2,13 @@
libsamplerate, pulseaudio, libXinerama, gettext, pkgconfig, alsaLib }:
stdenv.mkDerivation rec {
version = "3.21.82";
version = "3.21.83";
pname = "fldigi";
name = "${pname}-${version}";
src = fetchurl {
url = "http://www.w1hkj.com/downloads/${pname}/${name}.tar.gz";
sha256 = "1q2fc1zm9kfsjir4g6fh95vmjdq984iyxfcs6q4gjqy1znhqcyqs";
sha256 = "1fyg6dc6xnxb620rrhws60wj10lsgbwsmnhz8vc6ncspx8mx7019";
};
buildInputs = [ libXinerama gettext hamlib fltk13 libjpeg libpng portaudio


@ -1,5 +1,6 @@
{ pkgs, stdenv, fetchurl, python, buildPythonPackage, pythonPackages, mygpoclient, intltool,
ipodSupport ? true, libgpod, gpodderHome ? "", gpodderDownloadDir ? "" }:
ipodSupport ? true, libgpod, gpodderHome ? "", gpodderDownloadDir ? "",
gnome3, hicolor_icon_theme }:
with pkgs.lib;
@ -7,14 +8,18 @@ let
inherit (pythonPackages) coverage feedparser minimock sqlite3 dbus pygtk eyeD3;
in buildPythonPackage rec {
name = "gpodder-3.7.0";
name = "gpodder-3.8.0";
src = fetchurl {
url = "http://gpodder.org/src/${name}.tar.gz";
sha256 = "fa90ef4bdd3fd9eef95404f7f43f70912ae3ab4f8d24078484a2f3e11b14dc47";
sha256 = "0731f08f4270c81872b841b55200ae80feb4502706397d0085079471fb9a8fe4";
};
buildInputs = [ coverage feedparser minimock sqlite3 mygpoclient intltool ];
buildInputs = [
coverage feedparser minimock sqlite3 mygpoclient intltool
gnome3.gnome_icon_theme gnome3.gnome_icon_theme_symbolic
hicolor_icon_theme
];
propagatedBuildInputs = [ feedparser dbus mygpoclient sqlite3 pygtk eyeD3 ]
++ stdenv.lib.optional ipodSupport libgpod;
@ -26,7 +31,30 @@ in buildPythonPackage rec {
preFixup = ''
wrapProgram $out/bin/gpodder \
${optionalString (gpodderHome != "") "--set GPODDER_HOME ${gpodderHome}"} \
${optionalString (gpodderDownloadDir != "") "--set GPODDER_DOWNLOAD_DIR ${gpodderDownloadDir}"}
${optionalString (gpodderDownloadDir != "") "--set GPODDER_DOWNLOAD_DIR ${gpodderDownloadDir}"} \
--prefix XDG_DATA_DIRS : "${gnome3.gnome_themes_standard}/share:$XDG_ICON_DIRS:$GSETTINGS_SCHEMAS_PATH"
'';
# The `wrapPythonPrograms` script in the postFixup phase breaks gpodder. The
# easiest way to fix this is to call wrapPythonPrograms and then to clean up
# the wrapped file.
postFixup = ''
wrapPythonPrograms
if test -e $out/nix-support/propagated-build-inputs; then
ln -s $out/nix-support/propagated-build-inputs $out/nix-support/propagated-user-env-packages
fi
createBuildInputsPth build-inputs "$buildInputStrings"
for inputsfile in propagated-build-inputs propagated-native-build-inputs; do
if test -e $out/nix-support/$inputsfile; then
createBuildInputsPth $inputsfile "$(cat $out/nix-support/$inputsfile)"
fi
done
sed -i "$out/bin/..gpodder-wrapped-wrapped" -e '{
/import sys; sys.argv/d
}'
'';
installPhase = "DESTDIR=/ PREFIX=$out make install";

Some files were not shown because too many files have changed in this diff.