# nixos/modules/system/activation/top-level.nix


{ config, lib, pkgs, extendModules, noUserModules, ... }:
with lib;
let
# This attribute is responsible for creating boot entries for
# child configurations. They are only (directly) accessible
# when the parent configuration is the boot default. For example,
# this provides an easy way to boot the same configuration
# as you normally use, but with another kernel (see the sketch
# below this binding).
# !!! fix this
children =
mapAttrs
(childName: childConfig: childConfig.configuration.system.build.toplevel)
config.specialisation;
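# A hedged sketch of how this is consumed from a configuration; the name
# "withDebugKernel" and the kernel parameter are invented for illustration:
#
#   specialisation.withDebugKernel.configuration = {
#     boot.kernelParams = [ "debug" ];
#   };
#
# The child's toplevel ends up linked as $out/specialisation/withDebugKernel
# by the builder below and carries its own bin/switch-to-configuration.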
systemBuilder =
let
kernelPath = "${config.boot.kernelPackages.kernel}/" +
"${config.system.boot.loader.kernelFile}";
initrdPath = "${config.system.build.initialRamdisk}/" +
"${config.system.boot.loader.initrdFile}";
in ''
mkdir $out
# Containers don't have their own kernel or initrd. They boot
# directly into stage 2.
${optionalString (!config.boot.isContainer) ''
if [ ! -f ${kernelPath} ]; then
echo "The bootloader cannot find the proper kernel image."
echo "(Expecting ${kernelPath})"
false
fi
ln -s ${kernelPath} $out/kernel
ln -s ${config.system.modulesTree} $out/kernel-modules
${optionalString (config.hardware.deviceTree.package != null) ''
ln -s ${config.hardware.deviceTree.package} $out/dtbs
''}
echo -n "$kernelParams" > $out/kernel-params
ln -s ${initrdPath} $out/initrd
ln -s ${config.system.build.initialRamdiskSecretAppender}/bin/append-initrd-secrets $out
ln -s ${config.hardware.firmware}/lib/firmware $out/firmware
''}
echo "$activationScript" > $out/activate
echo "$dryActivationScript" > $out/dry-activate
substituteInPlace $out/activate --subst-var out
substituteInPlace $out/dry-activate --subst-var out
chmod u+x $out/activate $out/dry-activate
unset activationScript dryActivationScript
${pkgs.stdenv.shellDryRun} $out/activate
${pkgs.stdenv.shellDryRun} $out/dry-activate
cp ${config.system.build.bootStage2} $out/init
substituteInPlace $out/init --subst-var-by systemConfig $out
ln -s ${config.system.build.etc}/etc $out/etc
ln -s ${config.system.path} $out/sw
ln -s "$systemd" $out/systemd
echo -n "$configurationName" > $out/configuration-name
echo -n "systemd ${toString config.systemd.package.interfaceVersion}" > $out/init-interface-version
echo -n "$nixosLabel" > $out/nixos-version
echo -n "${config.boot.kernelPackages.stdenv.hostPlatform.system}" > $out/system
mkdir $out/specialisation
${concatStringsSep "\n"
(mapAttrsToList (name: path: "ln -s ${path} $out/specialisation/${name}") children)}
mkdir $out/bin
export localeArchive="${config.i18n.glibcLocales}/lib/locale/locale-archive"
substituteAll ${./switch-to-configuration.pl} $out/bin/switch-to-configuration
chmod +x $out/bin/switch-to-configuration
${optionalString (pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform) ''
if ! output=$($perl/bin/perl -c $out/bin/switch-to-configuration 2>&1); then
echo "switch-to-configuration syntax is not valid:"
echo "$output"
exit 1
fi
''}
echo -n "${toString config.system.extraDependencies}" > $out/extra-dependencies
${config.system.extraSystemBuilderCmds}
'';
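# For orientation, a rough sketch of the layout the script above produces in
# $out for a non-container system (not every file is listed):
#   kernel, kernel-modules, initrd, kernel-params  - boot artefacts
#   init, activate, dry-activate                   - stage-2 entry point and activation scripts
#   etc, sw, systemd, firmware                     - symlinks into the system closure
#   specialisation/<name>                          - child configurations
#   bin/switch-to-configuration                    - the script that activates a configuration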
# Putting it all together. This builds a store path containing
# symlinks to the various parts of the built configuration (the
# kernel, systemd units, init scripts, etc.) as well as a script
# `switch-to-configuration' that activates the configuration and
# makes it bootable.
baseSystem = pkgs.stdenvNoCC.mkDerivation {
name = "nixos-system-${config.system.name}-${config.system.nixos.label}";
preferLocalBuild = true;
allowSubstitutes = false;
buildCommand = systemBuilder;
inherit (pkgs) coreutils;
systemd = config.systemd.package;
shell = "${pkgs.bash}/bin/sh";
su = "${pkgs.shadow.su}/bin/su";
utillinux = pkgs.util-linux;
kernelParams = config.boot.kernelParams;
installBootLoader = config.system.build.installBootLoader;
activationScript = config.system.activationScripts.script;
dryActivationScript = config.system.dryActivationScript;
nixosLabel = config.system.nixos.label;
configurationName = config.boot.loader.grub.configurationName;
# Needed by switch-to-configuration.
perl = pkgs.perl.withPackages (p: with p; [ FileSlurp NetDBus XMLParser XMLTwig ConfigIniFiles ]);
};
# Handle assertions and warnings
failedAssertions = map (x: x.message) (filter (x: !x.assertion) config.assertions);
baseSystemAssertWarn = if failedAssertions != []
then throw "\nFailed assertions:\n${concatStringsSep "\n" (map (x: "- ${x}") failedAssertions)}"
else showWarnings config.warnings baseSystem;
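# As an illustration, a module elsewhere might define (names and message are
# hypothetical):
#
#   assertions = [ {
#     assertion = config.services.foo.enable -> config.networking.firewall.enable;
#     message = "services.foo requires the firewall to be enabled.";
#   } ];
#
# Any assertion whose condition evaluates to false aborts evaluation here with
# its message; warnings are printed via showWarnings but do not abort.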
# Replace runtime dependencies
system = foldr ({ oldDependency, newDependency }: drv:
pkgs.replaceDependency { inherit oldDependency newDependency drv; }
) baseSystemAssertWarn config.system.replaceRuntimeDependencies;
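# A hedged example of what feeds this fold (the patch file is hypothetical):
#
#   system.replaceRuntimeDependencies = [ {
#     original = pkgs.openssl;
#     replacement = pkgs.openssl.overrideAttrs (old: {
#       patches = (old.patches or []) ++ [ ./security-fix.patch ];
#     });
#   } ];
#
# pkgs.replaceDependency then rewrites references to the original openssl
# store path inside the already-built system closure instead of rebuilding
# everything that depends on it.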
/* Workaround until https://github.com/NixOS/nixpkgs/pull/156533 is merged;
   the call can then be replaced by an argument.
*/
tmpFixupSubmoduleBoundary = subopts:
lib.mkOption {
type = lib.types.submoduleWith {
modules = [ { options = subopts; } ];
};
};
in
{
imports = [
../build.nix
(mkRemovedOptionModule [ "nesting" "clone" ] "Use `specialisation.«name» = { inheritParentConfig = true; configuration = { ... }; }` instead.")
(mkRemovedOptionModule [ "nesting" "children" ] "Use `specialisation.«name».configuration = { ... }` instead.")
];
options = {
specialisation = mkOption {
default = {};
example = lib.literalExpression "{ fewJobsManyCores.configuration = { nix.settings = { cores = 0; max-jobs = 1; }; }; }";
description = ''
Additional configurations to build. If
<literal>inheritParentConfig</literal> is true, the system
will be based on the overall system configuration.
To switch to a specialised configuration
(e.g. <literal>fewJobsManyCores</literal>) at runtime, run:
<screen>
<prompt># </prompt>sudo /run/current-system/specialisation/fewJobsManyCores/bin/switch-to-configuration test
</screen>
'';
type = types.attrsOf (types.submodule (
local@{ ... }: let
extend = if local.config.inheritParentConfig
then extendModules
else noUserModules.extendModules;
in {
options.inheritParentConfig = mkOption {
type = types.bool;
default = true;
description = "Include the entire system's configuration. Set to false to make a completely differently configured system.";
};
options.configuration = mkOption {
default = {};
description = ''
Arbitrary NixOS configuration.
Anything you can add to a normal NixOS configuration, you can add
here, including imports and config values, although nested
specialisations will be ignored.
'';
visible = "shallow";
inherit (extend { modules = [ ./no-clone.nix ]; }) type;
};
})
);
};
system.boot.loader.id = mkOption {
internal = true;
default = "";
description = ''
Id string of the used bootloader.
'';
};
system.boot.loader.kernelFile = mkOption {
internal = true;
default = pkgs.stdenv.hostPlatform.linux-kernel.target;
defaultText = literalExpression "pkgs.stdenv.hostPlatform.linux-kernel.target";
type = types.str;
description = ''
Name of the kernel file to be passed to the bootloader.
'';
};
system.boot.loader.initrdFile = mkOption {
internal = true;
default = "initrd";
type = types.str;
description = ''
Name of the initrd file to be passed to the bootloader.
'';
};
system.build = tmpFixupSubmoduleBoundary {
installBootLoader = mkOption {
internal = true;
# "; true" => make the `$out` argument from switch-to-configuration.pl
# go to `true` instead of `echo`, hiding the useless path
# from the log.
default = "echo 'Warning: do not know how to make this configuration bootable; please enable a boot loader.' 1>&2; true";
description = ''
A program that writes a bootloader installation script to the path passed in the first command line argument.
See <literal>nixos/modules/system/activation/switch-to-configuration.pl</literal>.
'';
type = types.unique {
message = ''
Only one bootloader can be enabled at a time. This requirement was not
checked before NixOS 22.05; earlier versions silently used the last
definition. Change your configuration to enable only one bootloader.
'';
} (types.either types.str types.package);
};
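# Individual boot loader modules (for example the GRUB or systemd-boot
# modules) are the ones expected to set installBootLoader to their installer
# script; types.unique above turns conflicting definitions into an error.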
toplevel = mkOption {
type = types.package;
readOnly = true;
description = ''
This option contains the store path that typically represents a NixOS system.
You can read this path in a custom deployment tool for example.
'';
};
};
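# A minimal sketch of reading this path from outside the module system,
# assuming a channel-style (non-flake) setup:
#
#   $ nix-build '<nixpkgs/nixos>' -A config.system.build.toplevel
#
# The resulting ./result symlink points at the toplevel store path.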
system.copySystemConfiguration = mkOption {
type = types.bool;
default = false;
description = ''
If enabled, copies the NixOS configuration file
(usually <filename>/etc/nixos/configuration.nix</filename>)
and links it from the resulting system, making it available as
<filename>/run/current-system/configuration.nix</filename>.
Note that only this single file is copied, even if it imports others.
'';
};
system.extraSystemBuilderCmds = mkOption {
type = types.lines;
internal = true;
default = "";
description = ''
This code will be added to the builder creating the system store path.
'';
};
system.extraDependencies = mkOption {
type = types.listOf types.package;
default = [];
description = ''
A list of packages that should be included in the system
closure but not otherwise made available to users. This is
primarily used by the installation tests.
'';
};
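# For example (package chosen arbitrarily), setting
#   system.extraDependencies = [ pkgs.hello ];
# pulls hello into the system closure without adding it to the system PATH.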
system.replaceRuntimeDependencies = mkOption {
default = [];
example = lib.literalExpression "[ ({ original = pkgs.openssl; replacement = pkgs.callPackage /path/to/openssl { }; }) ]";
type = types.listOf (types.submodule (
{ ... }: {
options.original = mkOption {
type = types.package;
description = "The original package to override.";
};
options.replacement = mkOption {
type = types.package;
description = "The replacement package.";
};
})
);
apply = map ({ original, replacement, ... }: {
oldDependency = original;
newDependency = replacement;
});
description = ''
List of packages to override without doing a full rebuild.
The original derivation and replacement derivation must have the same
name length, and ideally should have close-to-identical directory layout.
'';
};
system.name = mkOption {
type = types.str;
default =
if config.networking.hostName == ""
then "unnamed"
else config.networking.hostName;
defaultText = literalExpression ''
if config.networking.hostName == ""
then "unnamed"
else config.networking.hostName;
'';
description = ''
The name of the system used in the <option>system.build.toplevel</option> derivation.
</para><para>
That derivation has the following name:
<literal>"nixos-system-''${config.system.name}-''${config.system.nixos.label}"</literal>
'';
};
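# For example, a host with networking.hostName = "gamma" (hypothetical) built
# under label "22.05" yields a derivation named "nixos-system-gamma-22.05".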
};
config = {
system.extraSystemBuilderCmds =
optionalString
config.system.copySystemConfiguration
''ln -s '${import ../../../lib/from-env.nix "NIXOS_CONFIG" <nixos-config>}' \
"$out/configuration.nix"
'';
system.build.toplevel = system;
};
# The specialisation option type is generated via extendModules, so this
# module's option documentation cannot be built in the sandbox.
meta.buildDocsInSandbox = false;
}