Merge pull request #167902 from DeterminateSystems/openstack-zfs-cleanup

openstack-image: init, make-single-disk-zfs-image: init
Graham Christensen 2022-04-16 19:44:53 -04:00 committed by GitHub
commit d65eff4fb6
9 changed files with 547 additions and 19 deletions

@@ -0,0 +1,322 @@
# Note: This is a private API, internal to NixOS. Its interface is subject
# to change without notice.
#
# The result of this builder is a single disk image, partitioned like this:
#
# * partition #1: a very small, 1MiB partition to leave room for Grub.
#
# * partition #2: boot, a partition formatted with FAT to be used for /boot.
# FAT is chosen to support EFI.
#
# * partition #3: nixos, a partition dedicated to a zpool.
#
# This single-disk approach does not satisfy ZFS's requirements for autoexpand;
# however, automation can still expand the pool after boot, for example with
# `services.zfs.expandOnBoot`.
{ lib
, pkgs
, # The NixOS configuration to be installed onto the disk image.
config
, # The size of the FAT partition for /boot, in megabytes.
bootSize ? 1024
, # The size of the root partition, in megabytes.
rootSize ? 2048
, # The name of the ZFS pool
rootPoolName ? "tank"
, # zpool properties
rootPoolProperties ? {
autoexpand = "on";
}
, # pool-wide filesystem properties
rootPoolFilesystemProperties ? {
acltype = "posixacl";
atime = "off";
compression = "on";
mountpoint = "legacy";
xattr = "sa";
}
, # datasets, with per-attribute options:
# mount: (optional) mount point in the VM
# properties: (optional) ZFS properties to set on the dataset, in the same form as rootPoolFilesystemProperties
# Notes:
# 1. datasets are created in order of shortest to longest name (a simple topological sort)
# 2. you should define a dataset whose mount is `/` (the root filesystem)
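# Example (illustrative names; they mirror the OpenStack image later in this diff):
#   datasets = {
#     "tank/system/root".mount = "/";
#     "tank/system/var".mount = "/var";
#     "tank/local/nix".mount = "/nix";
#   };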
datasets ? { }
, # The files and directories to be placed in the target file system.
# This is a list of attribute sets {source, target} where `source'
# is the file system object (regular file or directory) to be
# grafted in the file system at path `target'.
contents ? [ ]
, # The initial NixOS configuration file to be copied to
# /etc/nixos/configuration.nix. This configuration will be embedded
# inside a configuration which includes the described ZFS fileSystems.
configFile ? null
, # Shell code executed after the VM has finished.
postVM ? ""
, name ? "nixos-disk-image"
, # Disk image format, one of qcow2, qcow2-compressed, vdi, vpc, raw.
format ? "raw"
, # Include a copy of Nixpkgs in the disk image
includeChannel ? true
}:
let
formatOpt = if format == "qcow2-compressed" then "qcow2" else format;
compress = lib.optionalString (format == "qcow2-compressed") "-c";
filenameSuffix = "." + {
qcow2 = "qcow2";
vdi = "vdi";
vpc = "vhd";
raw = "img";
}.${formatOpt} or formatOpt;
rootFilename = "nixos.root${filenameSuffix}";
# FIXME: merge with channel.nix / make-channel.nix.
channelSources =
let
nixpkgs = lib.cleanSource pkgs.path;
in
pkgs.runCommand "nixos-${config.system.nixos.version}" { } ''
mkdir -p $out
cp -prd ${nixpkgs.outPath} $out/nixos
chmod -R u+w $out/nixos
if [ ! -e $out/nixos/nixpkgs ]; then
ln -s . $out/nixos/nixpkgs
fi
rm -rf $out/nixos/.git
echo -n ${config.system.nixos.versionSuffix} > $out/nixos/.version-suffix
'';
closureInfo = pkgs.closureInfo {
rootPaths = [ config.system.build.toplevel ]
++ (lib.optional includeChannel channelSources);
};
modulesTree = pkgs.aggregateModules
(with config.boot.kernelPackages; [ kernel zfs ]);
tools = lib.makeBinPath (
with pkgs; [
config.system.build.nixos-enter
config.system.build.nixos-install
dosfstools
e2fsprogs
gptfdisk
nix
parted
utillinux
zfs
]
);
hasDefinedMount = disk: ((disk.mount or null) != null);
stringifyProperties = prefix: properties: lib.concatStringsSep " \\\n" (
lib.mapAttrsToList
(
property: value: "${prefix} ${lib.escapeShellArg property}=${lib.escapeShellArg value}"
)
properties
);
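# stringifyProperties example:
#   stringifyProperties "-O" { compression = "on"; xattr = "sa"; }
# renders as:
#   -O 'compression'='on' \
#   -O 'xattr'='sa'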
featuresToProperties = features:
lib.listToAttrs
(builtins.map
(feature: {
name = "feature@${feature}";
value = "enabled";
})
features);
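# featuresToProperties example (async_destroy is just an illustrative feature name):
#   featuresToProperties [ "async_destroy" ]
#   => { "feature@async_destroy" = "enabled"; }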
createDatasets =
let
datasetlist = lib.mapAttrsToList lib.nameValuePair datasets;
sorted = lib.sort (left: right: (lib.stringLength left.name) < (lib.stringLength right.name)) datasetlist;
cmd = { name, value }:
let
properties = stringifyProperties "-o" (value.properties or { });
in
"zfs create -p ${properties} ${name}";
in
lib.concatMapStringsSep "\n" cmd sorted;
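# createDatasets example: with datasets "tank/system" and "tank/system/root"
# (and no extra properties), the shortest-name-first sort emits, in order:
#   zfs create -p tank/system
#   zfs create -p tank/system/root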
mountDatasets =
let
datasetlist = lib.mapAttrsToList lib.nameValuePair datasets;
mounts = lib.filter ({ value, ... }: hasDefinedMount value) datasetlist;
sorted = lib.sort (left: right: (lib.stringLength left.value.mount) < (lib.stringLength right.value.mount)) mounts;
cmd = { name, value }:
''
mkdir -p /mnt${lib.escapeShellArg value.mount}
mount -t zfs ${name} /mnt${lib.escapeShellArg value.mount}
'';
in
lib.concatMapStringsSep "\n" cmd sorted;
unmountDatasets =
let
datasetlist = lib.mapAttrsToList lib.nameValuePair datasets;
mounts = lib.filter ({ value, ... }: hasDefinedMount value) datasetlist;
sorted = lib.sort (left: right: (lib.stringLength left.value.mount) > (lib.stringLength right.value.mount)) mounts;
cmd = { name, value }:
''
umount /mnt${lib.escapeShellArg value.mount}
'';
in
lib.concatMapStringsSep "\n" cmd sorted;
fileSystemsCfgFile =
let
mountable = lib.filterAttrs (_: value: hasDefinedMount value) datasets;
in
pkgs.runCommand "filesystem-config.nix"
{
buildInputs = with pkgs; [ jq nixpkgs-fmt ];
filesystems = builtins.toJSON {
fileSystems = lib.mapAttrs'
(
dataset: attrs:
{
name = attrs.mount;
value = {
fsType = "zfs";
device = "${dataset}";
};
}
)
mountable;
};
passAsFile = [ "filesystems" ];
} ''
(
echo "builtins.fromJSON '''"
jq . < "$filesystemsPath"
echo "'''"
) > $out
nixpkgs-fmt $out
'';
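# fileSystemsCfgFile example: for datasets = { "tank/system/root".mount = "/"; }
# the generated file reads (jq pretty-prints the JSON; condensed here):
#   builtins.fromJSON ''
#     { "fileSystems": { "/": { "device": "tank/system/root", "fsType": "zfs" } } }
#   ''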
mergedConfig =
if configFile == null
then fileSystemsCfgFile
else
pkgs.runCommand "configuration.nix"
{
buildInputs = with pkgs; [ nixpkgs-fmt ];
}
''
(
echo '{ imports = ['
printf "(%s)\n" "$(cat ${fileSystemsCfgFile})";
printf "(%s)\n" "$(cat ${configFile})";
echo ']; }'
) > $out
nixpkgs-fmt $out
'';
image = (
pkgs.vmTools.override {
rootModules =
[ "zfs" "9p" "9pnet_virtio" "virtio_pci" "virtio_blk" ] ++
(pkgs.lib.optional pkgs.stdenv.hostPlatform.isx86 "rtc_cmos");
kernel = modulesTree;
}
).runInLinuxVM (
pkgs.runCommand name
{
memSize = 1024;
QEMU_OPTS = "-drive file=$rootDiskImage,if=virtio,cache=unsafe,werror=report";
preVM = ''
PATH=$PATH:${pkgs.qemu_kvm}/bin
mkdir $out
rootDiskImage=root.raw
qemu-img create -f raw $rootDiskImage ${toString (bootSize + rootSize)}M
'';
postVM = ''
${if formatOpt == "raw" then ''
mv $rootDiskImage $out/${rootFilename}
'' else ''
${pkgs.qemu}/bin/qemu-img convert -f raw -O ${formatOpt} ${compress} $rootDiskImage $out/${rootFilename}
''}
rootDiskImage=$out/${rootFilename}
set -x
${postVM}
'';
} ''
export PATH=${tools}:$PATH
set -x
cp -sv /dev/vda /dev/sda
cp -sv /dev/vda /dev/xvda
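# Partition plan (matches the header comment at the top of this file):
#   1: 1MiB BIOS-boot spacer so GRUB has somewhere to live (bios_grub)
#   2: FAT32 ESP, later mounted at /boot
#   3: the rest of the disk, handed to zpool create below
# Note: parted's fs-type argument is only a partition-table hint;
# partition 3 is never actually formatted as FAT.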
parted --script /dev/vda -- \
mklabel gpt \
mkpart no-fs 1MiB 2MiB \
set 1 bios_grub on \
align-check optimal 1 \
mkpart primary fat32 2MiB ${toString bootSize}MiB \
align-check optimal 2 \
mkpart primary fat32 ${toString bootSize}MiB -1MiB \
align-check optimal 3 \
print
sfdisk --dump /dev/vda
zpool create \
${stringifyProperties " -o" rootPoolProperties} \
${stringifyProperties " -O" rootPoolFilesystemProperties} \
${rootPoolName} /dev/vda3
parted --script /dev/vda -- print
${createDatasets}
${mountDatasets}
mkdir -p /mnt/boot
mkfs.vfat -n ESP /dev/vda2
mount /dev/vda2 /mnt/boot
mount
# Install a configuration.nix
mkdir -p /mnt/etc/nixos
# `cat` rather than `cp`, so the file is mutable on the target filesystem
cat ${mergedConfig} > /mnt/etc/nixos/configuration.nix
export NIX_STATE_DIR=$TMPDIR/state
nix-store --load-db < ${closureInfo}/registration
nixos-install \
--root /mnt \
--no-root-passwd \
--system ${config.system.build.toplevel} \
--substituters "" \
${lib.optionalString includeChannel ''--channel ${channelSources}''}
df -h
umount /mnt/boot
${unmountDatasets}
zpool export ${rootPoolName}
''
);
in
image
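
A minimal invocation sketch, not part of this diff: `evaluatedConfig` is a
hypothetical stand-in for an evaluated NixOS configuration, and the dataset
names mirror the OpenStack image further down.

import ./make-single-disk-zfs-image.nix {
  inherit lib pkgs;
  config = evaluatedConfig; # hypothetical: an evaluated NixOS configuration
  rootPoolName = "tank";
  datasets = {
    "tank/system/root".mount = "/";
    "tank/local/nix".mount = "/nix";
  };
  format = "qcow2-compressed";
}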

@@ -73,7 +73,7 @@ in {
}
'';
- zfsBuilder = import ../../../lib/make-zfs-image.nix {
+ zfsBuilder = import ../../../lib/make-multi-disk-zfs-image.nix {
inherit lib config configFile;
inherit (cfg) contents format name;
pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package

@@ -0,0 +1,101 @@
# nix-build '<nixpkgs/nixos>' -A config.system.build.openstackImage --arg configuration "{ imports = [ ./nixos/maintainers/scripts/openstack/openstack-image.nix ]; }"
{ config, lib, pkgs, ... }:
let
inherit (lib) mkOption types;
copyChannel = true;
cfg = config.openstackImage;
imageBootMode = if config.openstack.efi then "uefi" else "legacy-bios";
in
{
imports = [
../../../modules/virtualisation/openstack-config.nix
] ++ (lib.optional copyChannel ../../../modules/installer/cd-dvd/channel.nix);
options.openstackImage = {
name = mkOption {
type = types.str;
description = "The name of the generated derivation";
default = "nixos-openstack-image-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}";
};
sizeMB = mkOption {
type = types.int;
default = 8192;
description = "The size in MB of the image";
};
format = mkOption {
type = types.enum [ "raw" "qcow2" ];
default = "qcow2";
description = "The image format to output";
};
};
config = {
documentation.enable = copyChannel;
openstack = {
efi = true;
zfs = {
enable = true;
datasets = {
"tank/system/root".mount = "/";
"tank/system/var".mount = "/var";
"tank/local/nix".mount = "/nix";
"tank/user/home".mount = "/home";
};
};
};
system.build.openstackImage = import ../../../lib/make-single-disk-zfs-image.nix {
inherit lib config;
inherit (cfg) contents format name;
pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package
configFile = pkgs.writeText "configuration.nix"
''
{ modulesPath, ... }: {
imports = [ "''${modulesPath}/virtualisation/openstack-config.nix" ];
openstack.zfs.enable = true;
}
'';
includeChannel = copyChannel;
bootSize = 1000;
rootSize = cfg.sizeMB;
rootPoolProperties = {
ashift = 12;
autoexpand = "on";
};
datasets = config.openstack.zfs.datasets;
postVM = ''
extension=''${rootDiskImage##*.}
friendlyName=$out/${cfg.name}
rootDisk="$friendlyName.root.$extension"
mv "$rootDiskImage" "$rootDisk"
mkdir -p $out/nix-support
echo "file ${cfg.format} $rootDisk" >> $out/nix-support/hydra-build-products
${pkgs.jq}/bin/jq -n \
--arg system_label ${lib.escapeShellArg config.system.nixos.label} \
--arg system ${lib.escapeShellArg pkgs.stdenv.hostPlatform.system} \
--arg root_logical_bytes "$(${pkgs.qemu}/bin/qemu-img info --output json "$rootDisk" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
--arg boot_mode "${imageBootMode}" \
--arg root "$rootDisk" \
'{}
| .label = $system_label
| .boot_mode = $boot_mode
| .system = $system
| .disks.root.logical_bytes = $root_logical_bytes
| .disks.root.file = $root
' > $out/nix-support/image-info.json
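# image-info.json ends up shaped like (values illustrative):
#   { "label": ..., "boot_mode": "uefi", "system": ...,
#     "disks": { "root": { "logical_bytes": ..., "file": ... } } }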
'';
};
};
}

@@ -1,17 +1,18 @@
# nix-build '<nixpkgs/nixos>' -A config.system.build.openstackImage --arg configuration "{ imports = [ ./nixos/maintainers/scripts/openstack/openstack-image.nix ]; }"
{ config, lib, pkgs, ... }:
with lib;
+ let
+   copyChannel = true;
+ in
{
- imports =
-   [ ../../../modules/installer/cd-dvd/channel.nix
-     ../../../modules/virtualisation/openstack-config.nix
-   ];
+ imports = [
+   ../../../modules/virtualisation/openstack-config.nix
+ ] ++ (lib.optional copyChannel ../../../modules/installer/cd-dvd/channel.nix);
+ documentation.enable = copyChannel;
system.build.openstackImage = import ../../../lib/make-disk-image.nix {
- inherit lib config;
+ inherit lib config copyChannel;
additionalSpace = "1024M";
pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package
format = "qcow2";

@@ -1240,6 +1240,7 @@
./virtualisation/amazon-options.nix
./virtualisation/hyperv-guest.nix
./virtualisation/kvmgt.nix
+ ./virtualisation/openstack-options.nix
./virtualisation/openvswitch.nix
./virtualisation/parallels-guest.nix
./virtualisation/podman/default.nix

@@ -1,8 +1,11 @@
- { pkgs, lib, ... }:
+ { config, pkgs, lib, ... }:
- with lib;
+ # image metadata:
+ #   hw_firmware_type=uefi
let
+   inherit (lib) mkIf mkDefault;
+   cfg = config.openstack;
metadataFetcher = import ./openstack-metadata-fetcher.nix {
targetRoot = "/";
wgetExtraOptions = "--retry-connrefused";
@@ -11,23 +14,47 @@ in
{
imports = [
../profiles/qemu-guest.nix
+ # Note: While we do use the headless profile, we also explicitly
+ # turn on the serial console on tty1 below.
+ # Note that I could not find any documentation indicating tty1 was
+ # the correct choice. I picked tty1 because that is what one
+ # particular host was using.
+ ../profiles/headless.nix
# The Openstack Metadata service exposes data on an EC2 API also.
./ec2-data.nix
./amazon-init.nix
];
config = {
fileSystems."/" = {
fileSystems."/" = mkIf (!cfg.zfs.enable) {
device = "/dev/disk/by-label/nixos";
fsType = "ext4";
autoResize = true;
};
fileSystems."/boot" = mkIf (cfg.efi || cfg.zfs.enable) {
# The ZFS image uses a partition labeled ESP whether or not we're
# booting with EFI.
device = "/dev/disk/by-label/ESP";
fsType = "vfat";
};
boot.growPartition = true;
boot.kernelParams = [ "console=ttyS0" ];
boot.loader.grub.device = "/dev/vda";
boot.loader.timeout = 0;
boot.kernelParams = [ "console=tty1" ];
boot.loader.grub.device = if (!cfg.efi) then "/dev/vda" else "nodev";
boot.loader.grub.efiSupport = cfg.efi;
boot.loader.grub.efiInstallAsRemovable = cfg.efi;
boot.loader.timeout = 1;
boot.loader.grub.extraConfig = ''
serial --unit=1 --speed=115200 --word=8 --parity=no --stop=1
terminal_output console serial
terminal_input console serial
'';
services.zfs.expandOnBoot = mkIf cfg.zfs.enable (lib.mkDefault "all");
boot.zfs.devNodes = mkIf cfg.zfs.enable "/dev/";
# Allow root logins
services.openssh = {
@@ -36,6 +63,11 @@ in
passwordAuthentication = mkDefault false;
};
users.users.root.initialPassword = "foobar";
+ # Enable the serial console on tty1
+ systemd.services."serial-getty@tty1".enable = true;
# Force getting the hostname from Openstack metadata.
networking.hostName = mkDefault "";
@@ -43,7 +75,7 @@ in
path = [ pkgs.wget ];
description = "Fetch Metadata on startup";
wantedBy = [ "multi-user.target" ];
before = [ "apply-ec2-data.service" "amazon-init.service"];
before = [ "apply-ec2-data.service" "amazon-init.service" ];
wants = [ "network-online.target" ];
after = [ "network-online.target" ];
script = metadataFetcher;

@@ -14,9 +14,9 @@
wget ${wgetExtraOptions} "$@"
}
wget_imds -O "$metaDir/ami-manifest-path" http://169.254.169.254/1.0/meta-data/ami-manifest-path
wget_imds -O "$metaDir/ami-manifest-path" http://169.254.169.254/1.0/meta-data/ami-manifest-path || true
# When no user-data is provided, the OpenStack metadata server doesn't expose the user-data route.
(umask 077 && wget_imds -O "$metaDir/user-data" http://169.254.169.254/1.0/user-data || rm -f "$metaDir/user-data")
wget_imds -O "$metaDir/hostname" http://169.254.169.254/1.0/meta-data/hostname
wget_imds -O "$metaDir/public-keys-0-openssh-key" http://169.254.169.254/1.0/meta-data/public-keys/0/openssh-key
wget_imds -O "$metaDir/hostname" http://169.254.169.254/1.0/meta-data/hostname || true
wget_imds -O "$metaDir/public-keys-0-openssh-key" http://169.254.169.254/1.0/meta-data/public-keys/0/openssh-key || true
''

@@ -0,0 +1,71 @@
{ config, lib, pkgs, ... }:
let
inherit (lib) literalExpression types;
in
{
options = {
openstack = {
zfs = {
enable = lib.mkOption {
default = false;
internal = true;
description = ''
Whether the OpenStack instance uses a ZFS root.
'';
};
datasets = lib.mkOption {
description = ''
Datasets to create under the `tank` zpool.
**NOTE:** This option is used only at image creation time, and
does not attempt to declaratively create or manage datasets
on an existing system.
'';
default = { };
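# Illustrative example (mirrors the dataset layout the OpenStack image uses):
example = literalExpression ''
  {
    "tank/system/root".mount = "/";
    "tank/system/var".mount = "/var";
  }
'';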
type = types.attrsOf (types.submodule {
options = {
mount = lib.mkOption {
description = "Where to mount this dataset.";
type = types.nullOr types.str;
default = null;
};
properties = lib.mkOption {
description = "Properties to set on this dataset.";
type = types.attrsOf types.str;
default = { };
};
};
});
};
};
efi = lib.mkOption {
default = pkgs.stdenv.hostPlatform.isAarch64;
defaultText = literalExpression "pkgs.stdenv.hostPlatform.isAarch64";
internal = true;
description = ''
Whether the instance is using EFI.
'';
};
};
};
config = lib.mkIf config.openstack.zfs.enable {
networking.hostId = lib.mkDefault "00000000";
fileSystems =
let
mountable = lib.filterAttrs (_: value: ((value.mount or null) != null)) config.openstack.zfs.datasets;
in
lib.mapAttrs'
(dataset: opts: lib.nameValuePair opts.mount {
device = dataset;
fsType = "zfs";
})
mountable;
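# E.g. a dataset "tank/system/root" with mount = "/" becomes:
#   fileSystems."/" = { device = "tank/system/root"; fsType = "zfs"; };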
};
}