{
  bashInteractive,
  buildPackages,
  cacert,
  callPackage,
  closureInfo,
  coreutils,
  docker,
  e2fsprogs,
  fakeroot,
  findutils,
  go,
  jq,
  jshon,
  lib,
  makeWrapper,
  moreutils,
  nix,
  pigz,
  rsync,
  runCommand,
  runtimeShell,
  shadow,
  skopeo,
  storeDir ? builtins.storeDir,
  substituteAll,
  symlinkJoin,
  util-linux,
  vmTools,
  writeReferencesToFile,
  writeScript,
  writeText,
  writeTextDir,
  writePython3,
  system, # Note: This is the cross system we're compiling for
}:
let

  inherit (lib)
    optionals
    ;

  # Shell fragment that loads the Nix database for `contents` into a store
  # rooted at the current working directory, and pins the contents as GC
  # roots so nix commands work inside the resulting container.
  mkDbExtraCommand = contents:
    let
      contentsList = if builtins.isList contents then contents else [ contents ];
    in ''
      echo "Generating the nix database..."
      echo "Warning: only the database of the deepest Nix layer is loaded."
      echo "       If you want to use nix commands in the container, it would"
      echo "       be better to only have one layer that contains a nix store."

      export NIX_REMOTE=local?root=$PWD
      # A user is required by nix
      # https://github.com/NixOS/nix/blob/9348f9291e5d9e4ba3c4347ea1b235640f54fd79/src/libutil/util.cc#L478
      export USER=nobody
      ${buildPackages.nix}/bin/nix-store --load-db < ${closureInfo { rootPaths = contentsList; }}/registration

      mkdir -p nix/var/nix/gcroots/docker/
      for i in ${lib.concatStringsSep " " contentsList}; do
        ln -s $i nix/var/nix/gcroots/docker/$(basename $i)
      done;
    '';

  # The OCI Image specification recommends that configurations use values listed
  # in the Go Language document for GOARCH.
  # Reference: https://github.com/opencontainers/image-spec/blob/master/config.md#properties
  # For the mapping from Nixpkgs system parameters to GOARCH, we can reuse the
  # mapping from the go package.
  defaultArch = go.GOARCH;

in
rec {

  # Example image definitions, built from the sibling examples.nix file.
  examples = callPackage ./examples.nix {
    inherit buildImage buildLayeredImage fakeNss pullImage shadowSetup buildImageWithNixDb;
  };
2018-05-06 03:38:47 +01:00
|
|
|
pullImage = let
|
|
|
|
fixName = name: builtins.replaceStrings ["/" ":"] ["-" "-"] name;
|
|
|
|
in
|
|
|
|
{ imageName
|
2018-04-03 09:26:03 +01:00
|
|
|
# To find the digest of an image, you can use skopeo:
|
2018-06-13 01:56:13 +01:00
|
|
|
# see doc/functions.xml
|
2018-05-06 03:38:47 +01:00
|
|
|
, imageDigest
|
|
|
|
, sha256
|
2018-06-13 01:56:13 +01:00
|
|
|
, os ? "linux"
|
2020-12-12 23:42:31 +00:00
|
|
|
, arch ? defaultArch
|
2019-03-26 09:35:21 +00:00
|
|
|
|
|
|
|
# This is used to set name to the pulled image
|
|
|
|
, finalImageName ? imageName
|
2018-04-03 09:26:03 +01:00
|
|
|
# This used to set a tag to the pulled image
|
2018-05-06 03:38:47 +01:00
|
|
|
, finalImageTag ? "latest"
|
2021-05-05 09:40:08 +01:00
|
|
|
# This is used to disable TLS certificate verification, allowing access to http registries on (hopefully) trusted networks
|
|
|
|
, tlsVerify ? true
|
2019-03-26 09:35:21 +00:00
|
|
|
|
|
|
|
, name ? fixName "docker-image-${finalImageName}-${finalImageTag}.tar"
|
2018-05-06 03:38:47 +01:00
|
|
|
}:
|
|
|
|
|
|
|
|
runCommand name {
|
2019-03-26 09:35:21 +00:00
|
|
|
inherit imageDigest;
|
|
|
|
imageName = finalImageName;
|
2018-06-03 21:58:23 +01:00
|
|
|
imageTag = finalImageTag;
|
2021-01-24 00:40:18 +00:00
|
|
|
impureEnvVars = lib.fetchers.proxyImpureEnvVars;
|
2018-05-06 03:38:47 +01:00
|
|
|
outputHashMode = "flat";
|
|
|
|
outputHashAlgo = "sha256";
|
|
|
|
outputHash = sha256;
|
|
|
|
|
2019-06-27 19:15:42 +01:00
|
|
|
nativeBuildInputs = lib.singleton skopeo;
|
|
|
|
SSL_CERT_FILE = "${cacert.out}/etc/ssl/certs/ca-bundle.crt";
|
2018-05-06 03:38:47 +01:00
|
|
|
|
|
|
|
sourceURL = "docker://${imageName}@${imageDigest}";
|
2019-03-26 09:35:21 +00:00
|
|
|
destNameTag = "${finalImageName}:${finalImageTag}";
|
2018-05-06 03:38:47 +01:00
|
|
|
} ''
|
2021-05-05 09:40:08 +01:00
|
|
|
skopeo \
|
|
|
|
--src-tls-verify=${lib.boolToString tlsVerify} \
|
|
|
|
--insecure-policy \
|
|
|
|
--tmpdir=$TMPDIR \
|
|
|
|
--override-os ${os} \
|
|
|
|
--override-arch ${arch} \
|
|
|
|
copy "$sourceURL" "docker-archive://$out:$destNameTag"
|
2018-05-06 03:38:47 +01:00
|
|
|
'';
|
2016-09-28 00:42:05 +01:00
|
|
|
|
2015-11-19 12:11:17 +00:00
|
|
|
# We need to sum layer.tar, not a directory, hence tarsum instead of nix-hash.
|
2021-01-15 13:22:34 +00:00
|
|
|
# And we cannot untar it, because then we cannot preserve permissions etc.
|
2015-11-19 12:11:17 +00:00
|
|
|
tarsum = runCommand "tarsum" {
|
2019-01-30 04:29:18 +00:00
|
|
|
nativeBuildInputs = [ go ];
|
2015-11-19 12:11:17 +00:00
|
|
|
} ''
|
|
|
|
mkdir tarsum
|
|
|
|
cd tarsum
|
|
|
|
|
|
|
|
cp ${./tarsum.go} tarsum.go
|
|
|
|
export GOPATH=$(pwd)
|
2019-03-05 18:53:21 +00:00
|
|
|
export GOCACHE="$TMPDIR/go-cache"
|
2018-03-26 17:47:31 +01:00
|
|
|
mkdir -p src/github.com/docker/docker/pkg
|
2021-03-04 13:37:12 +00:00
|
|
|
ln -sT ${docker.moby-src}/pkg/tarsum src/github.com/docker/docker/pkg/tarsum
|
2015-11-19 12:11:17 +00:00
|
|
|
go build
|
|
|
|
|
2018-09-25 18:54:45 +01:00
|
|
|
mkdir -p $out/bin
|
|
|
|
|
|
|
|
cp tarsum $out/bin/
|
2015-11-19 12:11:17 +00:00
|
|
|
'';
|
2016-09-28 00:42:05 +01:00
|
|
|
|
2015-11-19 12:11:17 +00:00
|
|
|
# buildEnv creates symlinks to dirs, which is hard to edit inside the overlay VM
|
2016-09-28 00:42:05 +01:00
|
|
|
mergeDrvs = {
|
|
|
|
derivations,
|
|
|
|
onlyDeps ? false
|
|
|
|
}:
|
2015-11-19 12:11:17 +00:00
|
|
|
runCommand "merge-drvs" {
|
2016-09-28 00:42:05 +01:00
|
|
|
inherit derivations onlyDeps;
|
2015-11-19 12:11:17 +00:00
|
|
|
} ''
|
2016-09-28 00:42:05 +01:00
|
|
|
if [[ -n "$onlyDeps" ]]; then
|
|
|
|
echo $derivations > $out
|
2015-11-19 12:11:17 +00:00
|
|
|
exit 0
|
|
|
|
fi
|
2016-09-28 00:42:05 +01:00
|
|
|
|
2015-11-19 12:11:17 +00:00
|
|
|
mkdir $out
|
2016-09-28 00:42:05 +01:00
|
|
|
for derivation in $derivations; do
|
|
|
|
echo "Merging $derivation..."
|
|
|
|
if [[ -d "$derivation" ]]; then
|
|
|
|
# If it's a directory, copy all of its contents into $out.
|
|
|
|
cp -drf --preserve=mode -f $derivation/* $out/
|
2015-11-19 12:11:17 +00:00
|
|
|
else
|
2016-09-28 00:42:05 +01:00
|
|
|
# Otherwise treat the derivation as a tarball and extract it
|
|
|
|
# into $out.
|
2015-11-19 12:11:17 +00:00
|
|
|
tar -C $out -xpf $drv || true
|
|
|
|
fi
|
|
|
|
done
|
|
|
|
'';
|
|
|
|
|
2016-09-28 00:42:05 +01:00
|
|
|
# Helper for setting up the base files for managing users and
|
|
|
|
# groups, only if such files don't exist already. It is suitable for
|
|
|
|
# being used in a runAsRoot script.
|
2015-11-19 12:11:17 +00:00
|
|
|
shadowSetup = ''
|
|
|
|
export PATH=${shadow}/bin:$PATH
|
|
|
|
mkdir -p /etc/pam.d
|
2016-09-28 00:42:05 +01:00
|
|
|
if [[ ! -f /etc/passwd ]]; then
|
2019-02-26 11:45:54 +00:00
|
|
|
echo "root:x:0:0::/root:${runtimeShell}" > /etc/passwd
|
2015-11-19 12:11:17 +00:00
|
|
|
echo "root:!x:::::::" > /etc/shadow
|
|
|
|
fi
|
2016-09-28 00:42:05 +01:00
|
|
|
if [[ ! -f /etc/group ]]; then
|
2015-11-19 12:11:17 +00:00
|
|
|
echo "root:x:0:" > /etc/group
|
|
|
|
echo "root:x::" > /etc/gshadow
|
|
|
|
fi
|
2016-09-28 00:42:05 +01:00
|
|
|
if [[ ! -f /etc/pam.d/other ]]; then
|
2015-11-19 12:11:17 +00:00
|
|
|
cat > /etc/pam.d/other <<EOF
|
2016-09-28 00:42:05 +01:00
|
|
|
account sufficient pam_unix.so
|
|
|
|
auth sufficient pam_rootok.so
|
|
|
|
password requisite pam_unix.so nullok sha512
|
|
|
|
session required pam_unix.so
|
|
|
|
EOF
|
2015-11-19 12:11:17 +00:00
|
|
|
fi
|
2016-09-28 00:42:05 +01:00
|
|
|
if [[ ! -f /etc/login.defs ]]; then
|
2015-11-19 12:11:17 +00:00
|
|
|
touch /etc/login.defs
|
|
|
|
fi
|
|
|
|
'';
|
|
|
|
|
2016-09-28 00:42:05 +01:00
|
|
|
# Run commands in a virtual machine.
|
|
|
|
runWithOverlay = {
|
|
|
|
name,
|
|
|
|
fromImage ? null,
|
|
|
|
fromImageName ? null,
|
|
|
|
fromImageTag ? null,
|
|
|
|
diskSize ? 1024,
|
|
|
|
preMount ? "",
|
|
|
|
postMount ? "",
|
|
|
|
postUmount ? ""
|
|
|
|
}:
|
2019-06-27 19:15:42 +01:00
|
|
|
let
|
|
|
|
result = vmTools.runInLinuxVM (
|
2015-11-19 12:11:17 +00:00
|
|
|
runCommand name {
|
2016-09-28 00:42:05 +01:00
|
|
|
preVM = vmTools.createEmptyImage {
|
|
|
|
size = diskSize;
|
|
|
|
fullName = "docker-run-disk";
|
|
|
|
};
|
2015-11-19 12:11:17 +00:00
|
|
|
inherit fromImage fromImageName fromImageTag;
|
2016-09-28 00:42:05 +01:00
|
|
|
|
2020-11-24 15:29:28 +00:00
|
|
|
nativeBuildInputs = [ util-linux e2fsprogs jshon rsync jq ];
|
2015-11-19 12:11:17 +00:00
|
|
|
} ''
|
|
|
|
mkdir disk
|
|
|
|
mkfs /dev/${vmTools.hd}
|
|
|
|
mount /dev/${vmTools.hd} disk
|
|
|
|
cd disk
|
|
|
|
|
2016-09-28 00:42:05 +01:00
|
|
|
if [[ -n "$fromImage" ]]; then
|
|
|
|
echo "Unpacking base image..."
|
2015-11-19 12:11:17 +00:00
|
|
|
mkdir image
|
|
|
|
tar -C image -xpf "$fromImage"
|
2017-09-28 11:56:23 +01:00
|
|
|
|
2020-06-08 10:47:46 +01:00
|
|
|
if [[ -n "$fromImageName" ]] && [[ -n "$fromImageTag" ]]; then
|
|
|
|
parentID="$(
|
|
|
|
cat "image/manifest.json" |
|
|
|
|
jq -r '.[] | select(.RepoTags | contains([$desiredTag])) | rtrimstr(".json")' \
|
|
|
|
--arg desiredTag "$fromImageName:$fromImageTag"
|
|
|
|
)"
|
|
|
|
else
|
|
|
|
echo "From-image name or tag wasn't set. Reading the first ID."
|
|
|
|
parentID="$(cat "image/manifest.json" | jq -r '.[0].Config | rtrimstr(".json")')"
|
2017-09-28 11:56:23 +01:00
|
|
|
fi
|
|
|
|
|
dockerTools.buildImage: support using a layered image in fromImage
Docker images used to be, essentially, a linked list of layers. Each
layer would have a tarball and a json document pointing to its parent,
and the image pointed to the top layer:
imageA ----> layerA
|
v
layerB
|
v
layerC
The current image spec changed this format to where the Image defined
the order and set of layers:
imageA ---> layerA
|--> layerB
`--> layerC
For backwards compatibility, docker produces images which follow both
specs: layers point to parents, and images also point to the entire
list:
imageA ---> layerA
| |
| v
|--> layerB
| |
| v
`--> layerC
This is nice for tooling which supported the older version and never
updated to support the newer format.
Our `buildImage` code only supported the old version, so in order for
`buildImage` to properly generate an image based on another image
with `fromImage`, the parent image's layers must fully support the old
mechanism.
This is not a problem in general, but is a problem with
`buildLayeredImage`.
`buildLayeredImage` creates images with newer image spec, because
individual store paths don't have a guaranteed parent layer. Including
a specific parent ID in the layer's json makes the output less likely
to cache hit when published or pulled.
This means until now, `buildLayeredImage` could not be the input to
`buildImage`.
The changes in this PR change `buildImage` to only use the layer's
manifest when locating parent IDs. This does break buildImage on
extremely old Docker images, though I do wonder how many of these
exist.
This work has been sponsored by Target.
2018-12-04 17:18:06 +00:00
|
|
|
cat ./image/manifest.json | jq -r '.[0].Layers | .[]' > layer-list
|
|
|
|
else
|
|
|
|
touch layer-list
|
2015-11-19 12:11:17 +00:00
|
|
|
fi
|
|
|
|
|
2017-09-28 11:56:23 +01:00
|
|
|
# Unpack all of the parent layers into the image.
|
2015-11-19 12:11:17 +00:00
|
|
|
lowerdir=""
|
dockerTools.buildImage: support using a layered image in fromImage
Docker images used to be, essentially, a linked list of layers. Each
layer would have a tarball and a json document pointing to its parent,
and the image pointed to the top layer:
imageA ----> layerA
|
v
layerB
|
v
layerC
The current image spec changed this format to where the Image defined
the order and set of layers:
imageA ---> layerA
|--> layerB
`--> layerC
For backwards compatibility, docker produces images which follow both
specs: layers point to parents, and images also point to the entire
list:
imageA ---> layerA
| |
| v
|--> layerB
| |
| v
`--> layerC
This is nice for tooling which supported the older version and never
updated to support the newer format.
Our `buildImage` code only supported the old version, so in order for
`buildImage` to properly generate an image based on another image
with `fromImage`, the parent image's layers must fully support the old
mechanism.
This is not a problem in general, but is a problem with
`buildLayeredImage`.
`buildLayeredImage` creates images with newer image spec, because
individual store paths don't have a guaranteed parent layer. Including
a specific parent ID in the layer's json makes the output less likely
to cache hit when published or pulled.
This means until now, `buildLayeredImage` could not be the input to
`buildImage`.
The changes in this PR change `buildImage` to only use the layer's
manifest when locating parent IDs. This does break buildImage on
extremely old Docker images, though I do wonder how many of these
exist.
This work has been sponsored by Target.
2018-12-04 17:18:06 +00:00
|
|
|
extractionID=0
|
2019-03-12 10:59:57 +00:00
|
|
|
for layerTar in $(tac layer-list); do
|
dockerTools.buildImage: support using a layered image in fromImage
Docker images used to be, essentially, a linked list of layers. Each
layer would have a tarball and a json document pointing to its parent,
and the image pointed to the top layer:
imageA ----> layerA
|
v
layerB
|
v
layerC
The current image spec changed this format to where the Image defined
the order and set of layers:
imageA ---> layerA
|--> layerB
`--> layerC
For backwards compatibility, docker produces images which follow both
specs: layers point to parents, and images also point to the entire
list:
imageA ---> layerA
| |
| v
|--> layerB
| |
| v
`--> layerC
This is nice for tooling which supported the older version and never
updated to support the newer format.
Our `buildImage` code only supported the old version, so in order for
`buildImage` to properly generate an image based on another image
with `fromImage`, the parent image's layers must fully support the old
mechanism.
This is not a problem in general, but is a problem with
`buildLayeredImage`.
`buildLayeredImage` creates images with newer image spec, because
individual store paths don't have a guaranteed parent layer. Including
a specific parent ID in the layer's json makes the output less likely
to cache hit when published or pulled.
This means until now, `buildLayeredImage` could not be the input to
`buildImage`.
The changes in this PR change `buildImage` to only use the layer's
manifest when locating parent IDs. This does break buildImage on
extremely old Docker images, though I do wonder how many of these
exist.
This work has been sponsored by Target.
2018-12-04 17:18:06 +00:00
|
|
|
echo "Unpacking layer $layerTar"
|
|
|
|
extractionID=$((extractionID + 1))
|
|
|
|
|
|
|
|
mkdir -p image/$extractionID/layer
|
2018-12-14 20:48:58 +00:00
|
|
|
tar -C image/$extractionID/layer -xpf image/$layerTar
|
|
|
|
rm image/$layerTar
|
2015-11-19 12:11:17 +00:00
|
|
|
|
dockerTools.buildImage: support using a layered image in fromImage
Docker images used to be, essentially, a linked list of layers. Each
layer would have a tarball and a json document pointing to its parent,
and the image pointed to the top layer:
imageA ----> layerA
|
v
layerB
|
v
layerC
The current image spec changed this format to where the Image defined
the order and set of layers:
imageA ---> layerA
|--> layerB
`--> layerC
For backwards compatibility, docker produces images which follow both
specs: layers point to parents, and images also point to the entire
list:
imageA ---> layerA
| |
| v
|--> layerB
| |
| v
`--> layerC
This is nice for tooling which supported the older version and never
updated to support the newer format.
Our `buildImage` code only supported the old version, so in order for
`buildImage` to properly generate an image based on another image
with `fromImage`, the parent image's layers must fully support the old
mechanism.
This is not a problem in general, but is a problem with
`buildLayeredImage`.
`buildLayeredImage` creates images with newer image spec, because
individual store paths don't have a guaranteed parent layer. Including
a specific parent ID in the layer's json makes the output less likely
to cache hit when published or pulled.
This means until now, `buildLayeredImage` could not be the input to
`buildImage`.
The changes in this PR change `buildImage` to only use the layer's
manifest when locating parent IDs. This does break buildImage on
extremely old Docker images, though I do wonder how many of these
exist.
This work has been sponsored by Target.
2018-12-04 17:18:06 +00:00
|
|
|
find image/$extractionID/layer -name ".wh.*" -exec bash -c 'name="$(basename {}|sed "s/^.wh.//")"; mknod "$(dirname {})/$name" c 0 0; rm {}' \;
|
2015-11-19 12:11:17 +00:00
|
|
|
|
2016-09-28 00:42:05 +01:00
|
|
|
# Get the next lower directory and continue the loop.
|
2019-04-30 07:42:24 +01:00
|
|
|
lowerdir=image/$extractionID/layer''${lowerdir:+:}$lowerdir
|
2015-11-19 12:11:17 +00:00
|
|
|
done
|
|
|
|
|
|
|
|
mkdir work
|
|
|
|
mkdir layer
|
|
|
|
mkdir mnt
|
|
|
|
|
2016-09-28 00:42:05 +01:00
|
|
|
${lib.optionalString (preMount != "") ''
|
|
|
|
# Execute pre-mount steps
|
|
|
|
echo "Executing pre-mount steps..."
|
|
|
|
${preMount}
|
|
|
|
''}
|
2015-11-19 12:11:17 +00:00
|
|
|
|
|
|
|
if [ -n "$lowerdir" ]; then
|
|
|
|
mount -t overlay overlay -olowerdir=$lowerdir,workdir=work,upperdir=layer mnt
|
|
|
|
else
|
|
|
|
mount --bind layer mnt
|
|
|
|
fi
|
|
|
|
|
2016-09-28 00:42:05 +01:00
|
|
|
${lib.optionalString (postMount != "") ''
|
|
|
|
# Execute post-mount steps
|
|
|
|
echo "Executing post-mount steps..."
|
|
|
|
${postMount}
|
|
|
|
''}
|
|
|
|
|
2015-11-19 12:11:17 +00:00
|
|
|
umount mnt
|
|
|
|
|
2016-09-28 00:42:05 +01:00
|
|
|
(
|
|
|
|
cd layer
|
|
|
|
cmd='name="$(basename {})"; touch "$(dirname {})/.wh.$name"; rm "{}"'
|
|
|
|
find . -type c -exec bash -c "$cmd" \;
|
|
|
|
)
|
2015-11-19 12:11:17 +00:00
|
|
|
|
|
|
|
${postUmount}
|
|
|
|
'');
|
2019-06-27 19:15:42 +01:00
|
|
|
in
|
|
|
|
runCommand name {} ''
|
|
|
|
mkdir -p $out
|
|
|
|
cd ${result}
|
|
|
|
cp layer.tar json VERSION $out
|
|
|
|
'';
|
2015-11-19 12:11:17 +00:00
|
|
|
|
|
|
|
exportImage = { name ? fromImage.name, fromImage, fromImageName ? null, fromImageTag ? null, diskSize ? 1024 }:
|
|
|
|
runWithOverlay {
|
|
|
|
inherit name fromImage fromImageName fromImageTag diskSize;
|
|
|
|
|
|
|
|
postMount = ''
|
2016-09-28 00:42:05 +01:00
|
|
|
echo "Packing raw image..."
|
2018-03-12 17:26:15 +00:00
|
|
|
tar -C mnt --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" -cf $out .
|
2015-11-19 12:11:17 +00:00
|
|
|
'';
|
|
|
|
};
|
2016-09-28 00:42:05 +01:00
|
|
|
|
|
|
|
|
|
|
|
# Create an executable shell script which has the coreutils in its
|
|
|
|
# PATH. Since root scripts are executed in a blank environment, even
|
|
|
|
# things like `ls` or `echo` will be missing.
|
|
|
|
shellScript = name: text:
|
|
|
|
writeScript name ''
|
2019-02-26 11:45:54 +00:00
|
|
|
#!${runtimeShell}
|
2016-09-28 00:42:05 +01:00
|
|
|
set -e
|
|
|
|
export PATH=${coreutils}/bin:/bin
|
|
|
|
${text}
|
|
|
|
'';
|
|
|
|
|
|
|
|
# Create a "layer" (set of files).
|
|
|
|
mkPureLayer = {
|
|
|
|
# Name of the layer
|
|
|
|
name,
|
|
|
|
# JSON containing configuration and metadata for this layer.
|
|
|
|
baseJson,
|
|
|
|
# Files to add to the layer.
|
|
|
|
contents ? null,
|
2017-04-23 14:45:21 +01:00
|
|
|
# When copying the contents into the image, preserve symlinks to
|
|
|
|
# directories (see `rsync -K`). Otherwise, transform those symlinks
|
|
|
|
# into directories.
|
|
|
|
keepContentsDirlinks ? false,
|
2016-09-28 00:42:05 +01:00
|
|
|
# Additional commands to run on the layer before it is tar'd up.
|
2017-07-01 12:23:12 +01:00
|
|
|
extraCommands ? "", uid ? 0, gid ? 0
|
2016-09-28 00:42:05 +01:00
|
|
|
}:
|
|
|
|
runCommand "docker-layer-${name}" {
|
2015-11-19 12:11:17 +00:00
|
|
|
inherit baseJson contents extraCommands;
|
2019-01-30 04:29:18 +00:00
|
|
|
nativeBuildInputs = [ jshon rsync tarsum ];
|
2016-09-28 00:42:05 +01:00
|
|
|
}
|
|
|
|
''
|
2015-11-19 12:11:17 +00:00
|
|
|
mkdir layer
|
2016-09-28 00:42:05 +01:00
|
|
|
if [[ -n "$contents" ]]; then
|
|
|
|
echo "Adding contents..."
|
|
|
|
for item in $contents; do
|
|
|
|
echo "Adding $item"
|
2017-04-23 14:45:21 +01:00
|
|
|
rsync -a${if keepContentsDirlinks then "K" else "k"} --chown=0:0 $item/ layer/
|
2015-11-19 12:11:17 +00:00
|
|
|
done
|
2016-09-28 00:42:05 +01:00
|
|
|
else
|
|
|
|
echo "No contents to add to layer."
|
|
|
|
fi
|
|
|
|
|
2017-07-01 15:59:15 +01:00
|
|
|
chmod ug+w layer
|
|
|
|
|
2020-06-21 01:11:56 +01:00
|
|
|
if [[ -n "$extraCommands" ]]; then
|
2016-09-28 00:42:05 +01:00
|
|
|
(cd layer; eval "$extraCommands")
|
2015-11-19 12:11:17 +00:00
|
|
|
fi
|
|
|
|
|
2016-09-28 00:42:05 +01:00
|
|
|
# Tar up the layer and throw it into 'layer.tar'.
|
|
|
|
echo "Packing layer..."
|
2015-11-19 12:11:17 +00:00
|
|
|
mkdir $out
|
2020-11-03 11:33:11 +00:00
|
|
|
tarhash=$(tar -C layer --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=${toString uid} --group=${toString gid} -cf - . | tee -p $out/layer.tar | tarsum)
|
2016-09-28 00:42:05 +01:00
|
|
|
|
|
|
|
# Add a 'checksum' field to the JSON, with the value set to the
|
|
|
|
# checksum of the tarball.
|
2018-09-25 18:54:45 +01:00
|
|
|
cat ${baseJson} | jshon -s "$tarhash" -i checksum > $out/json
|
2016-09-28 00:42:05 +01:00
|
|
|
|
|
|
|
# Indicate to docker that we're using schema version 1.0.
|
2015-11-19 12:11:17 +00:00
|
|
|
echo -n "1.0" > $out/VERSION
|
2016-09-28 00:42:05 +01:00
|
|
|
|
|
|
|
echo "Finished building layer '${name}'"
|
2015-11-19 12:11:17 +00:00
|
|
|
'';
|
|
|
|
|
2016-09-28 00:42:05 +01:00
|
|
|
# Make a "root" layer; required if we need to execute commands as a
|
|
|
|
# privileged user on the image. The commands themselves will be
|
|
|
|
# performed in a virtual machine sandbox.
|
|
|
|
mkRootLayer = {
|
|
|
|
# Name of the image.
|
|
|
|
name,
|
|
|
|
# Script to run as root. Bash.
|
|
|
|
runAsRoot,
|
|
|
|
# Files to add to the layer. If null, an empty layer will be created.
|
|
|
|
contents ? null,
|
2017-04-23 14:45:21 +01:00
|
|
|
# When copying the contents into the image, preserve symlinks to
|
|
|
|
# directories (see `rsync -K`). Otherwise, transform those symlinks
|
|
|
|
# into directories.
|
|
|
|
keepContentsDirlinks ? false,
|
2016-09-28 00:42:05 +01:00
|
|
|
# JSON containing configuration and metadata for this layer.
|
|
|
|
baseJson,
|
|
|
|
# Existing image onto which to append the new layer.
|
|
|
|
fromImage ? null,
|
|
|
|
# Name of the image we're appending onto.
|
|
|
|
fromImageName ? null,
|
|
|
|
# Tag of the image we're appending onto.
|
|
|
|
fromImageTag ? null,
|
|
|
|
# How much disk to allocate for the temporary virtual machine.
|
|
|
|
diskSize ? 1024,
|
|
|
|
# Commands (bash) to run on the layer; these do not require sudo.
|
|
|
|
extraCommands ? ""
|
|
|
|
}:
|
|
|
|
# Generate an executable script from the `runAsRoot` text.
|
2018-05-23 00:53:28 +01:00
|
|
|
let
|
|
|
|
runAsRootScript = shellScript "run-as-root.sh" runAsRoot;
|
|
|
|
extraCommandsScript = shellScript "extra-commands.sh" extraCommands;
|
2015-11-19 12:11:17 +00:00
|
|
|
in runWithOverlay {
|
2016-09-28 00:42:05 +01:00
|
|
|
name = "docker-layer-${name}";
|
|
|
|
|
2015-11-19 12:11:17 +00:00
|
|
|
inherit fromImage fromImageName fromImageTag diskSize;
|
|
|
|
|
2016-09-28 00:42:05 +01:00
|
|
|
preMount = lib.optionalString (contents != null && contents != []) ''
|
|
|
|
echo "Adding contents..."
|
|
|
|
for item in ${toString contents}; do
|
|
|
|
echo "Adding $item..."
|
2017-04-23 14:45:21 +01:00
|
|
|
rsync -a${if keepContentsDirlinks then "K" else "k"} --chown=0:0 $item/ layer/
|
2015-11-19 12:11:17 +00:00
|
|
|
done
|
2017-07-01 15:59:15 +01:00
|
|
|
|
|
|
|
chmod ug+w layer
|
2015-11-19 12:11:17 +00:00
|
|
|
'';
|
|
|
|
|
|
|
|
postMount = ''
|
2016-09-28 00:42:05 +01:00
|
|
|
mkdir -p mnt/{dev,proc,sys} mnt${storeDir}
|
|
|
|
|
|
|
|
# Mount /dev, /sys and the nix store as shared folders.
|
2015-11-19 12:11:17 +00:00
|
|
|
mount --rbind /dev mnt/dev
|
|
|
|
mount --rbind /sys mnt/sys
|
2016-09-28 00:42:05 +01:00
|
|
|
mount --rbind ${storeDir} mnt${storeDir}
|
2015-11-19 12:11:17 +00:00
|
|
|
|
2016-09-28 00:42:05 +01:00
|
|
|
# Execute the run as root script. See 'man unshare' for
|
|
|
|
# details on what's going on here; basically this command
|
|
|
|
# means that the runAsRootScript will be executed in a nearly
|
|
|
|
# completely isolated environment.
|
2020-11-04 07:11:05 +00:00
|
|
|
#
|
|
|
|
# Ideally we would use --mount-proc=mnt/proc or similar, but this
|
|
|
|
# doesn't work. The workaround is to setup proc after unshare.
|
|
|
|
# See: https://github.com/karelzak/util-linux/issues/648
|
|
|
|
unshare -imnpuf --mount-proc sh -c 'mount --rbind /proc mnt/proc && chroot mnt ${runAsRootScript}'
|
2016-09-28 00:42:05 +01:00
|
|
|
|
|
|
|
# Unmount directories and remove them.
|
|
|
|
umount -R mnt/dev mnt/sys mnt${storeDir}
|
|
|
|
rmdir --ignore-fail-on-non-empty \
|
|
|
|
mnt/dev mnt/proc mnt/sys mnt${storeDir} \
|
|
|
|
mnt$(dirname ${storeDir})
|
2015-11-19 12:11:17 +00:00
|
|
|
'';
|
2016-09-28 00:42:05 +01:00
|
|
|
|
2015-11-19 12:11:17 +00:00
|
|
|
postUmount = ''
|
2018-05-23 00:53:28 +01:00
|
|
|
(cd layer; ${extraCommandsScript})
|
2015-11-19 12:11:17 +00:00
|
|
|
|
2016-09-28 00:42:05 +01:00
|
|
|
echo "Packing layer..."
|
2019-06-27 19:15:42 +01:00
|
|
|
mkdir -p $out
|
2020-05-06 10:17:47 +01:00
|
|
|
tarhash=$(tar -C layer --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" -cf - . |
|
2020-11-03 11:33:11 +00:00
|
|
|
tee -p $out/layer.tar |
|
2020-05-06 10:17:47 +01:00
|
|
|
${tarsum}/bin/tarsum)
|
2016-09-28 00:42:05 +01:00
|
|
|
|
2018-09-25 18:54:45 +01:00
|
|
|
cat ${baseJson} | jshon -s "$tarhash" -i checksum > $out/json
|
2016-09-28 00:42:05 +01:00
|
|
|
# Indicate to docker that we're using schema version 1.0.
|
2015-11-19 12:11:17 +00:00
|
|
|
echo -n "1.0" > $out/VERSION
|
2016-09-28 00:42:05 +01:00
|
|
|
|
|
|
|
echo "Finished building layer '${name}'"
|
2015-11-19 12:11:17 +00:00
|
|
|
'';
|
|
|
|
};
|
|
|
|
|
2020-06-08 10:47:46 +01:00
|
|
|
buildLayeredImage = {name, ...}@args:
|
dockerTools.buildLayeredImage: init
Create a many-layered Docker Image.
Implements much less than buildImage:
- Doesn't support specific uids/gids
- Doesn't support runninng commands after building
- Doesn't require qemu
- Doesn't create mutable copies of the files in the path
- Doesn't support parent images
If you want those feature, I recommend using buildLayeredImage as an
input to buildImage.
Notably, it does support:
- Caching low level, common paths based on a graph traversial
algorithm, see referencesByPopularity in
0a80233487993256e811f566b1c80a40394c03d6
- Configurable number of layers. If you're not using AUFS or not
extending the image, you can specify a larger number of layers at
build time:
pkgs.dockerTools.buildLayeredImage {
name = "hello";
maxLayers = 128;
config.Cmd = [ "${pkgs.gitFull}/bin/git" ];
};
- Parallelized creation of the layers, improving build speed.
- The contents of the image includes the closure of the configuration,
so you don't have to specify paths in contents and config.
With buildImage, paths referred to by the config were not included
automatically in the image. Thus, if you wanted to call Git, you
had to specify it twice:
pkgs.dockerTools.buildImage {
name = "hello";
contents = [ pkgs.gitFull ];
config.Cmd = [ "${pkgs.gitFull}/bin/git" ];
};
buildLayeredImage on the other hand includes the runtime closure of
the config when calculating the contents of the image:
pkgs.dockerTools.buildImage {
name = "hello";
config.Cmd = [ "${pkgs.gitFull}/bin/git" ];
};
Minor Problems
- If any of the store paths change, every layer will be rebuilt in
the nix-build. However, beacuse the layers are bit-for-bit
reproducable, when these images are loaded in to Docker they will
match existing layers and not be imported or uploaded twice.
Common Questions
- Aren't Docker layers ordered?
No. People who have used a Dockerfile before assume Docker's
Layers are inherently ordered. However, this is not true -- Docker
layers are content-addressable and are not explicitly layered until
they are composed in to an Image.
- What happens if I have more than maxLayers of store paths?
The first (maxLayers-2) most "popular" paths will have their own
individual layers, then layer #(maxLayers-1) will contain all the
remaining "unpopular" paths, and finally layer #(maxLayers) will
contain the Image configuration.
2018-09-25 15:53:42 +01:00
|
|
|
let
|
2020-06-08 10:47:46 +01:00
|
|
|
stream = streamLayeredImage args;
|
dockerTools.buildLayeredImage: init
Create a many-layered Docker Image.
Implements much less than buildImage:
- Doesn't support specific uids/gids
- Doesn't support runninng commands after building
- Doesn't require qemu
- Doesn't create mutable copies of the files in the path
- Doesn't support parent images
If you want those feature, I recommend using buildLayeredImage as an
input to buildImage.
Notably, it does support:
- Caching low level, common paths based on a graph traversial
algorithm, see referencesByPopularity in
0a80233487993256e811f566b1c80a40394c03d6
- Configurable number of layers. If you're not using AUFS or not
extending the image, you can specify a larger number of layers at
build time:
pkgs.dockerTools.buildLayeredImage {
name = "hello";
maxLayers = 128;
config.Cmd = [ "${pkgs.gitFull}/bin/git" ];
};
- Parallelized creation of the layers, improving build speed.
- The contents of the image includes the closure of the configuration,
so you don't have to specify paths in contents and config.
With buildImage, paths referred to by the config were not included
automatically in the image. Thus, if you wanted to call Git, you
had to specify it twice:
pkgs.dockerTools.buildImage {
name = "hello";
contents = [ pkgs.gitFull ];
config.Cmd = [ "${pkgs.gitFull}/bin/git" ];
};
buildLayeredImage on the other hand includes the runtime closure of
the config when calculating the contents of the image:
pkgs.dockerTools.buildImage {
name = "hello";
config.Cmd = [ "${pkgs.gitFull}/bin/git" ];
};
Minor Problems
- If any of the store paths change, every layer will be rebuilt in
the nix-build. However, beacuse the layers are bit-for-bit
reproducable, when these images are loaded in to Docker they will
match existing layers and not be imported or uploaded twice.
Common Questions
- Aren't Docker layers ordered?
No. People who have used a Dockerfile before assume Docker's
Layers are inherently ordered. However, this is not true -- Docker
layers are content-addressable and are not explicitly layered until
they are composed into an Image.
- What happens if I have more than maxLayers of store paths?
The first (maxLayers-2) most "popular" paths will have their own
individual layers, then layer #(maxLayers-1) will contain all the
remaining "unpopular" paths, and finally layer #(maxLayers) will
contain the Image configuration.
2018-09-25 15:53:42 +01:00
|
|
|
in
|
2021-03-09 18:32:54 +00:00
|
|
|
runCommand "${baseNameOf name}.tar.gz" {
|
2020-06-08 10:47:46 +01:00
|
|
|
inherit (stream) imageName;
|
2020-07-11 14:51:58 +01:00
|
|
|
passthru = { inherit (stream) imageTag; };
|
2020-11-19 13:31:14 +00:00
|
|
|
nativeBuildInputs = [ pigz ];
|
2020-06-08 10:47:46 +01:00
|
|
|
} "${stream} | pigz -nT > $out";
|
dockerTools.buildLayeredImage: init
Create a many-layered Docker Image.
Implements much less than buildImage:
- Doesn't support specific uids/gids
- Doesn't support running commands after building
- Doesn't require qemu
- Doesn't create mutable copies of the files in the path
- Doesn't support parent images
If you want those features, I recommend using buildLayeredImage as an
input to buildImage.
Notably, it does support:
- Caching low level, common paths based on a graph traversal
algorithm, see referencesByPopularity in
0a80233487993256e811f566b1c80a40394c03d6
- Configurable number of layers. If you're not using AUFS or not
extending the image, you can specify a larger number of layers at
build time:
pkgs.dockerTools.buildLayeredImage {
name = "hello";
maxLayers = 128;
config.Cmd = [ "${pkgs.gitFull}/bin/git" ];
};
- Parallelized creation of the layers, improving build speed.
- The contents of the image includes the closure of the configuration,
so you don't have to specify paths in contents and config.
With buildImage, paths referred to by the config were not included
automatically in the image. Thus, if you wanted to call Git, you
had to specify it twice:
pkgs.dockerTools.buildImage {
name = "hello";
contents = [ pkgs.gitFull ];
config.Cmd = [ "${pkgs.gitFull}/bin/git" ];
};
buildLayeredImage on the other hand includes the runtime closure of
the config when calculating the contents of the image:
pkgs.dockerTools.buildImage {
name = "hello";
config.Cmd = [ "${pkgs.gitFull}/bin/git" ];
};
Minor Problems
- If any of the store paths change, every layer will be rebuilt in
the nix-build. However, because the layers are bit-for-bit
reproducible, when these images are loaded into Docker they will
match existing layers and not be imported or uploaded twice.
Common Questions
- Aren't Docker layers ordered?
No. People who have used a Dockerfile before assume Docker's
Layers are inherently ordered. However, this is not true -- Docker
layers are content-addressable and are not explicitly layered until
they are composed into an Image.
- What happens if I have more than maxLayers of store paths?
The first (maxLayers-2) most "popular" paths will have their own
individual layers, then layer #(maxLayers-1) will contain all the
remaining "unpopular" paths, and finally layer #(maxLayers) will
contain the Image configuration.
2018-09-25 15:53:42 +01:00
|
|
|
|
2015-11-19 12:11:17 +00:00
|
|
|
# 1. extract the base image
|
|
|
|
# 2. create the layer
|
|
|
|
# 3. add layer deps to the layer itself, diffing with the base image
|
|
|
|
# 4. compute the layer id
|
|
|
|
# 5. put the layer in the image
|
|
|
|
# 6. repack the image
|
2016-09-28 00:42:05 +01:00
|
|
|
buildImage = args@{
|
|
|
|
# Image name.
|
|
|
|
name,
|
2018-06-29 18:20:55 +01:00
|
|
|
# Image tag, when null then the nix output hash will be used.
|
|
|
|
tag ? null,
|
2016-09-28 00:42:05 +01:00
|
|
|
# Parent image, to append to.
|
|
|
|
fromImage ? null,
|
|
|
|
# Name of the parent image; will be read from the image otherwise.
|
|
|
|
fromImageName ? null,
|
|
|
|
# Tag of the parent image; will be read from the image otherwise.
|
|
|
|
fromImageTag ? null,
|
|
|
|
# Files to put on the image (a nix store path or list of paths).
|
|
|
|
contents ? null,
|
2017-04-23 14:45:21 +01:00
|
|
|
# When copying the contents into the image, preserve symlinks to
|
|
|
|
# directories (see `rsync -K`). Otherwise, transform those symlinks
|
|
|
|
# into directories.
|
|
|
|
keepContentsDirlinks ? false,
|
2016-09-28 00:42:05 +01:00
|
|
|
# Docker config; e.g. what command to run on the container.
|
|
|
|
config ? null,
|
|
|
|
# Optional bash script to run on the files prior to fixturizing the layer.
|
2017-07-01 12:23:12 +01:00
|
|
|
extraCommands ? "", uid ? 0, gid ? 0,
|
2016-09-28 00:42:05 +01:00
|
|
|
# Optional bash script to run as root on the image when provisioning.
|
|
|
|
runAsRoot ? null,
|
|
|
|
# Size of the virtual machine disk to provision when building the image.
|
|
|
|
diskSize ? 1024,
|
2017-06-27 21:11:17 +01:00
|
|
|
# Time of creation of the image.
|
|
|
|
created ? "1970-01-01T00:00:01Z",
|
2016-09-28 00:42:05 +01:00
|
|
|
}:
|
2015-11-19 12:11:17 +00:00
|
|
|
|
|
|
|
let
|
2016-02-18 16:16:15 +00:00
|
|
|
baseName = baseNameOf name;
|
|
|
|
|
2016-09-28 00:42:05 +01:00
|
|
|
# Create a JSON blob of the configuration. Set the date to unix zero.
|
2018-09-20 16:40:36 +01:00
|
|
|
baseJson = let
|
|
|
|
pure = writeText "${baseName}-config.json" (builtins.toJSON {
|
|
|
|
inherit created config;
|
2020-12-12 23:42:31 +00:00
|
|
|
architecture = defaultArch;
|
2018-09-20 16:40:36 +01:00
|
|
|
os = "linux";
|
|
|
|
});
|
|
|
|
impure = runCommand "${baseName}-config.json"
|
2019-01-30 04:29:18 +00:00
|
|
|
{ nativeBuildInputs = [ jq ]; }
|
2018-09-20 16:40:36 +01:00
|
|
|
''
|
|
|
|
jq ".created = \"$(TZ=utc date --iso-8601="seconds")\"" ${pure} > $out
|
|
|
|
'';
|
|
|
|
in if created == "now" then impure else pure;
|
2016-03-10 07:29:28 +00:00
|
|
|
|
2016-09-28 00:42:05 +01:00
|
|
|
layer =
|
|
|
|
if runAsRoot == null
|
2016-10-31 15:11:48 +00:00
|
|
|
then mkPureLayer {
|
|
|
|
name = baseName;
|
2017-11-03 14:53:00 +00:00
|
|
|
inherit baseJson contents keepContentsDirlinks extraCommands uid gid;
|
2016-10-31 15:11:48 +00:00
|
|
|
} else mkRootLayer {
|
|
|
|
name = baseName;
|
|
|
|
inherit baseJson fromImage fromImageName fromImageTag
|
2017-04-23 14:45:21 +01:00
|
|
|
contents keepContentsDirlinks runAsRoot diskSize
|
|
|
|
extraCommands;
|
2016-10-31 15:11:48 +00:00
|
|
|
};
|
2016-09-28 00:42:05 +01:00
|
|
|
result = runCommand "docker-image-${baseName}.tar.gz" {
|
2019-06-27 19:15:42 +01:00
|
|
|
nativeBuildInputs = [ jshon pigz coreutils findutils jq moreutils ];
|
2021-03-09 09:06:04 +00:00
|
|
|
# Image name must be lowercase
|
2017-07-25 09:47:51 +01:00
|
|
|
imageName = lib.toLower name;
|
2021-03-09 09:06:04 +00:00
|
|
|
imageTag = if tag == null then "" else tag;
|
2015-11-19 12:11:17 +00:00
|
|
|
inherit fromImage baseJson;
|
2016-03-10 07:29:28 +00:00
|
|
|
layerClosure = writeReferencesToFile layer;
|
2016-09-28 00:42:05 +01:00
|
|
|
passthru.buildArgs = args;
|
|
|
|
passthru.layer = layer;
|
2020-07-11 14:51:58 +01:00
|
|
|
passthru.imageTag =
|
|
|
|
if tag != null
|
2021-06-01 13:42:21 +01:00
|
|
|
then tag
|
2020-07-11 14:51:58 +01:00
|
|
|
else
|
|
|
|
lib.head (lib.strings.splitString "-" (baseNameOf result.outPath));
|
2020-01-17 19:03:03 +00:00
|
|
|
# Docker can't be made to run darwin binaries
|
|
|
|
meta.badPlatforms = lib.platforms.darwin;
|
2015-11-19 12:11:17 +00:00
|
|
|
} ''
|
2018-06-29 18:20:55 +01:00
|
|
|
${lib.optionalString (tag == null) ''
|
|
|
|
outName="$(basename "$out")"
|
|
|
|
outHash=$(echo "$outName" | cut -d - -f 1)
|
|
|
|
|
|
|
|
imageTag=$outHash
|
|
|
|
''}
|
|
|
|
|
2016-09-23 22:10:47 +01:00
|
|
|
# Print tar contents:
|
|
|
|
# 1: Interpreted as relative to the root directory
|
|
|
|
# 2: With no trailing slashes on directories
|
2016-09-28 00:42:05 +01:00
|
|
|
# This is useful for ensuring that the output matches the
|
|
|
|
# values generated by the "find" command
|
2016-09-23 22:10:47 +01:00
|
|
|
ls_tar() {
|
2016-09-28 00:42:05 +01:00
|
|
|
for f in $(tar -tf $1 | xargs realpath -ms --relative-to=.); do
|
|
|
|
if [[ "$f" != "." ]]; then
|
|
|
|
echo "/$f"
|
|
|
|
fi
|
|
|
|
done
|
2016-09-23 22:10:47 +01:00
|
|
|
}
|
2016-09-28 00:42:05 +01:00
|
|
|
|
2015-11-19 12:11:17 +00:00
|
|
|
mkdir image
|
|
|
|
touch baseFiles
|
2020-05-08 10:49:16 +01:00
|
|
|
baseEnvs='[]'
|
2016-09-28 00:42:05 +01:00
|
|
|
if [[ -n "$fromImage" ]]; then
|
|
|
|
echo "Unpacking base image..."
|
2015-11-19 12:11:17 +00:00
|
|
|
tar -C image -xpf "$fromImage"
|
dockerTools.buildImage: support using a layered image in fromImage
Docker images used to be, essentially, a linked list of layers. Each
layer would have a tarball and a json document pointing to its parent,
and the image pointed to the top layer:
imageA ----> layerA
|
v
layerB
|
v
layerC
The current image spec changed this format to where the Image defined
the order and set of layers:
imageA ---> layerA
|--> layerB
`--> layerC
For backwards compatibility, docker produces images which follow both
specs: layers point to parents, and images also point to the entire
list:
imageA ---> layerA
| |
| v
|--> layerB
| |
| v
`--> layerC
This is nice for tooling which supported the older version and never
updated to support the newer format.
Our `buildImage` code only supported the old version, so in order for
`buildImage` to properly generate an image based on another image
with `fromImage`, the parent image's layers must fully support the old
mechanism.
This is not a problem in general, but is a problem with
`buildLayeredImage`.
`buildLayeredImage` creates images with newer image spec, because
individual store paths don't have a guaranteed parent layer. Including
a specific parent ID in the layer's json makes the output less likely
to cache hit when published or pulled.
This means until now, `buildLayeredImage` could not be the input to
`buildImage`.
The changes in this PR change `buildImage` to only use the layer's
manifest when locating parent IDs. This does break buildImage on
extremely old Docker images, though I do wonder how many of these
exist.
This work has been sponsored by Target.
2018-12-04 17:18:06 +00:00
|
|
|
|
2020-05-08 10:49:16 +01:00
|
|
|
# Store the layers and the environment variables from the base image
|
dockerTools.buildImage: support using a layered image in fromImage
Docker images used to be, essentially, a linked list of layers. Each
layer would have a tarball and a json document pointing to its parent,
and the image pointed to the top layer:
imageA ----> layerA
|
v
layerB
|
v
layerC
The current image spec changed this format to where the Image defined
the order and set of layers:
imageA ---> layerA
|--> layerB
`--> layerC
For backwards compatibility, docker produces images which follow both
specs: layers point to parents, and images also point to the entire
list:
imageA ---> layerA
| |
| v
|--> layerB
| |
| v
`--> layerC
This is nice for tooling which supported the older version and never
updated to support the newer format.
Our `buildImage` code only supported the old version, so in order for
`buildImage` to properly generate an image based on another image
with `fromImage`, the parent image's layers must fully support the old
mechanism.
This is not a problem in general, but is a problem with
`buildLayeredImage`.
`buildLayeredImage` creates images with newer image spec, because
individual store paths don't have a guaranteed parent layer. Including
a specific parent ID in the layer's json makes the output less likely
to cache hit when published or pulled.
This means until now, `buildLayeredImage` could not be the input to
`buildImage`.
The changes in this PR change `buildImage` to only use the layer's
manifest when locating parent IDs. This does break buildImage on
extremely old Docker images, though I do wonder how many of these
exist.
This work has been sponsored by Target.
2018-12-04 17:18:06 +00:00
|
|
|
cat ./image/manifest.json | jq -r '.[0].Layers | .[]' > layer-list
|
2020-05-08 10:49:16 +01:00
|
|
|
configName="$(cat ./image/manifest.json | jq -r '.[0].Config')"
|
|
|
|
baseEnvs="$(cat "./image/$configName" | jq '.config.Env // []')"
|
dockerTools.buildImage: support using a layered image in fromImage
Docker images used to be, essentially, a linked list of layers. Each
layer would have a tarball and a json document pointing to its parent,
and the image pointed to the top layer:
imageA ----> layerA
|
v
layerB
|
v
layerC
The current image spec changed this format to where the Image defined
the order and set of layers:
imageA ---> layerA
|--> layerB
`--> layerC
For backwards compatibility, docker produces images which follow both
specs: layers point to parents, and images also point to the entire
list:
imageA ---> layerA
| |
| v
|--> layerB
| |
| v
`--> layerC
This is nice for tooling which supported the older version and never
updated to support the newer format.
Our `buildImage` code only supported the old version, so in order for
`buildImage` to properly generate an image based on another image
with `fromImage`, the parent image's layers must fully support the old
mechanism.
This is not a problem in general, but is a problem with
`buildLayeredImage`.
`buildLayeredImage` creates images with newer image spec, because
individual store paths don't have a guaranteed parent layer. Including
a specific parent ID in the layer's json makes the output less likely
to cache hit when published or pulled.
This means until now, `buildLayeredImage` could not be the input to
`buildImage`.
The changes in this PR change `buildImage` to only use the layer's
manifest when locating parent IDs. This does break buildImage on
extremely old Docker images, though I do wonder how many of these
exist.
This work has been sponsored by Target.
2018-12-04 17:18:06 +00:00
|
|
|
|
2020-06-08 10:47:46 +01:00
|
|
|
# Extract the parentID from the manifest
|
|
|
|
if [[ -n "$fromImageName" ]] && [[ -n "$fromImageTag" ]]; then
|
|
|
|
parentID="$(
|
|
|
|
cat "image/manifest.json" |
|
|
|
|
jq -r '.[] | select(.RepoTags | contains([$desiredTag])) | rtrimstr(".json")' \
|
|
|
|
--arg desiredTag "$fromImageName:$fromImageTag"
|
|
|
|
)"
|
|
|
|
else
|
|
|
|
echo "From-image name or tag wasn't set. Reading the first ID."
|
|
|
|
parentID="$(cat "image/manifest.json" | jq -r '.[0].Config | rtrimstr(".json")')"
|
|
|
|
fi
|
|
|
|
|
2020-05-08 10:49:16 +01:00
|
|
|
# Otherwise do not import the base image configuration and manifest
|
2017-09-28 11:56:23 +01:00
|
|
|
chmod a+w image image/*.json
|
|
|
|
rm -f image/*.json
|
|
|
|
|
|
|
|
for l in image/*/layer.tar; do
|
|
|
|
ls_tar $l >> baseFiles
|
2015-11-19 12:11:17 +00:00
|
|
|
done
|
dockerTools.buildImage: support using a layered image in fromImage
Docker images used to be, essentially, a linked list of layers. Each
layer would have a tarball and a json document pointing to its parent,
and the image pointed to the top layer:
imageA ----> layerA
|
v
layerB
|
v
layerC
The current image spec changed this format to where the Image defined
the order and set of layers:
imageA ---> layerA
|--> layerB
`--> layerC
For backwards compatibility, docker produces images which follow both
specs: layers point to parents, and images also point to the entire
list:
imageA ---> layerA
| |
| v
|--> layerB
| |
| v
`--> layerC
This is nice for tooling which supported the older version and never
updated to support the newer format.
Our `buildImage` code only supported the old version, so in order for
`buildImage` to properly generate an image based on another image
with `fromImage`, the parent image's layers must fully support the old
mechanism.
This is not a problem in general, but is a problem with
`buildLayeredImage`.
`buildLayeredImage` creates images with newer image spec, because
individual store paths don't have a guaranteed parent layer. Including
a specific parent ID in the layer's json makes the output less likely
to cache hit when published or pulled.
This means until now, `buildLayeredImage` could not be the input to
`buildImage`.
The changes in this PR change `buildImage` to only use the layer's
manifest when locating parent IDs. This does break buildImage on
extremely old Docker images, though I do wonder how many of these
exist.
This work has been sponsored by Target.
2018-12-04 17:18:06 +00:00
|
|
|
else
|
|
|
|
touch layer-list
|
2015-11-19 12:11:17 +00:00
|
|
|
fi
|
|
|
|
|
|
|
|
chmod -R ug+rw image
|
2016-09-28 00:42:05 +01:00
|
|
|
|
2015-11-19 12:11:17 +00:00
|
|
|
mkdir temp
|
|
|
|
cp ${layer}/* temp/
|
|
|
|
chmod ug+w temp/*
|
2016-03-10 07:29:28 +00:00
|
|
|
|
|
|
|
for dep in $(cat $layerClosure); do
|
2016-09-28 00:42:05 +01:00
|
|
|
find $dep >> layerFiles
|
2016-03-10 07:29:28 +00:00
|
|
|
done
|
|
|
|
|
2016-09-28 00:42:05 +01:00
|
|
|
echo "Adding layer..."
|
|
|
|
# Record the contents of the tarball with ls_tar.
|
2016-09-23 22:10:47 +01:00
|
|
|
ls_tar temp/layer.tar >> baseFiles
|
2016-03-10 07:29:28 +00:00
|
|
|
|
2018-04-12 12:03:56 +01:00
|
|
|
# Append nix/store directory to the layer so that when the layer is loaded in the
|
|
|
|
# image /nix/store has read permissions for non-root users.
|
|
|
|
# nix/store is added only if the layer has /nix/store paths in it.
|
|
|
|
if [ $(wc -l < $layerClosure) -gt 1 ] && [ $(grep -c -e "^/nix/store$" baseFiles) -eq 0 ]; then
|
|
|
|
mkdir -p nix/store
|
|
|
|
chmod -R 555 nix
|
|
|
|
echo "./nix" >> layerFiles
|
|
|
|
echo "./nix/store" >> layerFiles
|
|
|
|
fi
|
|
|
|
|
2016-09-28 00:42:05 +01:00
|
|
|
# Get the files in the new layer which were *not* present in
|
|
|
|
# the old layer, and record them as newFiles.
|
|
|
|
comm <(sort -n baseFiles|uniq) \
|
|
|
|
<(sort -n layerFiles|uniq|grep -v ${layer}) -1 -3 > newFiles
|
|
|
|
# Append the new files to the layer.
|
2018-03-12 17:26:15 +00:00
|
|
|
tar -rpf temp/layer.tar --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" \
|
2017-04-16 07:39:02 +01:00
|
|
|
--owner=0 --group=0 --no-recursion --files-from newFiles
|
2016-09-28 00:42:05 +01:00
|
|
|
|
2017-09-28 11:56:23 +01:00
|
|
|
echo "Adding meta..."
|
|
|
|
|
|
|
|
# If we have a parentID, add it to the json metadata.
|
|
|
|
if [[ -n "$parentID" ]]; then
|
|
|
|
cat temp/json | jshon -s "$parentID" -i parent > tmpjson
|
|
|
|
mv tmpjson temp/json
|
|
|
|
fi
|
|
|
|
|
|
|
|
# Take the sha256 sum of the generated json and use it as the layer ID.
|
|
|
|
# Compute the size and add it to the json under the 'Size' field.
|
|
|
|
layerID=$(sha256sum temp/json|cut -d ' ' -f 1)
|
|
|
|
size=$(stat --printf="%s" temp/layer.tar)
|
|
|
|
cat temp/json | jshon -s "$layerID" -i id -n $size -i Size > tmpjson
|
|
|
|
mv tmpjson temp/json
|
|
|
|
|
|
|
|
# Use the temp folder we've been working on to create a new image.
|
|
|
|
mv temp image/$layerID
|
2016-09-28 00:42:05 +01:00
|
|
|
|
2019-04-30 07:42:24 +01:00
|
|
|
# Add the new layer ID to the end of the layer list
|
dockerTools.buildImage: support using a layered image in fromImage
Docker images used to be, essentially, a linked list of layers. Each
layer would have a tarball and a json document pointing to its parent,
and the image pointed to the top layer:
imageA ----> layerA
|
v
layerB
|
v
layerC
The current image spec changed this format to where the Image defined
the order and set of layers:
imageA ---> layerA
|--> layerB
`--> layerC
For backwards compatibility, docker produces images which follow both
specs: layers point to parents, and images also point to the entire
list:
imageA ---> layerA
| |
| v
|--> layerB
| |
| v
`--> layerC
This is nice for tooling which supported the older version and never
updated to support the newer format.
Our `buildImage` code only supported the old version, so in order for
`buildImage` to properly generate an image based on another image
with `fromImage`, the parent image's layers must fully support the old
mechanism.
This is not a problem in general, but is a problem with
`buildLayeredImage`.
`buildLayeredImage` creates images with newer image spec, because
individual store paths don't have a guaranteed parent layer. Including
a specific parent ID in the layer's json makes the output less likely
to cache hit when published or pulled.
This means until now, `buildLayeredImage` could not be the input to
`buildImage`.
The changes in this PR change `buildImage` to only use the layer's
manifest when locating parent IDs. This does break buildImage on
extremely old Docker images, though I do wonder how many of these
exist.
This work has been sponsored by Target.
2018-12-04 17:18:06 +00:00
|
|
|
(
|
2019-04-30 07:42:24 +01:00
|
|
|
cat layer-list
|
dockerTools.buildImage: support using a layered image in fromImage
Docker images used to be, essentially, a linked list of layers. Each
layer would have a tarball and a json document pointing to its parent,
and the image pointed to the top layer:
imageA ----> layerA
|
v
layerB
|
v
layerC
The current image spec changed this format to where the Image defined
the order and set of layers:
imageA ---> layerA
|--> layerB
`--> layerC
For backwards compatibility, docker produces images which follow both
specs: layers point to parents, and images also point to the entire
list:
imageA ---> layerA
| |
| v
|--> layerB
| |
| v
`--> layerC
This is nice for tooling which supported the older version and never
updated to support the newer format.
Our `buildImage` code only supported the old version, so in order for
`buildImage` to properly generate an image based on another image
with `fromImage`, the parent image's layers must fully support the old
mechanism.
This is not a problem in general, but is a problem with
`buildLayeredImage`.
`buildLayeredImage` creates images with newer image spec, because
individual store paths don't have a guaranteed parent layer. Including
a specific parent ID in the layer's json makes the output less likely
to cache hit when published or pulled.
This means until now, `buildLayeredImage` could not be the input to
`buildImage`.
The changes in this PR change `buildImage` to only use the layer's
manifest when locating parent IDs. This does break buildImage on
extremely old Docker images, though I do wonder how many of these
exist.
This work has been sponsored by Target.
2018-12-04 17:18:06 +00:00
|
|
|
# originally this used `sed -i "1i$layerID" layer-list`, but
|
|
|
|
# would fail if layer-list was completely empty.
|
|
|
|
echo "$layerID/layer.tar"
|
2019-06-27 19:15:42 +01:00
|
|
|
) | sponge layer-list
|
dockerTools.buildImage: support using a layered image in fromImage
Docker images used to be, essentially, a linked list of layers. Each
layer would have a tarball and a json document pointing to its parent,
and the image pointed to the top layer:
imageA ----> layerA
|
v
layerB
|
v
layerC
The current image spec changed this format to where the Image defined
the order and set of layers:
imageA ---> layerA
|--> layerB
`--> layerC
For backwards compatibility, docker produces images which follow both
specs: layers point to parents, and images also point to the entire
list:
imageA ---> layerA
| |
| v
|--> layerB
| |
| v
`--> layerC
This is nice for tooling which supported the older version and never
updated to support the newer format.
Our `buildImage` code only supported the old version, so in order for
`buildImage` to properly generate an image based on another image
with `fromImage`, the parent image's layers must fully support the old
mechanism.
This is not a problem in general, but is a problem with
`buildLayeredImage`.
`buildLayeredImage` creates images with newer image spec, because
individual store paths don't have a guaranteed parent layer. Including
a specific parent ID in the layer's json makes the output less likely
to cache hit when published or pulled.
This means until now, `buildLayeredImage` could not be the input to
`buildImage`.
The changes in this PR change `buildImage` to only use the layer's
manifest when locating parent IDs. This does break buildImage on
extremely old Docker images, though I do wonder how many of these
exist.
This work has been sponsored by Target.
2018-12-04 17:18:06 +00:00
|
|
|
|
2017-09-28 11:56:23 +01:00
|
|
|
# Create image json and image manifest
|
2020-05-08 10:49:16 +01:00
|
|
|
imageJson=$(cat ${baseJson} | jq '.config.Env = $baseenv + .config.Env' --argjson baseenv "$baseEnvs")
|
|
|
|
imageJson=$(echo "$imageJson" | jq ". + {\"rootfs\": {\"diff_ids\": [], \"type\": \"layers\"}}")
|
2017-08-02 18:27:19 +01:00
|
|
|
manifestJson=$(jq -n "[{\"RepoTags\":[\"$imageName:$imageTag\"]}]")
|
dockerTools.buildImage: support using a layered image in fromImage
Docker images used to be, essentially, a linked list of layers. Each
layer would have a tarball and a json document pointing to its parent,
and the image pointed to the top layer:
imageA ----> layerA
|
v
layerB
|
v
layerC
The current image spec changed this format to where the Image defined
the order and set of layers:
imageA ---> layerA
|--> layerB
`--> layerC
For backwards compatibility, docker produces images which follow both
specs: layers point to parents, and images also point to the entire
list:
imageA ---> layerA
| |
| v
|--> layerB
| |
| v
`--> layerC
This is nice for tooling which supported the older version and never
updated to support the newer format.
Our `buildImage` code only supported the old version, so in order for
`buildImage` to properly generate an image based on another image
with `fromImage`, the parent image's layers must fully support the old
mechanism.
This is not a problem in general, but is a problem with
`buildLayeredImage`.
`buildLayeredImage` creates images with newer image spec, because
individual store paths don't have a guaranteed parent layer. Including
a specific parent ID in the layer's json makes the output less likely
to cache hit when published or pulled.
This means until now, `buildLayeredImage` could not be the input to
`buildImage`.
The changes in this PR change `buildImage` to only use the layer's
manifest when locating parent IDs. This does break buildImage on
extremely old Docker images, though I do wonder how many of these
exist.
This work has been sponsored by Target.
2018-12-04 17:18:06 +00:00
|
|
|
|
2019-04-30 07:42:24 +01:00
|
|
|
for layerTar in $(cat ./layer-list); do
|
dockerTools.buildImage: support using a layered image in fromImage
Docker images used to be, essentially, a linked list of layers. Each
layer would have a tarball and a json document pointing to its parent,
and the image pointed to the top layer:
imageA ----> layerA
|
v
layerB
|
v
layerC
The current image spec changed this format to where the Image defined
the order and set of layers:
imageA ---> layerA
|--> layerB
`--> layerC
For backwards compatibility, docker produces images which follow both
specs: layers point to parents, and images also point to the entire
list:
imageA ---> layerA
| |
| v
|--> layerB
| |
| v
`--> layerC
This is nice for tooling which supported the older version and never
updated to support the newer format.
Our `buildImage` code only supported the old version, so in order for
`buildImage` to properly generate an image based on another image
with `fromImage`, the parent image's layers must fully support the old
mechanism.
This is not a problem in general, but is a problem with
`buildLayeredImage`.
`buildLayeredImage` creates images with newer image spec, because
individual store paths don't have a guaranteed parent layer. Including
a specific parent ID in the layer's json makes the output less likely
to cache hit when published or pulled.
This means until now, `buildLayeredImage` could not be the input to
`buildImage`.
The changes in this PR change `buildImage` to only use the layer's
manifest when locating parent IDs. This does break buildImage on
extremely old Docker images, though I do wonder how many of these
exist.
This work has been sponsored by Target.
2018-12-04 17:18:06 +00:00
|
|
|
layerChecksum=$(sha256sum image/$layerTar | cut -d ' ' -f1)
|
2019-04-30 07:42:24 +01:00
|
|
|
imageJson=$(echo "$imageJson" | jq ".history |= . + [{\"created\": \"$(jq -r .created ${baseJson})\"}]")
|
|
|
|
# diff_ids order is from the bottom-most to top-most layer
|
|
|
|
imageJson=$(echo "$imageJson" | jq ".rootfs.diff_ids |= . + [\"sha256:$layerChecksum\"]")
|
|
|
|
manifestJson=$(echo "$manifestJson" | jq ".[0].Layers |= . + [\"$layerTar\"]")
|
2017-08-02 18:27:19 +01:00
|
|
|
done
|
|
|
|
|
|
|
|
imageJsonChecksum=$(echo "$imageJson" | sha256sum | cut -d ' ' -f1)
|
2017-09-28 11:56:23 +01:00
|
|
|
echo "$imageJson" > "image/$imageJsonChecksum.json"
|
|
|
|
manifestJson=$(echo "$manifestJson" | jq ".[0].Config = \"$imageJsonChecksum.json\"")
|
2017-08-02 18:27:19 +01:00
|
|
|
echo "$manifestJson" > image/manifest.json
|
2017-07-26 20:53:35 +01:00
|
|
|
|
2017-09-28 11:56:23 +01:00
|
|
|
# Store the json under the name image/repositories.
|
|
|
|
jshon -n object \
|
|
|
|
-n object -s "$layerID" -i "$imageTag" \
|
|
|
|
-i "$imageName" > image/repositories
|
|
|
|
|
2016-09-28 00:42:05 +01:00
|
|
|
# Make the image read-only.
|
2015-11-19 12:11:17 +00:00
|
|
|
chmod -R a-w image
|
|
|
|
|
2016-09-28 00:42:05 +01:00
|
|
|
echo "Cooking the image..."
|
2018-04-03 09:26:03 +01:00
|
|
|
tar -C image --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=0 --group=0 --xform s:'^./':: -c . | pigz -nT > $out
|
2016-09-28 00:42:05 +01:00
|
|
|
|
|
|
|
echo "Finished."
|
2015-11-19 12:11:17 +00:00
|
|
|
'';
|
|
|
|
|
|
|
|
in
|
2016-09-28 00:42:05 +01:00
|
|
|
result;
|
2017-08-25 10:47:28 +01:00
|
|
|
|
2021-03-30 10:31:25 +01:00
|
|
|
# Merge the tarballs of images built with buildImage into a single
|
|
|
|
# tarball that contains all images. Running `docker load` on the resulting
|
|
|
|
# tarball will load the images into the docker daemon.
|
|
|
|
mergeImages = images: runCommand "merge-docker-images"
|
2021-03-30 08:04:47 +01:00
|
|
|
{
|
2021-03-30 10:31:25 +01:00
|
|
|
inherit images;
|
2021-03-30 08:04:47 +01:00
|
|
|
nativeBuildInputs = [ pigz jq ];
|
|
|
|
} ''
|
2021-03-30 10:31:25 +01:00
|
|
|
mkdir image inputs
|
2021-03-30 08:04:47 +01:00
|
|
|
# Extract images
|
2021-03-30 18:24:44 +01:00
|
|
|
repos=()
|
|
|
|
manifests=()
|
2021-03-30 10:31:25 +01:00
|
|
|
for item in $images; do
|
2021-03-30 18:24:44 +01:00
|
|
|
name=$(basename $item)
|
|
|
|
mkdir inputs/$name
|
|
|
|
tar -I pigz -xf $item -C inputs/$name
|
|
|
|
if [ -f inputs/$name/repositories ]; then
|
|
|
|
repos+=(inputs/$name/repositories)
|
|
|
|
fi
|
|
|
|
if [ -f inputs/$name/manifest.json ]; then
|
|
|
|
manifests+=(inputs/$name/manifest.json)
|
|
|
|
fi
|
2021-03-30 10:31:25 +01:00
|
|
|
done
|
|
|
|
# Copy all layers from input images to output image directory
|
|
|
|
cp -R --no-clobber inputs/*/* image/
|
|
|
|
# Merge repositories objects and manifests
|
2021-03-30 18:24:44 +01:00
|
|
|
jq -s add "''${repos[@]}" > repositories
|
|
|
|
jq -s add "''${manifests[@]}" > manifest.json
|
2021-03-30 10:31:25 +01:00
|
|
|
# Replace output image repositories and manifest with merged versions
|
2021-03-30 08:04:47 +01:00
|
|
|
mv repositories image/repositories
|
|
|
|
mv manifest.json image/manifest.json
|
|
|
|
# Create tarball and gzip
|
|
|
|
tar -C image --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=0 --group=0 --xform s:'^./':: -c . | pigz -nT > $out
|
|
|
|
'';
|
|
|
|
|
|
|
|
|
2020-12-02 11:16:56 +00:00
|
|
|
# Provide a /etc/passwd and /etc/group that contain root and nobody.
|
|
|
|
# Useful when packaging binaries that insist on using nss to look up
|
|
|
|
# username/groups (like nginx).
|
2020-12-02 13:51:06 +00:00
|
|
|
# /bin/sh is fine to not exist, and provided by another shim.
|
2020-12-02 11:16:56 +00:00
|
|
|
fakeNss = let
  # Minimal user database: root plus the conventional nobody entry.
  passwd = writeTextDir "etc/passwd" ''
    root:x:0:0:root user:/var/empty:/bin/sh
    nobody:x:65534:65534:nobody:/var/empty:/bin/sh
  '';
  # Matching minimal group database.
  group = writeTextDir "etc/group" ''
    root:x:0:
    nobody:x:65534:
  '';
  # Home directory referenced by both passwd entries above.
  varEmpty = runCommand "var-empty" {} ''
    mkdir -p $out/var/empty
  '';
in symlinkJoin {
  name = "fake-nss";
  paths = [ passwd group varEmpty ];
};
|
|
|
|
|
2020-12-02 13:51:06 +00:00
|
|
|
# This provides /bin/sh, pointing to bashInteractive.
# Complements fakeNss above, whose passwd entries name /bin/sh as the
# login shell.
binSh = runCommand "bin-sh" {} ''
  mkdir -p $out/bin
  ln -s ${bashInteractive}/bin/bash $out/bin/sh
'';
|
|
|
|
|
2017-08-25 10:47:28 +01:00
|
|
|
# Build an image and populate its nix database with the provided
|
|
|
|
# contents. The main purpose is to be able to use nix commands in
|
|
|
|
# the container.
|
|
|
|
# Be careful since this doesn't work well with multilayer.
|
2020-05-02 15:30:43 +01:00
|
|
|
# Like buildImage, but prepends the nix-database-generating commands
# (mkDbExtraCommand) to any user-supplied extraCommands.
buildImageWithNixDb = args@{ contents ? null, extraCommands ? "", ... }:
  buildImage (
    args
    // {
      extraCommands = mkDbExtraCommand contents + extraCommands;
    }
  );
|
|
|
|
|
|
|
|
# Layered variant of buildImageWithNixDb: prepends the nix-database
# generation (mkDbExtraCommand) to any user-supplied extraCommands.
buildLayeredImageWithNixDb = args@{ contents ? null, extraCommands ? "", ... }:
  buildLayeredImage (
    args
    // {
      extraCommands = mkDbExtraCommand contents + extraCommands;
    }
  );
|
|
|
|
|
2020-06-08 10:47:46 +01:00
|
|
|
# Build an executable that, when run, streams a layered Docker image
# (assembled by ./stream_layered_image.py from the conf.json computed
# below). `passthru.isExe = true` marks the result as an executable
# rather than an image tarball.
streamLayeredImage = {
  # Image Name
  name,
  # Image tag, the Nix's output hash will be used if null
  tag ? null,
  # Parent image, to append to.
  fromImage ? null,
  # Files to put on the image (a nix store path or list of paths).
  contents ? [],
  # Docker config; e.g. what command to run on the container.
  config ? {},
  # Time of creation of the image. Passing "now" will make the
  # created date be the time of building.
  created ? "1970-01-01T00:00:01Z",
  # Optional bash script to run on the files prior to fixturizing the layer.
  extraCommands ? "",
  # Optional bash script to run inside fakeroot environment.
  # Could be used for changing ownership of files in customisation layer.
  fakeRootCommands ? "",
  # We pick 100 to ensure there is plenty of room for extension. I
  # believe the actual maximum is 128.
  maxLayers ? 100,
  # Whether to include store paths in the image. You generally want to leave
  # this on, but tooling may disable this to insert the store paths more
  # efficiently via other means, such as bind mounting the host store.
  includeStorePaths ? true,
}:
  assert
    (lib.assertMsg (maxLayers > 1)
      "the maxLayers argument of dockerTools.buildLayeredImage function must be greater than 1 (current value: ${toString maxLayers})");
  let
    baseName = baseNameOf name;

    # Runtime script that emits the image; wrapped into $out below.
    streamScript = writePython3 "stream" {} ./stream_layered_image.py;
    # Static part of the image configuration: everything that does not
    # depend on the computed layers or the tag.
    baseJson = writeText "${baseName}-base.json" (builtins.toJSON {
      inherit config;
      architecture = defaultArch;
      os = "linux";
    });

    contentsList = if builtins.isList contents then contents else [ contents ];

    # We store the customisation layer as a tarball, to make sure that
    # things like permissions set on 'extraCommands' are not overridden
    # by Nix. Then we precompute the sha256 for performance.
    customisationLayer = symlinkJoin {
      name = "${baseName}-customisation-layer";
      paths = contentsList;
      inherit extraCommands fakeRootCommands;
      nativeBuildInputs = [ fakeroot ];
      postBuild = ''
        mv $out old_out
        (cd old_out; eval "$extraCommands" )

        mkdir $out

        fakeroot bash -c '
          source $stdenv/setup
          cd old_out
          eval "$fakeRootCommands"
          tar \
            --sort name \
            --numeric-owner --mtime "@$SOURCE_DATE_EPOCH" \
            --hard-dereference \
            -cf $out/layer.tar .
        '

        sha256sum $out/layer.tar \
          | cut -f 1 -d ' ' \
          > $out/checksum
      '';
    };

    closureRoots = optionals includeStorePaths /* normally true */ (
      [ baseJson ] ++ contentsList
    );
    overallClosure = writeText "closure" (lib.concatStringsSep " " closureRoots);

    # These derivations are only created as implementation details of docker-tools,
    # so they'll be excluded from the created images.
    unnecessaryDrvs = [ baseJson overallClosure ];

    # JSON configuration consumed by ./stream_layered_image.py: the
    # per-layer store-path assignment, the customisation layer, the
    # repository tag and the creation date.
    conf = runCommand "${baseName}-conf.json" {
      inherit fromImage maxLayers created;
      imageName = lib.toLower name;
      # Recursive reference to conf.outPath is fine: Nix evaluates it lazily.
      passthru.imageTag =
        if tag != null
        then tag
        else
          lib.head (lib.strings.splitString "-" (baseNameOf conf.outPath));
      paths = buildPackages.referencesByPopularity overallClosure;
      nativeBuildInputs = [ jq ];
    } ''
      ${if (tag == null) then ''
        outName="$(basename "$out")"
        outHash=$(echo "$outName" | cut -d - -f 1)

        imageTag=$outHash
      '' else ''
        imageTag="${tag}"
      ''}

      # convert "created" to iso format
      if [[ "$created" != "now" ]]; then
          created="$(date -Iseconds -d "$created")"
      fi

      # Emit the popularity-ordered closure, minus the bookkeeping
      # derivations listed in unnecessaryDrvs.
      paths() {
        cat $paths ${lib.concatMapStringsSep " "
                       (path: "| (grep -v ${path} || true)")
                       unnecessaryDrvs}
      }

      # Compute the number of layers that are already used by a potential
      # 'fromImage' as well as the customization layer. Ensure that there is
      # still at least one layer available to store the image contents.
      usedLayers=0

      # subtract number of base image layers
      if [[ -n "$fromImage" ]]; then
        (( usedLayers += $(tar -xOf "$fromImage" manifest.json | jq '.[0].Layers | length') ))
      fi

      # one layer will be taken up by the customisation layer
      (( usedLayers += 1 ))

      if ! (( $usedLayers < $maxLayers )); then
        echo >&2 "Error: usedLayers $usedLayers layers to store 'fromImage' and" \
                  "'extraCommands', but only maxLayers=$maxLayers were" \
                  "allowed. At least 1 layer is required to store contents."
        exit 1
      fi

      availableLayers=$(( maxLayers - usedLayers ))

      # Create $maxLayers worth of Docker Layers, one layer per store path
      # unless there are more paths than $maxLayers. In that case, create
      # $maxLayers-1 for the most popular layers, and smush the remaining
      # store paths in to one final layer.
      #
      # The following code is fiddly w.r.t. ensuring every layer is
      # created, and that no paths are missed. If you change the
      # following lines, double-check that your code behaves properly
      # when the number of layers equals:
      # maxLayers-1, maxLayers, and maxLayers+1, 0
      store_layers="$(
        paths |
          jq -sR '
            rtrimstr("\n") | split("\n")
              | (.[:$maxLayers-1] | map([.])) + [ .[$maxLayers-1:] ]
              | map(select(length > 0))
          ' \
            --argjson maxLayers "$availableLayers"
      )"

      # NOTE: every key below needs an explicit ":"; a bare comma would
      # silently fall back to jq's {"key"} / {$var} shorthand lookups.
      cat ${baseJson} | jq '
        . + {
          "store_dir": $store_dir,
          "from_image": $from_image,
          "store_layers": $store_layers,
          "customisation_layer": $customisation_layer,
          "repo_tag": $repo_tag,
          "created": $created
        }
        ' --arg store_dir "${storeDir}" \
          --argjson from_image ${if fromImage == null then "null" else "'\"${fromImage}\"'"} \
          --argjson store_layers "$store_layers" \
          --arg customisation_layer ${customisationLayer} \
          --arg repo_tag "$imageName:$imageTag" \
          --arg created "$created" |
        tee $out
    '';

    # The final output: the stream script wrapped so it receives the
    # configuration file as its argument.
    result = runCommand "stream-${baseName}" {
      inherit (conf) imageName;
      passthru = {
        inherit (conf) imageTag;

        # Distinguish tarballs and exes at the Nix level so functions that
        # take images can know in advance how the image is supposed to be used.
        isExe = true;
      };
      nativeBuildInputs = [ makeWrapper ];
    } ''
      makeWrapper ${streamScript} $out --add-flags ${conf}
    '';
  in
  result;
|
2015-11-19 12:11:17 +00:00
|
|
|
}
|