
Merge pull request #11156 from lethalman/docker-image

Composable docker images with Nix
lethalman 2016-01-13 10:48:36 +01:00
commit 5b1d1a384a
7 changed files with 890 additions and 0 deletions


@@ -291,4 +291,340 @@ c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
</para>
</section>
<section xml:id="sec-pkgs-dockerTools">
<title>pkgs.dockerTools</title>
<para>
<varname>pkgs.dockerTools</varname> is a set of functions for creating and
manipulating Docker images according to the
<link xlink:href="https://github.com/docker/docker/blob/master/image/spec/v1.md#docker-image-specification-v100">
Docker Image Specification v1.0.0
</link>. Docker itself is not used to perform any of the operations done by these
functions.
</para>
<warning>
<para>
The <varname>dockerTools</varname> API is unstable and may be subject to
backwards-incompatible changes in the future.
</para>
</warning>
<section xml:id="ssec-pkgs-dockerTools-buildImage">
<title>buildImage</title>
<para>
This function is analogous to the <command>docker build</command> command,
in that it can be used to build a Docker-compatible repository tarball containing
a single image with one or multiple layers. As such, the result
is suitable for being loaded in Docker with <command>docker load</command>.
</para>
<para>
The parameters of <varname>buildImage</varname>, with example values, are
described below:
</para>
<example xml:id='ex-dockerTools-buildImage'><title>Docker build</title>
<programlisting>
buildImage {
name = "redis"; <co xml:id='ex-dockerTools-buildImage-1' />
tag = "latest"; <co xml:id='ex-dockerTools-buildImage-2' />
fromImage = someBaseImage; <co xml:id='ex-dockerTools-buildImage-3' />
fromImageName = null; <co xml:id='ex-dockerTools-buildImage-4' />
fromImageTag = "latest"; <co xml:id='ex-dockerTools-buildImage-5' />
contents = pkgs.redis; <co xml:id='ex-dockerTools-buildImage-6' />
runAsRoot = '' <co xml:id='ex-dockerTools-buildImage-runAsRoot' />
#!${stdenv.shell}
mkdir -p /data
'';
config = { <co xml:id='ex-dockerTools-buildImage-8' />
Cmd = [ "/bin/redis-server" ];
WorkingDir = "/data";
Volumes = {
"/data" = {};
};
};
}
</programlisting>
</example>
<para>The above example will build a Docker image <literal>redis/latest</literal>
from the given base image. Loading and running this image in Docker results in
<literal>redis-server</literal> being started automatically.
</para>
<calloutlist>
<callout arearefs='ex-dockerTools-buildImage-1'>
<para>
<varname>name</varname> specifies the name of the resulting image.
This is the only required argument for <varname>buildImage</varname>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-2'>
<para>
<varname>tag</varname> specifies the tag of the resulting image.
By default it's <literal>latest</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-3'>
<para>
<varname>fromImage</varname> is the repository tarball containing the base image.
It must be a valid Docker image, such as exported by <command>docker save</command>.
By default it's <literal>null</literal>, which can be seen as equivalent
to <literal>FROM scratch</literal> in a <filename>Dockerfile</filename>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-4'>
<para>
<varname>fromImageName</varname> can be used to further specify
the base image within the repository, in case it contains multiple images.
By default it's <literal>null</literal>, in which case
<varname>buildImage</varname> will pick the first image available
in the repository.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-5'>
<para>
<varname>fromImageTag</varname> can be used to further specify the tag
of the base image within the repository, in case an image contains multiple tags.
By default it's <literal>null</literal>, in which case
<varname>buildImage</varname> will pick the first tag available for the base image.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-6'>
<para>
<varname>contents</varname> is a derivation that will be copied into the new
layer of the resulting image. This can be seen as similar to
<command>ADD contents/ /</command> in a <filename>Dockerfile</filename>.
By default it's <literal>null</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-runAsRoot'>
<para>
<varname>runAsRoot</varname> is a bash script that will run as root
in an environment that overlays the existing layers of the base image with
the new resulting layer, including the previously copied
<varname>contents</varname> derivation.
This can be seen as similar to
<command>RUN ...</command> in a <filename>Dockerfile</filename>.
<note>
<para>
Using this parameter requires the <literal>kvm</literal>
device to be available.
</para>
</note>
</para>
</callout>
<callout arearefs='ex-dockerTools-buildImage-8'>
<para>
<varname>config</varname> is used to specify the configuration of the
containers that will be started from the built image in Docker.
The available options are listed in the
<link xlink:href="https://github.com/docker/docker/blob/master/image/spec/v1.md#container-runconfig-field-descriptions">
Docker Image Specification v1.0.0
</link>.
</para>
</callout>
</calloutlist>
<para>
After the new layer has been created, its closure
(to which <varname>contents</varname>, <varname>config</varname> and
<varname>runAsRoot</varname> contribute) will be copied into the layer itself.
Only new dependencies that are not already in the existing layers will be copied.
</para>
<para>
At the end of the process, only a single new layer will be produced and
added to the resulting image.
</para>
<para>
The resulting repository will only list the single image
<varname>image/tag</varname>. In the case of <xref linkend='ex-dockerTools-buildImage'/>
it would be <varname>redis/latest</varname>.
</para>
<para>
It is possible to inspect the arguments with which an image was built
using its <varname>buildArgs</varname> attribute.
</para>
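<para>
For example, a minimal sketch of reading back one of the build arguments
(where <varname>redisImage</varname> is a hypothetical binding introduced here
only for illustration) could look like:
</para>
<programlisting>
let
  redisImage = buildImage {
    name = "redis";
    contents = pkgs.redis;
  };
in redisImage.buildArgs.name # evaluates to "redis"
</programlisting>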
</section>
<section xml:id="ssec-pkgs-dockerTools-fetchFromRegistry">
<title>pullImage</title>
<para>
This function is analogous to the <command>docker pull</command> command,
in that it can be used to fetch a Docker image from a Docker registry.
Currently only the registry <literal>v1</literal> protocol is supported.
By default <link xlink:href="https://hub.docker.com/">Docker Hub</link>
is used to pull images.
</para>
<para>
Its parameters are described in the example below:
</para>
<example xml:id='ex-dockerTools-pullImage'><title>Docker pull</title>
<programlisting>
pullImage {
imageName = "debian"; <co xml:id='ex-dockerTools-pullImage-1' />
imageTag = "jessie"; <co xml:id='ex-dockerTools-pullImage-2' />
imageId = null; <co xml:id='ex-dockerTools-pullImage-3' />
sha256 = "1bhw5hkz6chrnrih0ymjbmn69hyfriza2lr550xyvpdrnbzr4gk2"; <co xml:id='ex-dockerTools-pullImage-4' />
indexUrl = "https://index.docker.io"; <co xml:id='ex-dockerTools-pullImage-5' />
registryUrl = "https://registry-1.docker.io";
registryVersion = "v1";
}
</programlisting>
</example>
<calloutlist>
<callout arearefs='ex-dockerTools-pullImage-1'>
<para>
<varname>imageName</varname> specifies the name of the image to be downloaded,
which can also include the registry namespace (e.g. <literal>library/debian</literal>).
This argument is required.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-2'>
<para>
<varname>imageTag</varname> specifies the tag of the image to be downloaded.
By default it's <literal>latest</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-3'>
<para>
<varname>imageId</varname>, if specified, selects the exact image to be fetched, rather
than resolving it from <varname>imageName/imageTag</varname>. However, the resulting repository
will still be named <varname>imageName/imageTag</varname>.
By default it's <literal>null</literal>.
</para>
</callout>
<callout arearefs='ex-dockerTools-pullImage-4'>
<para>
<varname>sha256</varname> is the checksum of the whole fetched image.
This argument is required.
</para>
<note>
<para>The checksum is computed on the unpacked directory, not on the final tarball.</para>
</note>
</callout>
<callout arearefs='ex-dockerTools-pullImage-5'>
<para>
In the above example the default values are shown for the parameters <varname>indexUrl</varname>,
<varname>registryUrl</varname> and <varname>registryVersion</varname>.
Hence by default images are pulled from the official Docker Hub registry.
</para>
</callout>
</calloutlist>
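<para>
Since <varname>pullImage</varname> produces a repository tarball, its result can be
used directly as the <varname>fromImage</varname> argument of
<varname>buildImage</varname>. A minimal sketch of composing the two functions is
shown below (the <varname>sha256</varname> is a placeholder, not a real checksum):
</para>
<programlisting>
buildImage {
  name = "debian-with-hello";
  fromImage = pullImage {
    imageName = "debian";
    imageTag = "jessie";
    sha256 = "0000000000000000000000000000000000000000000000000000";
  };
  contents = pkgs.hello;
}
</programlisting>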
</section>
<section xml:id="ssec-pkgs-dockerTools-exportImage">
<title>exportImage</title>
<para>
This function is analogous to the <command>docker export</command> command,
in that it can be used to flatten a Docker image that contains multiple layers.
The result is the merge of all the layers of the image.
As such, it is suitable for being imported into Docker
with <command>docker import</command>.
</para>
<note>
<para>
Using this function requires the <literal>kvm</literal>
device to be available.
</para>
</note>
<para>
The parameters of <varname>exportImage</varname> are the following:
</para>
<example xml:id='ex-dockerTools-exportImage'><title>Docker export</title>
<programlisting>
exportImage {
fromImage = someLayeredImage;
fromImageName = null;
fromImageTag = null;
name = someLayeredImage.name;
}
</programlisting>
</example>
<para>
The parameters relating to the base image have the same meaning as
described in <xref linkend='ssec-pkgs-dockerTools-buildImage'/>, except that
<varname>fromImage</varname> is the only required argument in this case.
</para>
<para>
The <varname>name</varname> argument is the name of the derivation output,
which defaults to <varname>fromImage.name</varname>.
</para>
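<para>
As a sketch of a concrete invocation, the layered <literal>debian</literal> image
pulled in <xref linkend='ex-dockerTools-pullImage'/> could be flattened as follows,
where <varname>debianImage</varname> is a hypothetical binding for the result of
that <varname>pullImage</varname> call:
</para>
<programlisting>
exportImage {
  fromImage = debianImage;
  name = "debian-flattened";
}
</programlisting>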
</section>
<section xml:id="ssec-pkgs-dockerTools-shadowSetup">
<title>shadowSetup</title>
<para>
This constant string is a helper that sets up the base files for managing
users and groups, but only if such files don't already exist.
It is suitable for use in a
<varname>runAsRoot</varname> script (see <xref linkend='ex-dockerTools-buildImage-runAsRoot'/>),
as in the example below:
</para>
<example xml:id='ex-dockerTools-shadowSetup'><title>Shadow base files</title>
<programlisting>
buildImage {
name = "shadow-basic";
runAsRoot = ''
#!${stdenv.shell}
${shadowSetup}
groupadd -r redis
useradd -r -g redis redis
mkdir /data
chown redis:redis /data
'';
}
</programlisting>
</example>
<para>
Creating base files like <literal>/etc/passwd</literal> or
<literal>/etc/login.defs</literal> is necessary for shadow-utils to
manipulate users and groups.
</para>
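<para>
A user created this way can then be referenced from the image configuration, for
example through the <varname>User</varname> field of <varname>config</varname>.
The following is a sketch that extends the example above so that the container
runs as the <literal>redis</literal> user:
</para>
<programlisting>
buildImage {
  name = "shadow-basic";
  runAsRoot = ''
    #!${stdenv.shell}
    ${shadowSetup}
    groupadd -r redis
    useradd -r -g redis redis
    mkdir /data
    chown redis:redis /data
  '';
  config = {
    User = "redis";
    Cmd = [ "/bin/redis-server" ];
    WorkingDir = "/data";
  };
}
</programlisting>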
</section>
</section>
</chapter>


@@ -0,0 +1,365 @@
{ stdenv, lib, callPackage, runCommand, writeReferencesToFile, writeText, vmTools, writeScript
, docker, shadow, utillinux, coreutils, jshon, e2fsprogs, goPackages }:
# WARNING: this API is unstable and may be subject to backwards-incompatible changes in the future.
rec {
pullImage = callPackage ./pull.nix {};
# We need to sum layer.tar, not a directory, hence tarsum instead of nix-hash.
# And we cannot untar it, because then we cannot preserve permissions etc.
tarsum = runCommand "tarsum" {
buildInputs = [ goPackages.go ];
} ''
mkdir tarsum
cd tarsum
cp ${./tarsum.go} tarsum.go
export GOPATH=$(pwd)
mkdir src
ln -sT ${docker.src}/pkg/tarsum src/tarsum
go build
cp tarsum $out
'';
# buildEnv creates symlinks to dirs, which is hard to edit inside the overlay VM
mergeDrvs = { drvs, onlyDeps ? false }:
runCommand "merge-drvs" {
inherit drvs onlyDeps;
} ''
if [ -n "$onlyDeps" ]; then
echo $drvs > $out
exit 0
fi
mkdir $out
for drv in $drvs; do
echo Merging $drv
if [ -d "$drv" ]; then
cp -drf --preserve=mode -f $drv/* $out/
else
tar -C $out -xpf $drv || true
fi
done
'';
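# Pack the runtime closure of drv into a tarball (each store path under its
# /nix/store location); unless onlyDeps is set, the contents of drv itself are
# additionally copied to the tarball root.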
mkTarball = { name ? "docker-tar", drv, onlyDeps ? false }:
runCommand "${name}.tar.gz" rec {
inherit drv onlyDeps;
drvClosure = writeReferencesToFile drv;
} ''
while read dep; do
echo Copying $dep
dir="$(dirname "$dep")"
mkdir -p "rootfs/$dir"
cp -drf --preserve=mode $dep "rootfs/$dir/"
done < "$drvClosure"
if [ -z "$onlyDeps" ]; then
cp -drf --preserve=mode $drv/* rootfs/
fi
tar -C rootfs/ -cpzf $out .
'';
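# Wrap a snippet of shell into a script that runs with set -e and coreutils on the PATH.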
shellScript = text:
writeScript "script.sh" ''
#!${stdenv.shell}
set -e
export PATH=${coreutils}/bin:/bin
${text}
'';
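# Shell commands that create minimal /etc/passwd, /etc/group, PAM and login.defs
# files (if missing) so that shadow's useradd/groupadd work inside runAsRoot.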
shadowSetup = ''
export PATH=${shadow}/bin:$PATH
mkdir -p /etc/pam.d
if [ ! -f /etc/passwd ]; then
echo "root:x:0:0::/root:/bin/sh" > /etc/passwd
echo "root:!x:::::::" > /etc/shadow
fi
if [ ! -f /etc/group ]; then
echo "root:x:0:" > /etc/group
echo "root:x::" > /etc/gshadow
fi
if [ ! -f /etc/pam.d/other ]; then
cat > /etc/pam.d/other <<EOF
account sufficient pam_unix.so
auth sufficient pam_rootok.so
password requisite pam_unix.so nullok sha512
session required pam_unix.so
EOF
fi
if [ ! -f /etc/login.defs ]; then
touch /etc/login.defs
fi
'';
# Append to tar instead of unpacking
mergeTarballs = tarballs:
runCommand "merge-tars" { inherit tarballs; } ''
mkdir tmp
for tb in $tarballs; do
tar -C tmp -xkpf $tb
done
tar -C tmp -cpzf $out .
'';
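# Run commands in a VM on top of the layers of fromImage: the base layers are
# unpacked (whiteout files become character devices), mounted as the lowerdirs of
# an overlayfs with a fresh "layer" upperdir, postMount runs on the merged mount,
# and after unmounting the new whiteouts are converted back into .wh.* files.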
runWithOverlay = { name , fromImage ? null, fromImageName ? null, fromImageTag ? null
, diskSize ? 1024, preMount ? "", postMount ? "", postUmount ? "" }:
vmTools.runInLinuxVM (
runCommand name {
preVM = vmTools.createEmptyImage { size = diskSize; fullName = "docker-run-disk"; };
inherit fromImage fromImageName fromImageTag;
buildInputs = [ utillinux e2fsprogs jshon ];
} ''
rm -rf $out
mkdir disk
mkfs /dev/${vmTools.hd}
mount /dev/${vmTools.hd} disk
cd disk
if [ -n "$fromImage" ]; then
echo Unpacking base image
mkdir image
tar -C image -xpf "$fromImage"
if [ -z "$fromImageName" ]; then
fromImageName=$(jshon -k < image/repositories|head -n1)
fi
if [ -z "$fromImageTag" ]; then
fromImageTag=$(jshon -e $fromImageName -k < image/repositories|head -n1)
fi
parentID=$(jshon -e $fromImageName -e $fromImageTag -u < image/repositories)
fi
lowerdir=""
while [ -n "$parentID" ]; do
echo Unpacking layer $parentID
mkdir -p image/$parentID/layer
tar -C image/$parentID/layer -xpf image/$parentID/layer.tar
rm image/$parentID/layer.tar
find image/$parentID/layer -name ".wh.*" -exec bash -c 'name="$(basename {}|sed "s/^.wh.//")"; mknod "$(dirname {})/$name" c 0 0; rm {}' \;
lowerdir=$lowerdir''${lowerdir:+:}image/$parentID/layer
parentID=$(cat image/$parentID/json|(jshon -e parent -u 2>/dev/null || true))
done
mkdir work
mkdir layer
mkdir mnt
${preMount}
if [ -n "$lowerdir" ]; then
mount -t overlay overlay -olowerdir=$lowerdir,workdir=work,upperdir=layer mnt
else
mount --bind layer mnt
fi
${postMount}
umount mnt
pushd layer
find . -type c -exec bash -c 'name="$(basename {})"; touch "$(dirname {})/.wh.$name"; rm "{}"' \;
popd
${postUmount}
'');
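# Flatten an image: pack the merged overlay mount of all its layers into a single tarball.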
exportImage = { name ? fromImage.name, fromImage, fromImageName ? null, fromImageTag ? null, diskSize ? 1024 }:
runWithOverlay {
inherit name fromImage fromImageName fromImageTag diskSize;
postMount = ''
echo Packing raw image
tar -C mnt -czf $out .
'';
};
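# Build a layer (layer.tar, json, VERSION) purely from contents, without
# running anything as root or inside a VM.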
mkPureLayer = { baseJson, contents ? null, extraCommands ? "" }:
runCommand "docker-layer" {
inherit baseJson contents extraCommands;
buildInputs = [ jshon ];
} ''
mkdir layer
if [ -n "$contents" ]; then
echo Adding contents
for c in $contents; do
cp -drf $c/* layer/
chmod -R ug+w layer/
done
fi
pushd layer
${extraCommands}
popd
echo Packing layer
mkdir $out
tar -C layer -cf $out/layer.tar .
ts=$(${tarsum} < $out/layer.tar)
cat ${baseJson} | jshon -s "$ts" -i checksum > $out/json
echo -n "1.0" > $out/VERSION
'';
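# Build a layer by copying contents into the upperdir and running the runAsRoot
# script in a chroot of the merged overlay, inside a VM.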
mkRootLayer = { runAsRoot, baseJson, fromImage ? null, fromImageName ? null, fromImageTag ? null
, diskSize ? 1024, contents ? null, extraCommands ? "" }:
let runAsRootScript = writeScript "run-as-root.sh" runAsRoot;
in runWithOverlay {
name = "docker-layer";
inherit fromImage fromImageName fromImageTag diskSize;
preMount = lib.optionalString (contents != null) ''
echo Adding contents
for c in ${builtins.toString contents}; do
cp -drf $c/* layer/
chmod -R ug+w layer/
done
'';
postMount = ''
mkdir -p mnt/{dev,proc,sys,nix/store}
mount --rbind /dev mnt/dev
mount --rbind /sys mnt/sys
mount --rbind /nix/store mnt/nix/store
unshare -imnpuf --mount-proc chroot mnt ${runAsRootScript}
umount -R mnt/dev mnt/sys mnt/nix/store
rmdir --ignore-fail-on-non-empty mnt/dev mnt/proc mnt/sys mnt/nix/store mnt/nix
'';
postUmount = ''
pushd layer
${extraCommands}
popd
echo Packing layer
mkdir $out
tar -C layer -cf $out/layer.tar .
ts=$(${tarsum} < $out/layer.tar)
cat ${baseJson} | jshon -s "$ts" -i checksum > $out/json
echo -n "1.0" > $out/VERSION
'';
};
# 1. extract the base image
# 2. create the layer
# 3. add layer deps to the layer itself, diffing with the base image
# 4. compute the layer id
# 5. put the layer in the image
# 6. repack the image
buildImage = args@{ name, tag ? "latest"
, fromImage ? null, fromImageName ? null, fromImageTag ? null
, contents ? null, tarballs ? [], config ? null
, runAsRoot ? null, diskSize ? 1024, extraCommands ? "" }:
let
baseJson = writeText "${name}-config.json" (builtins.toJSON {
created = "1970-01-01T00:00:01Z";
architecture = "amd64";
os = "linux";
config = config;
});
layer = (if runAsRoot == null
then mkPureLayer { inherit baseJson contents extraCommands; }
else mkRootLayer { inherit baseJson fromImage fromImageName fromImageTag contents runAsRoot diskSize extraCommands; });
depsTarball = mkTarball { name = "${name}-deps";
drv = layer;
onlyDeps = true; };
result = runCommand "${name}.tar.gz" {
buildInputs = [ jshon ];
imageName = name;
imageTag = tag;
inherit fromImage baseJson;
mergedTarball = if tarballs == [] then depsTarball else mergeTarballs ([ depsTarball ] ++ tarballs);
passthru = {
buildArgs = args;
};
} ''
mkdir image
touch baseFiles
if [ -n "$fromImage" ]; then
echo Unpacking base image
tar -C image -xpf "$fromImage"
if [ -z "$fromImageName" ]; then
fromImageName=$(jshon -k < image/repositories|head -n1)
fi
if [ -z "$fromImageTag" ]; then
fromImageTag=$(jshon -e $fromImageName -k < image/repositories|head -n1)
fi
parentID=$(jshon -e $fromImageName -e $fromImageTag -u < image/repositories)
for l in image/*/layer.tar; do
tar -tf $l >> baseFiles
done
fi
chmod -R ug+rw image
mkdir temp
cp ${layer}/* temp/
chmod ug+w temp/*
echo Adding dependencies
tar -tf temp/layer.tar >> baseFiles
tar -tf "$mergedTarball" | grep -v ${layer} > layerFiles
if [ "$(wc -l layerFiles|cut -d ' ' -f 1)" -gt 3 ]; then
sed -i -e 's|^[\./]\+||' baseFiles layerFiles
comm <(sort -n baseFiles|uniq) <(sort -n layerFiles|uniq) -1 -3 > newFiles
mkdir deps
pushd deps
tar -xpf "$mergedTarball" --no-recursion --files-from ../newFiles 2>/dev/null || true
tar -rf ../temp/layer.tar --no-recursion --files-from ../newFiles 2>/dev/null || true
popd
else
echo No new deps, no diffing needed
fi
echo Adding meta
if [ -n "$parentID" ]; then
cat temp/json | jshon -s "$parentID" -i parent > tmpjson
mv tmpjson temp/json
fi
layerID=$(sha256sum temp/json|cut -d ' ' -f 1)
size=$(stat --printf="%s" temp/layer.tar)
cat temp/json | jshon -s "$layerID" -i id -n $size -i Size > tmpjson
mv tmpjson temp/json
mv temp image/$layerID
jshon -n object \
-n object -s "$layerID" -i "$imageTag" \
-i "$imageName" > image/repositories
chmod -R a-w image
echo Cooking the image
tar -C image -czf $out .
'';
in
result;
}


@@ -0,0 +1,38 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Deterministic layer json: https://github.com/docker/hub-feedback/issues/488
import sys
reload(sys)
sys.setdefaultencoding('UTF8')
import json
# If any of the keys below are equal to a certain value
# then we can delete it because it's the default value
SAFEDELS = {
"Size": 0,
"config": {
"ExposedPorts": None,
"MacAddress": "",
"NetworkDisabled": False,
"PortSpecs": None,
"VolumeDriver": ""
}
}
SAFEDELS["container_config"] = SAFEDELS["config"]
def makedet(j, safedels):
for k,v in safedels.items():
if type(v) == dict:
makedet(j[k], v)
elif k in j and j[k] == v:
del j[k]
def main():
j = json.load(sys.stdin)
makedet(j, SAFEDELS)
json.dump(j, sys.stdout, sort_keys=True)
if __name__ == '__main__':
main()


@@ -0,0 +1,50 @@
{ stdenv, lib, curl, jshon, python, runCommand }:
# Inspired and simplified version of fetchurl.
# For simplicity we only support sha256.
# Currently only registry v1 is supported, compatible with Docker Hub.
{ imageName, imageTag ? "latest", imageId ? null
, sha256, name ? "${imageName}-${imageTag}"
, indexUrl ? "https://index.docker.io"
, registryUrl ? "https://registry-1.docker.io"
, registryVersion ? "v1"
, curlOpts ? "" }:
let layer = stdenv.mkDerivation {
inherit name imageName imageTag imageId
indexUrl registryUrl registryVersion curlOpts;
builder = ./pull.sh;
detjson = ./detjson.py;
buildInputs = [ curl jshon python ];
outputHashAlgo = "sha256";
outputHash = sha256;
outputHashMode = "recursive";
impureEnvVars = [
# We borrow these environment variables from the caller to allow
# easy proxy configuration. This is impure, but a fixed-output
# derivation like fetchurl is allowed to do so since its result is
# by definition pure.
"http_proxy" "https_proxy" "ftp_proxy" "all_proxy" "no_proxy"
# This variable allows the user to pass additional options to curl
"NIX_CURL_FLAGS"
# This variable allows overriding the timeout for connecting to
# the hashed mirrors.
"NIX_CONNECT_TIMEOUT"
];
# Doing the download on a remote machine just duplicates network
# traffic, so don't do that.
preferLocalBuild = true;
};
in runCommand "${name}.tar.gz" {} ''
tar -C ${layer} -czf $out .
''


@@ -0,0 +1,75 @@
# Reference: docker src contrib/download-frozen-image.sh
source $stdenv/setup
# Curl flags to handle redirects, not use EPSV, handle cookies for
# servers to need them during redirects, and work on SSL without a
# certificate (this isn't a security problem because we check the
# cryptographic hash of the output anyway).
curl="curl \
--location --max-redirs 20 \
--retry 3 \
--fail \
--disable-epsv \
--cookie-jar cookies \
--insecure \
$curlOpts \
$NIX_CURL_FLAGS"
baseUrl="$registryUrl/$registryVersion"
fetchLayer() {
local url="$1"
local dest="$2"
local curlexit=18;
# if we get error code 18, resume partial download
while [ $curlexit -eq 18 ]; do
# keep this inside an if statement, since on failure it doesn't abort the script
if $curl -H "Authorization: Token $token" "$url" --output "$dest"; then
return 0
else
curlexit=$?;
fi
done
return $curlexit
}
token="$($curl -o /dev/null -D- -H 'X-Docker-Token: true' "$indexUrl/$registryVersion/repositories/$imageName/images" | grep X-Docker-Token | tr -d '\r' | cut -d ' ' -f 2)"
if [ -z "$token" ]; then
echo "error: registry returned no token"
exit 1
fi
# token="${token//\"/\\\"}"
if [ -z "$imageId" ]; then
imageId="$($curl -H "Authorization: Token $token" "$baseUrl/repositories/$imageName/tags/$imageTag")"
imageId="${imageId//\"/}"
if [ -z "$imageId" ]; then
echo "error: no image ID found for ${imageName}:${imageTag}"
exit 1
fi
echo "found image ${imageName}:${imageTag}@$imageId"
fi
mkdir -p $out
jshon -n object \
-n object -s "$imageId" -i "$imageTag" \
-i "$imageName" > $out/repositories
$curl -H "Authorization: Token $token" "$baseUrl/images/$imageId/ancestry" -o ancestry.json
layerIds=$(jshon -a -u < ancestry.json)
for layerId in $layerIds; do
echo "fetching layer $layerId"
mkdir "$out/$layerId"
echo '1.0' > "$out/$layerId/VERSION"
$curl -H "Authorization: Token $token" "$baseUrl/images/$layerId/json" | python $detjson > "$out/$layerId/json"
fetchLayer "$baseUrl/images/$layerId/layer" "$out/$layerId/layer.tar"
done


@@ -0,0 +1,24 @@
package main
import (
"tarsum"
"io"
"io/ioutil"
"fmt"
"os"
)
func main() {
ts, err := tarsum.NewTarSum(os.Stdin, false, tarsum.Version1)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
if _, err = io.Copy(ioutil.Discard, ts); err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Println(ts.Sum(nil))
}


@@ -289,6 +289,8 @@ let
cmark = callPackage ../development/libraries/cmark { };
dockerTools = callPackage ../build-support/docker { };
dotnetenv = callPackage ../build-support/dotnetenv {
dotnetfx = dotnetfx40;
};