uboot: (firmwareOdroidC2/C4) don't invoke the patch tool; use patches = [] instead

The generic stdenv setup applies patches for us: https://github.com/NixOS/nixpkgs/blob/master/pkgs/stdenv/generic/setup.sh#L948

Signed-off-by: Anton Arapov <anton@deadbeef.mx>
commit 56de2bcd43
30691 changed files with 3076956 additions and 0 deletions

pkgs/build-support/docker/default.nix (new file, 997 lines)
@@ -0,0 +1,997 @@
{ bashInteractive
, buildPackages
, cacert
, callPackage
, closureInfo
, coreutils
, e2fsprogs
, fakechroot
, fakeNss
, fakeroot
, go
, jq
, jshon
, lib
, makeWrapper
, moreutils
, nix
, nixosTests
, pigz
, rsync
, runCommand
, runtimeShell
, shadow
, skopeo
, storeDir ? builtins.storeDir
, substituteAll
, symlinkJoin
, tarsum
, util-linux
, vmTools
, writeReferencesToFile
, writeScript
, writeText
, writeTextDir
, writePython3
}:

let
  inherit (lib)
    optionals
    optionalString
    ;

  inherit (lib)
    escapeShellArgs
    toList
    ;

  mkDbExtraCommand = contents:
    let
      contentsList = if builtins.isList contents then contents else [ contents ];
    in
    ''
      echo "Generating the nix database..."
      echo "Warning: only the database of the deepest Nix layer is loaded."
      echo "         If you want to use nix commands in the container, it would"
      echo "         be better to only have one layer that contains a nix store."

      export NIX_REMOTE=local?root=$PWD
      # A user is required by nix
      # https://github.com/NixOS/nix/blob/9348f9291e5d9e4ba3c4347ea1b235640f54fd79/src/libutil/util.cc#L478
      export USER=nobody
      ${buildPackages.nix}/bin/nix-store --load-db < ${closureInfo {rootPaths = contentsList;}}/registration

      mkdir -p nix/var/nix/gcroots/docker/
      for i in ${lib.concatStringsSep " " contentsList}; do
        ln -s $i nix/var/nix/gcroots/docker/$(basename $i)
      done;
    '';

  # The OCI Image specification recommends that configurations use values listed
  # in the Go Language document for GOARCH.
  # Reference: https://github.com/opencontainers/image-spec/blob/master/config.md#properties
  # For the mapping from Nixpkgs system parameters to GOARCH, we can reuse the
  # mapping from the go package.
  defaultArch = go.GOARCH;

in
rec {
  examples = callPackage ./examples.nix {
    inherit buildImage buildLayeredImage fakeNss pullImage shadowSetup buildImageWithNixDb;
  };

  tests = {
    inherit (nixosTests)
      docker-tools
      docker-tools-overlay
      # requires remote builder
      # docker-tools-cross
      ;
  };

  pullImage =
    let
      fixName = name: builtins.replaceStrings [ "/" ":" ] [ "-" "-" ] name;
    in
    { imageName
      # To find the digest of an image, you can use skopeo:
      # see doc/functions.xml
    , imageDigest
    , sha256
    , os ? "linux"
    , arch ? defaultArch

      # This is used to set the name of the pulled image
    , finalImageName ? imageName
      # This is used to set the tag of the pulled image
    , finalImageTag ? "latest"
      # This is used to disable TLS certificate verification, allowing access to http registries on (hopefully) trusted networks
    , tlsVerify ? true

    , name ? fixName "docker-image-${finalImageName}-${finalImageTag}.tar"
    }:

    runCommand name
      {
        inherit imageDigest;
        imageName = finalImageName;
        imageTag = finalImageTag;
        impureEnvVars = lib.fetchers.proxyImpureEnvVars;
        outputHashMode = "flat";
        outputHashAlgo = "sha256";
        outputHash = sha256;

        nativeBuildInputs = [ skopeo ];
        SSL_CERT_FILE = "${cacert.out}/etc/ssl/certs/ca-bundle.crt";

        sourceURL = "docker://${imageName}@${imageDigest}";
        destNameTag = "${finalImageName}:${finalImageTag}";
      } ''
      skopeo \
        --insecure-policy \
        --tmpdir=$TMPDIR \
        --override-os ${os} \
        --override-arch ${arch} \
        copy \
        --src-tls-verify=${lib.boolToString tlsVerify} \
        "$sourceURL" "docker-archive://$out:$destNameTag" \
        | cat  # pipe through cat to force-disable progress bar
    '';

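  # Hypothetical usage sketch (not part of the original file): pinning an
  # image by digest. The digest and hash below are placeholders, not real
  # values; `lib.fakeSha256` must be replaced by the actual output hash.
  #
  #   examplePulled = pullImage {
  #     imageName = "nixos/nix";
  #     imageDigest = "sha256:<digest of the image manifest>";
  #     sha256 = lib.fakeSha256;
  #     finalImageName = "nix";
  #     finalImageTag = "2.2.1";
  #   };
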
  # We need to sum layer.tar, not a directory, hence tarsum instead of nix-hash.
  # And we cannot untar it, because then we cannot preserve permissions etc.
  inherit tarsum; # pkgs.dockerTools.tarsum

  # buildEnv creates symlinks to dirs, which is hard to edit inside the overlay VM
  mergeDrvs =
    { derivations
    , onlyDeps ? false
    }:
    runCommand "merge-drvs"
      {
        inherit derivations onlyDeps;
      } ''
      if [[ -n "$onlyDeps" ]]; then
        echo $derivations > $out
        exit 0
      fi

      mkdir $out
      for derivation in $derivations; do
        echo "Merging $derivation..."
        if [[ -d "$derivation" ]]; then
          # If it's a directory, copy all of its contents into $out.
          cp -drf --preserve=mode -f $derivation/* $out/
        else
          # Otherwise treat the derivation as a tarball and extract it
          # into $out.
          tar -C $out -xpf $derivation || true
        fi
      done
    '';

  # Helper for setting up the base files for managing users and
  # groups, only if such files don't exist already. It is suitable for
  # being used in a runAsRoot script.
  shadowSetup = ''
    export PATH=${shadow}/bin:$PATH
    mkdir -p /etc/pam.d
    if [[ ! -f /etc/passwd ]]; then
      echo "root:x:0:0::/root:${runtimeShell}" > /etc/passwd
      echo "root:!x:::::::" > /etc/shadow
    fi
    if [[ ! -f /etc/group ]]; then
      echo "root:x:0:" > /etc/group
      echo "root:x::" > /etc/gshadow
    fi
    if [[ ! -f /etc/pam.d/other ]]; then
      cat > /etc/pam.d/other <<EOF
    account sufficient pam_unix.so
    auth sufficient pam_rootok.so
    password requisite pam_unix.so nullok sha512
    session required pam_unix.so
    EOF
    fi
    if [[ ! -f /etc/login.defs ]]; then
      touch /etc/login.defs
    fi
  '';

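  # Hypothetical usage sketch (not part of the original file): shadowSetup
  # is meant to be prepended to a runAsRoot script; after it runs, the
  # shadow tools (groupadd, useradd, ...) are on PATH:
  #
  #   runAsRoot = ''
  #     ${shadowSetup}
  #     groupadd -r redis
  #     useradd -r -g redis redis
  #   '';
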
  # Run commands in a virtual machine.
  runWithOverlay =
    { name
    , fromImage ? null
    , fromImageName ? null
    , fromImageTag ? null
    , diskSize ? 1024
    , preMount ? ""
    , postMount ? ""
    , postUmount ? ""
    }:
    vmTools.runInLinuxVM (
      runCommand name
        {
          preVM = vmTools.createEmptyImage {
            size = diskSize;
            fullName = "docker-run-disk";
            destination = "./image";
          };
          inherit fromImage fromImageName fromImageTag;

          nativeBuildInputs = [ util-linux e2fsprogs jshon rsync jq ];
        } ''
        mkdir disk
        mkfs /dev/${vmTools.hd}
        mount /dev/${vmTools.hd} disk
        cd disk

        if [[ -n "$fromImage" ]]; then
          echo "Unpacking base image..."
          mkdir image
          tar -C image -xpf "$fromImage"

          if [[ -n "$fromImageName" ]] && [[ -n "$fromImageTag" ]]; then
            parentID="$(
              cat "image/manifest.json" |
                jq -r '.[] | select(.RepoTags | contains([$desiredTag])) | .Config | rtrimstr(".json")' \
                  --arg desiredTag "$fromImageName:$fromImageTag"
            )"
          else
            echo "From-image name or tag wasn't set. Reading the first ID."
            parentID="$(cat "image/manifest.json" | jq -r '.[0].Config | rtrimstr(".json")')"
          fi

          cat ./image/manifest.json | jq -r '.[0].Layers | .[]' > layer-list
        else
          touch layer-list
        fi

        # Unpack all of the parent layers into the image.
        lowerdir=""
        extractionID=0
        for layerTar in $(cat layer-list); do
          echo "Unpacking layer $layerTar"
          extractionID=$((extractionID + 1))

          mkdir -p image/$extractionID/layer
          tar -C image/$extractionID/layer -xpf image/$layerTar
          rm image/$layerTar

          find image/$extractionID/layer -name ".wh.*" -exec bash -c 'name="$(basename {}|sed "s/^.wh.//")"; mknod "$(dirname {})/$name" c 0 0; rm {}' \;

          # Get the next lower directory and continue the loop.
          lowerdir=image/$extractionID/layer''${lowerdir:+:}$lowerdir
        done

        mkdir work
        mkdir layer
        mkdir mnt

        ${lib.optionalString (preMount != "") ''
          # Execute pre-mount steps
          echo "Executing pre-mount steps..."
          ${preMount}
        ''}

        if [ -n "$lowerdir" ]; then
          mount -t overlay overlay -olowerdir=$lowerdir,workdir=work,upperdir=layer mnt
        else
          mount --bind layer mnt
        fi

        ${lib.optionalString (postMount != "") ''
          # Execute post-mount steps
          echo "Executing post-mount steps..."
          ${postMount}
        ''}

        umount mnt

        (
          cd layer
          cmd='name="$(basename {})"; touch "$(dirname {})/.wh.$name"; rm "{}"'
          find . -type c -exec bash -c "$cmd" \;
        )

        ${postUmount}
      '');

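  # Hypothetical usage sketch (not part of the original file): exportImage
  # below is the canonical caller. postMount runs while the merged overlay
  # is mounted at ./mnt; postUmount runs after it is unmounted:
  #
  #   runWithOverlay {
  #     name = "inspect-image";
  #     fromImage = someImage;      # placeholder
  #     postMount = "ls mnt > $out";
  #   }
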
  exportImage = { name ? fromImage.name, fromImage, fromImageName ? null, fromImageTag ? null, diskSize ? 1024 }:
    runWithOverlay {
      inherit name fromImage fromImageName fromImageTag diskSize;

      postMount = ''
        echo "Packing raw image..."
        tar -C mnt --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" -cf $out/layer.tar .
      '';

      postUmount = ''
        mv $out/layer.tar .
        rm -rf $out
        mv layer.tar $out
      '';
    };

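  # Usage sketch (mirrors the exportBash example in examples.nix): flatten
  # an image built with buildImage into a single filesystem tarball.
  #
  #   exportBash = exportImage { fromImage = bash; };
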
  # Create an executable shell script which has the coreutils in its
  # PATH. Since root scripts are executed in a blank environment, even
  # things like `ls` or `echo` will be missing.
  shellScript = name: text:
    writeScript name ''
      #!${runtimeShell}
      set -e
      export PATH=${coreutils}/bin:/bin
      ${text}
    '';

  # Create a "layer" (set of files).
  mkPureLayer =
    {
      # Name of the layer
      name
    , # JSON containing configuration and metadata for this layer.
      baseJson
    , # Files to add to the layer.
      contents ? null
    , # When copying the contents into the image, preserve symlinks to
      # directories (see `rsync -K`). Otherwise, transform those symlinks
      # into directories.
      keepContentsDirlinks ? false
    , # Additional commands to run on the layer before it is tar'd up.
      extraCommands ? ""
    , uid ? 0
    , gid ? 0
    }:
    runCommand "docker-layer-${name}"
      {
        inherit baseJson contents extraCommands;
        nativeBuildInputs = [ jshon rsync tarsum ];
      }
      ''
        mkdir layer
        if [[ -n "$contents" ]]; then
          echo "Adding contents..."
          for item in $contents; do
            echo "Adding $item"
            rsync -a${if keepContentsDirlinks then "K" else "k"} --chown=0:0 $item/ layer/
          done
        else
          echo "No contents to add to layer."
        fi

        chmod ug+w layer

        if [[ -n "$extraCommands" ]]; then
          (cd layer; eval "$extraCommands")
        fi

        # Tar up the layer and throw it into 'layer.tar'.
        echo "Packing layer..."
        mkdir $out
        tarhash=$(tar -C layer --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=${toString uid} --group=${toString gid} -cf - . | tee -p $out/layer.tar | tarsum)

        # Add a 'checksum' field to the JSON, with the value set to the
        # checksum of the tarball.
        cat ${baseJson} | jshon -s "$tarhash" -i checksum > $out/json

        # Indicate to docker that we're using schema version 1.0.
        echo -n "1.0" > $out/VERSION

        echo "Finished building layer '${name}'"
      '';

# Make a "root" layer; required if we need to execute commands as a
|
||||
# privileged user on the image. The commands themselves will be
|
||||
# performed in a virtual machine sandbox.
|
||||
mkRootLayer =
|
||||
{
|
||||
# Name of the image.
|
||||
name
|
||||
, # Script to run as root. Bash.
|
||||
runAsRoot
|
||||
, # Files to add to the layer. If null, an empty layer will be created.
|
||||
contents ? null
|
||||
, # When copying the contents into the image, preserve symlinks to
|
||||
# directories (see `rsync -K`). Otherwise, transform those symlinks
|
||||
# into directories.
|
||||
keepContentsDirlinks ? false
|
||||
, # JSON containing configuration and metadata for this layer.
|
||||
baseJson
|
||||
, # Existing image onto which to append the new layer.
|
||||
fromImage ? null
|
||||
, # Name of the image we're appending onto.
|
||||
fromImageName ? null
|
||||
, # Tag of the image we're appending onto.
|
||||
fromImageTag ? null
|
||||
, # How much disk to allocate for the temporary virtual machine.
|
||||
diskSize ? 1024
|
||||
, # Commands (bash) to run on the layer; these do not require sudo.
|
||||
extraCommands ? ""
|
||||
}:
|
||||
# Generate an executable script from the `runAsRoot` text.
|
||||
let
|
||||
runAsRootScript = shellScript "run-as-root.sh" runAsRoot;
|
||||
extraCommandsScript = shellScript "extra-commands.sh" extraCommands;
|
||||
in
|
||||
runWithOverlay {
|
||||
name = "docker-layer-${name}";
|
||||
|
||||
inherit fromImage fromImageName fromImageTag diskSize;
|
||||
|
||||
preMount = lib.optionalString (contents != null && contents != [ ]) ''
|
||||
echo "Adding contents..."
|
||||
for item in ${escapeShellArgs (map (c: "${c}") (toList contents))}; do
|
||||
echo "Adding $item..."
|
||||
rsync -a${if keepContentsDirlinks then "K" else "k"} --chown=0:0 $item/ layer/
|
||||
done
|
||||
|
||||
chmod ug+w layer
|
||||
'';
|
||||
|
||||
postMount = ''
|
||||
mkdir -p mnt/{dev,proc,sys} mnt${storeDir}
|
||||
|
||||
# Mount /dev, /sys and the nix store as shared folders.
|
||||
mount --rbind /dev mnt/dev
|
||||
mount --rbind /sys mnt/sys
|
||||
mount --rbind ${storeDir} mnt${storeDir}
|
||||
|
||||
# Execute the run as root script. See 'man unshare' for
|
||||
# details on what's going on here; basically this command
|
||||
# means that the runAsRootScript will be executed in a nearly
|
||||
# completely isolated environment.
|
||||
#
|
||||
# Ideally we would use --mount-proc=mnt/proc or similar, but this
|
||||
# doesn't work. The workaround is to setup proc after unshare.
|
||||
# See: https://github.com/karelzak/util-linux/issues/648
|
||||
unshare -imnpuf --mount-proc sh -c 'mount --rbind /proc mnt/proc && chroot mnt ${runAsRootScript}'
|
||||
|
||||
# Unmount directories and remove them.
|
||||
umount -R mnt/dev mnt/sys mnt${storeDir}
|
||||
rmdir --ignore-fail-on-non-empty \
|
||||
mnt/dev mnt/proc mnt/sys mnt${storeDir} \
|
||||
mnt$(dirname ${storeDir})
|
||||
'';
|
||||
|
||||
postUmount = ''
|
||||
(cd layer; ${extraCommandsScript})
|
||||
|
||||
echo "Packing layer..."
|
||||
mkdir -p $out
|
||||
tarhash=$(tar -C layer --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" -cf - . |
|
||||
tee -p $out/layer.tar |
|
||||
${tarsum}/bin/tarsum)
|
||||
|
||||
cat ${baseJson} | jshon -s "$tarhash" -i checksum > $out/json
|
||||
# Indicate to docker that we're using schema version 1.0.
|
||||
echo -n "1.0" > $out/VERSION
|
||||
|
||||
echo "Finished building layer '${name}'"
|
||||
'';
|
||||
};
|
||||
|
||||
buildLayeredImage = { name, ... }@args:
|
||||
let
|
||||
stream = streamLayeredImage args;
|
||||
in
|
||||
runCommand "${baseNameOf name}.tar.gz"
|
||||
{
|
||||
inherit (stream) imageName;
|
||||
passthru = { inherit (stream) imageTag; };
|
||||
nativeBuildInputs = [ pigz ];
|
||||
} "${stream} | pigz -nT > $out";
|
||||
|
||||
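  # Usage sketch (condensed from examples.nix; `pkgs` is illustrative and
  # not in scope here): each store path becomes its own layer where
  # possible, which maximises layer sharing between images.
  #
  #   redisLayered = buildLayeredImage {
  #     name = "redis";
  #     tag = "latest";
  #     contents = [ pkgs.redis ];
  #     config.Cmd = [ "/bin/redis-server" ];
  #   };
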
  # 1. extract the base image
  # 2. create the layer
  # 3. add layer deps to the layer itself, diffing with the base image
  # 4. compute the layer id
  # 5. put the layer in the image
  # 6. repack the image
  buildImage =
    args@{
      # Image name.
      name
    , # Image tag; when null, the nix output hash will be used.
      tag ? null
    , # Parent image, to append to.
      fromImage ? null
    , # Name of the parent image; will be read from the image otherwise.
      fromImageName ? null
    , # Tag of the parent image; will be read from the image otherwise.
      fromImageTag ? null
    , # Files to put on the image (a nix store path or list of paths).
      contents ? null
    , # When copying the contents into the image, preserve symlinks to
      # directories (see `rsync -K`). Otherwise, transform those symlinks
      # into directories.
      keepContentsDirlinks ? false
    , # Docker config; e.g. what command to run on the container.
      config ? null
    , # Optional bash script to run on the files prior to fixturizing the layer.
      extraCommands ? ""
    , uid ? 0
    , gid ? 0
    , # Optional bash script to run as root on the image when provisioning.
      runAsRoot ? null
    , # Size of the virtual machine disk to provision when building the image.
      diskSize ? 1024
    , # Time of creation of the image.
      created ? "1970-01-01T00:00:01Z"
    ,
    }:

    let
      baseName = baseNameOf name;

      # Create a JSON blob of the configuration. Set the date to unix zero.
      baseJson =
        let
          pure = writeText "${baseName}-config.json" (builtins.toJSON {
            inherit created config;
            architecture = defaultArch;
            os = "linux";
          });
          impure = runCommand "${baseName}-config.json"
            { nativeBuildInputs = [ jq ]; }
            ''
              jq ".created = \"$(TZ=utc date --iso-8601="seconds")\"" ${pure} > $out
            '';
        in
        if created == "now" then impure else pure;

      layer =
        if runAsRoot == null
        then
          mkPureLayer
            {
              name = baseName;
              inherit baseJson contents keepContentsDirlinks extraCommands uid gid;
            } else
          mkRootLayer {
            name = baseName;
            inherit baseJson fromImage fromImageName fromImageTag
              contents keepContentsDirlinks runAsRoot diskSize
              extraCommands;
          };
      result = runCommand "docker-image-${baseName}.tar.gz"
        {
          nativeBuildInputs = [ jshon pigz jq moreutils ];
          # Image name must be lowercase
          imageName = lib.toLower name;
          imageTag = if tag == null then "" else tag;
          inherit fromImage baseJson;
          layerClosure = writeReferencesToFile layer;
          passthru.buildArgs = args;
          passthru.layer = layer;
          passthru.imageTag =
            if tag != null
            then tag
            else
              lib.head (lib.strings.splitString "-" (baseNameOf result.outPath));
        } ''
        ${lib.optionalString (tag == null) ''
          outName="$(basename "$out")"
          outHash=$(echo "$outName" | cut -d - -f 1)

          imageTag=$outHash
        ''}

        # Print tar contents:
        # 1: Interpreted as relative to the root directory
        # 2: With no trailing slashes on directories
        # This is useful for ensuring that the output matches the
        # values generated by the "find" command
        ls_tar() {
          for f in $(tar -tf $1 | xargs realpath -ms --relative-to=.); do
            if [[ "$f" != "." ]]; then
              echo "/$f"
            fi
          done
        }

        mkdir image
        touch baseFiles
        baseEnvs='[]'
        if [[ -n "$fromImage" ]]; then
          echo "Unpacking base image..."
          tar -C image -xpf "$fromImage"

          # Store the layers and the environment variables from the base image
          cat ./image/manifest.json | jq -r '.[0].Layers | .[]' > layer-list
          configName="$(cat ./image/manifest.json | jq -r '.[0].Config')"
          baseEnvs="$(cat "./image/$configName" | jq '.config.Env // []')"

          # Extract the parentID from the manifest
          if [[ -n "$fromImageName" ]] && [[ -n "$fromImageTag" ]]; then
            parentID="$(
              cat "image/manifest.json" |
                jq -r '.[] | select(.RepoTags | contains([$desiredTag])) | .Config | rtrimstr(".json")' \
                  --arg desiredTag "$fromImageName:$fromImageTag"
            )"
          else
            echo "From-image name or tag wasn't set. Reading the first ID."
            parentID="$(cat "image/manifest.json" | jq -r '.[0].Config | rtrimstr(".json")')"
          fi

          # Otherwise do not import the base image configuration and manifest
          chmod a+w image image/*.json
          rm -f image/*.json

          for l in image/*/layer.tar; do
            ls_tar $l >> baseFiles
          done
        else
          touch layer-list
        fi

        chmod -R ug+rw image

        mkdir temp
        cp ${layer}/* temp/
        chmod ug+w temp/*

        for dep in $(cat $layerClosure); do
          find $dep >> layerFiles
        done

        echo "Adding layer..."
        # Record the contents of the tarball with ls_tar.
        ls_tar temp/layer.tar >> baseFiles

        # Append nix/store directory to the layer so that when the layer is loaded in the
        # image /nix/store has read permissions for non-root users.
        # nix/store is added only if the layer has /nix/store paths in it.
        if [ $(wc -l < $layerClosure) -gt 1 ] && [ $(grep -c -e "^/nix/store$" baseFiles) -eq 0 ]; then
          mkdir -p nix/store
          chmod -R 555 nix
          echo "./nix" >> layerFiles
          echo "./nix/store" >> layerFiles
        fi

        # Get the files in the new layer which were *not* present in
        # the old layer, and record them as newFiles.
        comm <(sort -n baseFiles|uniq) \
          <(sort -n layerFiles|uniq|grep -v ${layer}) -1 -3 > newFiles
        # Append the new files to the layer.
        tar -rpf temp/layer.tar --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" \
          --owner=0 --group=0 --no-recursion --verbatim-files-from --files-from newFiles

        echo "Adding meta..."

        # If we have a parentID, add it to the json metadata.
        if [[ -n "$parentID" ]]; then
          cat temp/json | jshon -s "$parentID" -i parent > tmpjson
          mv tmpjson temp/json
        fi

        # Take the sha256 sum of the generated json and use it as the layer ID.
        # Compute the size and add it to the json under the 'Size' field.
        layerID=$(sha256sum temp/json|cut -d ' ' -f 1)
        size=$(stat --printf="%s" temp/layer.tar)
        cat temp/json | jshon -s "$layerID" -i id -n $size -i Size > tmpjson
        mv tmpjson temp/json

        # Use the temp folder we've been working on to create a new image.
        mv temp image/$layerID

        # Add the new layer ID to the end of the layer list
        (
          cat layer-list
          # originally this used `sed -i "1i$layerID" layer-list`, but
          # would fail if layer-list was completely empty.
          echo "$layerID/layer.tar"
        ) | sponge layer-list

        # Create image json and image manifest
        imageJson=$(cat ${baseJson} | jq '.config.Env = $baseenv + .config.Env' --argjson baseenv "$baseEnvs")
        imageJson=$(echo "$imageJson" | jq ". + {\"rootfs\": {\"diff_ids\": [], \"type\": \"layers\"}}")
        manifestJson=$(jq -n "[{\"RepoTags\":[\"$imageName:$imageTag\"]}]")

        for layerTar in $(cat ./layer-list); do
          layerChecksum=$(sha256sum image/$layerTar | cut -d ' ' -f1)
          imageJson=$(echo "$imageJson" | jq ".history |= . + [{\"created\": \"$(jq -r .created ${baseJson})\"}]")
          # diff_ids order is from the bottom-most to top-most layer
          imageJson=$(echo "$imageJson" | jq ".rootfs.diff_ids |= . + [\"sha256:$layerChecksum\"]")
          manifestJson=$(echo "$manifestJson" | jq ".[0].Layers |= . + [\"$layerTar\"]")
        done

        imageJsonChecksum=$(echo "$imageJson" | sha256sum | cut -d ' ' -f1)
        echo "$imageJson" > "image/$imageJsonChecksum.json"
        manifestJson=$(echo "$manifestJson" | jq ".[0].Config = \"$imageJsonChecksum.json\"")
        echo "$manifestJson" > image/manifest.json

        # Store the json under the name image/repositories.
        jshon -n object \
          -n object -s "$layerID" -i "$imageTag" \
          -i "$imageName" > image/repositories

        # Make the image read-only.
        chmod -R a-w image

        echo "Cooking the image..."
        tar -C image --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=0 --group=0 --xform s:'^./':: -c . | pigz -nT > $out

        echo "Finished."
      '';

    in
    result;

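  # Usage sketch (condensed from examples.nix; `pkgs` is illustrative and
  # not in scope here): a runAsRoot script triggers the VM-backed
  # mkRootLayer path above.
  #
  #   redis = buildImage {
  #     name = "redis";
  #     tag = "latest";
  #     fromImage = bash;        # another image built with buildImage
  #     contents = pkgs.redis;
  #     runAsRoot = "mkdir -p /data";
  #     config = { Cmd = [ "/bin/redis-server" ]; WorkingDir = "/data"; };
  #   };
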
  # Merge the tarballs of images built with buildImage into a single
  # tarball that contains all images. Running `docker load` on the resulting
  # tarball will load the images into the docker daemon.
  mergeImages = images: runCommand "merge-docker-images"
    {
      inherit images;
      nativeBuildInputs = [ pigz jq ];
    } ''
    mkdir image inputs
    # Extract images
    repos=()
    manifests=()
    for item in $images; do
      name=$(basename $item)
      mkdir inputs/$name
      tar -I pigz -xf $item -C inputs/$name
      if [ -f inputs/$name/repositories ]; then
        repos+=(inputs/$name/repositories)
      fi
      if [ -f inputs/$name/manifest.json ]; then
        manifests+=(inputs/$name/manifest.json)
      fi
    done
    # Copy all layers from input images to output image directory
    cp -R --no-clobber inputs/*/* image/
    # Merge repositories objects and manifests
    jq -s add "''${repos[@]}" > repositories
    jq -s add "''${manifests[@]}" > manifest.json
    # Replace output image repositories and manifest with merged versions
    mv repositories image/repositories
    mv manifest.json image/manifest.json
    # Create tarball and gzip
    tar -C image --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=0 --group=0 --xform s:'^./':: -c . | pigz -nT > $out
  '';

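  # Usage sketch (mirrors mergedBashAndRedis in examples.nix):
  #
  #   merged = mergeImages [ bash redis ];
  #   # then: docker load < result   # loads both images at once
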
  # Provide a /etc/passwd and /etc/group that contain root and nobody.
  # Useful when packaging binaries that insist on using nss to look up
  # username/groups (like nginx).
  # /bin/sh is fine to not exist, and provided by another shim.
  inherit fakeNss; # alias

  # This provides a /usr/bin/env, for shell scripts using the
  # "#!/usr/bin/env executable" shebang.
  usrBinEnv = runCommand "usr-bin-env" { } ''
    mkdir -p $out/usr/bin
    ln -s ${coreutils}/bin/env $out/usr/bin
  '';

  # This provides /bin/sh, pointing to bashInteractive.
  binSh = runCommand "bin-sh" { } ''
    mkdir -p $out/bin
    ln -s ${bashInteractive}/bin/bash $out/bin/sh
  '';

  # Build an image and populate its nix database with the provided
  # contents. The main purpose is to be able to use nix commands in
  # the container.
  # Be careful since this doesn't work well with multilayer.
  buildImageWithNixDb = args@{ contents ? null, extraCommands ? "", ... }: (
    buildImage (args // {
      extraCommands = (mkDbExtraCommand contents) + extraCommands;
    })
  );

  buildLayeredImageWithNixDb = args@{ contents ? null, extraCommands ? "", ... }: (
    buildLayeredImage (args // {
      extraCommands = (mkDbExtraCommand contents) + extraCommands;
    })
  );

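  # Usage sketch (condensed from the `nix` example in examples.nix; `pkgs`
  # is illustrative and not in scope here): the loaded database allows
  # running nix-store queries inside the container.
  #
  #   nixImage = buildImageWithNixDb {
  #     name = "nix";
  #     tag = "latest";
  #     contents = [ pkgs.coreutils pkgs.nix ];
  #     config.Env = [ "NIX_PAGER=cat" "USER=nobody" ];
  #   };
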
  streamLayeredImage =
    {
      # Image Name
      name
    , # Image tag; the Nix output hash will be used if null.
      tag ? null
    , # Parent image, to append to.
      fromImage ? null
    , # Files to put on the image (a nix store path or list of paths).
      contents ? [ ]
    , # Docker config; e.g. what command to run on the container.
      config ? { }
    , # Time of creation of the image. Passing "now" will make the
      # created date be the time of building.
      created ? "1970-01-01T00:00:01Z"
    , # Optional bash script to run on the files prior to fixturizing the layer.
      extraCommands ? ""
    , # Optional bash script to run inside fakeroot environment.
      # Could be used for changing ownership of files in customisation layer.
      fakeRootCommands ? ""
    , # Whether to run fakeRootCommands in fakechroot as well, so that they
      # appear to run inside the image, but have access to the normal Nix store.
      # Perhaps this could be enabled by default on pkgs.stdenv.buildPlatform.isLinux
      enableFakechroot ? false
    , # We pick 100 to ensure there is plenty of room for extension. I
      # believe the actual maximum is 128.
      maxLayers ? 100
    , # Whether to include store paths in the image. You generally want to leave
      # this on, but tooling may disable this to insert the store paths more
      # efficiently via other means, such as bind mounting the host store.
      includeStorePaths ? true
    , # Passthru arguments for the underlying derivation.
      passthru ? {}
    ,
    }:
    assert
      (lib.assertMsg (maxLayers > 1)
        "the maxLayers argument of dockerTools.buildLayeredImage function must be greater than 1 (current value: ${toString maxLayers})");
    let
      baseName = baseNameOf name;

      streamScript = writePython3 "stream" { } ./stream_layered_image.py;
      baseJson = writeText "${baseName}-base.json" (builtins.toJSON {
        inherit config;
        architecture = defaultArch;
        os = "linux";
      });

      contentsList = if builtins.isList contents then contents else [ contents ];

      # We store the customisation layer as a tarball, to make sure that
      # things like permissions set on 'extraCommands' are not overridden
      # by Nix. Then we precompute the sha256 for performance.
      customisationLayer = symlinkJoin {
        name = "${baseName}-customisation-layer";
        paths = contentsList;
        inherit extraCommands fakeRootCommands;
        nativeBuildInputs = [
          fakeroot
        ] ++ optionals enableFakechroot [
          fakechroot
          # for chroot
          coreutils
          # fakechroot needs getopt, which is provided by util-linux
          util-linux
        ];
        postBuild = ''
          mv $out old_out
          (cd old_out; eval "$extraCommands" )

          mkdir $out
          ${optionalString enableFakechroot ''
            export FAKECHROOT_EXCLUDE_PATH=/dev:/proc:/sys:${builtins.storeDir}:$out/layer.tar
          ''}
          ${optionalString enableFakechroot ''fakechroot chroot $PWD/old_out ''}fakeroot bash -c '
            source $stdenv/setup
            ${optionalString (!enableFakechroot) ''cd old_out''}
            eval "$fakeRootCommands"
            tar \
              --sort name \
              --numeric-owner --mtime "@$SOURCE_DATE_EPOCH" \
              --hard-dereference \
              -cf $out/layer.tar .
          '

          sha256sum $out/layer.tar \
            | cut -f 1 -d ' ' \
            > $out/checksum
        '';
      };

      closureRoots = lib.optionals includeStorePaths /* normally true */ (
        [ baseJson customisationLayer ]
      );
      overallClosure = writeText "closure" (lib.concatStringsSep " " closureRoots);

      # These derivations are only created as implementation details of docker-tools,
      # so they'll be excluded from the created images.
      unnecessaryDrvs = [ baseJson overallClosure customisationLayer ];

      conf = runCommand "${baseName}-conf.json"
        {
          inherit fromImage maxLayers created;
          imageName = lib.toLower name;
          passthru.imageTag =
            if tag != null
            then tag
            else
              lib.head (lib.strings.splitString "-" (baseNameOf conf.outPath));
          paths = buildPackages.referencesByPopularity overallClosure;
          nativeBuildInputs = [ jq ];
        } ''
        ${if (tag == null) then ''
          outName="$(basename "$out")"
          outHash=$(echo "$outName" | cut -d - -f 1)

          imageTag=$outHash
        '' else ''
          imageTag="${tag}"
        ''}

        # convert "created" to iso format
        if [[ "$created" != "now" ]]; then
          created="$(date -Iseconds -d "$created")"
        fi

        paths() {
          cat $paths ${lib.concatMapStringsSep " "
                         (path: "| (grep -v ${path} || true)")
                         unnecessaryDrvs}
        }

        # Compute the number of layers that are already used by a potential
        # 'fromImage' as well as the customization layer. Ensure that there is
        # still at least one layer available to store the image contents.
        usedLayers=0

        # subtract number of base image layers
        if [[ -n "$fromImage" ]]; then
          (( usedLayers += $(tar -xOf "$fromImage" manifest.json | jq '.[0].Layers | length') ))
        fi

        # one layer will be taken up by the customisation layer
        (( usedLayers += 1 ))

        if ! (( $usedLayers < $maxLayers )); then
          echo >&2 "Error: usedLayers $usedLayers layers to store 'fromImage' and" \
            "'extraCommands', but only maxLayers=$maxLayers were" \
            "allowed. At least 1 layer is required to store contents."
          exit 1
        fi
        availableLayers=$(( maxLayers - usedLayers ))

        # Create $maxLayers worth of Docker Layers, one layer per store path
        # unless there are more paths than $maxLayers. In that case, create
        # $maxLayers-1 for the most popular layers, and smush the remaining
        # store paths in to one final layer.
        #
        # The following code is fiddly w.r.t. ensuring every layer is
        # created, and that no paths are missed. If you change the
        # following lines, double-check that your code behaves properly
        # when the number of layers equals:
        # 0, maxLayers-1, maxLayers, and maxLayers+1
        store_layers="$(
          paths |
            jq -sR '
              rtrimstr("\n") | split("\n")
                | (.[:$maxLayers-1] | map([.])) + [ .[$maxLayers-1:] ]
                | map(select(length > 0))
            ' \
              --argjson maxLayers "$availableLayers"
        )"

        cat ${baseJson} | jq '
          . + {
            "store_dir": $store_dir,
            "from_image": $from_image,
            "store_layers": $store_layers,
            "customisation_layer": $customisation_layer,
            "repo_tag": $repo_tag,
            "created": $created
          }
        ' --arg store_dir "${storeDir}" \
          --argjson from_image ${if fromImage == null then "null" else "'\"${fromImage}\"'"} \
          --argjson store_layers "$store_layers" \
          --arg customisation_layer ${customisationLayer} \
          --arg repo_tag "$imageName:$imageTag" \
          --arg created "$created" |
          tee $out
      '';
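      # The stream script is wrapped below so that running the resulting
      # store path writes the image tarball to stdout. Usage sketch:
      #
      #   $(nix-build -A someStreamedImage) | docker load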
      result = runCommand "stream-${baseName}"
        {
          inherit (conf) imageName;
          passthru = passthru // {
            inherit (conf) imageTag;

            # Distinguish tarballs and exes at the Nix level so functions that
            # take images can know in advance how the image is supposed to be used.
            isExe = true;
          };
          nativeBuildInputs = [ makeWrapper ];
        } ''
        makeWrapper ${streamScript} $out --add-flags ${conf}
      '';
    in
    result;
}
pkgs/build-support/docker/detjson.py (new file, 40 lines)
@@ -0,0 +1,40 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Deterministic layer json: https://github.com/docker/hub-feedback/issues/488

import sys
reload(sys)  # Python 2 only: reload is needed before setdefaultencoding
sys.setdefaultencoding('UTF8')
import json

# If any of the keys below are equal to a certain value
# then we can delete it because it's the default value
SAFEDELS = {
    "Size": 0,
    "config": {
        "ExposedPorts": None,
        "MacAddress": "",
        "NetworkDisabled": False,
        "PortSpecs": None,
        "VolumeDriver": ""
    }
}
SAFEDELS["container_config"] = SAFEDELS["config"]

def makedet(j, safedels):
    for k, v in safedels.items():
        if k not in j:
            continue
        if type(v) == dict:
            makedet(j[k], v)
        elif j[k] == v:
            del j[k]

def main():
    j = json.load(sys.stdin)
    makedet(j, SAFEDELS)
    json.dump(j, sys.stdout, sort_keys=True)

if __name__ == '__main__':
    main()
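
# Usage sketch (assumes Python 2, as implied by reload/setdefaultencoding;
# the file names are illustrative): reads layer JSON on stdin, writes the
# deterministic form to stdout.
#   detjson.py < layer/json > json.det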
pkgs/build-support/docker/examples.nix (new file, 613 lines)
@@ -0,0 +1,613 @@
# Examples of using the docker tools to build packages.
#
# This file defines several docker images. In order to use an image,
# build its derivation with `nix-build`, and then load the result with
# `docker load`. For example:
#
#  $ nix-build '<nixpkgs>' -A dockerTools.examples.redis
#  $ docker load < result

{ pkgs, buildImage, buildLayeredImage, fakeNss, pullImage, shadowSetup, buildImageWithNixDb, pkgsCross }:

rec {
  # 1. basic example
  bash = buildImage {
    name = "bash";
    tag = "latest";
    contents = pkgs.bashInteractive;
  };

  # 2. service example, layered on another image
  redis = buildImage {
    name = "redis";
    tag = "latest";

    # for example's sake, we can layer redis on top of bash or debian
    fromImage = bash;
    # fromImage = debian;

    contents = pkgs.redis;
    runAsRoot = ''
      mkdir -p /data
    '';

    config = {
      Cmd = [ "/bin/redis-server" ];
      WorkingDir = "/data";
      Volumes = {
        "/data" = {};
      };
    };
  };

  # 3. another service example
  nginx = let
    nginxPort = "80";
    nginxConf = pkgs.writeText "nginx.conf" ''
      user nobody nobody;
      daemon off;
      error_log /dev/stdout info;
      pid /dev/null;
      events {}
      http {
        access_log /dev/stdout;
        server {
          listen ${nginxPort};
          index index.html;
          location / {
            root ${nginxWebRoot};
          }
        }
      }
    '';
    nginxWebRoot = pkgs.writeTextDir "index.html" ''
      <html><body><h1>Hello from NGINX</h1></body></html>
    '';
  in
  buildLayeredImage {
    name = "nginx-container";
    tag = "latest";
    contents = [
      fakeNss
      pkgs.nginx
    ];

    extraCommands = ''
      # nginx still tries to read this directory even if error_log
      # directive is specifying another file :/
      mkdir -p var/log/nginx
      mkdir -p var/cache/nginx
    '';

    config = {
      Cmd = [ "nginx" "-c" nginxConf ];
      ExposedPorts = {
        "${nginxPort}/tcp" = {};
      };
    };
  };

  # 4. example of pulling an image. could be used as a base for other images
  nixFromDockerHub = pullImage {
    imageName = "nixos/nix";
    imageDigest = "sha256:85299d86263a3059cf19f419f9d286cc9f06d3c13146a8ebbb21b3437f598357";
    sha256 = "19fw0n3wmddahzr20mhdqv6jkjn1kanh6n2mrr08ai53dr8ph5n7";
    finalImageTag = "2.2.1";
    finalImageName = "nix";
  };
  # Same example, but re-fetches every time the fetcher implementation changes.
  # NOTE: Only use this for testing, or you'd be wasting a lot of time, network and space.
  testNixFromDockerHub = pkgs.testers.invalidateFetcherByDrvHash pullImage {
    imageName = "nixos/nix";
    imageDigest = "sha256:85299d86263a3059cf19f419f9d286cc9f06d3c13146a8ebbb21b3437f598357";
    sha256 = "19fw0n3wmddahzr20mhdqv6jkjn1kanh6n2mrr08ai53dr8ph5n7";
    finalImageTag = "2.2.1";
    finalImageName = "nix";
  };

  # 5. example of multiple contents, emacs and vi happily coexisting
  editors = buildImage {
    name = "editors";
    contents = [
      pkgs.coreutils
      pkgs.bash
      pkgs.emacs
      pkgs.vim
      pkgs.nano
    ];
  };

  # 6. nix example to play with the container nix store
  # docker run -it --rm nix nix-store -qR $(nix-build '<nixpkgs>' -A nix)
  nix = buildImageWithNixDb {
    name = "nix";
    tag = "latest";
    contents = [
      # nix-store uses cat program to display results as specified by
      # the image env variable NIX_PAGER.
      pkgs.coreutils
      pkgs.nix
      pkgs.bash
    ];
    config = {
      Env = [
        "NIX_PAGER=cat"
        # A user is required by nix
        # https://github.com/NixOS/nix/blob/9348f9291e5d9e4ba3c4347ea1b235640f54fd79/src/libutil/util.cc#L478
        "USER=nobody"
      ];
    };
  };

  # 7. example of adding something on top of an image pulled by our
  # dockerTools chain.
  onTopOfPulledImage = buildImage {
    name = "onTopOfPulledImage";
    tag = "latest";
    fromImage = nixFromDockerHub;
    contents = [ pkgs.hello ];
  };

  # 8. regression test for erroneous use of eval and string expansion.
  # See issue #34779 and PR #40947 for details.
  runAsRootExtraCommands = pkgs.dockerTools.buildImage {
    name = "runAsRootExtraCommands";
    tag = "latest";
    contents = [ pkgs.coreutils ];
    # The parens here are to create problematic bash to embed and eval. In case
    # this is *embedded* into the script (with nix expansion) the initial quotes
    # will close the string and the following parens are unexpected
    runAsRoot = ''echo "(runAsRoot)" > runAsRoot'';
    extraCommands = ''echo "(extraCommand)" > extraCommands'';
  };

  # 9. Ensure that setting created to now results in a date which
  # isn't the epoch + 1
  unstableDate = pkgs.dockerTools.buildImage {
    name = "unstable-date";
    tag = "latest";
    contents = [ pkgs.coreutils ];
    created = "now";
  };

  # 10. Create a layered image
  layered-image = pkgs.dockerTools.buildLayeredImage {
    name = "layered-image";
    tag = "latest";
    extraCommands = ''echo "(extraCommand)" > extraCommands'';
    config.Cmd = [ "${pkgs.hello}/bin/hello" ];
    contents = [ pkgs.hello pkgs.bash pkgs.coreutils ];
  };

  # 11. Create an image on top of a layered image
  layered-on-top = pkgs.dockerTools.buildImage {
    name = "layered-on-top";
    tag = "latest";
    fromImage = layered-image;
    extraCommands = ''
      mkdir ./example-output
      chmod 777 ./example-output
    '';
    config = {
      Env = [ "PATH=${pkgs.coreutils}/bin/" ];
      WorkingDir = "/example-output";
      Cmd = [
        "${pkgs.bash}/bin/bash" "-c" "echo hello > foo; cat foo"
      ];
    };
  };

  # 12. Create a layered image on top of a layered image
  layered-on-top-layered = pkgs.dockerTools.buildLayeredImage {
    name = "layered-on-top-layered";
    tag = "latest";
    fromImage = layered-image;
    extraCommands = ''
      mkdir ./example-output
      chmod 777 ./example-output
    '';
    config = {
      Env = [ "PATH=${pkgs.coreutils}/bin/" ];
      WorkingDir = "/example-output";
      Cmd = [
        "${pkgs.bash}/bin/bash" "-c" "echo hello > foo; cat foo"
      ];
    };
  };

  # 13. example of running something as root on top of a parent image
  # Regression test related to PR #52109
  runAsRootParentImage = buildImage {
    name = "runAsRootParentImage";
    tag = "latest";
    runAsRoot = "touch /example-file";
    fromImage = bash;
  };

  # 14. example of a 3-layer image. This image is used to verify that
  # the order of layers is correct.
  # It allows validating that
  # - the layers of the parent are below
  # - the order of the parent layers is preserved at image build time
  #   (this is why there are 3 images)
  layersOrder = let
    l1 = pkgs.dockerTools.buildImage {
      name = "l1";
      tag = "latest";
      extraCommands = ''
        mkdir -p tmp
        echo layer1 > tmp/layer1
        echo layer1 > tmp/layer2
        echo layer1 > tmp/layer3
      '';
    };
    l2 = pkgs.dockerTools.buildImage {
      name = "l2";
      fromImage = l1;
      tag = "latest";
      extraCommands = ''
        mkdir -p tmp
        echo layer2 > tmp/layer2
        echo layer2 > tmp/layer3
      '';
    };
  in pkgs.dockerTools.buildImage {
    name = "l3";
    fromImage = l2;
    tag = "latest";
    contents = [ pkgs.coreutils ];
    extraCommands = ''
      mkdir -p tmp
      echo layer3 > tmp/layer3
    '';
  };

  # 15. Environment variable inheritance.
  # Child image should inherit the parent's environment variables,
  # optionally overriding them.
  environmentVariablesParent = pkgs.dockerTools.buildImage {
    name = "parent";
    tag = "latest";
    config = {
      Env = [
        "FROM_PARENT=true"
        "LAST_LAYER=parent"
      ];
    };
  };

  environmentVariables = pkgs.dockerTools.buildImage {
    name = "child";
    fromImage = environmentVariablesParent;
    tag = "latest";
    contents = [ pkgs.coreutils ];
    config = {
      Env = [
        "FROM_CHILD=true"
        "LAST_LAYER=child"
      ];
    };
  };

  environmentVariablesLayered = pkgs.dockerTools.buildLayeredImage {
    name = "child";
    fromImage = environmentVariablesParent;
    tag = "latest";
    contents = [ pkgs.coreutils ];
    config = {
      Env = [
        "FROM_CHILD=true"
        "LAST_LAYER=child"
      ];
    };
  };

  # 16. Create another layered image, for comparing layers with image 10.
  another-layered-image = pkgs.dockerTools.buildLayeredImage {
    name = "another-layered-image";
    tag = "latest";
    config.Cmd = [ "${pkgs.hello}/bin/hello" ];
  };

  # 17. Create a layered image with only 2 layers
  two-layered-image = pkgs.dockerTools.buildLayeredImage {
    name = "two-layered-image";
    tag = "latest";
    config.Cmd = [ "${pkgs.hello}/bin/hello" ];
    contents = [ pkgs.bash pkgs.hello ];
    maxLayers = 2;
  };

  # 18. Create a layered image with more packages than max layers.
  # coreutils and hello are part of the same layer
  bulk-layer = pkgs.dockerTools.buildLayeredImage {
    name = "bulk-layer";
    tag = "latest";
    contents = with pkgs; [
      coreutils hello
    ];
    maxLayers = 2;
  };

  # 19. Create a layered image with a base image and more packages than max
  # layers. coreutils and hello are part of the same layer
  layered-bulk-layer = pkgs.dockerTools.buildLayeredImage {
    name = "layered-bulk-layer";
    tag = "latest";
    fromImage = two-layered-image;
    contents = with pkgs; [
      coreutils hello
    ];
    maxLayers = 4;
  };

  # 20. Create a "layered" image without nix store layers. This is not
  # recommended, but can be useful for base images in rare cases.
  no-store-paths = pkgs.dockerTools.buildLayeredImage {
    name = "no-store-paths";
    tag = "latest";
    extraCommands = ''
      # This removes sharing of busybox and is not recommended. We do this
      # to make the example suitable as a test case with working binaries.
      cp -r ${pkgs.pkgsStatic.busybox}/* .

      # This is a "build" dependency that will not appear in the image
      ${pkgs.hello}/bin/hello
    '';
  };

  nixLayered = pkgs.dockerTools.buildLayeredImageWithNixDb {
    name = "nix-layered";
    tag = "latest";
    contents = [
      # nix-store uses cat program to display results as specified by
      # the image env variable NIX_PAGER.
      pkgs.coreutils
      pkgs.nix
      pkgs.bash
    ];
    config = {
      Env = [
        "NIX_PAGER=cat"
        # A user is required by nix
        # https://github.com/NixOS/nix/blob/9348f9291e5d9e4ba3c4347ea1b235640f54fd79/src/libutil/util.cc#L478
        "USER=nobody"
      ];
    };
  };

  # 21. Support files in the store on buildLayeredImage
  # See: https://github.com/NixOS/nixpkgs/pull/91084#issuecomment-653496223
  filesInStore = pkgs.dockerTools.buildLayeredImageWithNixDb {
    name = "file-in-store";
    tag = "latest";
    contents = [
      pkgs.coreutils
      pkgs.nix
      (pkgs.writeScriptBin "myscript" ''
        #!${pkgs.runtimeShell}
        cat ${pkgs.writeText "somefile" "some data"}
      '')
    ];
    config = {
      Cmd = [ "myscript" ];
      # For some reason 'nix-store --verify' requires this environment variable
      Env = [ "USER=root" ];
    };
  };

  # 22. Ensure that setting created to now results in a date which
  # isn't the epoch + 1 for layered images.
  unstableDateLayered = pkgs.dockerTools.buildLayeredImage {
    name = "unstable-date-layered";
    tag = "latest";
    contents = [ pkgs.coreutils ];
    created = "now";
  };

  # 23. Ensure that layers are unpacked in the correct order before the
  # runAsRoot script is executed.
  layersUnpackOrder =
    let
      layerOnTopOf = parent: layerName:
        pkgs.dockerTools.buildImage {
          name = "layers-unpack-order-${layerName}";
          tag = "latest";
          fromImage = parent;
          contents = [ pkgs.coreutils ];
          runAsRoot = ''
            #!${pkgs.runtimeShell}
            echo -n "${layerName}" >> /layer-order
          '';
        };
      # When executing the runAsRoot script when building layer C, if layer B is
      # not unpacked on top of layer A, the contents of /layer-order will not be
      # "ABC".
      layerA = layerOnTopOf null "a";
      layerB = layerOnTopOf layerA "b";
      layerC = layerOnTopOf layerB "c";
    in layerC;

  # buildImage without explicit tag
  bashNoTag = pkgs.dockerTools.buildImage {
    name = "bash-no-tag";
    contents = pkgs.bashInteractive;
  };

  # buildLayeredImage without explicit tag
  bashNoTagLayered = pkgs.dockerTools.buildLayeredImage {
    name = "bash-no-tag-layered";
    contents = pkgs.bashInteractive;
  };

  # streamLayeredImage without explicit tag
  bashNoTagStreamLayered = pkgs.dockerTools.streamLayeredImage {
    name = "bash-no-tag-stream-layered";
    contents = pkgs.bashInteractive;
  };

  # buildLayeredImage with non-root user
  bashLayeredWithUser =
    let
      nonRootShadowSetup = { user, uid, gid ? uid }: with pkgs; [
        (
          writeTextDir "etc/shadow" ''
            root:!x:::::::
            ${user}:!:::::::
          ''
        )
        (
          writeTextDir "etc/passwd" ''
            root:x:0:0::/root:${runtimeShell}
            ${user}:x:${toString uid}:${toString gid}::/home/${user}:
          ''
        )
        (
          writeTextDir "etc/group" ''
            root:x:0:
            ${user}:x:${toString gid}:
          ''
        )
        (
          writeTextDir "etc/gshadow" ''
            root:x::
            ${user}:x::
          ''
        )
      ];
    in
    pkgs.dockerTools.buildLayeredImage {
      name = "bash-layered-with-user";
      tag = "latest";
      contents = [ pkgs.bash pkgs.coreutils ] ++ nonRootShadowSetup { uid = 999; user = "somebody"; };
    };

  # basic example, with cross compilation
  cross = let
    # Cross compile for x86_64 if on aarch64
    crossPkgs =
      if pkgs.stdenv.hostPlatform.system == "aarch64-linux" then pkgsCross.gnu64
      else pkgsCross.aarch64-multiplatform;
  in crossPkgs.dockerTools.buildImage {
    name = "hello-cross";
    tag = "latest";
    contents = crossPkgs.hello;
  };

  # layered image where a store path is itself a symlink
  layeredStoreSymlink =
    let
      target = pkgs.writeTextDir "dir/target" "Content doesn't matter.";
      symlink = pkgs.runCommand "symlink" {} "ln -s ${target} $out";
    in
    pkgs.dockerTools.buildLayeredImage {
      name = "layeredstoresymlink";
      tag = "latest";
      contents = [ pkgs.bash symlink ];
    } // { passthru = { inherit symlink; }; };

  # image with registry/ prefix
  prefixedImage = pkgs.dockerTools.buildImage {
    name = "registry-1.docker.io/image";
    tag = "latest";
    config.Cmd = [ "${pkgs.hello}/bin/hello" ];
  };

  # layered image with registry/ prefix
  prefixedLayeredImage = pkgs.dockerTools.buildLayeredImage {
    name = "registry-1.docker.io/layered-image";
    tag = "latest";
    config.Cmd = [ "${pkgs.hello}/bin/hello" ];
  };

  # layered image with files owned by a user other than root
  layeredImageWithFakeRootCommands = pkgs.dockerTools.buildLayeredImage {
    name = "layered-image-with-fake-root-commands";
    tag = "latest";
    contents = [
      pkgs.pkgsStatic.busybox
    ];
    fakeRootCommands = ''
      mkdir -p ./home/jane
      chown 1000 ./home/jane
      ln -s ${pkgs.hello.overrideAttrs (o: {
        # A unique `hello` to make sure that it isn't included via another mechanism by accident.
        configureFlags = o.configureFlags or "" + " --program-prefix=layeredImageWithFakeRootCommands-";
        doCheck = false;
      })} ./hello
    '';
  };

  # tarball consisting of both bash and redis images
  mergedBashAndRedis = pkgs.dockerTools.mergeImages [
    bash
    redis
  ];

  # tarball consisting of bash (without tag) and redis images
  mergedBashNoTagAndRedis = pkgs.dockerTools.mergeImages [
    bashNoTag
    redis
  ];

  # tarball consisting of bash and layered image with different owner of the
  # /home/jane directory
  mergedBashFakeRoot = pkgs.dockerTools.mergeImages [
    bash
    layeredImageWithFakeRootCommands
  ];

  helloOnRoot = pkgs.dockerTools.streamLayeredImage {
    name = "hello";
    tag = "latest";
    contents = [
      (pkgs.buildEnv {
        name = "hello-root";
        paths = [ pkgs.hello ];
      })
    ];
    config.Cmd = [ "hello" ];
  };

  helloOnRootNoStore = pkgs.dockerTools.streamLayeredImage {
    name = "hello";
    tag = "latest";
    contents = [
      (pkgs.buildEnv {
        name = "hello-root";
        paths = [ pkgs.hello ];
      })
    ];
    config.Cmd = [ "hello" ];
    includeStorePaths = false;
  };

  # Example export of the bash image
  exportBash = pkgs.dockerTools.exportImage { fromImage = bash; };

  imageViaFakeChroot = pkgs.dockerTools.streamLayeredImage {
    name = "image-via-fake-chroot";
    tag = "latest";
    config.Cmd = [ "hello" ];
    enableFakechroot = true;
    # Crucially, instead of a relative path, this creates /bin, which is
    # intercepted by fakechroot.
    # This functionality is not available on darwin as of 2021.
    fakeRootCommands = ''
      mkdir /bin
      ln -s ${pkgs.hello}/bin/hello /bin/hello
    '';
  };

  build-image-with-path = buildImage {
    name = "build-image-with-path";
    tag = "latest";
    contents = [ pkgs.bashInteractive ./test-dummy ];
  };

  layered-image-with-path = pkgs.dockerTools.streamLayeredImage {
    name = "layered-image-with-path";
    tag = "latest";
    contents = [ pkgs.bashInteractive ./test-dummy ];
  };
}
pkgs/build-support/docker/nix-prefetch-docker (new executable file, 173 lines)
@@ -0,0 +1,173 @@
#! /usr/bin/env bash

set -e -o pipefail

os=
arch=
imageName=
imageTag=
imageDigest=
finalImageName=
finalImageTag=
hashType=$NIX_HASH_ALGO
hashFormat=$hashFormat
format=nix

usage(){
    echo >&2 "syntax: nix-prefetch-docker [options] [IMAGE_NAME [IMAGE_TAG|IMAGE_DIGEST]]

Options:
      --os os                   OS to fetch image for
      --arch arch               Arch to fetch image for
      --image-name name         Name of the image to fetch
      --image-tag tag           Image tag
      --image-digest digest     Image digest
      --final-image-name name   Desired name of the image
      --final-image-tag tag     Desired image tag
      --json                    Output result in json format instead of nix
      --quiet                   Only print the final result
"
    exit 1
}

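# Typical invocation (the image name and tag here are only illustrative):
#
#   nix-prefetch-docker --image-name nixos/nix --image-tag latest
#
# On success the script prints an attribute set suitable for
# dockerTools.pullImage.
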
get_image_digest(){
    local imageName=$1
    local imageTag=$2

    if test -z "$imageTag"; then
        imageTag="latest"
    fi

    skopeo --insecure-policy --tmpdir="$TMPDIR" inspect "docker://$imageName:$imageTag" | jq '.Digest' -r
}

get_name() {
    local imageName=$1
    local imageTag=$2

    echo "docker-image-$(echo "$imageName:$imageTag" | tr '/:' '-').tar"
}

argi=0
argfun=""
for arg; do
    if test -z "$argfun"; then
        case $arg in
            --os) argfun=set_os;;
            --arch) argfun=set_arch;;
            --image-name) argfun=set_imageName;;
            --image-tag) argfun=set_imageTag;;
            --image-digest) argfun=set_imageDigest;;
            --final-image-name) argfun=set_finalImageName;;
            --final-image-tag) argfun=set_finalImageTag;;
            --quiet) QUIET=true;;
            --json) format=json;;
            --help) usage; exit;;
            *)
                : $((++argi))
                case $argi in
                    1) imageName=$arg;;
                    2) [[ $arg == *"sha256"* ]] && imageDigest=$arg || imageTag=$arg;;
                    *) exit 1;;
                esac
                ;;
        esac
    else
        case $argfun in
            set_*)
                var=${argfun#set_}
                eval "$var=\$arg"
                ;;
        esac
        argfun=""
    fi
done

if test -z "$imageName"; then
    usage
fi

if test -z "$os"; then
    os=linux
fi

if test -z "$arch"; then
    arch=amd64
fi

if test -z "$hashType"; then
    hashType=sha256
fi

if test -z "$hashFormat"; then
    hashFormat=base32
fi

if test -z "$finalImageName"; then
    finalImageName="$imageName"
fi

if test -z "$finalImageTag"; then
    if test -z "$imageTag"; then
        finalImageTag="latest"
    else
        finalImageTag="$imageTag"
    fi
fi

if test -z "$imageDigest"; then
    imageDigest=$(get_image_digest "$imageName" "$imageTag")
fi

sourceUrl="docker://$imageName@$imageDigest"

tmpPath="$(mktemp -d "${TMPDIR:-/tmp}/skopeo-copy-tmp-XXXXXXXX")"
trap "rm -rf \"$tmpPath\"" EXIT

tmpFile="$tmpPath/$(get_name "$finalImageName" "$finalImageTag")"

if test -z "$QUIET"; then
    skopeo --insecure-policy --tmpdir="$TMPDIR" --override-os "$os" --override-arch "$arch" copy "$sourceUrl" "docker-archive://$tmpFile:$finalImageName:$finalImageTag" >&2
else
    skopeo --insecure-policy --tmpdir="$TMPDIR" --override-os "$os" --override-arch "$arch" copy "$sourceUrl" "docker-archive://$tmpFile:$finalImageName:$finalImageTag" > /dev/null
fi

# Compute the hash.
imageHash=$(nix-hash --flat --type $hashType --base32 "$tmpFile")

# Add the downloaded file to the Nix store.
finalPath=$(nix-store --add-fixed "$hashType" "$tmpFile")

if test -z "$QUIET"; then
    echo "-> ImageName: $imageName" >&2
    echo "-> ImageDigest: $imageDigest" >&2
    echo "-> FinalImageName: $finalImageName" >&2
    echo "-> FinalImageTag: $finalImageTag" >&2
    echo "-> ImagePath: $finalPath" >&2
    echo "-> ImageHash: $imageHash" >&2
fi

if [ "$format" == "nix" ]; then
cat <<EOF
{
  imageName = "$imageName";
  imageDigest = "$imageDigest";
  sha256 = "$imageHash";
  finalImageName = "$finalImageName";
  finalImageTag = "$finalImageTag";
}
EOF
else
cat <<EOF
{
  "imageName": "$imageName",
  "imageDigest": "$imageDigest",
  "sha256": "$imageHash",
  "finalImageName": "$finalImageName",
  "finalImageTag": "$finalImageTag"
}
EOF
fi
24 pkgs/build-support/docker/nix-prefetch-docker.nix Normal file
@@ -0,0 +1,24 @@
{ lib, stdenv, makeWrapper, nix, skopeo, jq }:

stdenv.mkDerivation {
  name = "nix-prefetch-docker";

  nativeBuildInputs = [ makeWrapper ];

  dontUnpack = true;

  installPhase = ''
    install -vD ${./nix-prefetch-docker} $out/bin/$name;
    wrapProgram $out/bin/$name \
      --prefix PATH : ${lib.makeBinPath [ nix skopeo jq ]} \
      --set HOME /homeless-shelter
  '';

  preferLocalBuild = true;

  meta = with lib; {
    description = "Script used to obtain source hashes for dockerTools.pullImage";
    maintainers = with maintainers; [ offline ];
    platforms = platforms.unix;
  };
}
391 pkgs/build-support/docker/stream_layered_image.py Normal file
@@ -0,0 +1,391 @@
"""
|
||||
This script generates a Docker image from a set of store paths. Uses
|
||||
Docker Image Specification v1.2 as reference [1].
|
||||
|
||||
It expects a JSON file with the following properties and writes the
|
||||
image as an uncompressed tarball to stdout:
|
||||
|
||||
* "architecture", "config", "os", "created", "repo_tag" correspond to
|
||||
the fields with the same name on the image spec [2].
|
||||
* "created" can be "now".
|
||||
* "created" is also used as mtime for files added to the image.
|
||||
* "store_layers" is a list of layers in ascending order, where each
|
||||
layer is the list of store paths to include in that layer.
|
||||
|
||||
The main challenge for this script to create the final image in a
|
||||
streaming fashion, without dumping any intermediate data to disk
|
||||
for performance.
|
||||
|
||||
A docker image has each layer contents archived as separate tarballs,
|
||||
and they later all get enveloped into a single big tarball in a
|
||||
content addressed fashion. However, because how "tar" format works,
|
||||
we have to know about the name (which includes the checksum in our
|
||||
case) and the size of the tarball before we can start adding it to the
|
||||
outer tarball. We achieve that by creating the layer tarballs twice;
|
||||
on the first iteration we calculate the file size and the checksum,
|
||||
and on the second one we actually stream the contents. 'add_layer_dir'
|
||||
function does all this.
|
||||
|
||||
[1]: https://github.com/moby/moby/blob/master/image/spec/v1.2.md
|
||||
[2]: https://github.com/moby/moby/blob/4fb59c20a4fb54f944fe170d0ff1d00eb4a24d6f/image/spec/v1.2.md#image-json-field-descriptions
|
||||
""" # noqa: E501
|
||||
|
||||
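# A rough sketch of the two-pass approach described above, using the
# helpers defined later in this file (the store path is hypothetical and
# this snippet is not executed as part of the script):
#
#   checksum_pass = ExtractChecksum()
#   archive_paths_to(checksum_pass, ["/nix/store/...-hello"], mtime=0)
#   digest, size = checksum_pass.extract()
#   # "<digest>/layer.tar" can now be entered into the outer tarball
#   # with an exact size before the contents are streamed a second time.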

import io
import os
import re
import sys
import json
import hashlib
import pathlib
import tarfile
import itertools
import threading
from datetime import datetime, timezone
from collections import namedtuple


def archive_paths_to(obj, paths, mtime):
    """
    Writes the given store paths as a tar file to the given stream.

    obj: Stream to write to. Should have a 'write' method.
    paths: List of store paths.
    mtime: 'mtime' of the added files. Should be an integer representing
           a POSIX time.
    """

    # gettarinfo makes the paths relative; this makes them
    # absolute again
    def append_root(ti):
        ti.name = "/" + ti.name
        return ti

    def apply_filters(ti):
        ti.mtime = mtime
        ti.uid = 0
        ti.gid = 0
        ti.uname = "root"
        ti.gname = "root"
        return ti

    def nix_root(ti):
        ti.mode = 0o0555  # r-xr-xr-x
        return ti

    def dir(path):
        ti = tarfile.TarInfo(path)
        ti.type = tarfile.DIRTYPE
        return ti

    with tarfile.open(fileobj=obj, mode="w|") as tar:
        # To be consistent with the docker utilities, we need to have
        # these directories first when building layer tarballs.
        tar.addfile(apply_filters(nix_root(dir("/nix"))))
        tar.addfile(apply_filters(nix_root(dir("/nix/store"))))

        for path in paths:
            path = pathlib.Path(path)
            if path.is_symlink():
                files = [path]
            else:
                files = itertools.chain([path], path.rglob("*"))

            for filename in sorted(files):
                ti = append_root(tar.gettarinfo(filename))

                # copy hardlinks as regular files
                if ti.islnk():
                    ti.type = tarfile.REGTYPE
                    ti.linkname = ""
                    ti.size = filename.stat().st_size

                ti = apply_filters(ti)
                if ti.isfile():
                    with open(filename, "rb") as f:
                        tar.addfile(ti, f)
                else:
                    tar.addfile(ti)


class ExtractChecksum:
    """
    A writable stream which only calculates the final file size and
    sha256sum, while discarding the actual contents.
    """

    def __init__(self):
        self._digest = hashlib.sha256()
        self._size = 0

    def write(self, data):
        self._digest.update(data)
        self._size += len(data)

    def extract(self):
        """
        Returns: Hex-encoded sha256sum and size as a tuple.
        """
        return (self._digest.hexdigest(), self._size)


FromImage = namedtuple("FromImage", ["tar", "manifest_json", "image_json"])
# Some metadata for a layer
LayerInfo = namedtuple("LayerInfo", ["size", "checksum", "path", "paths"])


def load_from_image(from_image_str):
    """
    Loads the given base image, if any.

    from_image_str: Path to the base image archive.

    Returns: A 'FromImage' object with references to the loaded base image,
             or 'None' if no base image was provided.
    """
    if from_image_str is None:
        return None

    base_tar = tarfile.open(from_image_str)

    manifest_json_tarinfo = base_tar.getmember("manifest.json")
    with base_tar.extractfile(manifest_json_tarinfo) as f:
        manifest_json = json.load(f)

    image_json_tarinfo = base_tar.getmember(manifest_json[0]["Config"])
    with base_tar.extractfile(image_json_tarinfo) as f:
        image_json = json.load(f)

    return FromImage(base_tar, manifest_json, image_json)


def add_base_layers(tar, from_image):
    """
    Adds the layers from the given base image to the final image.

    tar: 'tarfile.TarFile' object for new layers to be added to.
    from_image: 'FromImage' object with references to the loaded base image.
    """
    if from_image is None:
        print("No 'fromImage' provided", file=sys.stderr)
        return []

    layers = from_image.manifest_json[0]["Layers"]
    checksums = from_image.image_json["rootfs"]["diff_ids"]
    layers_checksums = zip(layers, checksums)

    for num, (layer, checksum) in enumerate(layers_checksums, start=1):
        layer_tarinfo = from_image.tar.getmember(layer)
        checksum = re.sub(r"^sha256:", "", checksum)

        tar.addfile(layer_tarinfo, from_image.tar.extractfile(layer_tarinfo))
        path = layer_tarinfo.path
        size = layer_tarinfo.size

        print("Adding base layer", num, "from", path, file=sys.stderr)
        yield LayerInfo(size=size, checksum=checksum, path=path, paths=[path])

    from_image.tar.close()


def overlay_base_config(from_image, final_config):
    """
    Overlays the final image 'config' JSON on top of selected defaults from
    the base image 'config' JSON.

    from_image: 'FromImage' object with references to the loaded base image.
    final_config: 'dict' object of the final image 'config' JSON.
    """
    if from_image is None:
        return final_config

    base_config = from_image.image_json["config"]

    # Preserve environment from base image
    final_env = base_config.get("Env", []) + final_config.get("Env", [])
    if final_env:
        # Resolve duplicates (last one wins) and format back as list
        resolved_env = {entry.split("=", 1)[0]: entry for entry in final_env}
        final_config["Env"] = list(resolved_env.values())
    return final_config
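
# Example of the Env overlay above: a base image Env of
# ["PATH=/usr/bin", "FOO=a"] overlaid with a final Env of ["FOO=b"]
# resolves to ["PATH=/usr/bin", "FOO=b"]; the final image's entry wins
# because it comes last in final_env.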


def add_layer_dir(tar, paths, store_dir, mtime):
    """
    Appends given store paths to a TarFile object as a new layer.

    tar: 'tarfile.TarFile' object for the new layer to be added to.
    paths: List of store paths.
    store_dir: the root directory of the nix store
    mtime: 'mtime' of the added files and the layer tarball.
           Should be an integer representing a POSIX time.

    Returns: A 'LayerInfo' object containing some metadata of
             the layer added.
    """

    invalid_paths = [i for i in paths if not i.startswith(store_dir)]
    assert len(invalid_paths) == 0, \
        f"Expecting absolute paths from {store_dir}, but got: {invalid_paths}"

    # First, calculate the tarball checksum and the size.
    extract_checksum = ExtractChecksum()
    archive_paths_to(
        extract_checksum,
        paths,
        mtime=mtime,
    )
    (checksum, size) = extract_checksum.extract()

    path = f"{checksum}/layer.tar"
    layer_tarinfo = tarfile.TarInfo(path)
    layer_tarinfo.size = size
    layer_tarinfo.mtime = mtime

    # Then actually stream the contents to the outer tarball.
    read_fd, write_fd = os.pipe()
    with open(read_fd, "rb") as read, open(write_fd, "wb") as write:
        def producer():
            archive_paths_to(
                write,
                paths,
                mtime=mtime,
            )
            write.close()

        # Closing the write end of the pipe signals EOF to the read end,
        # so we don't need to wait until this thread is finished.
        #
        # Any exception from the thread will get printed by the default
        # exception handler, and the 'addfile' call will fail since it
        # won't be able to read the required amount of bytes.
        threading.Thread(target=producer).start()
        tar.addfile(layer_tarinfo, read)

    return LayerInfo(size=size, checksum=checksum, path=path, paths=paths)


def add_customisation_layer(target_tar, customisation_layer, mtime):
    """
    Adds the customisation layer as a new layer. This layer is structured
    differently: the given store path already contains the 'layer.tar' and
    its corresponding sha256sum.

    target_tar: 'tarfile.TarFile' object for the new layer to be added to.
    customisation_layer: Path containing the layer archive.
    mtime: 'mtime' of the added layer tarball.
    """

    checksum_path = os.path.join(customisation_layer, "checksum")
    with open(checksum_path) as f:
        checksum = f.read().strip()
    assert len(checksum) == 64, f"Invalid sha256 at {checksum_path}."

    layer_path = os.path.join(customisation_layer, "layer.tar")

    path = f"{checksum}/layer.tar"
    tarinfo = target_tar.gettarinfo(layer_path)
    tarinfo.name = path
    tarinfo.mtime = mtime

    with open(layer_path, "rb") as f:
        target_tar.addfile(tarinfo, f)

    return LayerInfo(
        size=None,
        checksum=checksum,
        path=path,
        paths=[customisation_layer]
    )


def add_bytes(tar, path, content, mtime):
    """
    Adds a file to the tarball with given path and contents.

    tar: 'tarfile.TarFile' object.
    path: Path of the file as a string.
    content: Contents of the file.
    mtime: 'mtime' of the file. Should be an integer representing a POSIX time.
    """
    assert type(content) is bytes

    ti = tarfile.TarInfo(path)
    ti.size = len(content)
    ti.mtime = mtime
    tar.addfile(ti, io.BytesIO(content))


def main():
    with open(sys.argv[1], "r") as f:
        conf = json.load(f)

    created = (
        datetime.now(tz=timezone.utc)
        if conf["created"] == "now"
        else datetime.fromisoformat(conf["created"])
    )
    mtime = int(created.timestamp())
    store_dir = conf["store_dir"]

    from_image = load_from_image(conf["from_image"])

    with tarfile.open(mode="w|", fileobj=sys.stdout.buffer) as tar:
        layers = []
        layers.extend(add_base_layers(tar, from_image))

        start = len(layers) + 1
        for num, store_layer in enumerate(conf["store_layers"], start=start):
            print("Creating layer", num, "from paths:", store_layer,
                  file=sys.stderr)
            info = add_layer_dir(tar, store_layer, store_dir, mtime=mtime)
            layers.append(info)

        print("Creating layer", len(layers) + 1, "with customisation...",
              file=sys.stderr)
        layers.append(
            add_customisation_layer(
                tar,
                conf["customisation_layer"],
                mtime=mtime
            )
        )

        print("Adding manifests...", file=sys.stderr)

        image_json = {
            "created": datetime.isoformat(created),
            "architecture": conf["architecture"],
            "os": "linux",
            "config": overlay_base_config(from_image, conf["config"]),
            "rootfs": {
                "diff_ids": [f"sha256:{layer.checksum}" for layer in layers],
                "type": "layers",
            },
            "history": [
                {
                    "created": datetime.isoformat(created),
                    "comment": f"store paths: {layer.paths}"
                }
                for layer in layers
            ],
        }

        image_json = json.dumps(image_json, indent=4).encode("utf-8")
        image_json_checksum = hashlib.sha256(image_json).hexdigest()
        image_json_path = f"{image_json_checksum}.json"
        add_bytes(tar, image_json_path, image_json, mtime=mtime)

        manifest_json = [
            {
                "Config": image_json_path,
                "RepoTags": [conf["repo_tag"]],
                "Layers": [layer.path for layer in layers],
            }
        ]
        manifest_json = json.dumps(manifest_json, indent=4).encode("utf-8")
        add_bytes(tar, "manifest.json", manifest_json, mtime=mtime)

        print("Done.", file=sys.stderr)


if __name__ == "__main__":
    main()
24 pkgs/build-support/docker/tarsum.go Normal file
@@ -0,0 +1,24 @@
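// tarsum reads a tar archive from stdin and prints its Docker "tarsum"
// (Version1) content checksum, which the Docker build helpers in this
// tree use to checksum layers.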
package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "os"

    "github.com/docker/docker/pkg/tarsum"
)

func main() {
    ts, err := tarsum.NewTarSum(os.Stdin, true, tarsum.Version1)
    if err != nil {
        fmt.Println(err)
        os.Exit(1)
    }

    if _, err = io.Copy(ioutil.Discard, ts); err != nil {
        fmt.Println(err)
        os.Exit(1)
    }

    fmt.Println(ts.Sum(nil))
}
42 pkgs/build-support/docker/tarsum.nix Normal file
@@ -0,0 +1,42 @@
{ stdenv, go, docker, nixosTests }:

stdenv.mkDerivation {
  name = "tarsum";

  nativeBuildInputs = [ go ];
  disallowedReferences = [ go ];

  dontUnpack = true;

  CGO_ENABLED = 0;
  GOFLAGS = "-trimpath";
  GO111MODULE = "off";

  buildPhase = ''
    runHook preBuild
    mkdir tarsum
    cd tarsum
    cp ${./tarsum.go} tarsum.go
    export GOPATH=$(pwd)
    export GOCACHE="$TMPDIR/go-cache"
    mkdir -p src/github.com/docker/docker/pkg
    ln -sT ${docker.moby-src}/pkg/tarsum src/github.com/docker/docker/pkg/tarsum
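    # With GO111MODULE=off, `go build` resolves imports through the
    # classic GOPATH layout, so the symlink above is enough to make the
    # tarsum package from the moby source tree importable.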
    go build
    runHook postBuild
  '';

  installPhase = ''
    runHook preInstall
    mkdir -p $out/bin
    cp tarsum $out/bin/
    runHook postInstall
  '';

  passthru = {
    tests = {
      dockerTools = nixosTests.docker-tools;
    };
  };

  meta.platforms = go.meta.platforms;
}
1 pkgs/build-support/docker/test-dummy/hello.txt Normal file
@@ -0,0 +1 @@
Hello there!