uboot: (firmwareOdroidC2/C4) don't invoke patch tool, use patches = [] instead

https://github.com/NixOS/nixpkgs/blob/master/pkgs/stdenv/generic/setup.sh#L948 can do this nicely.

Signed-off-by: Anton Arapov <anton@deadbeef.mx>

commit 56de2bcd43
30691 changed files with 3076956 additions and 0 deletions
pkgs/build-support/add-opengl-runpath/default.nix (Normal file, 12 lines)
@@ -0,0 +1,12 @@
{ lib, stdenv }:

stdenv.mkDerivation {
  name = "add-opengl-runpath";

  driverLink = "/run/opengl-driver" + lib.optionalString stdenv.isi686 "-32";

  buildCommand = ''
    mkdir -p $out/nix-support
    substituteAll ${./setup-hook.sh} $out/nix-support/setup-hook
  '';
}
pkgs/build-support/add-opengl-runpath/setup-hook.sh (Normal file, 29 lines)
@@ -0,0 +1,29 @@
# Set RUNPATH so that driver libraries in /run/opengl-driver(-32)/lib can be found.
# This is needed to not rely on LD_LIBRARY_PATH, which does not work with setuid
# executables. Fixes https://github.com/NixOS/nixpkgs/issues/22760. It must be run
# in postFixup because RUNPATH stripping in fixup would undo it. Note that patchelf
# actually sets RUNPATH, not RPATH, which applies only to dependencies of the binary
# it is set on (including for dlopen), so the RUNPATH must indeed be set on these
# libraries and would not work if set only on executables.
addOpenGLRunpath() {
    local forceRpath=

    while [ $# -gt 0 ]; do
        case "$1" in
            --) shift; break;;
            --force-rpath) shift; forceRpath=1;;
            --*)
                echo "addOpenGLRunpath: ERROR: Invalid command line" \
                     "argument: $1" >&2
                return 1;;
            *) break;;
        esac
    done

    for file in "$@"; do
        if ! isELF "$file"; then continue; fi
        local origRpath="$(patchelf --print-rpath "$file")"
        patchelf --set-rpath "@driverLink@/lib:$origRpath" ${forceRpath:+--force-rpath} "$file"
    done
}
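For context, a minimal sketch of how a package derivation might consume this hook. The package name and source are hypothetical; the only assumptions taken from the files above are that the hook exposes an addOpenGLRunpath shell function and that it has to run in postFixup so RUNPATH stripping does not undo it.

{ stdenv, addOpenGLRunpath }:

stdenv.mkDerivation {
  pname = "example-gl-consumer";   # hypothetical package
  version = "1.0";
  src = ./.;                       # placeholder source

  nativeBuildInputs = [ addOpenGLRunpath ];

  # Run after fixup's RUNPATH stripping, as the hook's comments require.
  postFixup = ''
    addOpenGLRunpath $out/lib/*.so
  '';
}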
pkgs/build-support/agda/default.nix (Normal file, 94 lines)
@@ -0,0 +1,94 @@
# Builder for Agda packages.
|
||||
|
||||
{ stdenv, lib, self, Agda, runCommand, makeWrapper, writeText, ghcWithPackages, nixosTests }:
|
||||
|
||||
with lib.strings;
|
||||
|
||||
let
|
||||
withPackages' = {
|
||||
pkgs,
|
||||
ghc ? ghcWithPackages (p: with p; [ ieee754 ])
|
||||
}: let
|
||||
pkgs' = if builtins.isList pkgs then pkgs else pkgs self;
|
||||
library-file = writeText "libraries" ''
|
||||
${(concatMapStringsSep "\n" (p: "${p}/${p.libraryFile}") pkgs')}
|
||||
'';
|
||||
pname = "agdaWithPackages";
|
||||
version = Agda.version;
|
||||
in runCommand "${pname}-${version}" {
|
||||
inherit pname version;
|
||||
nativeBuildInputs = [ makeWrapper ];
|
||||
passthru = {
|
||||
unwrapped = Agda;
|
||||
tests = { inherit (nixosTests) agda; };
|
||||
};
|
||||
} ''
|
||||
mkdir -p $out/bin
|
||||
makeWrapper ${Agda}/bin/agda $out/bin/agda \
|
||||
--add-flags "--with-compiler=${ghc}/bin/ghc" \
|
||||
--add-flags "--library-file=${library-file}" \
|
||||
--add-flags "--local-interfaces"
|
||||
makeWrapper ${Agda}/bin/agda-mode $out/bin/agda-mode
|
||||
''; # Local interfaces have been added for now; see https://github.com/agda/agda/issues/4526
|
||||
|
||||
withPackages = arg: if builtins.isAttrs arg then withPackages' arg else withPackages' { pkgs = arg; };
|
||||
|
||||
extensions = [
|
||||
"agda"
|
||||
"agda-lib"
|
||||
"agdai"
|
||||
"lagda"
|
||||
"lagda.md"
|
||||
"lagda.org"
|
||||
"lagda.rst"
|
||||
"lagda.tex"
|
||||
];
|
||||
|
||||
defaults =
|
||||
{ pname
|
||||
, meta
|
||||
, buildInputs ? []
|
||||
, everythingFile ? "./Everything.agda"
|
||||
, includePaths ? []
|
||||
, libraryName ? pname
|
||||
, libraryFile ? "${libraryName}.agda-lib"
|
||||
, buildPhase ? null
|
||||
, installPhase ? null
|
||||
, extraExtensions ? []
|
||||
, ...
|
||||
}: let
|
||||
agdaWithArgs = withPackages (builtins.filter (p: p ? isAgdaDerivation) buildInputs);
|
||||
includePathArgs = concatMapStrings (path: "-i" + path + " ") (includePaths ++ [(dirOf everythingFile)]);
|
||||
in
|
||||
{
|
||||
inherit libraryName libraryFile;
|
||||
|
||||
isAgdaDerivation = true;
|
||||
|
||||
buildInputs = buildInputs ++ [ agdaWithArgs ];
|
||||
|
||||
buildPhase = if buildPhase != null then buildPhase else ''
|
||||
runHook preBuild
|
||||
agda ${includePathArgs} ${everythingFile}
|
||||
runHook postBuild
|
||||
'';
|
||||
|
||||
installPhase = if installPhase != null then installPhase else ''
|
||||
runHook preInstall
|
||||
mkdir -p $out
|
||||
find -not \( -path ${everythingFile} -or -path ${lib.interfaceFile everythingFile} \) -and \( ${concatMapStringsSep " -or " (p: "-name '*.${p}'") (extensions ++ extraExtensions)} \) -exec cp -p --parents -t "$out" {} +
|
||||
runHook postInstall
|
||||
'';
|
||||
|
||||
meta = if meta.broken or false then meta // { hydraPlatforms = lib.platforms.none; } else meta;
|
||||
|
||||
# Retrieve all packages from the finished package set that have the current package as a dependency and build them
|
||||
passthru.tests = with builtins;
|
||||
lib.filterAttrs (name: pkg: self.lib.isUnbrokenAgdaPackage pkg && elem pname (map (pkg: pkg.pname) pkg.buildInputs)) self;
|
||||
};
|
||||
in
|
||||
{
|
||||
mkDerivation = args: stdenv.mkDerivation (args // defaults args);
|
||||
|
||||
inherit withPackages withPackages';
|
||||
}
|
||||
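To make the mkDerivation defaults above concrete, here is a hedged sketch of an Agda library built with this helper. The package and dependency names are illustrative, and agdaPackages is assumed to be the fixed-point package set that is passed in as self.

agdaPackages.mkDerivation {
  pname = "example-agda-lib";          # hypothetical library
  version = "0.1.0";
  src = ./.;

  # Agda dependencies are picked out of buildInputs via isAgdaDerivation
  # and handed to agda through the generated --library-file.
  buildInputs = [ agdaPackages.standard-library ];

  everythingFile = "./Everything.agda";

  meta = { description = "Example Agda library"; };
}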
pkgs/build-support/agda/lib.nix (Normal file, 15 lines)
@@ -0,0 +1,15 @@
{ lib }:
{
  /* Returns the Agda interface file to a given Agda file.
   *
   * Examples:
   * interfaceFile "Everything.agda" == "Everything.agdai"
   * interfaceFile "src/Everything.lagda.tex" == "src/Everything.agdai"
   */
  interfaceFile = agdaFile: lib.head (builtins.match ''(.*\.)l?agda(\.(md|org|rst|tex))?'' agdaFile) + "agdai";

  /* Takes an arbitrary derivation and says whether it is an agda library package
   * that is not marked as broken.
   */
  isUnbrokenAgdaPackage = pkg: pkg.isAgdaDerivation or false && !pkg.meta.broken;
}
pkgs/build-support/alternatives/blas/default.nix (Normal file, 143 lines)
@@ -0,0 +1,143 @@
{ lib, stdenv
|
||||
, lapack-reference, openblas
|
||||
, isILP64 ? false
|
||||
, blasProvider ? openblas }:
|
||||
|
||||
let
|
||||
blasFortranSymbols = [
|
||||
"caxpy" "ccopy" "cdotc" "cdotu" "cgbmv" "cgemm" "cgemv" "cgerc" "cgeru"
|
||||
"chbmv" "chemm" "chemv" "cher" "cher2" "cher2k" "cherk" "chpmv" "chpr"
|
||||
"chpr2" "crotg" "cscal" "csrot" "csscal" "cswap" "csymm" "csyr2k" "csyrk"
|
||||
"ctbmv" "ctbsv" "ctpmv" "ctpsv" "ctrmm" "ctrmv" "ctrsm" "ctrsv" "dasum"
|
||||
"daxpy" "dcabs1" "dcopy" "ddot" "dgbmv" "dgemm" "dgemv" "dger" "dnrm2"
|
||||
"drot" "drotg" "drotm" "drotmg" "dsbmv" "dscal" "dsdot" "dspmv" "dspr"
|
||||
"dspr2" "dswap" "dsymm" "dsymv" "dsyr" "dsyr2" "dsyr2k" "dsyrk" "dtbmv"
|
||||
"dtbsv" "dtpmv" "dtpsv" "dtrmm" "dtrmv" "dtrsm" "dtrsv" "dzasum" "dznrm2"
|
||||
"icamax" "idamax" "isamax" "izamax" "lsame" "sasum" "saxpy" "scabs1"
|
||||
"scasum" "scnrm2" "scopy" "sdot" "sdsdot" "sgbmv" "sgemm" "sgemv"
|
||||
"sger" "snrm2" "srot" "srotg" "srotm" "srotmg" "ssbmv" "sscal" "sspmv"
|
||||
"sspr" "sspr2" "sswap" "ssymm" "ssymv" "ssyr" "ssyr2" "ssyr2k" "ssyrk"
|
||||
"stbmv" "stbsv" "stpmv" "stpsv" "strmm" "strmv" "strsm" "strsv" "xerbla"
|
||||
"xerbla_array" "zaxpy" "zcopy" "zdotc" "zdotu" "zdrot" "zdscal" "zgbmv"
|
||||
"zgemm" "zgemv" "zgerc" "zgeru" "zhbmv" "zhemm" "zhemv" "zher" "zher2"
|
||||
"zher2k" "zherk" "zhpmv" "zhpr" "zhpr2" "zrotg" "zscal" "zswap" "zsymm"
|
||||
"zsyr2k" "zsyrk" "ztbmv" "ztbsv" "ztpmv" "ztpsv" "ztrmm" "ztrmv" "ztrsm"
|
||||
"ztrsv"
|
||||
];
|
||||
|
||||
version = "3";
|
||||
canonicalExtension = if stdenv.hostPlatform.isLinux
|
||||
then "${stdenv.hostPlatform.extensions.sharedLibrary}.${version}"
|
||||
else stdenv.hostPlatform.extensions.sharedLibrary;
|
||||
|
||||
|
||||
blasImplementation = lib.getName blasProvider;
|
||||
blasProvider' = if blasImplementation == "mkl"
|
||||
then blasProvider
|
||||
else blasProvider.override { blas64 = isILP64; };
|
||||
|
||||
in
|
||||
|
||||
assert isILP64 -> blasImplementation == "mkl" || blasProvider'.blas64;
|
||||
|
||||
stdenv.mkDerivation {
|
||||
pname = "blas";
|
||||
inherit version;
|
||||
|
||||
outputs = [ "out" "dev" ];
|
||||
|
||||
meta = (blasProvider'.meta or {}) // {
|
||||
description = "${lib.getName blasProvider} with just the BLAS C and FORTRAN ABI";
|
||||
};
|
||||
|
||||
passthru = {
|
||||
inherit isILP64;
|
||||
provider = blasProvider';
|
||||
implementation = blasImplementation;
|
||||
};
|
||||
|
||||
dontBuild = true;
|
||||
dontConfigure = true;
|
||||
unpackPhase = "src=$PWD";
|
||||
|
||||
dontPatchELF = true;
|
||||
|
||||
installPhase = (''
|
||||
mkdir -p $out/lib $dev/include $dev/lib/pkgconfig
|
||||
|
||||
libblas="${lib.getLib blasProvider'}/lib/libblas${canonicalExtension}"
|
||||
|
||||
if ! [ -e "$libblas" ]; then
|
||||
echo "$libblas does not exist, ${blasProvider'.name} does not provide libblas."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
$NM -an "$libblas" | cut -f3 -d' ' > symbols
|
||||
for symbol in ${toString blasFortranSymbols}; do
|
||||
grep -q "^''${symbol}_$" symbols || { echo "$symbol" was not found in "$libblas"; exit 1; }
|
||||
done
|
||||
|
||||
cp -L "$libblas" $out/lib/libblas${canonicalExtension}
|
||||
chmod +w $out/lib/libblas${canonicalExtension}
|
||||
|
||||
'' + (if stdenv.hostPlatform.parsed.kernel.execFormat.name == "elf" then ''
|
||||
patchelf --set-soname libblas${canonicalExtension} $out/lib/libblas${canonicalExtension}
|
||||
patchelf --set-rpath "$(patchelf --print-rpath $out/lib/libblas${canonicalExtension}):${lib.getLib blasProvider'}/lib" $out/lib/libblas${canonicalExtension}
|
||||
'' else if stdenv.hostPlatform.isDarwin then ''
|
||||
install_name_tool \
|
||||
-id $out/lib/libblas${canonicalExtension} \
|
||||
-add_rpath ${lib.getLib blasProvider'}/lib \
|
||||
$out/lib/libblas${canonicalExtension}
|
||||
'' else "") + ''
|
||||
|
||||
if [ "$out/lib/libblas${canonicalExtension}" != "$out/lib/libblas${stdenv.hostPlatform.extensions.sharedLibrary}" ]; then
|
||||
ln -s $out/lib/libblas${canonicalExtension} "$out/lib/libblas${stdenv.hostPlatform.extensions.sharedLibrary}"
|
||||
fi
|
||||
|
||||
cat <<EOF > $dev/lib/pkgconfig/blas.pc
|
||||
Name: blas
|
||||
Version: ${version}
|
||||
Description: BLAS FORTRAN implementation
|
||||
Libs: -L$out/lib -lblas
|
||||
Cflags: -I$dev/include
|
||||
EOF
|
||||
|
||||
libcblas="${lib.getLib blasProvider'}/lib/libcblas${canonicalExtension}"
|
||||
|
||||
if ! [ -e "$libcblas" ]; then
|
||||
echo "$libcblas does not exist, ${blasProvider'.name} does not provide libcblas."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cp -L "$libcblas" $out/lib/libcblas${canonicalExtension}
|
||||
chmod +w $out/lib/libcblas${canonicalExtension}
|
||||
|
||||
'' + (if stdenv.hostPlatform.parsed.kernel.execFormat.name == "elf" then ''
|
||||
patchelf --set-soname libcblas${canonicalExtension} $out/lib/libcblas${canonicalExtension}
|
||||
patchelf --set-rpath "$(patchelf --print-rpath $out/lib/libcblas${canonicalExtension}):${lib.getLib blasProvider'}/lib" $out/lib/libcblas${canonicalExtension}
|
||||
'' else if stdenv.hostPlatform.isDarwin then ''
|
||||
install_name_tool \
|
||||
-id $out/lib/libcblas${canonicalExtension} \
|
||||
-add_rpath ${lib.getLib blasProvider'}/lib \
|
||||
$out/lib/libcblas${canonicalExtension}
|
||||
'' else "") + ''
|
||||
if [ "$out/lib/libcblas${canonicalExtension}" != "$out/lib/libcblas${stdenv.hostPlatform.extensions.sharedLibrary}" ]; then
|
||||
ln -s $out/lib/libcblas${canonicalExtension} "$out/lib/libcblas${stdenv.hostPlatform.extensions.sharedLibrary}"
|
||||
fi
|
||||
|
||||
cp ${lib.getDev lapack-reference}/include/cblas{,_mangling}.h $dev/include
|
||||
|
||||
cat <<EOF > $dev/lib/pkgconfig/cblas.pc
|
||||
Name: cblas
|
||||
Version: ${version}
|
||||
Description: BLAS C implementation
|
||||
Cflags: -I$dev/include
|
||||
Libs: -L$out/lib -lcblas
|
||||
EOF
|
||||
'' + lib.optionalString (blasImplementation == "mkl") ''
|
||||
mkdir -p $out/nix-support
|
||||
echo 'export MKL_INTERFACE_LAYER=${lib.optionalString isILP64 "I"}LP64,GNU' > $out/nix-support/setup-hook
|
||||
ln -s $out/lib/libblas${canonicalExtension} $out/lib/libmkl_rt${stdenv.hostPlatform.extensions.sharedLibrary}
|
||||
ln -sf ${blasProvider'}/include/* $dev/include
|
||||
'');
|
||||
}
|
||||
pkgs/build-support/alternatives/lapack/default.nix (Normal file, 110 lines)
@@ -0,0 +1,110 @@
{ lib, stdenv
|
||||
, lapack-reference, openblas
|
||||
, isILP64 ? false
|
||||
, lapackProvider ? openblas }:
|
||||
|
||||
let
|
||||
|
||||
version = "3";
|
||||
canonicalExtension = if stdenv.hostPlatform.isLinux
|
||||
then "${stdenv.hostPlatform.extensions.sharedLibrary}.${version}"
|
||||
else stdenv.hostPlatform.extensions.sharedLibrary;
|
||||
|
||||
lapackImplementation = lib.getName lapackProvider;
|
||||
lapackProvider' = if lapackImplementation == "mkl"
|
||||
then lapackProvider
|
||||
else lapackProvider.override { blas64 = isILP64; };
|
||||
|
||||
in
|
||||
|
||||
assert isILP64 -> lapackImplementation == "mkl" || lapackProvider'.blas64;
|
||||
|
||||
stdenv.mkDerivation {
|
||||
pname = "lapack";
|
||||
inherit version;
|
||||
|
||||
outputs = [ "out" "dev" ];
|
||||
|
||||
meta = (lapackProvider'.meta or {}) // {
|
||||
description = "${lib.getName lapackProvider'} with just the LAPACK C and FORTRAN ABI";
|
||||
};
|
||||
|
||||
passthru = {
|
||||
inherit isILP64;
|
||||
provider = lapackProvider';
|
||||
implementation = lapackImplementation;
|
||||
};
|
||||
|
||||
dontBuild = true;
|
||||
dontConfigure = true;
|
||||
unpackPhase = "src=$PWD";
|
||||
|
||||
dontPatchELF = true;
|
||||
|
||||
installPhase = (''
|
||||
mkdir -p $out/lib $dev/include $dev/lib/pkgconfig
|
||||
|
||||
liblapack="${lib.getLib lapackProvider'}/lib/liblapack${canonicalExtension}"
|
||||
|
||||
if ! [ -e "$liblapack" ]; then
|
||||
echo "$liblapack does not exist, ${lapackProvider'.name} does not provide liblapack."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cp -L "$liblapack" $out/lib/liblapack${canonicalExtension}
|
||||
chmod +w $out/lib/liblapack${canonicalExtension}
|
||||
|
||||
'' + (if stdenv.hostPlatform.parsed.kernel.execFormat.name == "elf" then ''
|
||||
patchelf --set-soname liblapack${canonicalExtension} $out/lib/liblapack${canonicalExtension}
|
||||
patchelf --set-rpath "$(patchelf --print-rpath $out/lib/liblapack${canonicalExtension}):${lapackProvider'}/lib" $out/lib/liblapack${canonicalExtension}
|
||||
'' else "") + ''
|
||||
|
||||
if [ "$out/lib/liblapack${canonicalExtension}" != "$out/lib/liblapack${stdenv.hostPlatform.extensions.sharedLibrary}" ]; then
|
||||
ln -s $out/lib/liblapack${canonicalExtension} "$out/lib/liblapack${stdenv.hostPlatform.extensions.sharedLibrary}"
|
||||
fi
|
||||
|
||||
install -D ${lib.getDev lapack-reference}/include/lapack.h $dev/include/lapack.h
|
||||
|
||||
cat <<EOF > $dev/lib/pkgconfig/lapack.pc
|
||||
Name: lapack
|
||||
Version: ${version}
|
||||
Description: LAPACK FORTRAN implementation
|
||||
Cflags: -I$dev/include
|
||||
Libs: -L$out/lib -llapack
|
||||
EOF
|
||||
|
||||
liblapacke="${lib.getLib lapackProvider'}/lib/liblapacke${canonicalExtension}"
|
||||
|
||||
if ! [ -e "$liblapacke" ]; then
|
||||
echo "$liblapacke does not exist, ${lapackProvider'.name} does not provide liblapacke."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cp -L "$liblapacke" $out/lib/liblapacke${canonicalExtension}
|
||||
chmod +w $out/lib/liblapacke${canonicalExtension}
|
||||
|
||||
'' + (if stdenv.hostPlatform.parsed.kernel.execFormat.name == "elf" then ''
|
||||
patchelf --set-soname liblapacke${canonicalExtension} $out/lib/liblapacke${canonicalExtension}
|
||||
patchelf --set-rpath "$(patchelf --print-rpath $out/lib/liblapacke${canonicalExtension}):${lib.getLib lapackProvider'}/lib" $out/lib/liblapacke${canonicalExtension}
|
||||
'' else "") + ''
|
||||
|
||||
if [ -f "$out/lib/liblapacke.so.3" ]; then
|
||||
ln -s $out/lib/liblapacke.so.3 $out/lib/liblapacke.so
|
||||
fi
|
||||
|
||||
cp ${lib.getDev lapack-reference}/include/lapacke{,_mangling,_config,_utils}.h $dev/include
|
||||
|
||||
cat <<EOF > $dev/lib/pkgconfig/lapacke.pc
|
||||
Name: lapacke
|
||||
Version: ${version}
|
||||
Description: LAPACK C implementation
|
||||
Cflags: -I$dev/include
|
||||
Libs: -L$out/lib -llapacke
|
||||
EOF
|
||||
'' + lib.optionalString (lapackImplementation == "mkl") ''
|
||||
mkdir -p $out/nix-support
|
||||
echo 'export MKL_INTERFACE_LAYER=${lib.optionalString isILP64 "I"}LP64,GNU' > $out/nix-support/setup-hook
|
||||
ln -s $out/lib/liblapack${canonicalExtension} $out/lib/libmkl_rt${stdenv.hostPlatform.extensions.sharedLibrary}
|
||||
ln -sf ${lapackProvider'}/include/* $dev/include
|
||||
'');
|
||||
}
|
||||
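Since both of these "alternatives" derivations take their provider as an argument, swapping the system-wide BLAS/LAPACK implementation reduces to an overlay along the lines of the sketch below. Using mkl as the provider is only an example; any package exposing the expected libblas/liblapack sonames works, and the exact overlay wiring is an assumption rather than part of this commit.

final: prev: {
  blas   = prev.blas.override   { blasProvider   = final.mkl; };
  lapack = prev.lapack.override { lapackProvider = final.mkl; };
}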
pkgs/build-support/appimage/appimage-exec.sh (Executable file, 142 lines)
@@ -0,0 +1,142 @@
#!@shell@
|
||||
# shellcheck shell=bash
|
||||
|
||||
if [ -n "$DEBUG" ] ; then
|
||||
set -x
|
||||
fi
|
||||
|
||||
PATH="@path@:$PATH"
|
||||
apprun_opt=true
|
||||
|
||||
# src : AppImage
|
||||
# dest : let's unpack() create the directory
|
||||
unpack() {
|
||||
local src="$1"
|
||||
local out="$2"
|
||||
|
||||
# https://github.com/AppImage/libappimage/blob/ca8d4b53bed5cbc0f3d0398e30806e0d3adeaaab/src/libappimage/utils/MagicBytesChecker.cpp#L45-L63
|
||||
local appimageSignature;
|
||||
appimageSignature="$(LC_ALL=C readelf -h "$src" | awk 'NR==2{print $10$11;}')"
|
||||
local appimageType;
|
||||
appimageType="$(LC_ALL=C readelf -h "$src" | awk 'NR==2{print $12;}')"
|
||||
|
||||
# check AppImage signature
|
||||
if [ "$appimageSignature" != "4149" ]; then
|
||||
echo "Not an AppImage file"
|
||||
exit
|
||||
fi
|
||||
|
||||
case "$appimageType" in
|
||||
"01")
|
||||
echo "Uncompress $(basename "$src") of type $appimageType"
|
||||
mkdir "$out"
|
||||
pv "$src" | bsdtar -x -C "$out" -f -
|
||||
;;
|
||||
|
||||
"02")
|
||||
# This method avoids issues with non-executable AppImages,
|
||||
# non-native packer, packer patching and squashfs-root destination prefix.
|
||||
|
||||
# multiarch offset one-liner using same method as AppImage
|
||||
# see https://gist.github.com/probonopd/a490ba3401b5ef7b881d5e603fa20c93
|
||||
offset=$(LC_ALL=C readelf -h "$src" | awk 'NR==13{e_shoff=$5} NR==18{e_shentsize=$5} NR==19{e_shnum=$5} END{print e_shoff+e_shentsize*e_shnum}')
|
||||
echo "Uncompress $(basename "$src") of type $appimageType @ offset $offset"
|
||||
unsquashfs -q -d "$out" -o "$offset" "$src"
|
||||
chmod go-w "$out"
|
||||
;;
|
||||
|
||||
# "03")
|
||||
# get ready, https://github.com/TheAssassin/type3-runtime
|
||||
|
||||
*)
|
||||
echo Unsupported AppImage Type: "$appimageType"
|
||||
exit
|
||||
;;
|
||||
esac
|
||||
echo "$(basename "$src") is now installed in $out"
|
||||
}
|
||||
|
||||
apprun() {
|
||||
|
||||
SHA256=$(sha256sum "$APPIMAGE" | awk '{print $1}')
|
||||
export APPDIR="${XDG_CACHE_HOME:-$HOME/.cache}/appimage-run/$SHA256"
|
||||
|
||||
#compatibility
|
||||
if [ -x "$APPDIR/squashfs-root" ]; then APPDIR="$APPDIR/squashfs-root"; fi
|
||||
|
||||
if [ ! -x "$APPDIR" ]; then
|
||||
mkdir -p "$(dirname "$APPDIR")"
|
||||
unpack "$APPIMAGE" "$APPDIR"
|
||||
else echo "$(basename "$APPIMAGE")" installed in "$APPDIR"
|
||||
fi
|
||||
|
||||
export PATH="$PATH:$PWD/usr/bin"
|
||||
}
|
||||
|
||||
wrap() {
|
||||
|
||||
# quite same in appimageTools
|
||||
export APPIMAGE_SILENT_INSTALL=1
|
||||
|
||||
if [ -n "$APPIMAGE_DEBUG_EXEC" ]; then
|
||||
cd "$APPDIR" || true
|
||||
exec "$APPIMAGE_DEBUG_EXEC"
|
||||
fi
|
||||
|
||||
exec "$APPDIR/AppRun" "$@"
|
||||
}
|
||||
|
||||
usage() {
|
||||
cat <<EOF
|
||||
Usage: appimage-run [appimage-run options] <AppImage> [AppImage options]
|
||||
|
||||
-h show this message
|
||||
-d debug mode
|
||||
-x <directory> : extract appimage in the directory then exit.
|
||||
-w <directory> : run uncompressed appimage directory (used in appimageTools)
|
||||
|
||||
[AppImage options]: Options are passed on to the appimage.
|
||||
If you want to execute a custom command in the appimage's environment, set the APPIMAGE_DEBUG_EXEC environment variable.
|
||||
|
||||
EOF
|
||||
exit 1
|
||||
}
|
||||
|
||||
while getopts "x:w:dh" option; do
|
||||
case "${option}" in
|
||||
d) set -x
|
||||
;;
|
||||
x) # eXtract
|
||||
unpack_opt=true
|
||||
APPDIR=${OPTARG}
|
||||
;;
|
||||
w) # WrapAppImage
|
||||
export APPDIR=${OPTARG}
|
||||
wrap_opt=true
|
||||
;;
|
||||
h) usage
|
||||
;;
|
||||
*) usage
|
||||
;;
|
||||
esac
|
||||
done
|
||||
shift "$((OPTIND-1))"
|
||||
|
||||
if [ -n "$wrap_opt" ] && [ -d "$APPDIR" ]; then
|
||||
wrap "$@"
|
||||
exit
|
||||
else
|
||||
APPIMAGE="$(realpath "$1")" || usage
|
||||
shift
|
||||
fi
|
||||
|
||||
if [ -n "$unpack_opt" ] && [ -f "$APPIMAGE" ]; then
|
||||
unpack "$APPIMAGE" "$APPDIR"
|
||||
exit
|
||||
fi
|
||||
|
||||
if [ -n "$apprun_opt" ] && [ -f "$APPIMAGE" ]; then
|
||||
apprun
|
||||
wrap "$@"
|
||||
exit
|
||||
fi
|
||||
pkgs/build-support/appimage/default.nix (Normal file, 192 lines)
@@ -0,0 +1,192 @@
{ lib
|
||||
, bash
|
||||
, binutils-unwrapped
|
||||
, coreutils
|
||||
, gawk
|
||||
, libarchive
|
||||
, pv
|
||||
, squashfsTools
|
||||
, buildFHSUserEnv
|
||||
, pkgs
|
||||
}:
|
||||
|
||||
rec {
|
||||
appimage-exec = pkgs.substituteAll {
|
||||
src = ./appimage-exec.sh;
|
||||
isExecutable = true;
|
||||
dir = "bin";
|
||||
path = lib.makeBinPath [
|
||||
bash
|
||||
binutils-unwrapped
|
||||
coreutils
|
||||
gawk
|
||||
libarchive
|
||||
pv
|
||||
squashfsTools
|
||||
];
|
||||
};
|
||||
|
||||
extract = args@{ name ? "${args.pname}-${args.version}", src, ... }: pkgs.runCommand "${name}-extracted" {
|
||||
buildInputs = [ appimage-exec ];
|
||||
} ''
|
||||
appimage-exec.sh -x $out ${src}
|
||||
'';
|
||||
|
||||
# for compatibility, deprecated
|
||||
extractType1 = extract;
|
||||
extractType2 = extract;
|
||||
wrapType1 = wrapType2;
|
||||
|
||||
wrapAppImage = args@{ name ? "${args.pname}-${args.version}", src, extraPkgs, ... }: buildFHSUserEnv
|
||||
(defaultFhsEnvArgs // {
|
||||
inherit name;
|
||||
|
||||
targetPkgs = pkgs: [ appimage-exec ]
|
||||
++ defaultFhsEnvArgs.targetPkgs pkgs ++ extraPkgs pkgs;
|
||||
|
||||
runScript = "appimage-exec.sh -w ${src} --";
|
||||
} // (removeAttrs args ([ "pname" "version" ] ++ (builtins.attrNames (builtins.functionArgs wrapAppImage)))));
|
||||
|
||||
wrapType2 = args@{ name ? "${args.pname}-${args.version}", src, extraPkgs ? pkgs: [ ], ... }: wrapAppImage
|
||||
(args // {
|
||||
inherit name extraPkgs;
|
||||
src = extract { inherit name src; };
|
||||
});
|
||||
|
||||
defaultFhsEnvArgs = {
|
||||
name = "appimage-env";
|
||||
|
||||
# Most of the packages were taken from the Steam chroot
|
||||
targetPkgs = pkgs: with pkgs; [
|
||||
gtk3
|
||||
bashInteractive
|
||||
gnome.zenity
|
||||
python2
|
||||
xorg.xrandr
|
||||
which
|
||||
perl
|
||||
xdg-utils
|
||||
iana-etc
|
||||
krb5
|
||||
gsettings-desktop-schemas
|
||||
hicolor-icon-theme # don't show a GTK warning about hicolor not being installed
|
||||
];
|
||||
|
||||
# list of libraries expected in an appimage environment:
|
||||
# https://github.com/AppImage/pkg2appimage/blob/master/excludelist
|
||||
multiPkgs = pkgs: with pkgs; [
|
||||
desktop-file-utils
|
||||
xorg.libXcomposite
|
||||
xorg.libXtst
|
||||
xorg.libXrandr
|
||||
xorg.libXext
|
||||
xorg.libX11
|
||||
xorg.libXfixes
|
||||
libGL
|
||||
|
||||
gst_all_1.gstreamer
|
||||
gst_all_1.gst-plugins-ugly
|
||||
gst_all_1.gst-plugins-base
|
||||
libdrm
|
||||
xorg.xkeyboardconfig
|
||||
xorg.libpciaccess
|
||||
|
||||
glib
|
||||
gtk2
|
||||
bzip2
|
||||
zlib
|
||||
gdk-pixbuf
|
||||
|
||||
xorg.libXinerama
|
||||
xorg.libXdamage
|
||||
xorg.libXcursor
|
||||
xorg.libXrender
|
||||
xorg.libXScrnSaver
|
||||
xorg.libXxf86vm
|
||||
xorg.libXi
|
||||
xorg.libSM
|
||||
xorg.libICE
|
||||
freetype
|
||||
curlWithGnuTls
|
||||
nspr
|
||||
nss
|
||||
fontconfig
|
||||
cairo
|
||||
pango
|
||||
expat
|
||||
dbus
|
||||
cups
|
||||
libcap
|
||||
SDL2
|
||||
libusb1
|
||||
udev
|
||||
dbus-glib
|
||||
atk
|
||||
at-spi2-atk
|
||||
libudev0-shim
|
||||
|
||||
xorg.libXt
|
||||
xorg.libXmu
|
||||
xorg.libxcb
|
||||
xorg.xcbutil
|
||||
xorg.xcbutilwm
|
||||
xorg.xcbutilimage
|
||||
xorg.xcbutilkeysyms
|
||||
xorg.xcbutilrenderutil
|
||||
libGLU
|
||||
libuuid
|
||||
libogg
|
||||
libvorbis
|
||||
SDL
|
||||
SDL2_image
|
||||
glew110
|
||||
openssl
|
||||
libidn
|
||||
tbb
|
||||
wayland
|
||||
mesa
|
||||
libxkbcommon
|
||||
|
||||
flac
|
||||
freeglut
|
||||
libjpeg
|
||||
libpng12
|
||||
libsamplerate
|
||||
libmikmod
|
||||
libtheora
|
||||
libtiff
|
||||
pixman
|
||||
speex
|
||||
SDL_image
|
||||
SDL_ttf
|
||||
SDL_mixer
|
||||
SDL2_ttf
|
||||
SDL2_mixer
|
||||
libappindicator-gtk2
|
||||
libcaca
|
||||
libcanberra
|
||||
libgcrypt
|
||||
libvpx
|
||||
librsvg
|
||||
xorg.libXft
|
||||
libvdpau
|
||||
alsa-lib
|
||||
|
||||
harfbuzz
|
||||
e2fsprogs
|
||||
libgpg-error
|
||||
keyutils.lib
|
||||
libjack2
|
||||
fribidi
|
||||
p11-kit
|
||||
|
||||
gmp
|
||||
|
||||
# libraries not on the upstream include list, but nevertheless expected
|
||||
# by at least one appimage
|
||||
libtool.lib # for Synfigstudio
|
||||
xorg.libxshmfence # for apple-music-electron
|
||||
at-spi2-core
|
||||
];
|
||||
};
|
||||
}
|
||||
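A sketch of the intended entry point, wrapType2, as it would be called from the attribute this file is conventionally exposed as (appimageTools). The URL, hash, fetchurl/lib bindings, and extra packages are placeholders and assumptions, not taken from this commit.

appimageTools.wrapType2 {
  pname = "example-app";                 # hypothetical
  version = "1.2.3";
  src = fetchurl {
    url = "https://example.org/ExampleApp-1.2.3.AppImage";  # placeholder
    sha256 = lib.fakeSha256;                                # placeholder, replace with the real hash
  };
  # extra libraries layered into the FHS env on top of defaultFhsEnvArgs
  extraPkgs = pkgs: [ pkgs.libsecret ];
}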
@@ -0,0 +1,81 @@
# Unconditionally adding in platform version flags will result in warnings that
|
||||
# will be treated as errors by some packages. Add any missing flags here.
|
||||
|
||||
# There are two things to be configured: the "platform version" (oldest
|
||||
# supported version of macos, ios, etc), and the "sdk version".
|
||||
#
|
||||
# The modern way of configuring these is to use:
|
||||
# -platform_version $platform $platform_version $sdk_version"
|
||||
#
|
||||
# The old way is still supported, and uses flags like:
|
||||
# -${platform}_version_min $platform_version
|
||||
# -sdk_version $sdk_version
|
||||
#
|
||||
# If both styles are specified ld will combine them. If multiple versions are
|
||||
# specified for the same platform, ld will emit an error.
|
||||
#
|
||||
# The following adds flags for whichever properties have not already been
|
||||
# provided.
|
||||
|
||||
havePlatformVersionFlag=
|
||||
haveDarwinSDKVersion=
|
||||
haveDarwinPlatformVersion=
|
||||
|
||||
# Roles will be set by add-flags.sh, but add-flags.sh can be skipped when the
|
||||
# cc-wrapper has added the linker flags. Both the cc-wrapper and the binutils
|
||||
# wrapper mangle the same variable (MACOSX_DEPLOYMENT_TARGET), so if roles are
|
||||
# empty due to being run through the cc-wrapper then the mangle here is a no-op
|
||||
# and we still do the right thing.
|
||||
#
|
||||
# To be robust, make sure we always have the correct set of roles.
|
||||
accumulateRoles
|
||||
|
||||
mangleVarSingle @darwinMinVersionVariable@ ${role_suffixes[@]+"${role_suffixes[@]}"}
|
||||
|
||||
n=0
|
||||
nParams=${#params[@]}
|
||||
while (( n < nParams )); do
|
||||
p=${params[n]}
|
||||
case "$p" in
|
||||
# the current platform
|
||||
-@darwinPlatform@_version_min)
|
||||
haveDarwinPlatformVersion=1
|
||||
;;
|
||||
|
||||
# legacy aliases
|
||||
-macosx_version_min|-iphoneos_version_min|-iosmac_version_min|-uikitformac_version_min)
|
||||
haveDarwinPlatformVersion=1
|
||||
;;
|
||||
|
||||
-sdk_version)
|
||||
haveDarwinSDKVersion=1
|
||||
;;
|
||||
|
||||
-platform_version)
|
||||
havePlatformVersionFlag=1
|
||||
|
||||
# If clang can't determine the sdk version it will pass 0.0.0. This
|
||||
# has runtime effects so we override this to use the known sdk
|
||||
# version.
|
||||
if [ "${params[n+3]-}" = 0.0.0 ]; then
|
||||
params[n+3]=@darwinSdkVersion@
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
n=$((n + 1))
|
||||
done
|
||||
|
||||
# If the caller has set -platform_version, trust they're doing the right thing.
|
||||
# This will be the typical case for clang in nixpkgs.
|
||||
if [ ! "$havePlatformVersionFlag" ]; then
|
||||
if [ ! "$haveDarwinSDKVersion" ] && [ ! "$haveDarwinPlatformVersion" ]; then
|
||||
# Nothing provided. Use the modern "-platform_version" to set both.
|
||||
extraBefore+=(-platform_version @darwinPlatform@ "${@darwinMinVersionVariable@_@suffixSalt@:-@darwinMinVersion@}" @darwinSdkVersion@)
|
||||
elif [ ! "$haveDarwinSDKVersion" ]; then
|
||||
# Add missing sdk version
|
||||
extraBefore+=(-sdk_version @darwinSdkVersion@)
|
||||
elif [ ! "$haveDarwinPlatformVersion" ]; then
|
||||
# Add missing platform version
|
||||
extraBefore+=(-@darwinPlatform@_version_min "${@darwinMinVersionVariable@_@suffixSalt@:-@darwinMinVersion@}")
|
||||
fi
|
||||
fi
|
||||
pkgs/build-support/bintools-wrapper/add-flags.sh (Normal file, 37 lines)
@@ -0,0 +1,37 @@
# See cc-wrapper for comments.
var_templates_list=(
    NIX_IGNORE_LD_THROUGH_GCC
    NIX_LDFLAGS
    NIX_LDFLAGS_BEFORE
    NIX_DYNAMIC_LINKER
    NIX_LDFLAGS_AFTER
    NIX_LDFLAGS_HARDEN
    NIX_HARDENING_ENABLE
)
var_templates_bool=(
    NIX_SET_BUILD_ID
    NIX_DONT_SET_RPATH
)

accumulateRoles

for var in "${var_templates_list[@]}"; do
    mangleVarList "$var" ${role_suffixes[@]+"${role_suffixes[@]}"}
done
for var in "${var_templates_bool[@]}"; do
    mangleVarBool "$var" ${role_suffixes[@]+"${role_suffixes[@]}"}
done

if [ -e @out@/nix-support/libc-ldflags ]; then
    NIX_LDFLAGS_@suffixSalt@+=" $(< @out@/nix-support/libc-ldflags)"
fi

if [ -z "$NIX_DYNAMIC_LINKER_@suffixSalt@" ] && [ -e @out@/nix-support/ld-set-dynamic-linker ]; then
    NIX_DYNAMIC_LINKER_@suffixSalt@="$(< @out@/nix-support/dynamic-linker)"
fi

if [ -e @out@/nix-support/libc-ldflags-before ]; then
    NIX_LDFLAGS_BEFORE_@suffixSalt@="$(< @out@/nix-support/libc-ldflags-before) $NIX_LDFLAGS_BEFORE_@suffixSalt@"
fi

export NIX_BINTOOLS_WRAPPER_FLAGS_SET_@suffixSalt@=1
pkgs/build-support/bintools-wrapper/add-hardening.sh (Normal file, 62 lines)
@@ -0,0 +1,62 @@
declare -a hardeningLDFlags=()
|
||||
|
||||
declare -A hardeningEnableMap=()
|
||||
|
||||
# Intentionally word-split in case 'NIX_HARDENING_ENABLE' is defined in Nix. The
|
||||
# array expansion also prevents undefined variables from causing trouble with
|
||||
# `set -u`.
|
||||
for flag in ${NIX_HARDENING_ENABLE_@suffixSalt@-}; do
|
||||
hardeningEnableMap["$flag"]=1
|
||||
done
|
||||
|
||||
# Remove unsupported flags.
|
||||
for flag in @hardening_unsupported_flags@; do
|
||||
unset -v "hardeningEnableMap[$flag]"
|
||||
done
|
||||
|
||||
if (( "${NIX_DEBUG:-0}" >= 1 )); then
|
||||
declare -a allHardeningFlags=(pie relro bindnow)
|
||||
declare -A hardeningDisableMap=()
|
||||
|
||||
# Determine which flags were effectively disabled so we can report below.
|
||||
for flag in "${allHardeningFlags[@]}"; do
|
||||
if [[ -z "${hardeningEnableMap[$flag]-}" ]]; then
|
||||
hardeningDisableMap[$flag]=1
|
||||
fi
|
||||
done
|
||||
|
||||
printf 'HARDENING: disabled flags:' >&2
|
||||
(( "${#hardeningDisableMap[@]}" )) && printf ' %q' "${!hardeningDisableMap[@]}" >&2
|
||||
echo >&2
|
||||
|
||||
if (( "${#hardeningEnableMap[@]}" )); then
|
||||
echo 'HARDENING: Is active (not completely disabled with "all" flag)' >&2;
|
||||
fi
|
||||
fi
|
||||
|
||||
for flag in "${!hardeningEnableMap[@]}"; do
|
||||
case $flag in
|
||||
pie)
|
||||
if [[ ! (" $* " =~ " -shared " \
|
||||
|| " $* " =~ " -static " \
|
||||
|| " $* " =~ " -r " \
|
||||
|| " $* " =~ " -Ur " \
|
||||
|| " $* " =~ " -i ") ]]; then
|
||||
if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling LDFlags -pie >&2; fi
|
||||
hardeningLDFlags+=('-pie')
|
||||
fi
|
||||
;;
|
||||
relro)
|
||||
if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling relro >&2; fi
|
||||
hardeningLDFlags+=('-z' 'relro')
|
||||
;;
|
||||
bindnow)
|
||||
if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling bindnow >&2; fi
|
||||
hardeningLDFlags+=('-z' 'now')
|
||||
;;
|
||||
*)
|
||||
# Ignore unsupported. Checked in Nix that at least *some*
|
||||
# tool supports each flag.
|
||||
;;
|
||||
esac
|
||||
done
|
||||
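The flags consumed above arrive via NIX_HARDENING_ENABLE, which stdenv derives from a package's hardeningEnable/hardeningDisable attributes. A minimal sketch of a package opting out of the link-time flags handled by this script follows; the package name is hypothetical.

stdenv.mkDerivation {
  pname = "example-legacy-tool";   # hypothetical
  version = "1.0";
  src = ./.;

  # Removed from NIX_HARDENING_ENABLE, so the wrapper never adds -pie / -z now.
  hardeningDisable = [ "pie" "bindnow" ];
}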
pkgs/build-support/bintools-wrapper/darwin-install_name_tool-wrapper.sh (Executable file, 49 lines)
@@ -0,0 +1,49 @@
#! @shell@
|
||||
# shellcheck shell=bash
|
||||
|
||||
set -eu -o pipefail +o posix
|
||||
shopt -s nullglob
|
||||
|
||||
if (( "${NIX_DEBUG:-0}" >= 7 )); then
|
||||
set -x
|
||||
fi
|
||||
|
||||
source @signingUtils@
|
||||
|
||||
extraAfter=()
|
||||
extraBefore=()
|
||||
params=("$@")
|
||||
|
||||
input=
|
||||
|
||||
pprev=
|
||||
prev=
|
||||
for p in \
|
||||
${extraBefore+"${extraBefore[@]}"} \
|
||||
${params+"${params[@]}"} \
|
||||
${extraAfter+"${extraAfter[@]}"}
|
||||
do
|
||||
if [ "$pprev" != "-change" ] && [[ "$prev" != -* ]] && [[ "$p" != -* ]]; then
|
||||
input="$p"
|
||||
fi
|
||||
pprev="$prev"
|
||||
prev="$p"
|
||||
done
|
||||
|
||||
# Optionally print debug info.
|
||||
if (( "${NIX_DEBUG:-0}" >= 1 )); then
|
||||
# Old bash workaround, see above.
|
||||
echo "extra flags before to @prog@:" >&2
|
||||
printf " %q\n" ${extraBefore+"${extraBefore[@]}"} >&2
|
||||
echo "original flags to @prog@:" >&2
|
||||
printf " %q\n" ${params+"${params[@]}"} >&2
|
||||
echo "extra flags after to @prog@:" >&2
|
||||
printf " %q\n" ${extraAfter+"${extraAfter[@]}"} >&2
|
||||
fi
|
||||
|
||||
@prog@ \
|
||||
${extraBefore+"${extraBefore[@]}"} \
|
||||
${params+"${params[@]}"} \
|
||||
${extraAfter+"${extraAfter[@]}"}
|
||||
|
||||
sign "$input"
|
||||
pkgs/build-support/bintools-wrapper/darwin-strip-wrapper.sh (Executable file, 78 lines)
@@ -0,0 +1,78 @@
#! @shell@
|
||||
# shellcheck shell=bash
|
||||
|
||||
set -eu -o pipefail +o posix
|
||||
shopt -s nullglob
|
||||
|
||||
if (( "${NIX_DEBUG:-0}" >= 7 )); then
|
||||
set -x
|
||||
fi
|
||||
|
||||
source @signingUtils@
|
||||
|
||||
extraAfter=()
|
||||
extraBefore=()
|
||||
params=("$@")
|
||||
|
||||
output=
|
||||
inputs=()
|
||||
|
||||
restAreFiles=
|
||||
prev=
|
||||
for p in \
|
||||
${extraBefore+"${extraBefore[@]}"} \
|
||||
${params+"${params[@]}"} \
|
||||
${extraAfter+"${extraAfter[@]}"}
|
||||
do
|
||||
if [ "$restAreFiles" ]; then
|
||||
inputs+=("$p")
|
||||
else
|
||||
case "$prev" in
|
||||
-s|-R|-d|-arch)
|
||||
# Unrelated arguments with values
|
||||
;;
|
||||
-o)
|
||||
# Explicit output
|
||||
output="$p"
|
||||
;;
|
||||
*)
|
||||
# Any other argument either takes no value, or is a file.
|
||||
if [[ "$p" != -* ]]; then
|
||||
inputs+=("$p")
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ "$p" == - ]; then
|
||||
restAreFiles=1
|
||||
fi
|
||||
fi
|
||||
|
||||
prev="$p"
|
||||
done
|
||||
|
||||
# Optionally print debug info.
|
||||
if (( "${NIX_DEBUG:-0}" >= 1 )); then
|
||||
# Old bash workaround, see above.
|
||||
echo "extra flags before to @prog@:" >&2
|
||||
printf " %q\n" ${extraBefore+"${extraBefore[@]}"} >&2
|
||||
echo "original flags to @prog@:" >&2
|
||||
printf " %q\n" ${params+"${params[@]}"} >&2
|
||||
echo "extra flags after to @prog@:" >&2
|
||||
printf " %q\n" ${extraAfter+"${extraAfter[@]}"} >&2
|
||||
fi
|
||||
|
||||
@prog@ \
|
||||
${extraBefore+"${extraBefore[@]}"} \
|
||||
${params+"${params[@]}"} \
|
||||
${extraAfter+"${extraAfter[@]}"}
|
||||
|
||||
if [ "$output" ]; then
|
||||
# Single explicit output
|
||||
signIfRequired "$output"
|
||||
else
|
||||
# Multiple inputs, rewritten in place
|
||||
for input in "${inputs[@]}"; do
|
||||
signIfRequired "$input"
|
||||
done
|
||||
fi
|
||||
pkgs/build-support/bintools-wrapper/default.nix (Normal file, 380 lines)
@@ -0,0 +1,380 @@
# The Nixpkgs CC is not directly usable, since it doesn't know where
|
||||
# the C library and standard header files are. Therefore the compiler
|
||||
# produced by that package cannot be installed directly in a user
|
||||
# environment and used from the command line. So we use a wrapper
|
||||
# script that sets up the right environment variables so that the
|
||||
# compiler and the linker just "work".
|
||||
|
||||
{ name ? ""
|
||||
, lib
|
||||
, stdenvNoCC
|
||||
, bintools ? null, libc ? null, coreutils ? null, shell ? stdenvNoCC.shell, gnugrep ? null
|
||||
, netbsd ? null, netbsdCross ? null
|
||||
, sharedLibraryLoader ?
|
||||
if libc == null then
|
||||
null
|
||||
else if stdenvNoCC.targetPlatform.isNetBSD then
|
||||
if !(targetPackages ? netbsdCross) then
|
||||
netbsd.ld_elf_so
|
||||
else if libc != targetPackages.netbsdCross.headers then
|
||||
targetPackages.netbsdCross.ld_elf_so
|
||||
else
|
||||
null
|
||||
else
|
||||
lib.getLib libc
|
||||
, nativeTools, noLibc ? false, nativeLibc, nativePrefix ? ""
|
||||
, propagateDoc ? bintools != null && bintools ? man
|
||||
, extraPackages ? [], extraBuildCommands ? ""
|
||||
, buildPackages ? {}
|
||||
, targetPackages ? {}
|
||||
, useMacosReexportHack ? false
|
||||
|
||||
# Darwin code signing support utilities
|
||||
, postLinkSignHook ? null, signingUtils ? null
|
||||
}:
|
||||
|
||||
with lib;
|
||||
|
||||
assert nativeTools -> !propagateDoc && nativePrefix != "";
|
||||
assert !nativeTools ->
|
||||
bintools != null && coreutils != null && gnugrep != null;
|
||||
assert !(nativeLibc && noLibc);
|
||||
assert (noLibc || nativeLibc) == (libc == null);
|
||||
|
||||
let
|
||||
stdenv = stdenvNoCC;
|
||||
inherit (stdenv) hostPlatform targetPlatform;
|
||||
|
||||
# Prefix for binaries. Customarily ends with a dash separator.
|
||||
#
|
||||
# TODO(@Ericson2314) Make unconditional, or optional but always true by
|
||||
# default.
|
||||
targetPrefix = lib.optionalString (targetPlatform != hostPlatform)
|
||||
(targetPlatform.config + "-");
|
||||
|
||||
bintoolsVersion = lib.getVersion bintools;
|
||||
bintoolsName = lib.removePrefix targetPrefix (lib.getName bintools);
|
||||
|
||||
libc_bin = if libc == null then null else getBin libc;
|
||||
libc_dev = if libc == null then null else getDev libc;
|
||||
libc_lib = if libc == null then null else getLib libc;
|
||||
bintools_bin = if nativeTools then "" else getBin bintools;
|
||||
# The wrapper scripts use 'cat' and 'grep', so we may need coreutils.
|
||||
coreutils_bin = if nativeTools then "" else getBin coreutils;
|
||||
|
||||
# See description in cc-wrapper.
|
||||
suffixSalt = replaceStrings ["-" "."] ["_" "_"] targetPlatform.config;
|
||||
|
||||
# The dynamic linker has different names on different platforms. This is a
|
||||
# shell glob that ought to match it.
|
||||
dynamicLinker =
|
||||
/**/ if sharedLibraryLoader == null then null
|
||||
else if targetPlatform.libc == "musl" then "${sharedLibraryLoader}/lib/ld-musl-*"
|
||||
else if (targetPlatform.libc == "bionic" && targetPlatform.is32bit) then "/system/bin/linker"
|
||||
else if (targetPlatform.libc == "bionic" && targetPlatform.is64bit) then "/system/bin/linker64"
|
||||
else if targetPlatform.libc == "nblibc" then "${sharedLibraryLoader}/libexec/ld.elf_so"
|
||||
else if targetPlatform.system == "i686-linux" then "${sharedLibraryLoader}/lib/ld-linux.so.2"
|
||||
else if targetPlatform.system == "x86_64-linux" then "${sharedLibraryLoader}/lib/ld-linux-x86-64.so.2"
|
||||
else if targetPlatform.system == "powerpc64le-linux" then "${sharedLibraryLoader}/lib/ld64.so.2"
|
||||
# ARM with a wildcard, which can be "" or "-armhf".
|
||||
else if (with targetPlatform; isAarch32 && isLinux) then "${sharedLibraryLoader}/lib/ld-linux*.so.3"
|
||||
else if targetPlatform.system == "aarch64-linux" then "${sharedLibraryLoader}/lib/ld-linux-aarch64.so.1"
|
||||
else if targetPlatform.system == "powerpc-linux" then "${sharedLibraryLoader}/lib/ld.so.1"
|
||||
else if targetPlatform.isMips then "${sharedLibraryLoader}/lib/ld.so.1"
|
||||
# `ld-linux-riscv{32,64}-<abi>.so.1`
|
||||
else if targetPlatform.isRiscV then "${sharedLibraryLoader}/lib/ld-linux-riscv*.so.1"
|
||||
else if targetPlatform.isDarwin then "/usr/lib/dyld"
|
||||
else if targetPlatform.isFreeBSD then "/libexec/ld-elf.so.1"
|
||||
else if lib.hasSuffix "pc-gnu" targetPlatform.config then "ld.so.1"
|
||||
else null;
|
||||
|
||||
expand-response-params =
|
||||
if buildPackages ? stdenv && buildPackages.stdenv.hasCC && buildPackages.stdenv.cc != "/dev/null"
|
||||
then import ../expand-response-params { inherit (buildPackages) stdenv; }
|
||||
else "";
|
||||
|
||||
in
|
||||
|
||||
stdenv.mkDerivation {
|
||||
pname = targetPrefix
|
||||
+ (if name != "" then name else "${bintoolsName}-wrapper");
|
||||
version = if bintools == null then null else bintoolsVersion;
|
||||
|
||||
preferLocalBuild = true;
|
||||
|
||||
inherit bintools_bin libc_bin libc_dev libc_lib coreutils_bin;
|
||||
shell = getBin shell + shell.shellPath or "";
|
||||
gnugrep_bin = if nativeTools then "" else gnugrep;
|
||||
|
||||
inherit targetPrefix suffixSalt;
|
||||
|
||||
outputs = [ "out" ] ++ optionals propagateDoc ([ "man" ] ++ optional (bintools ? info) "info");
|
||||
|
||||
passthru = {
|
||||
inherit bintools libc nativeTools nativeLibc nativePrefix;
|
||||
|
||||
emacsBufferSetup = pkgs: ''
|
||||
; We should handle propagation here too
|
||||
(mapc
|
||||
(lambda (arg)
|
||||
(when (file-directory-p (concat arg "/lib"))
|
||||
(setenv "NIX_LDFLAGS_${suffixSalt}" (concat (getenv "NIX_LDFLAGS_${suffixSalt}") " -L" arg "/lib")))
|
||||
(when (file-directory-p (concat arg "/lib64"))
|
||||
(setenv "NIX_LDFLAGS_${suffixSalt}" (concat (getenv "NIX_LDFLAGS_${suffixSalt}") " -L" arg "/lib64"))))
|
||||
'(${concatStringsSep " " (map (pkg: "\"${pkg}\"") pkgs)}))
|
||||
'';
|
||||
};
|
||||
|
||||
dontBuild = true;
|
||||
dontConfigure = true;
|
||||
|
||||
enableParallelBuilding = true;
|
||||
|
||||
unpackPhase = ''
|
||||
src=$PWD
|
||||
'';
|
||||
|
||||
installPhase =
|
||||
''
|
||||
mkdir -p $out/bin $out/nix-support
|
||||
|
||||
wrap() {
|
||||
local dst="$1"
|
||||
local wrapper="$2"
|
||||
export prog="$3"
|
||||
substituteAll "$wrapper" "$out/bin/$dst"
|
||||
chmod +x "$out/bin/$dst"
|
||||
}
|
||||
''
|
||||
|
||||
+ (if nativeTools then ''
|
||||
echo ${nativePrefix} > $out/nix-support/orig-bintools
|
||||
|
||||
ldPath="${nativePrefix}/bin"
|
||||
'' else ''
|
||||
echo $bintools_bin > $out/nix-support/orig-bintools
|
||||
|
||||
ldPath="${bintools_bin}/bin"
|
||||
''
|
||||
|
||||
# Solaris needs an additional ld wrapper.
|
||||
+ optionalString (targetPlatform.isSunOS && nativePrefix != "") ''
|
||||
ldPath="${nativePrefix}/bin"
|
||||
exec="$ldPath/${targetPrefix}ld"
|
||||
wrap ld-solaris ${./ld-solaris-wrapper.sh}
|
||||
'')
|
||||
|
||||
# Create a symlink to as (the assembler).
|
||||
+ ''
|
||||
if [ -e $ldPath/${targetPrefix}as ]; then
|
||||
ln -s $ldPath/${targetPrefix}as $out/bin/${targetPrefix}as
|
||||
fi
|
||||
|
||||
'' + (if !useMacosReexportHack then ''
|
||||
wrap ${targetPrefix}ld ${./ld-wrapper.sh} ''${ld:-$ldPath/${targetPrefix}ld}
|
||||
'' else ''
|
||||
ldInner="${targetPrefix}ld-reexport-delegate"
|
||||
wrap "$ldInner" ${./macos-sierra-reexport-hack.bash} ''${ld:-$ldPath/${targetPrefix}ld}
|
||||
wrap "${targetPrefix}ld" ${./ld-wrapper.sh} "$out/bin/$ldInner"
|
||||
unset ldInner
|
||||
'') + ''
|
||||
|
||||
for variant in ld.gold ld.bfd ld.lld; do
|
||||
local underlying=$ldPath/${targetPrefix}$variant
|
||||
[[ -e "$underlying" ]] || continue
|
||||
wrap ${targetPrefix}$variant ${./ld-wrapper.sh} $underlying
|
||||
done
|
||||
'';
|
||||
|
||||
strictDeps = true;
|
||||
depsTargetTargetPropagated = extraPackages;
|
||||
|
||||
wrapperName = "BINTOOLS_WRAPPER";
|
||||
|
||||
setupHooks = [
|
||||
../setup-hooks/role.bash
|
||||
./setup-hook.sh
|
||||
];
|
||||
|
||||
postFixup =
|
||||
##
|
||||
## General libc support
|
||||
##
|
||||
optionalString (libc != null) (''
|
||||
touch "$out/nix-support/libc-ldflags"
|
||||
echo "-L${libc_lib}${libc.libdir or "/lib"}" >> $out/nix-support/libc-ldflags
|
||||
|
||||
echo "${libc_lib}" > $out/nix-support/orig-libc
|
||||
echo "${libc_dev}" > $out/nix-support/orig-libc-dev
|
||||
''
|
||||
|
||||
##
|
||||
## Dynamic linker support
|
||||
##
|
||||
+ optionalString (sharedLibraryLoader != null) ''
|
||||
if [[ -z ''${dynamicLinker+x} ]]; then
|
||||
echo "Don't know the name of the dynamic linker for platform '${targetPlatform.config}', so guessing instead." >&2
|
||||
local dynamicLinker="${sharedLibraryLoader}/lib/ld*.so.?"
|
||||
fi
|
||||
''
|
||||
|
||||
# Expand globs to fill array of options
|
||||
+ ''
|
||||
dynamicLinker=($dynamicLinker)
|
||||
|
||||
case ''${#dynamicLinker[@]} in
|
||||
0) echo "No dynamic linker found for platform '${targetPlatform.config}'." >&2;;
|
||||
1) echo "Using dynamic linker: '$dynamicLinker'" >&2;;
|
||||
*) echo "Multiple dynamic linkers found for platform '${targetPlatform.config}'." >&2;;
|
||||
esac
|
||||
|
||||
if [ -n "''${dynamicLinker-}" ]; then
|
||||
echo $dynamicLinker > $out/nix-support/dynamic-linker
|
||||
|
||||
${if targetPlatform.isDarwin then ''
|
||||
printf "export LD_DYLD_PATH=%q\n" "$dynamicLinker" >> $out/nix-support/setup-hook
|
||||
'' else lib.optionalString (sharedLibraryLoader != null) ''
|
||||
if [ -e ${sharedLibraryLoader}/lib/32/ld-linux.so.2 ]; then
|
||||
echo ${sharedLibraryLoader}/lib/32/ld-linux.so.2 > $out/nix-support/dynamic-linker-m32
|
||||
fi
|
||||
touch $out/nix-support/ld-set-dynamic-linker
|
||||
''}
|
||||
fi
|
||||
'')
|
||||
|
||||
##
|
||||
## User env support
|
||||
##
|
||||
|
||||
# Propagate the underlying unwrapped bintools so that if you
|
||||
# install the wrapper, you get tools like objdump (same for any
|
||||
# binaries of libc).
|
||||
+ optionalString (!nativeTools) ''
|
||||
printWords ${bintools_bin} ${if libc == null then "" else libc_bin} > $out/nix-support/propagated-user-env-packages
|
||||
''
|
||||
|
||||
##
|
||||
## Man page and info support
|
||||
##
|
||||
+ optionalString propagateDoc (''
|
||||
ln -s ${bintools.man} $man
|
||||
'' + optionalString (bintools ? info) ''
|
||||
ln -s ${bintools.info} $info
|
||||
'')
|
||||
|
||||
##
|
||||
## Hardening support
|
||||
##
|
||||
|
||||
# some linkers on some platforms don't support specific -z flags
|
||||
+ ''
|
||||
export hardening_unsupported_flags=""
|
||||
if [[ "$($ldPath/${targetPrefix}ld -z now 2>&1 || true)" =~ un(recognized|known)\ option ]]; then
|
||||
hardening_unsupported_flags+=" bindnow"
|
||||
fi
|
||||
if [[ "$($ldPath/${targetPrefix}ld -z relro 2>&1 || true)" =~ un(recognized|known)\ option ]]; then
|
||||
hardening_unsupported_flags+=" relro"
|
||||
fi
|
||||
''
|
||||
|
||||
+ optionalString hostPlatform.isCygwin ''
|
||||
hardening_unsupported_flags+=" pic"
|
||||
''
|
||||
|
||||
+ optionalString targetPlatform.isAvr ''
|
||||
hardening_unsupported_flags+=" relro bindnow"
|
||||
''
|
||||
|
||||
+ optionalString (libc != null && targetPlatform.isAvr) ''
|
||||
for isa in avr5 avr3 avr4 avr6 avr25 avr31 avr35 avr51 avrxmega2 avrxmega4 avrxmega5 avrxmega6 avrxmega7 tiny-stack; do
|
||||
echo "-L${getLib libc}/avr/lib/$isa" >> $out/nix-support/libc-cflags
|
||||
done
|
||||
''
|
||||
|
||||
+ optionalString stdenv.targetPlatform.isDarwin ''
|
||||
echo "-arch ${targetPlatform.darwinArch}" >> $out/nix-support/libc-ldflags
|
||||
''
|
||||
|
||||
##
|
||||
## GNU specific extra strip flags
|
||||
##
|
||||
|
||||
# TODO(@sternenseemann): make a generic strip wrapper?
|
||||
+ optionalString (bintools.isGNU or false) ''
|
||||
wrap ${targetPrefix}strip ${./gnu-binutils-strip-wrapper.sh} \
|
||||
"${bintools_bin}/bin/${targetPrefix}strip"
|
||||
''
|
||||
|
||||
###
|
||||
### Remove LC_UUID
|
||||
###
|
||||
+ optionalString (stdenv.targetPlatform.isDarwin && !(bintools.isGNU or false)) ''
|
||||
echo "-no_uuid" >> $out/nix-support/libc-ldflags-before
|
||||
''
|
||||
|
||||
+ ''
|
||||
for flags in "$out/nix-support"/*flags*; do
|
||||
substituteInPlace "$flags" --replace $'\n' ' '
|
||||
done
|
||||
|
||||
substituteAll ${./add-flags.sh} $out/nix-support/add-flags.sh
|
||||
substituteAll ${./add-hardening.sh} $out/nix-support/add-hardening.sh
|
||||
substituteAll ${../wrapper-common/utils.bash} $out/nix-support/utils.bash
|
||||
''
|
||||
|
||||
###
|
||||
### Ensure consistent LC_VERSION_MIN_MACOSX
|
||||
###
|
||||
+ optionalString stdenv.targetPlatform.isDarwin (
|
||||
let
|
||||
inherit (stdenv.targetPlatform)
|
||||
darwinPlatform darwinSdkVersion
|
||||
darwinMinVersion darwinMinVersionVariable;
|
||||
in ''
|
||||
export darwinPlatform=${darwinPlatform}
|
||||
export darwinMinVersion=${darwinMinVersion}
|
||||
export darwinSdkVersion=${darwinSdkVersion}
|
||||
export darwinMinVersionVariable=${darwinMinVersionVariable}
|
||||
substituteAll ${./add-darwin-ldflags-before.sh} $out/nix-support/add-local-ldflags-before.sh
|
||||
''
|
||||
)
|
||||
|
||||
##
|
||||
## Code signing on Apple Silicon
|
||||
##
|
||||
+ optionalString (targetPlatform.isDarwin && targetPlatform.isAarch64) ''
|
||||
echo 'source ${postLinkSignHook}' >> $out/nix-support/post-link-hook
|
||||
|
||||
export signingUtils=${signingUtils}
|
||||
|
||||
wrap \
|
||||
${targetPrefix}install_name_tool \
|
||||
${./darwin-install_name_tool-wrapper.sh} \
|
||||
"${bintools_bin}/bin/${targetPrefix}install_name_tool"
|
||||
|
||||
wrap \
|
||||
${targetPrefix}strip ${./darwin-strip-wrapper.sh} \
|
||||
"${bintools_bin}/bin/${targetPrefix}strip"
|
||||
''
|
||||
|
||||
##
|
||||
## Extra custom steps
|
||||
##
|
||||
+ extraBuildCommands;
|
||||
|
||||
inherit dynamicLinker expand-response-params;
|
||||
|
||||
# for substitution in utils.bash
|
||||
expandResponseParams = "${expand-response-params}/bin/expand-response-params";
|
||||
|
||||
meta =
|
||||
let bintools_ = if bintools != null then bintools else {}; in
|
||||
(if bintools_ ? meta then removeAttrs bintools.meta ["priority"] else {}) //
|
||||
{ description =
|
||||
lib.attrByPath ["meta" "description"] "System binary utilities" bintools_
|
||||
+ " (wrapper script)";
|
||||
priority = 10;
|
||||
} // optionalAttrs useMacosReexportHack {
|
||||
platforms = lib.platforms.darwin;
|
||||
};
|
||||
}
|
||||
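For orientation, a sketch of how this expression is typically instantiated. wrapBintoolsWith is the helper nixpkgs conventionally uses for this, and the argument names follow the interface above, but the actual call sites in all-packages.nix are not part of this commit.

wrapBintoolsWith {
  bintools = binutils-unwrapped;
  libc = glibc;
}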
@@ -0,0 +1,4 @@
#! @shell@
# shellcheck shell=bash

exec @prog@ --enable-deterministic-archives "$@"
pkgs/build-support/bintools-wrapper/ld-solaris-wrapper.sh (Normal file, 29 lines)
@@ -0,0 +1,29 @@
#!@shell@
set -eu -o pipefail
shopt -s nullglob

if (( "${NIX_DEBUG:-0}" >= 7 )); then
    set -x
fi

declare -a args=("$@")
# I've also tried adding -z direct and -z lazyload, but it gave too many problems with C++ exceptions :'(
# Also made sure libgcc would not be lazy-loaded, as suggested here: https://www.illumos.org/issues/2534#note-3
# but still no success.
declare -a argsBefore=(-z ignore) argsAfter=()

# This loop makes sure all -L arguments are before -l arguments, or ld may complain it cannot find a library.
# GNU binutils does not have this problem:
# http://stackoverflow.com/questions/5817269/does-the-order-of-l-and-l-options-in-the-gnu-linker-matter
while (( $# )); do
    case "$1" in
        -L) argsBefore+=("$1" "$2"); shift ;;
        -L?*) argsBefore+=("$1") ;;
        *) argsAfter+=("$1") ;;
    esac
    shift
done

# Trace:
set -x
exec "@ld@" "${argsBefore[@]}" "${argsAfter[@]}"
pkgs/build-support/bintools-wrapper/ld-wrapper.sh (Normal file, 255 lines)
@@ -0,0 +1,255 @@
#! @shell@
|
||||
set -eu -o pipefail +o posix
|
||||
shopt -s nullglob
|
||||
|
||||
if (( "${NIX_DEBUG:-0}" >= 7 )); then
|
||||
set -x
|
||||
fi
|
||||
|
||||
path_backup="$PATH"
|
||||
|
||||
# phase separation makes this look useless
|
||||
# shellcheck disable=SC2157
|
||||
if [ -n "@coreutils_bin@" ]; then
|
||||
PATH="@coreutils_bin@/bin"
|
||||
fi
|
||||
|
||||
source @out@/nix-support/utils.bash
|
||||
|
||||
if [ -z "${NIX_BINTOOLS_WRAPPER_FLAGS_SET_@suffixSalt@:-}" ]; then
|
||||
source @out@/nix-support/add-flags.sh
|
||||
fi
|
||||
|
||||
|
||||
# Optionally filter out paths not referring to the store.
|
||||
expandResponseParams "$@"
|
||||
|
||||
# NIX_LINK_TYPE is set if ld has been called through our cc wrapper. We take
|
||||
# advantage of this to avoid both recalculating it, and also repeating other
|
||||
# processing cc wrapper has already done.
|
||||
if [[ -n "${NIX_LINK_TYPE_@suffixSalt@:-}" ]]; then
|
||||
linkType=$NIX_LINK_TYPE_@suffixSalt@
|
||||
else
|
||||
linkType=$(checkLinkType "${params[@]}")
|
||||
fi
|
||||
|
||||
if [[ "${NIX_ENFORCE_PURITY:-}" = 1 && -n "${NIX_STORE:-}"
|
||||
&& ( -z "$NIX_IGNORE_LD_THROUGH_GCC_@suffixSalt@" || -z "${NIX_LINK_TYPE_@suffixSalt@:-}" ) ]]; then
|
||||
rest=()
|
||||
nParams=${#params[@]}
|
||||
declare -i n=0
|
||||
|
||||
while (( "$n" < "$nParams" )); do
|
||||
p=${params[n]}
|
||||
p2=${params[n+1]:-} # handle `p` being last one
|
||||
if [ "${p:0:3}" = -L/ ] && badPath "${p:2}"; then
|
||||
skip "${p:2}"
|
||||
elif [ "$p" = -L ] && badPath "$p2"; then
|
||||
n+=1; skip "$p2"
|
||||
elif [ "$p" = -rpath ] && badPath "$p2"; then
|
||||
n+=1; skip "$p2"
|
||||
elif [ "$p" = -dynamic-linker ] && badPath "$p2"; then
|
||||
n+=1; skip "$p2"
|
||||
elif [ "${p:0:1}" = / ] && badPath "$p"; then
|
||||
# We cannot skip this; barf.
|
||||
echo "impure path \`$p' used in link" >&2
|
||||
exit 1
|
||||
elif [ "${p:0:9}" = --sysroot ]; then
|
||||
# Our ld is not built with sysroot support (Can we fix that?)
|
||||
:
|
||||
else
|
||||
rest+=("$p")
|
||||
fi
|
||||
n+=1
|
||||
done
|
||||
# Old bash empty array hack
|
||||
params=(${rest+"${rest[@]}"})
|
||||
fi
|
||||
|
||||
|
||||
source @out@/nix-support/add-hardening.sh
|
||||
|
||||
extraAfter=()
|
||||
extraBefore=(${hardeningLDFlags[@]+"${hardeningLDFlags[@]}"})
|
||||
|
||||
if [ -z "${NIX_LINK_TYPE_@suffixSalt@:-}" ]; then
|
||||
extraAfter+=($(filterRpathFlags "$linkType" $NIX_LDFLAGS_@suffixSalt@))
|
||||
extraBefore+=($(filterRpathFlags "$linkType" $NIX_LDFLAGS_BEFORE_@suffixSalt@))
|
||||
|
||||
# By adding the dynamic linker to extraBefore we allow users to set their
# own dynamic linker, as NIX_LDFLAGS will override earlier-set flags
|
||||
if [[ "$linkType" == dynamic && -n "$NIX_DYNAMIC_LINKER_@suffixSalt@" ]]; then
|
||||
extraBefore+=("-dynamic-linker" "$NIX_DYNAMIC_LINKER_@suffixSalt@")
|
||||
fi
|
||||
fi
|
||||
|
||||
extraAfter+=($(filterRpathFlags "$linkType" $NIX_LDFLAGS_AFTER_@suffixSalt@))
|
||||
|
||||
# These flags *must not* be pulled up to -Wl, flags, so they can't go in
|
||||
# add-flags.sh. They must always be set, so must not be disabled by
|
||||
# NIX_LDFLAGS_SET.
|
||||
if [ -e @out@/nix-support/add-local-ldflags-before.sh ]; then
|
||||
source @out@/nix-support/add-local-ldflags-before.sh
|
||||
fi
|
||||
|
||||
|
||||
# Three tasks:
|
||||
#
|
||||
# 1. Find all -L... switches for rpath
|
||||
#
|
||||
# 2. Find relocatable flag for build id.
|
||||
#
|
||||
# 3. Choose 32-bit dynamic linker if needed
|
||||
declare -a libDirs
|
||||
declare -A libs
|
||||
declare -i relocatable=0 link32=0
|
||||
|
||||
linkerOutput="a.out"
|
||||
|
||||
if
|
||||
[ "$NIX_DONT_SET_RPATH_@suffixSalt@" != 1 ] \
|
||||
|| [ "$NIX_SET_BUILD_ID_@suffixSalt@" = 1 ] \
|
||||
|| [ -e @out@/nix-support/dynamic-linker-m32 ]
|
||||
then
|
||||
prev=
|
||||
# Old bash thinks empty arrays are undefined, ugh.
|
||||
for p in \
|
||||
${extraBefore+"${extraBefore[@]}"} \
|
||||
${params+"${params[@]}"} \
|
||||
${extraAfter+"${extraAfter[@]}"}
|
||||
do
|
||||
case "$prev" in
|
||||
-L)
|
||||
libDirs+=("$p")
|
||||
;;
|
||||
-l)
|
||||
libs["lib${p}.so"]=1
|
||||
;;
|
||||
-m)
|
||||
# Presumably only the last `-m` flag has any effect.
|
||||
case "$p" in
|
||||
elf_i386) link32=1;;
|
||||
*) link32=0;;
|
||||
esac
|
||||
;;
|
||||
-dynamic-linker | -plugin)
|
||||
# Ignore this argument (the path following -dynamic-linker/-plugin); otherwise it would match *.so and be added to the rpath.
|
||||
;;
|
||||
*)
|
||||
case "$p" in
|
||||
-L/*)
|
||||
libDirs+=("${p:2}")
|
||||
;;
|
||||
-l?*)
|
||||
libs["lib${p:2}.so"]=1
|
||||
;;
|
||||
"${NIX_STORE:-}"/*.so | "${NIX_STORE:-}"/*.so.*)
|
||||
# This is a direct reference to a shared library.
|
||||
libDirs+=("${p%/*}")
|
||||
libs["${p##*/}"]=1
|
||||
;;
|
||||
-r | --relocatable | -i)
|
||||
relocatable=1
|
||||
esac
|
||||
;;
|
||||
esac
|
||||
prev="$p"
|
||||
done
|
||||
fi
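# Worked example (hypothetical store paths, for illustration only): for a call like
#   ld -o prog -L/nix/store/aaaa-zlib/lib -lz /nix/store/bbbb-foo/lib/libfoo.so -m elf_i386
# the scan above ends up with
#   libDirs=(/nix/store/aaaa-zlib/lib /nix/store/bbbb-foo/lib)
#   libs=([libz.so]=1 [libfoo.so]=1)
#   link32=1 and relocatable=0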
|
||||
|
||||
# Determine linkerOutput
|
||||
prev=
|
||||
for p in \
|
||||
${extraBefore+"${extraBefore[@]}"} \
|
||||
${params+"${params[@]}"} \
|
||||
${extraAfter+"${extraAfter[@]}"}
|
||||
do
|
||||
case "$prev" in
|
||||
-o)
|
||||
# Informational for post-link-hook
|
||||
linkerOutput="$p"
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
prev="$p"
|
||||
done
|
||||
|
||||
if [[ "$link32" == "1" && "$linkType" == dynamic && -e "@out@/nix-support/dynamic-linker-m32" ]]; then
|
||||
# We have an alternate 32-bit linker and we're producing a 32-bit ELF, let's
|
||||
# use it.
|
||||
extraAfter+=(
|
||||
'-dynamic-linker'
|
||||
"$(< @out@/nix-support/dynamic-linker-m32)"
|
||||
)
|
||||
fi
|
||||
|
||||
# Add all used dynamic libraries to the rpath.
|
||||
if [[ "$NIX_DONT_SET_RPATH_@suffixSalt@" != 1 && "$linkType" != static-pie ]]; then
|
||||
# For each directory in the library search path (-L...),
|
||||
# see if it contains a dynamic library used by a -l... flag. If
|
||||
# so, add the directory to the rpath.
|
||||
# It's important to add the rpath in the same order as the -L... flags, so
# that the libraries chosen at link time are the same ones found at runtime.
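# Worked example (illustrative, hypothetical store path): with
# libDirs=(/nix/store/aaaa-zlib/lib) and libs=([libz.so]=1), the loop below finds
# libz.so in that directory and appends `-rpath /nix/store/aaaa-zlib/lib` to
# extraAfter exactly once.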
|
||||
declare -A rpaths
|
||||
for dir in ${libDirs+"${libDirs[@]}"}; do
|
||||
if [[ "$dir" =~ [/.][/.] ]] && dir2=$(readlink -f "$dir"); then
|
||||
dir="$dir2"
|
||||
fi
|
||||
if [ -n "${rpaths[$dir]:-}" ] || [[ "$dir" != "${NIX_STORE:-}"/* ]]; then
|
||||
# If the path is not in the store, don't add it to the rpath.
|
||||
# This typically happens for libraries in /tmp that are later
|
||||
# copied to $out/lib. If not, we're screwed.
|
||||
continue
|
||||
fi
|
||||
for path in "$dir"/*; do
|
||||
file="${path##*/}"
|
||||
if [ "${libs[$file]:-}" ]; then
|
||||
# This library may have been provided by a previous directory,
|
||||
# but if that library file is inside an output of the current
|
||||
# derivation, it can be deleted after this compilation and
|
||||
# should be found in a later directory, so we add all
|
||||
# directories that contain any of the libraries to rpath.
|
||||
rpaths["$dir"]=1
|
||||
extraAfter+=(-rpath "$dir")
|
||||
break
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
||||
fi
|
||||
|
||||
# This is outside the DONT_SET_RPATH branch because it's more targeted and we
|
||||
# usually want it (on Darwin) even if DONT_SET_RPATH is set.
|
||||
if [ -n "${NIX_COREFOUNDATION_RPATH:-}" ]; then
|
||||
extraAfter+=(-rpath $NIX_COREFOUNDATION_RPATH)
|
||||
fi
|
||||
|
||||
# Only add --build-id if this is a final link. FIXME: should build gcc
|
||||
# with --enable-linker-build-id instead?
|
||||
if [ "$NIX_SET_BUILD_ID_@suffixSalt@" = 1 ] && ! (( "$relocatable" )); then
|
||||
extraAfter+=(--build-id)
|
||||
fi
|
||||
|
||||
|
||||
# Optionally print debug info.
|
||||
if (( "${NIX_DEBUG:-0}" >= 1 )); then
|
||||
# Old bash workaround, see above.
|
||||
echo "extra flags before to @prog@:" >&2
|
||||
printf " %q\n" ${extraBefore+"${extraBefore[@]}"} >&2
|
||||
echo "original flags to @prog@:" >&2
|
||||
printf " %q\n" ${params+"${params[@]}"} >&2
|
||||
echo "extra flags after to @prog@:" >&2
|
||||
printf " %q\n" ${extraAfter+"${extraAfter[@]}"} >&2
|
||||
fi
|
||||
|
||||
PATH="$path_backup"
|
||||
# Old bash workaround, see above.
|
||||
@prog@ \
|
||||
${extraBefore+"${extraBefore[@]}"} \
|
||||
${params+"${params[@]}"} \
|
||||
${extraAfter+"${extraAfter[@]}"}
|
||||
|
||||
if [ -e "@out@/nix-support/post-link-hook" ]; then
|
||||
source @out@/nix-support/post-link-hook
|
||||
fi
|
||||
|
|
@@ -0,0 +1,246 @@
|
|||
#! @shell@
|
||||
|
||||
set -eu -o pipefail
|
||||
|
||||
# For cmd | while read; do ...; done
|
||||
shopt -s lastpipe
|
||||
|
||||
path_backup="$PATH"
|
||||
if [ -n "@coreutils_bin@" ]; then
|
||||
PATH="@coreutils_bin@/bin"
|
||||
fi
|
||||
|
||||
declare -ri recurThreshold=200
|
||||
declare -i overflowCount=0
|
||||
|
||||
declare -ar origArgs=("$@")
|
||||
|
||||
# Throw away what we won't need
|
||||
declare -a parentArgs=()
|
||||
|
||||
while (( $# )); do
|
||||
case "$1" in
|
||||
-l)
|
||||
echo "cctools LD does not support '-l foo'" >&2
|
||||
exit 1
|
||||
;;
|
||||
-lazy_library | -reexport_library | -upward_library | -weak_library)
|
||||
overflowCount+=1
|
||||
shift 2
|
||||
;;
|
||||
-l* | *.so.* | *.dylib | -lazy-l* | -reexport-l* | -upward-l* | -weak-l*)
|
||||
overflowCount+=1
|
||||
shift 1
|
||||
;;
|
||||
*.a | *.o)
|
||||
shift 1
|
||||
;;
|
||||
-L | -F)
|
||||
# Evidently ld doesn't like using the child's RPATH, so it still
# needs these.
|
||||
parentArgs+=("$1" "$2")
|
||||
shift 2
|
||||
;;
|
||||
-L?* | -F?*)
|
||||
parentArgs+=("$1")
|
||||
shift 1
|
||||
;;
|
||||
-o)
|
||||
outputName="$2"
|
||||
parentArgs+=("$1" "$2")
|
||||
shift 2
|
||||
;;
|
||||
-install_name | -dylib_install_name | -dynamic-linker | -plugin)
|
||||
parentArgs+=("$1" "$2")
|
||||
shift 2
|
||||
;;
|
||||
-rpath)
|
||||
# Only an rpath to the child is needed, which we will add
|
||||
shift 2
|
||||
;;
|
||||
*)
|
||||
if [[ -f "$1" ]]; then
|
||||
# Probably a non-standard object file like Haskell's
# `.dyn_o`. Skip it like the other inputs.
|
||||
:
|
||||
else
|
||||
parentArgs+=("$1")
|
||||
fi
|
||||
shift 1
|
||||
;;
|
||||
esac
|
||||
done
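# Worked example (illustrative, hypothetical paths): for an invocation like
#   ld -o libbig.dylib big.o -L/nix/store/aaaa-foo/lib -lfoo \
#      /nix/store/bbbb-bar/lib/libbar.dylib -weak_library /nix/store/cccc-baz/libbaz.dylib
# the loop above counts -lfoo, libbar.dylib and the -weak_library pair, giving
# overflowCount=3; big.o and the -o/-L arguments are not counted.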
|
||||
|
||||
|
||||
|
||||
if (( "$overflowCount" <= "$recurThreshold" )); then
|
||||
if [ -n "${NIX_DEBUG:-}" ]; then
|
||||
echo "ld-wrapper: Only ${overflowCount} inputs counted while ${recurThreshold} is the ceiling, linking normally. " >&2
|
||||
fi
|
||||
PATH="$path_backup"
|
||||
exec @prog@ "${origArgs[@]}"
|
||||
fi
|
||||
|
||||
|
||||
|
||||
if [ -n "${NIX_DEBUG:-}" ]; then
|
||||
echo "ld-wrapper: ${overflowCount} inputs counted when ${recurThreshold} is the ceiling, inspecting further. " >&2
|
||||
fi
|
||||
|
||||
# Collect the normalized linker input
|
||||
declare -a norm=()
|
||||
|
||||
# Arguments are null-separated
|
||||
@prog@ --dump-normalized-lib-args "${origArgs[@]}" |
|
||||
while IFS= read -r -d '' input; do
|
||||
norm+=("$input")
|
||||
done
|
||||
|
||||
declare -i leafCount=0
|
||||
declare lastLeaf=''
|
||||
declare -a childrenInputs=() trailingInputs=()
|
||||
while (( "${#norm[@]}" )); do
|
||||
case "${norm[0]}" in
|
||||
-lazy_library | -upward_library)
|
||||
# TODO(@Ericson2314): Don't do that, but intersperse children
|
||||
# between such args.
|
||||
echo "ld-wrapper: Warning: Potentially changing link order" >&2
|
||||
trailingInputs+=("${norm[0]}" "${norm[1]}")
|
||||
norm=("${norm[@]:2}")
|
||||
;;
|
||||
-reexport_library | -weak_library)
|
||||
childrenInputs+=("${norm[0]}" "${norm[1]}")
|
||||
if [[ "${norm[1]}" != "$lastLeaf" ]]; then
|
||||
leafCount+=1
|
||||
lastLeaf="${norm[1]}"
|
||||
fi
|
||||
norm=("${norm[@]:2}")
|
||||
;;
|
||||
*.so | *.dylib)
|
||||
childrenInputs+=(-reexport_library "${norm[0]}")
|
||||
if [[ "${norm[0]}" != "$lastLeaf" ]]; then
|
||||
leafCount+=1
|
||||
lastLeaf="${norm[0]}"
|
||||
fi
|
||||
norm=("${norm[@]:1}")
|
||||
;;
|
||||
*.o | *.a)
|
||||
# Don't delegate object files or static libs
|
||||
parentArgs+=("${norm[0]}")
|
||||
norm=("${norm[@]:1}")
|
||||
;;
|
||||
*)
|
||||
if [[ -f "${norm[0]}" ]]; then
|
||||
# Probably a non-standard object file. We'll let it through.
|
||||
parentArgs+=("${norm[0]}")
|
||||
norm=("${norm[@]:1}")
|
||||
else
|
||||
echo "ld-wrapper: Internal Error: Invalid normalized argument" >&2
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
|
||||
|
||||
if (( "$leafCount" <= "$recurThreshold" )); then
|
||||
if [ -n "${NIX_DEBUG:-}" ]; then
|
||||
echo "ld-wrapper: Only ${leafCount} *dynamic* inputs counted while ${recurThreshold} is the ceiling, linking normally. " >&2
|
||||
fi
|
||||
PATH="$path_backup"
|
||||
exec @prog@ "${origArgs[@]}"
|
||||
fi
|
||||
|
||||
|
||||
|
||||
if [ -n "${NIX_DEBUG:-}" ]; then
|
||||
echo "ld-wrapper: ${leafCount} *dynamic* inputs counted when ${recurThreshold} is the ceiling, delegating to children. " >&2
|
||||
fi
|
||||
|
||||
declare -r outputNameLibless=$( \
|
||||
if [[ -z "${outputName:+isUndefined}" ]]; then
|
||||
echo unnamed
|
||||
return 0;
|
||||
fi
|
||||
baseName=$(basename ${outputName})
|
||||
if [[ "$baseName" = lib* ]]; then
|
||||
baseName="${baseName:3}"
|
||||
fi
|
||||
echo "$baseName")
|
||||
|
||||
declare -ra children=(
|
||||
"$outputNameLibless-reexport-delegate-0"
|
||||
"$outputNameLibless-reexport-delegate-1"
|
||||
)
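# Worked example (illustrative): when the output is an executable named "foo",
# outputNameLibless is "foo", so the delegates are built below as
# $out/lib/libfoo-reexport-delegate-0.dylib and
# $out/lib/libfoo-reexport-delegate-1.dylib, and the parent link pulls them in
# with -lfoo-reexport-delegate-0 and -lfoo-reexport-delegate-1.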
|
||||
|
||||
mkdir -p "$out/lib"
|
||||
|
||||
symbolBloatObject=$outputNameLibless-symbol-hack.o
|
||||
if [[ ! -f $symbolBloatObject ]]; then
|
||||
# `-Q` means use GNU Assembler rather than Clang, avoiding an awkward
|
||||
# dependency cycle.
|
||||
printf '.private_extern _______child_hack_foo\nchild_hack_foo:\n' |
|
||||
PATH="$PATH:@out@/bin" @targetPrefix@as -Q -- -o $symbolBloatObject
|
||||
fi
|
||||
|
||||
# Split inputs between children
|
||||
declare -a child0Inputs=() child1Inputs=("${childrenInputs[@]}")
|
||||
let "countFirstChild = $leafCount / 2" || true
|
||||
lastLeaf=''
|
||||
while (( "$countFirstChild" )); do
|
||||
case "${child1Inputs[0]}" in
|
||||
-reexport_library | -weak_library)
|
||||
child0Inputs+=("${child1Inputs[0]}" "${child1Inputs[1]}")
|
||||
if [[ "${child1Inputs[1]}" != "$lastLeaf" ]]; then
|
||||
let countFirstChild-=1 || true
|
||||
lastLeaf="${child1Inputs[1]}"
|
||||
fi
|
||||
child1Inputs=("${child1Inputs[@]:2}")
|
||||
;;
|
||||
*.so | *.dylib)
|
||||
child0Inputs+=(-reexport_library "${child1Inputs[0]}")
|
||||
if [[ "${child1Inputs[0]}" != "$lastLeaf" ]]; then
|
||||
let countFirstChild-=1 || true
|
||||
lastLeaf="${child1Inputs[1]}"
|
||||
fi
|
||||
child1Inputs=("${child1Inputs[@]:2}")
|
||||
;;
|
||||
*)
|
||||
echo "ld-wrapper: Internal Error: Invalid delegated input" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
|
||||
# First half of libs
|
||||
@out@/bin/@targetPrefix@ld \
|
||||
-macosx_version_min $MACOSX_DEPLOYMENT_TARGET -arch x86_64 -dylib \
|
||||
-o "$out/lib/lib${children[0]}.dylib" \
|
||||
-install_name "$out/lib/lib${children[0]}.dylib" \
|
||||
"$symbolBloatObject" "${child0Inputs[@]}" "${trailingInputs[@]}"
|
||||
|
||||
# Second half of libs
|
||||
@out@/bin/@targetPrefix@ld \
|
||||
-macosx_version_min $MACOSX_DEPLOYMENT_TARGET -arch x86_64 -dylib \
|
||||
-o "$out/lib/lib${children[1]}.dylib" \
|
||||
-install_name "$out/lib/lib${children[1]}.dylib" \
|
||||
"$symbolBloatObject" "${child1Inputs[@]}" "${trailingInputs[@]}"
|
||||
|
||||
parentArgs+=("-L$out/lib" -rpath "$out/lib")
|
||||
if [[ $outputName != *reexport-delegate* ]]; then
|
||||
parentArgs+=("-l${children[0]}" "-l${children[1]}")
|
||||
else
|
||||
parentArgs+=("-reexport-l${children[0]}" "-reexport-l${children[1]}")
|
||||
fi
|
||||
|
||||
parentArgs+=("${trailingInputs[@]}")
|
||||
|
||||
if [ -n "${NIX_DEBUG:-}" ]; then
|
||||
echo "flags using delegated children to @prog@:" >&2
|
||||
printf " %q\n" "${parentArgs[@]}" >&2
|
||||
fi
|
||||
|
||||
PATH="$path_backup"
|
||||
exec @prog@ "${parentArgs[@]}"
|
||||
72
pkgs/build-support/bintools-wrapper/setup-hook.sh
Normal file
@@ -0,0 +1,72 @@
|
|||
# Binutils Wrapper hygiene
|
||||
#
|
||||
# See comments in cc-wrapper's setup hook. This works exactly the same way.
|
||||
|
||||
# Skip setup hook if we're neither a build-time dep, nor, temporarily, doing a
|
||||
# native compile.
|
||||
#
|
||||
# TODO(@Ericson2314): No native exception
|
||||
[[ -z ${strictDeps-} ]] || (( "$hostOffset" < 0 )) || return 0
|
||||
|
||||
bintoolsWrapper_addLDVars () {
|
||||
# See ../setup-hooks/role.bash
|
||||
local role_post
|
||||
getHostRoleEnvHook
|
||||
|
||||
if [[ -d "$1/lib64" && ! -L "$1/lib64" ]]; then
|
||||
export NIX_LDFLAGS${role_post}+=" -L$1/lib64"
|
||||
fi
|
||||
|
||||
if [[ -d "$1/lib" ]]; then
|
||||
# Don't add the /lib directory if it actually doesn't contain any libraries. For instance,
|
||||
# Python and Haskell packages often only have directories like $out/lib/ghc-8.4.3/ or
|
||||
# $out/lib/python3.6/, so having them in LDFLAGS just makes the linker search unnecessary
|
||||
# directories and bloats the size of the environment variable space.
|
||||
local -a glob=( $1/lib/lib* )
|
||||
if [ "${#glob[*]}" -gt 0 ]; then
|
||||
export NIX_LDFLAGS${role_post}+=" -L$1/lib"
|
||||
fi
|
||||
fi
|
||||
}
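# Worked example (illustrative, hypothetical store path and role): if a dependency
# at /nix/store/aaaa-zlib provides lib/libz.so and this hook runs with
# role_post=_FOR_TARGET, the function above appends " -L/nix/store/aaaa-zlib/lib"
# to NIX_LDFLAGS_FOR_TARGET; directories with no lib/lib* files are left alone.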
|
||||
|
||||
# See ../setup-hooks/role.bash
|
||||
getTargetRole
|
||||
getTargetRoleWrapper
|
||||
|
||||
addEnvHooks "$targetOffset" bintoolsWrapper_addLDVars
|
||||
|
||||
# shellcheck disable=SC2157
|
||||
if [ -n "@bintools_bin@" ]; then
|
||||
addToSearchPath _PATH @bintools_bin@/bin
|
||||
fi
|
||||
|
||||
# shellcheck disable=SC2157
|
||||
if [ -n "@libc_bin@" ]; then
|
||||
addToSearchPath _PATH @libc_bin@/bin
|
||||
fi
|
||||
|
||||
# shellcheck disable=SC2157
|
||||
if [ -n "@coreutils_bin@" ]; then
|
||||
addToSearchPath _PATH @coreutils_bin@/bin
|
||||
fi
|
||||
|
||||
# Export tool environment variables so various build systems use the right ones.
|
||||
|
||||
export NIX_BINTOOLS${role_post}=@out@
|
||||
|
||||
for cmd in \
|
||||
ar as ld nm objcopy objdump readelf ranlib strip strings size windres
|
||||
do
|
||||
if
|
||||
PATH=$_PATH type -p "@targetPrefix@${cmd}" > /dev/null
|
||||
then
|
||||
export "${cmd^^}${role_post}=@targetPrefix@${cmd}";
|
||||
fi
|
||||
done
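# Worked example (illustrative): for a cross wrapper with
# targetPrefix=aarch64-unknown-linux-gnu- and role_post=_FOR_TARGET, the loop
# above exports e.g. LD_FOR_TARGET=aarch64-unknown-linux-gnu-ld and
# AR_FOR_TARGET=aarch64-unknown-linux-gnu-ar for each tool found on _PATH.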
|
||||
|
||||
# If unset, assume the default hardening flags.
|
||||
: ${NIX_HARDENING_ENABLE="fortify stackprotector pic strictoverflow format relro bindnow"}
|
||||
export NIX_HARDENING_ENABLE
|
||||
|
||||
# No local scope in sourced file
|
||||
unset -v role_post cmd upper_case
|
||||
229
pkgs/build-support/build-bazel-package/default.nix
Normal file
@@ -0,0 +1,229 @@
|
|||
{ stdenv
|
||||
, bazel
|
||||
, cacert
|
||||
, lib
|
||||
}:
|
||||
|
||||
let
|
||||
bazelPkg = bazel;
|
||||
in
|
||||
|
||||
args@{
|
||||
name ? "${args.pname}-${args.version}"
|
||||
, bazel ? bazelPkg
|
||||
, bazelFlags ? []
|
||||
, bazelBuildFlags ? []
|
||||
, bazelFetchFlags ? []
|
||||
, bazelTarget
|
||||
, buildAttrs
|
||||
, fetchAttrs
|
||||
|
||||
# Newer versions of Bazel are moving away from built-in rules_cc and instead
|
||||
# allow fetching it as an external dependency in a WORKSPACE file[1]. If it is
# removed in the fixed-output fetch phase, the build phase will fail because it
# can no longer be downloaded.
|
||||
# This can be seen e.g. in #73097
|
||||
#
|
||||
# This option allows configuring the removal of rules_cc in cases where a
|
||||
# project depends on it via an external dependency.
|
||||
#
|
||||
# [1]: https://github.com/bazelbuild/rules_cc
|
||||
, removeRulesCC ? true
|
||||
, removeLocalConfigCc ? true
|
||||
, removeLocal ? true
|
||||
|
||||
# Use build --nobuild instead of fetch. This allows fetching the dependencies
|
||||
# required for the build as configured, rather than fetching all the dependencies
|
||||
# which may not work in some situations (e.g. Java code which ends up relying on
|
||||
# Debian-specific /usr/share/java paths, but doesn't in the configured build).
|
||||
, fetchConfigured ? true
|
||||
|
||||
# Don’t add Bazel --copt and --linkopt from NIX_CFLAGS_COMPILE /
|
||||
# NIX_LDFLAGS. This is necessary when using a custom toolchain which
|
||||
# Bazel wants all headers / libraries to come from, like when using
|
||||
# CROSSTOOL. Weirdly, we can still get the flags through the wrapped
|
||||
# compiler.
|
||||
, dontAddBazelOpts ? false
|
||||
, ...
|
||||
}:
|
||||
|
||||
let
|
||||
fArgs = removeAttrs args [ "buildAttrs" "fetchAttrs" "removeRulesCC" ];
|
||||
fBuildAttrs = fArgs // buildAttrs;
|
||||
fFetchAttrs = fArgs // removeAttrs fetchAttrs [ "sha256" ];
|
||||
|
||||
in stdenv.mkDerivation (fBuildAttrs // {
|
||||
inherit name bazelFlags bazelBuildFlags bazelFetchFlags bazelTarget;
|
||||
|
||||
deps = stdenv.mkDerivation (fFetchAttrs // {
|
||||
name = "${name}-deps.tar.gz";
|
||||
inherit bazelFlags bazelBuildFlags bazelFetchFlags bazelTarget;
|
||||
|
||||
impureEnvVars = lib.fetchers.proxyImpureEnvVars;
|
||||
|
||||
nativeBuildInputs = fFetchAttrs.nativeBuildInputs or [] ++ [ bazel ];
|
||||
|
||||
preHook = fFetchAttrs.preHook or "" + ''
|
||||
export bazelOut="$(echo ''${NIX_BUILD_TOP}/output | sed -e 's,//,/,g')"
|
||||
export bazelUserRoot="$(echo ''${NIX_BUILD_TOP}/tmp | sed -e 's,//,/,g')"
|
||||
export HOME="$NIX_BUILD_TOP"
|
||||
export USER="nix"
|
||||
# This is needed for git_repository with https remotes
|
||||
export GIT_SSL_CAINFO="${cacert}/etc/ssl/certs/ca-bundle.crt"
|
||||
# This is needed for Bazel fetchers that are themselves programs (e.g.
|
||||
# rules_go using the go toolchain)
|
||||
export SSL_CERT_FILE="${cacert}/etc/ssl/certs/ca-bundle.crt"
|
||||
'';
|
||||
|
||||
buildPhase = fFetchAttrs.buildPhase or ''
|
||||
runHook preBuild
|
||||
|
||||
# Bazel computes the default value of output_user_root before parsing the
|
||||
# flag. The computation of the default value involves getting the $USER
|
||||
# from the environment. That variable is not available when building with
# the sandbox enabled. Code here:
|
||||
# https://github.com/bazelbuild/bazel/blob/9323c57607d37f9c949b60e293b573584906da46/src/main/cpp/startup_options.cc#L123-L124
|
||||
#
|
||||
# On macOS Bazel will use the system installed Xcode or CLT toolchain instead of the one in the PATH unless we pass BAZEL_USE_CPP_ONLY_TOOLCHAIN
|
||||
|
||||
# We disable multithreading for the fetching phase since it can lead to timeouts with many dependencies/threads:
|
||||
# https://github.com/bazelbuild/bazel/issues/6502
|
||||
BAZEL_USE_CPP_ONLY_TOOLCHAIN=1 \
|
||||
USER=homeless-shelter \
|
||||
bazel \
|
||||
--output_base="$bazelOut" \
|
||||
--output_user_root="$bazelUserRoot" \
|
||||
${if fetchConfigured then "build --nobuild" else "fetch"} \
|
||||
--loading_phase_threads=1 \
|
||||
$bazelFlags \
|
||||
$bazelFetchFlags \
|
||||
$bazelTarget
|
||||
|
||||
runHook postBuild
|
||||
'';
|
||||
|
||||
installPhase = fFetchAttrs.installPhase or ''
|
||||
runHook preInstall
|
||||
|
||||
# Remove all built-in external workspaces; Bazel will recreate them when building
|
||||
rm -rf $bazelOut/external/{bazel_tools,\@bazel_tools.marker}
|
||||
${if removeRulesCC then "rm -rf $bazelOut/external/{rules_cc,\\@rules_cc.marker}" else ""}
|
||||
rm -rf $bazelOut/external/{embedded_jdk,\@embedded_jdk.marker}
|
||||
${if removeLocalConfigCc then "rm -rf $bazelOut/external/{local_config_cc,\\@local_config_cc.marker}" else ""}
|
||||
${if removeLocal then "rm -rf $bazelOut/external/{local_*,\\@local_*.marker}" else ""}
|
||||
|
||||
# Clear markers
|
||||
find $bazelOut/external -name '@*\.marker' -exec sh -c 'echo > {}' \;
|
||||
|
||||
# Remove all vcs files
|
||||
rm -rf $(find $bazelOut/external -type d -name .git)
|
||||
rm -rf $(find $bazelOut/external -type d -name .svn)
|
||||
rm -rf $(find $bazelOut/external -type d -name .hg)
|
||||
|
||||
# Removing top-level symlinks along with their markers.
|
||||
# This is needed because they sometimes point to temporary paths (?).
|
||||
# For example, in Tensorflow-gpu build:
|
||||
# platforms -> NIX_BUILD_TOP/tmp/install/35282f5123611afa742331368e9ae529/_embedded_binaries/platforms
|
||||
find $bazelOut/external -maxdepth 1 -type l | while read symlink; do
|
||||
name="$(basename "$symlink")"
|
||||
rm "$symlink"
|
||||
test -f "$bazelOut/external/@$name.marker" && rm "$bazelOut/external/@$name.marker" || true
|
||||
done
|
||||
|
||||
# Patching symlinks to remove build directory reference
|
||||
find $bazelOut/external -type l | while read symlink; do
|
||||
new_target="$(readlink "$symlink" | sed "s,$NIX_BUILD_TOP,NIX_BUILD_TOP,")"
|
||||
rm "$symlink"
|
||||
ln -sf "$new_target" "$symlink"
|
||||
done
|
||||
|
||||
echo '${bazel.name}' > $bazelOut/external/.nix-bazel-version
|
||||
|
||||
(cd $bazelOut/ && tar czf $out --sort=name --mtime='@1' --owner=0 --group=0 --numeric-owner external/)
|
||||
|
||||
runHook postInstall
|
||||
'';
|
||||
|
||||
dontFixup = true;
|
||||
allowedRequisites = [];
|
||||
|
||||
outputHashAlgo = "sha256";
|
||||
outputHash = fetchAttrs.sha256;
|
||||
});
|
||||
|
||||
nativeBuildInputs = fBuildAttrs.nativeBuildInputs or [] ++ [ (bazel.override { enableNixHacks = true; }) ];
|
||||
|
||||
preHook = fBuildAttrs.preHook or "" + ''
|
||||
export bazelOut="$NIX_BUILD_TOP/output"
|
||||
export bazelUserRoot="$NIX_BUILD_TOP/tmp"
|
||||
export HOME="$NIX_BUILD_TOP"
|
||||
'';
|
||||
|
||||
preConfigure = ''
|
||||
mkdir -p "$bazelOut"
|
||||
|
||||
(cd $bazelOut && tar xfz $deps)
|
||||
|
||||
test "${bazel.name}" = "$(<$bazelOut/external/.nix-bazel-version)" || {
|
||||
echo "fixed output derivation was built for a different bazel version" >&2
|
||||
echo " got: $(<$bazelOut/external/.nix-bazel-version)" >&2
|
||||
echo "expected: ${bazel.name}" >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
chmod -R +w $bazelOut
|
||||
find $bazelOut -type l | while read symlink; do
|
||||
if [[ $(readlink "$symlink") == *NIX_BUILD_TOP* ]]; then
|
||||
ln -sf $(readlink "$symlink" | sed "s,NIX_BUILD_TOP,$NIX_BUILD_TOP,") "$symlink"
|
||||
fi
|
||||
done
|
||||
'' + fBuildAttrs.preConfigure or "";
|
||||
|
||||
inherit dontAddBazelOpts;
|
||||
|
||||
buildPhase = fBuildAttrs.buildPhase or ''
|
||||
runHook preBuild
|
||||
|
||||
# Bazel sandboxes the execution of the tools it invokes, so even though we are
|
||||
# calling the correct nix wrappers, the values of the environment variables
|
||||
# the wrappers are expecting will not be set. So instead of relying on the
|
||||
# wrappers picking them up, pass them in explicitly via `--copt`, `--linkopt`
|
||||
# and related flags.
|
||||
#
|
||||
copts=()
|
||||
host_copts=()
|
||||
linkopts=()
|
||||
host_linkopts=()
|
||||
if [ -z "''${dontAddBazelOpts:-}" ]; then
|
||||
for flag in $NIX_CFLAGS_COMPILE; do
|
||||
copts+=( "--copt=$flag" )
|
||||
host_copts+=( "--host_copt=$flag" )
|
||||
done
|
||||
for flag in $NIX_CXXSTDLIB_COMPILE; do
|
||||
copts+=( "--copt=$flag" )
|
||||
host_copts+=( "--host_copt=$flag" )
|
||||
done
|
||||
for flag in $NIX_LDFLAGS; do
|
||||
linkopts+=( "--linkopt=-Wl,$flag" )
|
||||
host_linkopts+=( "--host_linkopt=-Wl,$flag" )
|
||||
done
|
||||
fi
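# Worked example (illustrative, hypothetical store path): if NIX_CFLAGS_COMPILE
# is "-O2 -idirafter /nix/store/aaaa-glibc-dev/include", the word-splitting loop
# above produces one flag per word, i.e.
#   --copt=-O2 --copt=-idirafter --copt=/nix/store/aaaa-glibc-dev/include
# plus the matching --host_copt variants, which Bazel then hands to the wrapped
# compiler.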
|
||||
|
||||
BAZEL_USE_CPP_ONLY_TOOLCHAIN=1 \
|
||||
USER=homeless-shelter \
|
||||
bazel \
|
||||
--output_base="$bazelOut" \
|
||||
--output_user_root="$bazelUserRoot" \
|
||||
build \
|
||||
--curses=no \
|
||||
-j $NIX_BUILD_CORES \
|
||||
"''${copts[@]}" \
|
||||
"''${host_copts[@]}" \
|
||||
"''${linkopts[@]}" \
|
||||
"''${host_linkopts[@]}" \
|
||||
$bazelFlags \
|
||||
$bazelBuildFlags \
|
||||
$bazelTarget
|
||||
|
||||
runHook postBuild
|
||||
'';
|
||||
})
|
||||
198
pkgs/build-support/build-fhs-userenv-bubblewrap/default.nix
Normal file
@@ -0,0 +1,198 @@
|
|||
{ lib, callPackage, runCommandLocal, writeShellScriptBin, glibc, pkgsi686Linux, coreutils, bubblewrap }:
|
||||
|
||||
let buildFHSEnv = callPackage ./env.nix { }; in
|
||||
|
||||
args @ {
|
||||
name
|
||||
, runScript ? "bash"
|
||||
, extraInstallCommands ? ""
|
||||
, meta ? {}
|
||||
, passthru ? {}
|
||||
, extraBwrapArgs ? []
|
||||
, unshareUser ? true
|
||||
, unshareIpc ? true
|
||||
, unsharePid ? true
|
||||
, unshareNet ? false
|
||||
, unshareUts ? true
|
||||
, unshareCgroup ? true
|
||||
, dieWithParent ? true
|
||||
, ...
|
||||
}:
|
||||
|
||||
with builtins;
|
||||
let
|
||||
buildFHSEnv = callPackage ./env.nix { };
|
||||
|
||||
env = buildFHSEnv (removeAttrs args [
|
||||
"runScript" "extraInstallCommands" "meta" "passthru" "extraBwrapArgs" "dieWithParent"
|
||||
"unshareUser" "unshareCgroup" "unshareUts" "unshareNet" "unsharePid" "unshareIpc"
|
||||
]);
|
||||
|
||||
etcBindFlags = let
|
||||
files = [
|
||||
# NixOS Compatibility
|
||||
"static"
|
||||
"nix" # mainly for nixUnstable users, but also for access to nix/netrc
|
||||
# Shells
|
||||
"bashrc"
|
||||
"zshenv"
|
||||
"zshrc"
|
||||
"zinputrc"
|
||||
"zprofile"
|
||||
# Users, Groups, NSS
|
||||
"passwd"
|
||||
"group"
|
||||
"shadow"
|
||||
"hosts"
|
||||
"resolv.conf"
|
||||
"nsswitch.conf"
|
||||
# User profiles
|
||||
"profiles"
|
||||
# Sudo & Su
|
||||
"login.defs"
|
||||
"sudoers"
|
||||
"sudoers.d"
|
||||
# Time
|
||||
"localtime"
|
||||
"zoneinfo"
|
||||
# Other Core Stuff
|
||||
"machine-id"
|
||||
"os-release"
|
||||
# PAM
|
||||
"pam.d"
|
||||
# Fonts
|
||||
"fonts"
|
||||
# ALSA
|
||||
"alsa"
|
||||
"asound.conf"
|
||||
# SSL
|
||||
"ssl/certs"
|
||||
"ca-certificates"
|
||||
"pki"
|
||||
];
|
||||
in concatStringsSep "\n "
|
||||
(map (file: "--ro-bind-try $(${coreutils}/bin/readlink -f /etc/${file}) /etc/${file}") files);
|
||||
|
||||
# Create this on the fly instead of linking from /nix
|
||||
# The container might have to modify it and re-run ldconfig if there are
|
||||
# issues running some binary with LD_LIBRARY_PATH
|
||||
createLdConfCache = ''
|
||||
cat > /etc/ld.so.conf <<EOF
|
||||
/lib
|
||||
/lib/x86_64-linux-gnu
|
||||
/lib64
|
||||
/usr/lib
|
||||
/usr/lib/x86_64-linux-gnu
|
||||
/usr/lib64
|
||||
/lib/i386-linux-gnu
|
||||
/lib32
|
||||
/usr/lib/i386-linux-gnu
|
||||
/usr/lib32
|
||||
EOF
|
||||
ldconfig &> /dev/null
|
||||
'';
|
||||
init = run: writeShellScriptBin "${name}-init" ''
|
||||
source /etc/profile
|
||||
${createLdConfCache}
|
||||
exec ${run} "$@"
|
||||
'';
|
||||
|
||||
bwrapCmd = { initArgs ? "" }: ''
|
||||
blacklist=(/nix /dev /proc /etc)
|
||||
ro_mounts=()
|
||||
symlinks=()
|
||||
for i in ${env}/*; do
|
||||
path="/''${i##*/}"
|
||||
if [[ $path == '/etc' ]]; then
|
||||
:
|
||||
elif [[ -L $i ]]; then
|
||||
symlinks+=(--symlink "$(${coreutils}/bin/readlink "$i")" "$path")
|
||||
blacklist+=("$path")
|
||||
else
|
||||
ro_mounts+=(--ro-bind "$i" "$path")
|
||||
blacklist+=("$path")
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ -d ${env}/etc ]]; then
|
||||
for i in ${env}/etc/*; do
|
||||
path="/''${i##*/}"
|
||||
# NOTE: we're binding /etc/fonts and /etc/ssl/certs from the host, so we
# don't want to override them with paths from the FHS environment.
|
||||
if [[ $path == '/fonts' || $path == '/ssl' ]]; then
|
||||
continue
|
||||
fi
|
||||
ro_mounts+=(--ro-bind "$i" "/etc$path")
|
||||
done
|
||||
fi
|
||||
|
||||
declare -a auto_mounts
|
||||
# loop through all directories in the root
|
||||
for dir in /*; do
|
||||
# if it is a directory and it is not in the blacklist
|
||||
if [[ -d "$dir" ]] && [[ ! "''${blacklist[@]}" =~ "$dir" ]]; then
|
||||
# add it to the mount list
|
||||
auto_mounts+=(--bind "$dir" "$dir")
|
||||
fi
|
||||
done
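# Worked example (illustrative): on a host with /home, /run and /tmp, and an FHS
# env that provides /usr and /etc, the blacklist ends up holding /nix /dev /proc
# /etc plus the env's own top-level paths, so auto_mounts becomes roughly
#   (--bind /home /home --bind /run /run --bind /tmp /tmp)
# while the env-provided directories remain read-only binds from the store.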
|
||||
|
||||
cmd=(
|
||||
${bubblewrap}/bin/bwrap
|
||||
--dev-bind /dev /dev
|
||||
--proc /proc
|
||||
--chdir "$(pwd)"
|
||||
${lib.optionalString unshareUser "--unshare-user"}
|
||||
${lib.optionalString unshareIpc "--unshare-ipc"}
|
||||
${lib.optionalString unsharePid "--unshare-pid"}
|
||||
${lib.optionalString unshareNet "--unshare-net"}
|
||||
${lib.optionalString unshareUts "--unshare-uts"}
|
||||
${lib.optionalString unshareCgroup "--unshare-cgroup"}
|
||||
${lib.optionalString dieWithParent "--die-with-parent"}
|
||||
--ro-bind /nix /nix
|
||||
# Our glibc will look for the cache in its own path in `/nix/store`.
|
||||
# As such, we need a cache to exist there, because pressure-vessel
|
||||
# depends on the existence of an ld cache. However, adding one
|
||||
# globally proved to be a bad idea (see #100655); the solution we
# settled on is to mount one via bwrap.
|
||||
# Also, the cache needs to go to both 32 and 64 bit glibcs, for games
|
||||
# of both architectures to work.
|
||||
--tmpfs ${glibc}/etc \
|
||||
--symlink /etc/ld.so.conf ${glibc}/etc/ld.so.conf \
|
||||
--symlink /etc/ld.so.cache ${glibc}/etc/ld.so.cache \
|
||||
--ro-bind ${glibc}/etc/rpc ${glibc}/etc/rpc \
|
||||
--remount-ro ${glibc}/etc \
|
||||
--tmpfs ${pkgsi686Linux.glibc}/etc \
|
||||
--symlink /etc/ld.so.conf ${pkgsi686Linux.glibc}/etc/ld.so.conf \
|
||||
--symlink /etc/ld.so.cache ${pkgsi686Linux.glibc}/etc/ld.so.cache \
|
||||
--ro-bind ${pkgsi686Linux.glibc}/etc/rpc ${pkgsi686Linux.glibc}/etc/rpc \
|
||||
--remount-ro ${pkgsi686Linux.glibc}/etc \
|
||||
${etcBindFlags}
|
||||
"''${ro_mounts[@]}"
|
||||
"''${symlinks[@]}"
|
||||
"''${auto_mounts[@]}"
|
||||
${concatStringsSep "\n " extraBwrapArgs}
|
||||
${init runScript}/bin/${name}-init ${initArgs}
|
||||
)
|
||||
exec "''${cmd[@]}"
|
||||
'';
|
||||
|
||||
bin = writeShellScriptBin name (bwrapCmd { initArgs = ''"$@"''; });
|
||||
|
||||
in runCommandLocal name {
|
||||
inherit meta;
|
||||
|
||||
passthru = passthru // {
|
||||
env = runCommandLocal "${name}-shell-env" {
|
||||
shellHook = bwrapCmd {};
|
||||
} ''
|
||||
echo >&2 ""
|
||||
echo >&2 "*** User chroot 'env' attributes are intended for interactive nix-shell sessions, not for building! ***"
|
||||
echo >&2 ""
|
||||
exit 1
|
||||
'';
|
||||
};
|
||||
} ''
|
||||
mkdir -p $out/bin
|
||||
ln -s ${bin}/bin/${name} $out/bin/${name}
|
||||
${extraInstallCommands}
|
||||
''
|
||||
216
pkgs/build-support/build-fhs-userenv-bubblewrap/env.nix
Normal file
@@ -0,0 +1,216 @@
|
|||
{ stdenv, lib, buildEnv, writeText, writeShellScriptBin, pkgs, pkgsi686Linux }:
|
||||
|
||||
{ name, profile ? ""
|
||||
, targetPkgs ? pkgs: [], multiPkgs ? pkgs: []
|
||||
, extraBuildCommands ? "", extraBuildCommandsMulti ? ""
|
||||
, extraOutputsToInstall ? []
|
||||
}:
|
||||
|
||||
# HOWTO:
|
||||
# All packages (most likely programs) returned from targetPkgs will only be
|
||||
# installed once--matching the host's architecture (64bit on x86_64 and 32bit on
|
||||
# x86).
|
||||
#
|
||||
# Packages (most likely libraries) returned from multiPkgs are installed
|
||||
# once on x86 systems and twice on x86_64 systems.
|
||||
# On x86 they are merged with packages from targetPkgs.
|
||||
# On x86_64 they are added to targetPkgs and in addition their 32bit
|
||||
# versions are also installed. The final directory structure looks as
|
||||
# follows:
|
||||
# /lib32 will include 32bit libraries from multiPkgs
|
||||
# /lib64 will include 64bit libraries from multiPkgs and targetPkgs
|
||||
# /lib will link to /lib64
|
||||
|
||||
let
|
||||
is64Bit = stdenv.hostPlatform.parsed.cpu.bits == 64;
|
||||
isMultiBuild = multiPkgs != null && is64Bit;
|
||||
isTargetBuild = !isMultiBuild;
|
||||
|
||||
# list of packages (usually programs) which are only installed for the
|
||||
# host's architecture
|
||||
targetPaths = targetPkgs pkgs ++ (if multiPkgs == null then [] else multiPkgs pkgs);
|
||||
|
||||
# list of packages which are installed for both x86 and x86_64 on x86_64
|
||||
# systems
|
||||
multiPaths = multiPkgs pkgsi686Linux;
|
||||
|
||||
# base packages of the chroot
|
||||
# these match the host's architecture, glibc_multi is used for multilib
|
||||
# builds. glibcLocales must be before glibc or glibc_multi as otherwise
|
||||
# the wrong LOCALE_ARCHIVE will be used where only C.UTF-8 is available.
|
||||
basePkgs = with pkgs;
|
||||
[ glibcLocales
|
||||
(if isMultiBuild then glibc_multi else glibc)
|
||||
(toString gcc.cc.lib) bashInteractiveFHS coreutils less shadow su
|
||||
gawk diffutils findutils gnused gnugrep
|
||||
gnutar gzip bzip2 xz
|
||||
];
|
||||
baseMultiPkgs = with pkgsi686Linux;
|
||||
[ (toString gcc.cc.lib)
|
||||
];
|
||||
|
||||
ldconfig = writeShellScriptBin "ldconfig" ''
|
||||
exec ${pkgs.glibc.bin}/bin/ldconfig -f /etc/ld.so.conf -C /etc/ld.so.cache "$@"
|
||||
'';
|
||||
etcProfile = writeText "profile" ''
|
||||
export PS1='${name}-chrootenv:\u@\h:\w\$ '
|
||||
export LOCALE_ARCHIVE='/usr/lib/locale/locale-archive'
|
||||
export LD_LIBRARY_PATH="/run/opengl-driver/lib:/run/opengl-driver-32/lib:/usr/lib:/usr/lib32''${LD_LIBRARY_PATH:+:}$LD_LIBRARY_PATH"
|
||||
export PATH="/run/wrappers/bin:/usr/bin:/usr/sbin:$PATH"
|
||||
export TZDIR='/etc/zoneinfo'
|
||||
|
||||
# XDG_DATA_DIRS is used by pressure-vessel (steam proton) and vulkan loaders to find the corresponding icd
|
||||
export XDG_DATA_DIRS=$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}/run/opengl-driver/share:/run/opengl-driver-32/share
|
||||
|
||||
# Force compilers and other tools to look in default search paths
|
||||
unset NIX_ENFORCE_PURITY
|
||||
export NIX_CC_WRAPPER_TARGET_HOST_${stdenv.cc.suffixSalt}=1
|
||||
export NIX_CFLAGS_COMPILE='-idirafter /usr/include'
|
||||
export NIX_CFLAGS_LINK='-L/usr/lib -L/usr/lib32'
|
||||
export NIX_LDFLAGS='-L/usr/lib -L/usr/lib32'
|
||||
export PKG_CONFIG_PATH=/usr/lib/pkgconfig
|
||||
export ACLOCAL_PATH=/usr/share/aclocal
|
||||
|
||||
${profile}
|
||||
'';
|
||||
|
||||
# Compose /etc for the chroot environment
|
||||
etcPkg = stdenv.mkDerivation {
|
||||
name = "${name}-chrootenv-etc";
|
||||
buildCommand = ''
|
||||
mkdir -p $out/etc
|
||||
cd $out/etc
|
||||
|
||||
# environment variables
|
||||
ln -s ${etcProfile} profile
|
||||
|
||||
# symlink /etc/mtab -> /proc/mounts (compat for old userspace progs)
|
||||
ln -s /proc/mounts mtab
|
||||
'';
|
||||
};
|
||||
|
||||
# Composes a /usr-like directory structure
|
||||
staticUsrProfileTarget = buildEnv {
|
||||
name = "${name}-usr-target";
|
||||
# ldconfig wrapper must come first so it overrides the original ldconfig
|
||||
paths = [ etcPkg ldconfig ] ++ basePkgs ++ targetPaths;
|
||||
extraOutputsToInstall = [ "out" "lib" "bin" ] ++ extraOutputsToInstall;
|
||||
ignoreCollisions = true;
|
||||
postBuild = ''
|
||||
if [[ -d $out/share/gsettings-schemas/ ]]; then
|
||||
# Recreate the standard schemas directory if it's a symlink, to make it writable
|
||||
if [[ -L $out/share/glib-2.0 ]]; then
|
||||
target=$(readlink $out/share/glib-2.0)
|
||||
rm $out/share/glib-2.0
|
||||
mkdir $out/share/glib-2.0
|
||||
ln -fs $target/* $out/share/glib-2.0
|
||||
fi
|
||||
|
||||
if [[ -L $out/share/glib-2.0/schemas ]]; then
|
||||
target=$(readlink $out/share/glib-2.0/schemas)
|
||||
rm $out/share/glib-2.0/schemas
|
||||
mkdir $out/share/glib-2.0/schemas
|
||||
ln -fs $target/* $out/share/glib-2.0/schemas
|
||||
fi
|
||||
|
||||
mkdir -p $out/share/glib-2.0/schemas
|
||||
|
||||
for d in $out/share/gsettings-schemas/*; do
|
||||
# Force symlink, in case there are duplicates
|
||||
ln -fs $d/glib-2.0/schemas/*.xml $out/share/glib-2.0/schemas
|
||||
ln -fs $d/glib-2.0/schemas/*.gschema.override $out/share/glib-2.0/schemas
|
||||
done
|
||||
|
||||
# and compile them
|
||||
${pkgs.glib.dev}/bin/glib-compile-schemas $out/share/glib-2.0/schemas
|
||||
fi
|
||||
'';
|
||||
};
|
||||
|
||||
staticUsrProfileMulti = buildEnv {
|
||||
name = "${name}-usr-multi";
|
||||
paths = baseMultiPkgs ++ multiPaths;
|
||||
extraOutputsToInstall = [ "out" "lib" ] ++ extraOutputsToInstall;
|
||||
ignoreCollisions = true;
|
||||
};
|
||||
|
||||
# setup library paths only for the targeted architecture
|
||||
setupLibDirsTarget = ''
|
||||
# link content of targetPaths
|
||||
cp -rsHf ${staticUsrProfileTarget}/lib lib
|
||||
ln -s lib lib${if is64Bit then "64" else "32"}
|
||||
'';
|
||||
|
||||
# setup /lib, /lib32 and /lib64
|
||||
setupLibDirsMulti = ''
|
||||
mkdir -m0755 lib32
|
||||
mkdir -m0755 lib64
|
||||
ln -s lib64 lib
|
||||
|
||||
# copy glibc stuff
|
||||
cp -rsHf ${staticUsrProfileTarget}/lib/32/* lib32/ && chmod u+w -R lib32/
|
||||
|
||||
# copy content of multiPaths (32bit libs)
|
||||
[ -d ${staticUsrProfileMulti}/lib ] && cp -rsHf ${staticUsrProfileMulti}/lib/* lib32/ && chmod u+w -R lib32/
|
||||
|
||||
# copy content of targetPaths (64bit libs)
|
||||
cp -rsHf ${staticUsrProfileTarget}/lib/* lib64/ && chmod u+w -R lib64/
|
||||
|
||||
# symlink 32-bit ld-linux.so
|
||||
ln -Ls ${staticUsrProfileTarget}/lib/32/ld-linux.so.2 lib/
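# Resulting layout (illustrative): usr/lib64 holds the 64-bit libraries from
# targetPkgs, usr/lib32 holds the 32-bit libraries from multiPkgs plus the
# 32-bit glibc, usr/lib is a symlink to lib64, and the 32-bit loader
# ld-linux.so.2 above is reachable through that lib symlink.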
|
||||
'';
|
||||
|
||||
setupLibDirs = if isTargetBuild then setupLibDirsTarget
|
||||
else setupLibDirsMulti;
|
||||
|
||||
# the target profile is the actual profile that will be used for the chroot
|
||||
setupTargetProfile = ''
|
||||
mkdir -m0755 usr
|
||||
cd usr
|
||||
${setupLibDirs}
|
||||
${lib.optionalString isMultiBuild ''
|
||||
if [ -d "${staticUsrProfileMulti}/share" ]; then
|
||||
cp -rLf ${staticUsrProfileMulti}/share share
|
||||
fi
|
||||
''}
|
||||
if [ -d "${staticUsrProfileTarget}/share" ]; then
|
||||
if [ -d share ]; then
|
||||
chmod -R 755 share
|
||||
cp -rLTf ${staticUsrProfileTarget}/share share
|
||||
else
|
||||
cp -rsHf ${staticUsrProfileTarget}/share share
|
||||
fi
|
||||
fi
|
||||
for i in bin sbin include; do
|
||||
if [ -d "${staticUsrProfileTarget}/$i" ]; then
|
||||
cp -rsHf "${staticUsrProfileTarget}/$i" "$i"
|
||||
fi
|
||||
done
|
||||
cd ..
|
||||
|
||||
for i in var etc opt; do
|
||||
if [ -d "${staticUsrProfileTarget}/$i" ]; then
|
||||
cp -rsHf "${staticUsrProfileTarget}/$i" "$i"
|
||||
fi
|
||||
done
|
||||
for i in usr/{bin,sbin,lib,lib32,lib64}; do
|
||||
if [ -d "$i" ]; then
|
||||
ln -s "$i"
|
||||
fi
|
||||
done
|
||||
'';
|
||||
|
||||
in stdenv.mkDerivation {
|
||||
name = "${name}-fhs";
|
||||
buildCommand = ''
|
||||
mkdir -p $out
|
||||
cd $out
|
||||
${setupTargetProfile}
|
||||
cd $out
|
||||
${extraBuildCommands}
|
||||
cd $out
|
||||
${if isMultiBuild then extraBuildCommandsMulti else ""}
|
||||
'';
|
||||
preferLocalBuild = true;
|
||||
allowSubstitutes = false;
|
||||
}
|
||||
16
pkgs/build-support/build-fhs-userenv/chrootenv/default.nix
Normal file
@@ -0,0 +1,16 @@
|
|||
{ lib, stdenv, meson, ninja, pkg-config, glib }:
|
||||
|
||||
stdenv.mkDerivation {
|
||||
name = "chrootenv";
|
||||
src = ./src;
|
||||
|
||||
nativeBuildInputs = [ meson ninja pkg-config ];
|
||||
buildInputs = [ glib ];
|
||||
|
||||
meta = with lib; {
|
||||
description = "Setup mount/user namespace for FHS emulation";
|
||||
license = licenses.mit;
|
||||
maintainers = with maintainers; [ yana ];
|
||||
platforms = platforms.linux;
|
||||
};
|
||||
}
|
||||
169
pkgs/build-support/build-fhs-userenv/chrootenv/src/chrootenv.c
Normal file
@@ -0,0 +1,169 @@
|
|||
#define _GNU_SOURCE
|
||||
|
||||
#include <glib.h>
|
||||
#include <glib/gstdio.h>
|
||||
|
||||
#include <errno.h>
|
||||
#include <sched.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include <sys/mount.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/wait.h>
|
||||
#include <sys/syscall.h>
|
||||
|
||||
#define fail(s, err) g_error("%s: %s: %s", __func__, s, g_strerror(err))
|
||||
#define fail_if(expr) \
|
||||
if (expr) \
|
||||
fail(#expr, errno);
|
||||
|
||||
const gchar *bind_blacklist[] = {"bin", "etc", "host", "real-host", "usr", "lib", "lib64", "lib32", "sbin", "opt", NULL};
|
||||
|
||||
int pivot_root(const char *new_root, const char *put_old) {
|
||||
return syscall(SYS_pivot_root, new_root, put_old);
|
||||
}
|
||||
|
||||
void mount_tmpfs(const gchar *target) {
|
||||
fail_if(mount("none", target, "tmpfs", 0, NULL));
|
||||
}
|
||||
|
||||
void bind_mount(const gchar *source, const gchar *target) {
|
||||
fail_if(g_mkdir(target, 0755));
|
||||
fail_if(mount(source, target, NULL, MS_BIND | MS_REC, NULL));
|
||||
}
|
||||
|
||||
const gchar *create_tmpdir() {
|
||||
gchar *prefix =
|
||||
g_build_filename(g_get_tmp_dir(), "chrootenvXXXXXX", NULL);
|
||||
fail_if(!g_mkdtemp_full(prefix, 0755));
|
||||
return prefix;
|
||||
}
|
||||
|
||||
void pivot_host(const gchar *guest) {
|
||||
g_autofree gchar *point = g_build_filename(guest, "host", NULL);
|
||||
fail_if(g_mkdir(point, 0755));
|
||||
fail_if(pivot_root(guest, point));
|
||||
}
|
||||
|
||||
void bind_mount_item(const gchar *host, const gchar *guest, const gchar *name) {
|
||||
g_autofree gchar *source = g_build_filename(host, name, NULL);
|
||||
g_autofree gchar *target = g_build_filename(guest, name, NULL);
|
||||
|
||||
if (G_LIKELY(g_file_test(source, G_FILE_TEST_IS_DIR)))
|
||||
bind_mount(source, target);
|
||||
}
|
||||
|
||||
void bind(const gchar *host, const gchar *guest) {
|
||||
mount_tmpfs(guest);
|
||||
|
||||
pivot_host(guest);
|
||||
|
||||
g_autofree gchar *host_dir = g_build_filename("/host", host, NULL);
|
||||
|
||||
g_autoptr(GError) err = NULL;
|
||||
g_autoptr(GDir) dir = g_dir_open(host_dir, 0, &err);
|
||||
|
||||
if (err != NULL)
|
||||
fail("g_dir_open", errno);
|
||||
|
||||
const gchar *item;
|
||||
|
||||
while ((item = g_dir_read_name(dir)))
|
||||
if (!g_strv_contains(bind_blacklist, item))
|
||||
bind_mount_item(host_dir, "/", item);
|
||||
}
|
||||
|
||||
void spit(const char *path, char *fmt, ...) {
|
||||
va_list args;
|
||||
va_start(args, fmt);
|
||||
|
||||
FILE *f = g_fopen(path, "w");
|
||||
|
||||
if (f == NULL)
|
||||
fail("g_fopen", errno);
|
||||
|
||||
g_vfprintf(f, fmt, args);
va_end(args);
fclose(f);
|
||||
}
|
||||
|
||||
int main(gint argc, gchar **argv) {
|
||||
const gchar *self = *argv++;
|
||||
|
||||
if (argc < 2) {
|
||||
g_message("%s command [arguments...]", self);
|
||||
return 1;
|
||||
}
|
||||
|
||||
g_autofree const gchar *prefix = create_tmpdir();
|
||||
|
||||
pid_t cpid = fork();
|
||||
|
||||
if (cpid < 0)
|
||||
fail("fork", errno);
|
||||
|
||||
else if (cpid == 0) {
|
||||
uid_t uid = getuid();
|
||||
gid_t gid = getgid();
|
||||
|
||||
int namespaces = CLONE_NEWNS;
|
||||
if (uid != 0) {
|
||||
namespaces |= CLONE_NEWUSER;
|
||||
}
|
||||
if (unshare(namespaces) < 0) {
|
||||
int unshare_errno = errno;
|
||||
|
||||
g_message("Requires Linux version >= 3.19 built with CONFIG_USER_NS");
|
||||
if (g_file_test("/proc/sys/kernel/unprivileged_userns_clone",
|
||||
G_FILE_TEST_EXISTS))
|
||||
g_message("Run: sudo sysctl -w kernel.unprivileged_userns_clone=1");
|
||||
|
||||
fail("unshare", unshare_errno);
|
||||
}
|
||||
|
||||
// hide all mounts we do from the parent
|
||||
fail_if(mount(0, "/", 0, MS_SLAVE | MS_REC, 0));
|
||||
|
||||
if (uid != 0) {
|
||||
spit("/proc/self/setgroups", "deny");
|
||||
spit("/proc/self/uid_map", "%d %d 1", uid, uid);
|
||||
spit("/proc/self/gid_map", "%d %d 1", gid, gid);
|
||||
}
|
||||
|
||||
// If there is a /host directory, assume this is nested chrootenv and use it as host instead.
|
||||
gboolean nested_host = g_file_test("/host", G_FILE_TEST_EXISTS | G_FILE_TEST_IS_DIR);
|
||||
g_autofree const gchar *host = nested_host ? "/host" : "/";
|
||||
|
||||
bind(host, prefix);
|
||||
|
||||
// Replace /host by an actual (inner) /host.
|
||||
if (nested_host) {
|
||||
fail_if(g_mkdir("/real-host", 0755));
|
||||
fail_if(mount("/host/host", "/real-host", NULL, MS_BIND | MS_REC, NULL));
|
||||
// For some reason umount("/host") returns EBUSY even immediately after
|
||||
// pivot_root. We detach it at least to keep `/proc/mounts` from blowing
|
||||
// up in nested cases.
|
||||
fail_if(umount2("/host", MNT_DETACH));
|
||||
fail_if(mount("/real-host", "/host", NULL, MS_MOVE, NULL));
|
||||
fail_if(rmdir("/real-host"));
|
||||
}
|
||||
|
||||
fail_if(chdir("/"));
|
||||
fail_if(execvp(*argv, argv));
|
||||
}
|
||||
|
||||
else {
|
||||
int status;
|
||||
|
||||
fail_if(waitpid(cpid, &status, 0) != cpid);
|
||||
fail_if(rmdir(prefix));
|
||||
|
||||
if (WIFEXITED(status))
|
||||
return WEXITSTATUS(status);
|
||||
|
||||
else if (WIFSIGNALED(status))
|
||||
kill(getpid(), WTERMSIG(status));
|
||||
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
|
@@ -0,0 +1,5 @@
|
|||
project('chrootenv', 'c')
|
||||
|
||||
glib = dependency('glib-2.0')
|
||||
|
||||
executable('chrootenv', 'chrootenv.c', dependencies: [glib], install: true)
|
||||
49
pkgs/build-support/build-fhs-userenv/default.nix
Normal file
@@ -0,0 +1,49 @@
|
|||
{ callPackage, runCommandLocal, writeScript, stdenv, coreutils }:
|
||||
|
||||
let buildFHSEnv = callPackage ./env.nix { }; in
|
||||
|
||||
args@{ name, runScript ? "bash", extraInstallCommands ? "", meta ? {}, passthru ? {}, ... }:
|
||||
|
||||
let
|
||||
env = buildFHSEnv (removeAttrs args [ "runScript" "extraInstallCommands" "meta" "passthru" ]);
|
||||
|
||||
chrootenv = callPackage ./chrootenv {};
|
||||
|
||||
init = run: writeScript "${name}-init" ''
|
||||
#! ${stdenv.shell}
|
||||
for i in ${env}/* /host/*; do
|
||||
path="/''${i##*/}"
|
||||
[ -e "$path" ] || ${coreutils}/bin/ln -s "$i" "$path"
|
||||
done
|
||||
|
||||
[ -d "$1" ] && [ -r "$1" ] && cd "$1"
|
||||
shift
|
||||
|
||||
source /etc/profile
|
||||
exec ${run} "$@"
|
||||
'';
|
||||
|
||||
in runCommandLocal name {
|
||||
inherit meta;
|
||||
|
||||
passthru = passthru // {
|
||||
env = runCommandLocal "${name}-shell-env" {
|
||||
shellHook = ''
|
||||
exec ${chrootenv}/bin/chrootenv ${init runScript} "$(pwd)"
|
||||
'';
|
||||
} ''
|
||||
echo >&2 ""
|
||||
echo >&2 "*** User chroot 'env' attributes are intended for interactive nix-shell sessions, not for building! ***"
|
||||
echo >&2 ""
|
||||
exit 1
|
||||
'';
|
||||
};
|
||||
} ''
|
||||
mkdir -p $out/bin
|
||||
cat <<EOF >$out/bin/${name}
|
||||
#! ${stdenv.shell}
|
||||
exec ${chrootenv}/bin/chrootenv ${init runScript} "\$(pwd)" "\$@"
|
||||
EOF
|
||||
chmod +x $out/bin/${name}
|
||||
${extraInstallCommands}
|
||||
''
|
||||
244
pkgs/build-support/build-fhs-userenv/env.nix
Normal file
@@ -0,0 +1,244 @@
|
|||
{ stdenv, buildEnv, writeText, pkgs, pkgsi686Linux }:
|
||||
|
||||
{ name
|
||||
, profile ? ""
|
||||
, targetPkgs ? pkgs: []
|
||||
, multiPkgs ? pkgs: []
|
||||
, extraBuildCommands ? ""
|
||||
, extraBuildCommandsMulti ? ""
|
||||
, extraOutputsToInstall ? []
|
||||
}:
|
||||
|
||||
# HOWTO:
|
||||
# All packages (most likely programs) returned from targetPkgs will only be
|
||||
# installed once--matching the host's architecture (64bit on x86_64 and 32bit on
|
||||
# x86).
|
||||
#
|
||||
# Packages (most likely libraries) returned from multiPkgs are installed
|
||||
# once on x86 systems and twice on x86_64 systems.
|
||||
# On x86 they are merged with packages from targetPkgs.
|
||||
# On x86_64 they are added to targetPkgs and in addition their 32bit
|
||||
# versions are also installed. The final directory structure looks as
|
||||
# follows:
|
||||
# /lib32 will include 32bit libraries from multiPkgs
|
||||
# /lib64 will include 64bit libraries from multiPkgs and targetPkgs
|
||||
# /lib will link to /lib64
|
||||
|
||||
let
|
||||
is64Bit = stdenv.hostPlatform.parsed.cpu.bits == 64;
|
||||
# multi-lib glibc is only supported on x86_64
|
||||
isMultiBuild = multiPkgs != null && stdenv.hostPlatform.system == "x86_64-linux";
|
||||
isTargetBuild = !isMultiBuild;
|
||||
|
||||
# list of packages (usually programs) which are only installed for the
|
||||
# host's architecture
|
||||
targetPaths = targetPkgs pkgs ++ (if multiPkgs == null then [] else multiPkgs pkgs);
|
||||
|
||||
# list of packages which are installed for both x86 and x86_64 on x86_64
|
||||
# systems
|
||||
multiPaths = multiPkgs pkgsi686Linux;
|
||||
|
||||
# base packages of the chroot
|
||||
# these match the host's architecture, glibc_multi is used for multilib
|
||||
# builds. glibcLocales must be before glibc or glibc_multi as otherwise
|
||||
# the wrong LOCALE_ARCHIVE will be used where only C.UTF-8 is available.
|
||||
basePkgs = with pkgs;
|
||||
[ glibcLocales
|
||||
(if isMultiBuild then glibc_multi else glibc)
|
||||
(toString gcc.cc.lib) bashInteractiveFHS coreutils less shadow su
|
||||
gawk diffutils findutils gnused gnugrep
|
||||
gnutar gzip bzip2 xz
|
||||
];
|
||||
baseMultiPkgs = with pkgsi686Linux;
|
||||
[ (toString gcc.cc.lib)
|
||||
];
|
||||
|
||||
etcProfile = writeText "profile" ''
|
||||
export PS1='${name}-chrootenv:\u@\h:\w\$ '
|
||||
export LOCALE_ARCHIVE='/usr/lib/locale/locale-archive'
|
||||
export LD_LIBRARY_PATH="/run/opengl-driver/lib:/run/opengl-driver-32/lib:/usr/lib:/usr/lib32''${LD_LIBRARY_PATH:+:}$LD_LIBRARY_PATH"
|
||||
export PATH="/run/wrappers/bin:/usr/bin:/usr/sbin:$PATH"
|
||||
export TZDIR='/etc/zoneinfo'
|
||||
|
||||
# XDG_DATA_DIRS is used by pressure-vessel (steam proton) and vulkan loaders to find the corresponding icd
|
||||
export XDG_DATA_DIRS=$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}/run/opengl-driver/share:/run/opengl-driver-32/share
|
||||
|
||||
# Force compilers and other tools to look in default search paths
|
||||
unset NIX_ENFORCE_PURITY
|
||||
export NIX_CC_WRAPPER_TARGET_HOST_${stdenv.cc.suffixSalt}=1
|
||||
export NIX_CFLAGS_COMPILE='-idirafter /usr/include'
|
||||
export NIX_CFLAGS_LINK='-L/usr/lib -L/usr/lib32'
|
||||
export NIX_LDFLAGS='-L/usr/lib -L/usr/lib32'
|
||||
export PKG_CONFIG_PATH=/usr/lib/pkgconfig
|
||||
export ACLOCAL_PATH=/usr/share/aclocal
|
||||
|
||||
${profile}
|
||||
'';
|
||||
|
||||
# Compose /etc for the chroot environment
|
||||
etcPkg = stdenv.mkDerivation {
|
||||
name = "${name}-chrootenv-etc";
|
||||
buildCommand = ''
|
||||
mkdir -p $out/etc
|
||||
cd $out/etc
|
||||
|
||||
# environment variables
|
||||
ln -s ${etcProfile} profile
|
||||
|
||||
# compatibility with NixOS
|
||||
ln -s /host/etc/static static
|
||||
|
||||
# symlink nix config
|
||||
ln -s /host/etc/nix nix
|
||||
|
||||
# symlink some NSS stuff
|
||||
ln -s /host/etc/passwd passwd
|
||||
ln -s /host/etc/group group
|
||||
ln -s /host/etc/shadow shadow
|
||||
ln -s /host/etc/hosts hosts
|
||||
ln -s /host/etc/resolv.conf resolv.conf
|
||||
ln -s /host/etc/nsswitch.conf nsswitch.conf
|
||||
|
||||
# symlink user profiles
|
||||
ln -s /host/etc/profiles profiles
|
||||
|
||||
# symlink sudo and su stuff
|
||||
ln -s /host/etc/login.defs login.defs
|
||||
ln -s /host/etc/sudoers sudoers
|
||||
ln -s /host/etc/sudoers.d sudoers.d
|
||||
|
||||
# symlink other core stuff
|
||||
ln -s /host/etc/localtime localtime
|
||||
ln -s /host/etc/zoneinfo zoneinfo
|
||||
ln -s /host/etc/machine-id machine-id
|
||||
ln -s /host/etc/os-release os-release
|
||||
|
||||
# symlink PAM stuff
|
||||
ln -s /host/etc/pam.d pam.d
|
||||
|
||||
# symlink fonts stuff
|
||||
ln -s /host/etc/fonts fonts
|
||||
|
||||
# symlink ALSA stuff
|
||||
ln -s /host/etc/asound.conf asound.conf
|
||||
|
||||
# symlink SSL certs
|
||||
mkdir -p ssl
|
||||
ln -s /host/etc/ssl/certs ssl/certs
|
||||
|
||||
# symlink /etc/mtab -> /proc/mounts (compat for old userspace progs)
|
||||
ln -s /proc/mounts mtab
|
||||
'';
|
||||
};
|
||||
|
||||
# Composes a /usr-like directory structure
|
||||
staticUsrProfileTarget = buildEnv {
|
||||
name = "${name}-usr-target";
|
||||
paths = [ etcPkg ] ++ basePkgs ++ targetPaths;
|
||||
extraOutputsToInstall = [ "out" "lib" "bin" ] ++ extraOutputsToInstall;
|
||||
ignoreCollisions = true;
|
||||
postBuild = ''
|
||||
if [[ -d $out/share/gsettings-schemas/ ]]; then
|
||||
# Recreate the standard schemas directory if it's a symlink, to make it writable
|
||||
if [[ -L $out/share/glib-2.0 ]]; then
|
||||
target=$(readlink $out/share/glib-2.0)
|
||||
rm $out/share/glib-2.0
|
||||
mkdir $out/share/glib-2.0
|
||||
ln -fs $target/* $out/share/glib-2.0
|
||||
fi
|
||||
|
||||
if [[ -L $out/share/glib-2.0/schemas ]]; then
|
||||
target=$(readlink $out/share/glib-2.0/schemas)
|
||||
rm $out/share/glib-2.0/schemas
|
||||
mkdir $out/share/glib-2.0/schemas
|
||||
ln -fs $target/* $out/share/glib-2.0/schemas
|
||||
fi
|
||||
|
||||
mkdir -p $out/share/glib-2.0/schemas
|
||||
|
||||
for d in $out/share/gsettings-schemas/*; do
|
||||
# Force symlink, in case there are duplicates
|
||||
ln -fs $d/glib-2.0/schemas/*.xml $out/share/glib-2.0/schemas
|
||||
ln -fs $d/glib-2.0/schemas/*.gschema.override $out/share/glib-2.0/schemas
|
||||
done
|
||||
|
||||
# and compile them
|
||||
${pkgs.glib.dev}/bin/glib-compile-schemas $out/share/glib-2.0/schemas
|
||||
fi
|
||||
'';
|
||||
};
|
||||
|
||||
staticUsrProfileMulti = buildEnv {
|
||||
name = "${name}-usr-multi";
|
||||
paths = baseMultiPkgs ++ multiPaths;
|
||||
extraOutputsToInstall = [ "out" "lib" ] ++ extraOutputsToInstall;
|
||||
ignoreCollisions = true;
|
||||
};
|
||||
|
||||
# setup library paths only for the targeted architecture
|
||||
setupLibDirs_target = ''
|
||||
# link content of targetPaths
|
||||
cp -rsHf ${staticUsrProfileTarget}/lib lib
|
||||
ln -s lib lib${if is64Bit then "64" else "32"}
|
||||
'';
|
||||
|
||||
# setup /lib, /lib32 and /lib64
|
||||
setupLibDirs_multi = ''
|
||||
mkdir -m0755 lib32
|
||||
mkdir -m0755 lib64
|
||||
ln -s lib64 lib
|
||||
|
||||
# copy glibc stuff
|
||||
cp -rsHf ${staticUsrProfileTarget}/lib/32/* lib32/ && chmod u+w -R lib32/
|
||||
|
||||
# copy content of multiPaths (32bit libs)
|
||||
[ -d ${staticUsrProfileMulti}/lib ] && cp -rsHf ${staticUsrProfileMulti}/lib/* lib32/ && chmod u+w -R lib32/
|
||||
|
||||
# copy content of targetPaths (64bit libs)
|
||||
cp -rsHf ${staticUsrProfileTarget}/lib/* lib64/ && chmod u+w -R lib64/
|
||||
|
||||
# symlink 32-bit ld-linux.so
|
||||
ln -Ls ${staticUsrProfileTarget}/lib/32/ld-linux.so.2 lib/
|
||||
'';
|
||||
|
||||
setupLibDirs = if isTargetBuild then setupLibDirs_target
|
||||
else setupLibDirs_multi;
|
||||
|
||||
# the target profile is the actual profile that will be used for the chroot
|
||||
setupTargetProfile = ''
|
||||
mkdir -m0755 usr
|
||||
cd usr
|
||||
${setupLibDirs}
|
||||
for i in bin sbin share include; do
|
||||
if [ -d "${staticUsrProfileTarget}/$i" ]; then
|
||||
cp -rsHf "${staticUsrProfileTarget}/$i" "$i"
|
||||
fi
|
||||
done
|
||||
cd ..
|
||||
|
||||
for i in var etc opt; do
|
||||
if [ -d "${staticUsrProfileTarget}/$i" ]; then
|
||||
cp -rsHf "${staticUsrProfileTarget}/$i" "$i"
|
||||
fi
|
||||
done
|
||||
for i in usr/{bin,sbin,lib,lib32,lib64}; do
|
||||
if [ -d "$i" ]; then
|
||||
ln -s "$i"
|
||||
fi
|
||||
done
|
||||
'';
|
||||
|
||||
in stdenv.mkDerivation {
|
||||
name = "${name}-fhs";
|
||||
buildCommand = ''
|
||||
mkdir -p $out
|
||||
cd $out
|
||||
${setupTargetProfile}
|
||||
cd $out
|
||||
${extraBuildCommands}
|
||||
cd $out
|
||||
${if isMultiBuild then extraBuildCommandsMulti else ""}
|
||||
'';
|
||||
preferLocalBuild = true;
|
||||
allowSubstitutes = false;
|
||||
}
|
||||
61
pkgs/build-support/build-graalvm-native-image/default.nix
Normal file
@@ -0,0 +1,61 @@
|
|||
{ lib, stdenv, graalvm, glibcLocales }:
|
||||
|
||||
{ name ? "${args.pname}-${args.version}"
|
||||
# Final executable name
|
||||
, executable ? args.pname
|
||||
# JAR used as input for GraalVM derivation, defaults to src
|
||||
, jar ? args.src
|
||||
, dontUnpack ? (jar == args.src)
|
||||
# Default native-image arguments. You probably don't want to set this,
|
||||
# except in special cases. In most cases, use extraNativeImageBuildArgs instead
|
||||
, nativeImageBuildArgs ? [
|
||||
"-jar" jar
|
||||
"-H:CLibraryPath=${lib.getLib graalvm}/lib"
|
||||
(lib.optionalString stdenv.isDarwin "-H:-CheckToolchain")
|
||||
"-H:Name=${executable}"
|
||||
"--verbose"
|
||||
]
|
||||
# Extra arguments to be passed to the native-image
|
||||
, extraNativeImageBuildArgs ? [ ]
|
||||
# XMX size of GraalVM during build
|
||||
, graalvmXmx ? "-J-Xmx6g"
|
||||
# The GraalVM derivation to use
|
||||
, graalvmDrv ? graalvm
|
||||
, meta ? { }
|
||||
, ...
|
||||
} @ args:
|
||||
|
||||
stdenv.mkDerivation (args // {
|
||||
inherit dontUnpack;
|
||||
|
||||
nativeBuildInputs = (args.nativeBuildInputs or [ ]) ++ [ graalvmDrv glibcLocales ];
|
||||
|
||||
nativeImageBuildArgs = nativeImageBuildArgs ++ extraNativeImageBuildArgs ++ [ graalvmXmx ];
|
||||
|
||||
buildPhase = args.buildPhase or ''
|
||||
export LC_ALL="en_US.UTF-8"
|
||||
|
||||
runHook preBuild
|
||||
|
||||
native-image ''${nativeImageBuildArgs[@]}
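# Illustrative expansion (hypothetical package "hello" built from hello.jar, not
# taken from this file): the command above runs roughly
#   native-image -jar hello.jar -H:CLibraryPath=<graalvm lib dir> \
#     -H:Name=hello --verbose -J-Xmx6g
# plus whatever extraNativeImageBuildArgs the caller supplied.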
|
||||
|
||||
runHook postBuild
|
||||
'';
|
||||
|
||||
installPhase = args.installPhase or ''
|
||||
runHook preInstall
|
||||
|
||||
install -Dm755 ${executable} -t $out/bin
|
||||
|
||||
runHook postInstall
|
||||
'';
|
||||
|
||||
meta = {
|
||||
# default to graalvm's platforms
|
||||
platforms = graalvmDrv.meta.platforms;
|
||||
# default to executable name
|
||||
mainProgram = executable;
|
||||
# need to have native-image-installable-svm available
|
||||
broken = !(builtins.elem "native-image-installable-svm" graalvmDrv.products);
|
||||
} // meta;
|
||||
})
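
For orientation only, a minimal usage sketch of this builder follows. It is not part of the commit: the attribute name buildGraalvmNativeImage, the package name, version, URL and hash are illustrative assumptions.

{ lib, buildGraalvmNativeImage, fetchurl }:

buildGraalvmNativeImage {
  pname = "example-cli";      # placeholder name
  version = "1.0.0";          # placeholder version
  src = fetchurl {
    # placeholder URL and hash for the pre-built application JAR
    url = "https://example.invalid/example-cli-1.0.0.jar";
    sha256 = lib.fakeSha256;
  };
  # extra flags are appended after the default nativeImageBuildArgs
  extraNativeImageBuildArgs = [ "--no-fallback" ];
}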
81
pkgs/build-support/build-maven.nix
Normal file
@@ -0,0 +1,81 @@
{ stdenv, maven, runCommand, writeText, fetchurl, lib, requireFile }:
/* Takes an info file generated by mvn2nix
 * (https://github.com/NixOS/mvn2nix-maven-plugin) and builds the maven
 * project with it.
 *
 * repo: A local maven repository with the project's dependencies.
 *
 * settings: A settings.xml to pass to maven to use the repo.
 *
 * build: A simple build derivation that uses mvn compile and package to build
 *        the project.
 */
infoFile: let
  info = lib.importJSON infoFile;

  script = writeText "build-maven-repository.sh" ''
    ${lib.concatStrings (map (dep: let
      inherit (dep) sha1 groupId artifactId version metadata repository-id;

      versionDir = dep.unresolved-version or version;
      authenticated = dep.authenticated or false;
      url = dep.url or "";

      fetch = if (url != "") then ((if authenticated then requireFile else fetchurl) {
        inherit url sha1;
      }) else "";

      fetchMetadata = (if authenticated then requireFile else fetchurl) {
        inherit (metadata) url sha1;
      };
    in ''
      dir=$out/$(echo ${groupId} | sed 's|\.|/|g')/${artifactId}/${versionDir}
      mkdir -p $dir

      ${lib.optionalString (fetch != "") ''
        ln -sv ${fetch} $dir/${fetch.name}
      ''}
      ${lib.optionalString (dep ? metadata) ''
        ln -svf ${fetchMetadata} $dir/maven-metadata-${repository-id}.xml
        ${lib.optionalString (fetch != "") ''
          ln -sv ${fetch} $dir/$(echo ${fetch.name} | sed 's|${version}|${dep.unresolved-version}|')
        ''}
      ''}
    '') info.dependencies)}
  '';

  repo = runCommand "maven-repository" {} ''
    bash ${script}
  '';

  settings = writeText "settings.xml" ''
    <settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
      xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
      xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
                          http://maven.apache.org/xsd/settings-1.0.0.xsd">
      <localRepository>${repo}</localRepository>
    </settings>
  '';

  src = dirOf infoFile;
in {
  inherit repo settings info;

  build = stdenv.mkDerivation {
    name = "${info.project.artifactId}-${info.project.version}.jar";

    src = builtins.filterSource (path: type:
      (toString path) != (toString (src + "/target")) &&
      (toString path) != (toString (src + "/.git"))
    ) src;

    buildInputs = [ maven ];

    buildPhase = "mvn --offline --settings ${settings} compile";

    installPhase = ''
      mvn --offline --settings ${settings} package

      mv target/*.jar $out
    '';
  };
}
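
A hypothetical invocation sketch, not part of the commit: the file name project-info.json and the buildMaven attribute are assumptions. The function takes the mvn2nix info file and returns the repo, settings and build attributes documented above.

{ pkgs }:

let
  # ./project-info.json stands in for the info file produced by mvn2nix
  mavenProject = pkgs.buildMaven ./project-info.json;
in
  # the offline Maven build of the project described by the info file
  mavenProject.build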
37
pkgs/build-support/build-pecl.nix
Normal file
@@ -0,0 +1,37 @@
{ stdenv, lib, php, autoreconfHook, fetchurl, re2c }:

{ pname
, version
, internalDeps ? [ ]
, peclDeps ? [ ]
, buildInputs ? [ ]
, nativeBuildInputs ? [ ]
, postPhpize ? ""
, makeFlags ? [ ]
, src ? fetchurl {
    url = "http://pecl.php.net/get/${pname}-${version}.tgz";
    inherit (args) sha256;
  }
, ...
}@args:

stdenv.mkDerivation (args // {
  name = "php-${pname}-${version}";
  extensionName = pname;

  inherit src;

  nativeBuildInputs = [ autoreconfHook re2c ] ++ nativeBuildInputs;
  buildInputs = [ php ] ++ peclDeps ++ buildInputs;

  makeFlags = [ "EXTENSION_DIR=$(out)/lib/php/extensions" ] ++ makeFlags;

  autoreconfPhase = ''
    phpize
    ${postPhpize}
    ${lib.concatMapStringsSep "\n"
      (dep: "mkdir -p ext; ln -s ${dep.dev}/include ext/${dep.extensionName}")
      internalDeps}
  '';
  checkPhase = "NO_INTERACTION=yes make test";
})
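
A hypothetical usage sketch, not taken from this commit: the extension name, version and hash are placeholders, and buildPecl is assumed to be this file applied via callPackage.

{ lib, buildPecl }:

buildPecl {
  pname = "example_ext";    # placeholder extension name
  version = "1.2.3";        # placeholder version
  # placeholder hash for the pecl.php.net tarball fetched by the default src
  sha256 = lib.fakeSha256;
}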
25
pkgs/build-support/build-setupcfg/default.nix
Normal file
@@ -0,0 +1,25 @@
# Build a python package from info made available by setupcfg2nix.
#
# * src: The source of the package.
# * info: The package information generated by setupcfg2nix.
# * meta: Standard nixpkgs metadata.
# * application: Whether this package is a python library or an
#   application which happens to be written in python.
# * doCheck: Whether to run the test suites.
pythonPackages:
{ src, info, meta ? {}, application ? false, doCheck ? true }: let
  build = if application
    then pythonPackages.buildPythonApplication
    else pythonPackages.buildPythonPackage;
in build {
  inherit (info) pname version;

  inherit src meta doCheck;

  nativeBuildInputs = map (p: pythonPackages.${p}) (
    (info.setup_requires or []) ++
    (if doCheck then (info.tests_require or []) else []));

  propagatedBuildInputs = map (p: pythonPackages.${p})
    (info.install_requires or []);
}
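
A hypothetical usage sketch; the relative paths and the python3Packages set are illustrative, and the info file is assumed to come from setupcfg2nix as described in the comment above.

{ lib, python3Packages }:

let
  buildSetupcfg = import ./build-setupcfg python3Packages;
in buildSetupcfg {
  src = ./.;                                   # placeholder source tree
  info = lib.importJSON ./package-info.json;   # placeholder setupcfg2nix output
  application = false;                         # build as a library
}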
283
pkgs/build-support/buildenv/builder.pl
Executable file
@@ -0,0 +1,283 @@
#! @perl@ -w
|
||||
|
||||
use strict;
|
||||
use Cwd 'abs_path';
|
||||
use IO::Handle;
|
||||
use File::Path;
|
||||
use File::Basename;
|
||||
use File::Compare;
|
||||
use JSON::PP;
|
||||
|
||||
STDOUT->autoflush(1);
|
||||
|
||||
$SIG{__WARN__} = sub { warn "warning: ", @_ };
|
||||
$SIG{__DIE__} = sub { die "error: ", @_ };
|
||||
|
||||
my $out = $ENV{"out"};
|
||||
my $extraPrefix = $ENV{"extraPrefix"};
|
||||
|
||||
my @pathsToLink = split ' ', $ENV{"pathsToLink"};
|
||||
|
||||
sub isInPathsToLink {
|
||||
my $path = shift;
|
||||
$path = "/" if $path eq "";
|
||||
foreach my $elem (@pathsToLink) {
|
||||
return 1 if
|
||||
$elem eq "/" ||
|
||||
(substr($path, 0, length($elem)) eq $elem
|
||||
&& (($path eq $elem) || (substr($path, length($elem), 1) eq "/")));
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
# Returns whether a path in one of the linked packages may contain
|
||||
# files in one of the elements of pathsToLink.
|
||||
sub hasPathsToLink {
|
||||
my $path = shift;
|
||||
foreach my $elem (@pathsToLink) {
|
||||
return 1 if
|
||||
$path eq "" ||
|
||||
(substr($elem, 0, length($path)) eq $path
|
||||
&& (($path eq $elem) || (substr($elem, length($path), 1) eq "/")));
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
# Similar to `lib.isStorePath`
|
||||
sub isStorePath {
|
||||
my $path = shift;
|
||||
my $storePath = "@storeDir@";
|
||||
|
||||
return substr($path, 0, 1) eq "/" && dirname($path) eq $storePath;
|
||||
}
|
||||
|
||||
# For each activated package, determine what symlinks to create.
|
||||
|
||||
my %symlinks;
|
||||
|
||||
# Add all pathsToLink and all parent directories.
|
||||
#
|
||||
# For "/a/b/c" that will include
|
||||
# [ "", "/a", "/a/b", "/a/b/c" ]
|
||||
#
|
||||
# That ensures the whole directory tree needed by pathsToLink is
|
||||
# created as directories and not symlinks.
|
||||
$symlinks{""} = ["", 0];
|
||||
for my $p (@pathsToLink) {
|
||||
my @parts = split '/', $p;
|
||||
|
||||
my $cur = "";
|
||||
for my $x (@parts) {
|
||||
$cur = $cur . "/$x";
|
||||
$cur = "" if $cur eq "/";
|
||||
$symlinks{$cur} = ["", 0];
|
||||
}
|
||||
}
|
||||
|
||||
sub findFiles;
|
||||
|
||||
sub findFilesInDir {
|
||||
my ($relName, $target, $ignoreCollisions, $checkCollisionContents, $priority) = @_;
|
||||
|
||||
opendir DIR, "$target" or die "cannot open `$target': $!";
|
||||
my @names = readdir DIR or die;
|
||||
closedir DIR;
|
||||
|
||||
foreach my $name (@names) {
|
||||
next if $name eq "." || $name eq "..";
|
||||
findFiles("$relName/$name", "$target/$name", $name, $ignoreCollisions, $checkCollisionContents, $priority);
|
||||
}
|
||||
}
|
||||
|
||||
sub checkCollision {
|
||||
my ($path1, $path2) = @_;
|
||||
|
||||
if (! -e $path1 || ! -e $path2) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
my $stat1 = (stat($path1))[2];
|
||||
my $stat2 = (stat($path2))[2];
|
||||
|
||||
if ($stat1 != $stat2) {
|
||||
warn "different permissions in `$path1' and `$path2': "
|
||||
. sprintf("%04o", $stat1 & 07777) . " <-> "
|
||||
. sprintf("%04o", $stat2 & 07777);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return compare($path1, $path2) == 0;
|
||||
}
|
||||
|
||||
sub prependDangling {
|
||||
my $path = shift;
|
||||
return (-l $path && ! -e $path ? "dangling symlink " : "") . "`$path'";
|
||||
}
|
||||
|
||||
sub findFiles {
|
||||
my ($relName, $target, $baseName, $ignoreCollisions, $checkCollisionContents, $priority) = @_;
|
||||
|
||||
# The store path must not be a file
|
||||
if (-f $target && isStorePath $target) {
|
||||
die "The store path $target is a file and can't be merged into an environment using pkgs.buildEnv!";
|
||||
}
|
||||
|
||||
# Urgh, hacky...
|
||||
return if
|
||||
$relName eq "/propagated-build-inputs" ||
|
||||
$relName eq "/nix-support" ||
|
||||
$relName =~ /info\/dir/ ||
|
||||
( $relName =~ /^\/share\/mime\// && !( $relName =~ /^\/share\/mime\/packages/ ) ) ||
|
||||
$baseName eq "perllocal.pod" ||
|
||||
$baseName eq "log" ||
|
||||
! (hasPathsToLink($relName) || isInPathsToLink($relName));
|
||||
|
||||
my ($oldTarget, $oldPriority) = @{$symlinks{$relName} // [undef, undef]};
|
||||
|
||||
# If target doesn't exist, create it. If it already exists as a
|
||||
# symlink to a file (not a directory) in a lower-priority package,
|
||||
# overwrite it.
|
||||
if (!defined $oldTarget || ($priority < $oldPriority && ($oldTarget ne "" && ! -d $oldTarget))) {
|
||||
# If target is a dangling symlink, emit a warning.
|
||||
if (-l $target && ! -e $target) {
|
||||
my $link = readlink $target;
|
||||
warn "creating dangling symlink `$out$extraPrefix/$relName' -> `$target' -> `$link'\n";
|
||||
}
|
||||
$symlinks{$relName} = [$target, $priority];
|
||||
return;
|
||||
}
|
||||
|
||||
# If target already exists and both targets resolves to the same path, skip
|
||||
if (
|
||||
defined $oldTarget && $oldTarget ne "" &&
|
||||
defined abs_path($target) && defined abs_path($oldTarget) &&
|
||||
abs_path($target) eq abs_path($oldTarget)
|
||||
) {
|
||||
# Prefer the target that is not a symlink, if any
|
||||
if (-l $oldTarget && ! -l $target) {
|
||||
$symlinks{$relName} = [$target, $priority];
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
# If target already exists as a symlink to a file (not a
|
||||
# directory) in a higher-priority package, skip.
|
||||
if (defined $oldTarget && $priority > $oldPriority && $oldTarget ne "" && ! -d $oldTarget) {
|
||||
return;
|
||||
}
|
||||
|
||||
# If target is supposed to be a directory but it isn't, die with an error message
|
||||
# instead of attempting to recurse into it, only to fail then.
|
||||
# This happens e.g. when pathsToLink contains a non-directory path.
|
||||
if ($oldTarget eq "" && ! -d $target) {
|
||||
die "not a directory: `$target'\n";
|
||||
}
|
||||
|
||||
unless (-d $target && ($oldTarget eq "" || -d $oldTarget)) {
|
||||
# Prepend "dangling symlink" to paths if applicable.
|
||||
my $targetRef = prependDangling($target);
|
||||
my $oldTargetRef = prependDangling($oldTarget);
|
||||
|
||||
if ($ignoreCollisions) {
|
||||
warn "collision between $targetRef and $oldTargetRef\n" if $ignoreCollisions == 1;
|
||||
return;
|
||||
} elsif ($checkCollisionContents && checkCollision($oldTarget, $target)) {
|
||||
return;
|
||||
} else {
|
||||
die "collision between $targetRef and $oldTargetRef\n";
|
||||
}
|
||||
}
|
||||
|
||||
findFilesInDir($relName, $oldTarget, $ignoreCollisions, $checkCollisionContents, $oldPriority) unless $oldTarget eq "";
|
||||
findFilesInDir($relName, $target, $ignoreCollisions, $checkCollisionContents, $priority);
|
||||
|
||||
$symlinks{$relName} = ["", $priority]; # denotes directory
|
||||
}
|
||||
|
||||
|
||||
my %done;
|
||||
my %postponed;
|
||||
|
||||
sub addPkg {
|
||||
my ($pkgDir, $ignoreCollisions, $checkCollisionContents, $priority) = @_;
|
||||
|
||||
return if (defined $done{$pkgDir});
|
||||
$done{$pkgDir} = 1;
|
||||
|
||||
findFiles("", $pkgDir, "", $ignoreCollisions, $checkCollisionContents, $priority);
|
||||
|
||||
my $propagatedFN = "$pkgDir/nix-support/propagated-user-env-packages";
|
||||
if (-e $propagatedFN) {
|
||||
open PROP, "<$propagatedFN" or die;
|
||||
my $propagated = <PROP>;
|
||||
close PROP;
|
||||
my @propagated = split ' ', $propagated;
|
||||
foreach my $p (@propagated) {
|
||||
$postponed{$p} = 1 unless defined $done{$p};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Read packages list.
|
||||
my $pkgs;
|
||||
|
||||
if (exists $ENV{"pkgsPath"}) {
|
||||
open FILE, $ENV{"pkgsPath"};
|
||||
$pkgs = <FILE>;
|
||||
close FILE;
|
||||
} else {
|
||||
$pkgs = $ENV{"pkgs"}
|
||||
}
|
||||
|
||||
# Symlink to the packages that have been installed explicitly by the
|
||||
# user.
|
||||
for my $pkg (@{decode_json $pkgs}) {
|
||||
for my $path (@{$pkg->{paths}}) {
|
||||
addPkg($path,
|
||||
$ENV{"ignoreCollisions"} eq "1",
|
||||
$ENV{"checkCollisionContents"} eq "1",
|
||||
$pkg->{priority})
|
||||
if -e $path;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# Symlink to the packages that have been "propagated" by packages
|
||||
# installed by the user (i.e., package X declares that it wants Y
|
||||
# installed as well). We do these later because they have a lower
|
||||
# priority in case of collisions.
|
||||
my $priorityCounter = 1000; # don't care about collisions
|
||||
while (scalar(keys %postponed) > 0) {
|
||||
my @pkgDirs = keys %postponed;
|
||||
%postponed = ();
|
||||
foreach my $pkgDir (sort @pkgDirs) {
|
||||
addPkg($pkgDir, 2, $ENV{"checkCollisionContents"} eq "1", $priorityCounter++);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# Create the symlinks.
|
||||
my $nrLinks = 0;
|
||||
foreach my $relName (sort keys %symlinks) {
|
||||
my ($target, $priority) = @{$symlinks{$relName}};
|
||||
my $abs = "$out" . "$extraPrefix" . "/$relName";
|
||||
next unless isInPathsToLink $relName;
|
||||
if ($target eq "") {
|
||||
#print "creating directory $relName\n";
|
||||
mkpath $abs or die "cannot create directory `$abs': $!";
|
||||
} else {
|
||||
#print "creating symlink $relName to $target\n";
|
||||
symlink $target, $abs ||
|
||||
die "error creating link `$abs': $!";
|
||||
$nrLinks++;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
print STDERR "created $nrLinks symlinks in user environment\n";
|
||||
|
||||
|
||||
my $manifest = $ENV{"manifest"};
|
||||
if ($manifest) {
|
||||
symlink($manifest, "$out/manifest") or die "cannot create manifest";
|
||||
}
82
pkgs/build-support/buildenv/default.nix
Normal file
@@ -0,0 +1,82 @@
# buildEnv creates a tree of symlinks to the specified paths. This is
# a fork of the buildEnv in the Nix distribution. Most changes should
# eventually be merged back into the Nix distribution.

{ buildPackages, runCommand, lib, substituteAll }:

lib.makeOverridable
({ name

, # The manifest file (if any). A symlink $out/manifest will be
  # created to it.
  manifest ? ""

, # The paths to symlink.
  paths

, # Whether to ignore collisions or abort.
  ignoreCollisions ? false

, # If there is a collision, check whether the contents and permissions match
  # and only if not, throw a collision error.
  checkCollisionContents ? true

, # The paths (relative to each element of `paths') that we want to
  # symlink (e.g., ["/bin"]). Any file not inside any of the
  # directories in the list is not symlinked.
  pathsToLink ? ["/"]

, # The package outputs to include. By default, only the default
  # output is included.
  extraOutputsToInstall ? []

, # Root the result in directory "$out${extraPrefix}", e.g. "/share".
  extraPrefix ? ""

, # Shell commands to run after building the symlink tree.
  postBuild ? ""

# Additional inputs
, nativeBuildInputs ? [] # Handy e.g. if using makeWrapper in `postBuild`.
, buildInputs ? []

, passthru ? {}
, meta ? {}
}:

let
  builder = substituteAll {
    src = ./builder.pl;
    inherit (builtins) storeDir;
  };
in

runCommand name
  rec {
    inherit manifest ignoreCollisions checkCollisionContents passthru
            meta pathsToLink extraPrefix postBuild
            nativeBuildInputs buildInputs;
    pkgs = builtins.toJSON (map (drv: {
      paths =
        # First add the usual output(s): respect if user has chosen explicitly,
        # and otherwise use `meta.outputsToInstall`. The attribute is guaranteed
        # to exist in mkDerivation-created cases. The other cases (e.g. runCommand)
        # aren't expected to have multiple outputs.
        (if (! drv ? outputSpecified || ! drv.outputSpecified)
            && drv.meta.outputsToInstall or null != null
         then map (outName: drv.${outName}) drv.meta.outputsToInstall
         else [ drv ])
        # Add any extra outputs specified by the caller of `buildEnv`.
        ++ lib.filter (p: p!=null)
          (builtins.map (outName: drv.${outName} or null) extraOutputsToInstall);
      priority = drv.meta.priority or 5;
    }) paths);
    preferLocalBuild = true;
    allowSubstitutes = false;
    # XXX: The size is somewhat arbitrary
    passAsFile = if builtins.stringLength pkgs >= 128*1024 then [ "pkgs" ] else [ ];
  }
  ''
    ${buildPackages.perl}/bin/perl -w ${builder}
    eval "$postBuild"
  '')
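
A minimal usage sketch, not part of the commit; the package choices are illustrative and buildEnv is assumed to be this function as exposed in the package set.

{ pkgs }:

pkgs.buildEnv {
  name = "example-env";                 # name of the symlink-tree derivation
  paths = [ pkgs.hello pkgs.jq ];       # packages to merge into one tree
  pathsToLink = [ "/bin" "/share" ];    # only link these subdirectories
  ignoreCollisions = false;             # abort if two packages collide
}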
87
pkgs/build-support/cc-wrapper/add-flags.sh
Normal file
@@ -0,0 +1,87 @@
# N.B. It may be a surprise that the derivation-specific variables are exported,
|
||||
# since this is just sourced by the wrapped binaries---the end consumers. This
|
||||
# is because one wrapper binary may invoke another (e.g. cc invoking ld). In
|
||||
# that case, it is cheaper/better to not repeat this step and let the forked
|
||||
# wrapped binary just inherit the work of the forker's wrapper script.
|
||||
|
||||
var_templates_list=(
|
||||
NIX_CFLAGS_COMPILE
|
||||
NIX_CFLAGS_COMPILE_BEFORE
|
||||
NIX_CFLAGS_LINK
|
||||
NIX_CXXSTDLIB_COMPILE
|
||||
NIX_CXXSTDLIB_LINK
|
||||
NIX_GNATFLAGS_COMPILE
|
||||
)
|
||||
var_templates_bool=(
|
||||
NIX_ENFORCE_NO_NATIVE
|
||||
)
|
||||
|
||||
accumulateRoles
|
||||
|
||||
# We need to mangle names for hygiene, but also take parameters/overrides
|
||||
# from the environment.
|
||||
for var in "${var_templates_list[@]}"; do
|
||||
mangleVarList "$var" ${role_suffixes[@]+"${role_suffixes[@]}"}
|
||||
done
|
||||
for var in "${var_templates_bool[@]}"; do
|
||||
mangleVarBool "$var" ${role_suffixes[@]+"${role_suffixes[@]}"}
|
||||
done
|
||||
|
||||
# `-B@out@/bin' forces cc to use ld-wrapper.sh when calling ld.
|
||||
NIX_CFLAGS_COMPILE_@suffixSalt@="-B@out@/bin/ $NIX_CFLAGS_COMPILE_@suffixSalt@"
|
||||
|
||||
# Export and assign separately in order that a failing $(..) will fail
|
||||
# the script.
|
||||
|
||||
# Currently bootstrap-tools does not split glibc, and gcc files into
|
||||
# separate directories. As a workaround we want resulting cflags to be
|
||||
# ordered as: crt1-cflags libc-cflags cc-cflags. Otherwise we mix crt/libc.so
|
||||
# from different libc as seen in
|
||||
# https://github.com/NixOS/nixpkgs/issues/158042
|
||||
#
|
||||
# Note that below has reverse ordering as we prepend flags one-by-one.
|
||||
# Once bootstrap-tools is split into different directories we can stop
|
||||
# relying on flag ordering below.
|
||||
|
||||
if [ -e @out@/nix-support/cc-cflags ]; then
|
||||
NIX_CFLAGS_COMPILE_@suffixSalt@="$(< @out@/nix-support/cc-cflags) $NIX_CFLAGS_COMPILE_@suffixSalt@"
|
||||
fi
|
||||
|
||||
if [[ "$cInclude" = 1 ]] && [ -e @out@/nix-support/libc-cflags ]; then
|
||||
NIX_CFLAGS_COMPILE_@suffixSalt@="$(< @out@/nix-support/libc-cflags) $NIX_CFLAGS_COMPILE_@suffixSalt@"
|
||||
fi
|
||||
|
||||
if [ -e @out@/nix-support/libc-crt1-cflags ]; then
|
||||
NIX_CFLAGS_COMPILE_@suffixSalt@="$(< @out@/nix-support/libc-crt1-cflags) $NIX_CFLAGS_COMPILE_@suffixSalt@"
|
||||
fi
|
||||
|
||||
if [ -e @out@/nix-support/libcxx-cxxflags ]; then
|
||||
NIX_CXXSTDLIB_COMPILE_@suffixSalt@+=" $(< @out@/nix-support/libcxx-cxxflags)"
|
||||
fi
|
||||
|
||||
if [ -e @out@/nix-support/libcxx-ldflags ]; then
|
||||
NIX_CXXSTDLIB_LINK_@suffixSalt@+=" $(< @out@/nix-support/libcxx-ldflags)"
|
||||
fi
|
||||
|
||||
if [ -e @out@/nix-support/gnat-cflags ]; then
|
||||
NIX_GNATFLAGS_COMPILE_@suffixSalt@="$(< @out@/nix-support/gnat-cflags) $NIX_GNATFLAGS_COMPILE_@suffixSalt@"
|
||||
fi
|
||||
|
||||
if [ -e @out@/nix-support/cc-ldflags ]; then
|
||||
NIX_LDFLAGS_@suffixSalt@+=" $(< @out@/nix-support/cc-ldflags)"
|
||||
fi
|
||||
|
||||
if [ -e @out@/nix-support/cc-cflags-before ]; then
|
||||
NIX_CFLAGS_COMPILE_BEFORE_@suffixSalt@="$(< @out@/nix-support/cc-cflags-before) $NIX_CFLAGS_COMPILE_BEFORE_@suffixSalt@"
|
||||
fi
|
||||
|
||||
# Only add darwin min version flag if a default darwin min version is set,
|
||||
# which is a signal that we're targetting darwin.
|
||||
if [ "@darwinMinVersion@" ]; then
|
||||
mangleVarSingle @darwinMinVersionVariable@ ${role_suffixes[@]+"${role_suffixes[@]}"}
|
||||
|
||||
NIX_CFLAGS_COMPILE_BEFORE_@suffixSalt@="-m@darwinPlatformForCC@-version-min=${@darwinMinVersionVariable@_@suffixSalt@:-@darwinMinVersion@} $NIX_CFLAGS_COMPILE_BEFORE_@suffixSalt@"
|
||||
fi
|
||||
|
||||
# That way forked processes will not extend these environment variables again.
|
||||
export NIX_CC_WRAPPER_FLAGS_SET_@suffixSalt@=1
|
||||
73
pkgs/build-support/cc-wrapper/add-hardening.sh
Normal file
@@ -0,0 +1,73 @@
declare -a hardeningCFlags=()
|
||||
|
||||
declare -A hardeningEnableMap=()
|
||||
|
||||
# Intentionally word-split in case 'NIX_HARDENING_ENABLE' is defined in Nix. The
|
||||
# array expansion also prevents undefined variables from causing trouble with
|
||||
# `set -u`.
|
||||
for flag in ${NIX_HARDENING_ENABLE_@suffixSalt@-}; do
|
||||
hardeningEnableMap["$flag"]=1
|
||||
done
|
||||
|
||||
# Remove unsupported flags.
|
||||
for flag in @hardening_unsupported_flags@; do
|
||||
unset -v "hardeningEnableMap[$flag]"
|
||||
done
|
||||
|
||||
if (( "${NIX_DEBUG:-0}" >= 1 )); then
|
||||
declare -a allHardeningFlags=(fortify stackprotector pie pic strictoverflow format)
|
||||
declare -A hardeningDisableMap=()
|
||||
|
||||
# Determine which flags were effectively disabled so we can report below.
|
||||
for flag in "${allHardeningFlags[@]}"; do
|
||||
if [[ -z "${hardeningEnableMap[$flag]-}" ]]; then
|
||||
hardeningDisableMap["$flag"]=1
|
||||
fi
|
||||
done
|
||||
|
||||
printf 'HARDENING: disabled flags:' >&2
|
||||
(( "${#hardeningDisableMap[@]}" )) && printf ' %q' "${!hardeningDisableMap[@]}" >&2
|
||||
echo >&2
|
||||
|
||||
if (( "${#hardeningEnableMap[@]}" )); then
|
||||
echo 'HARDENING: Is active (not completely disabled with "all" flag)' >&2;
|
||||
fi
|
||||
fi
|
||||
|
||||
for flag in "${!hardeningEnableMap[@]}"; do
|
||||
case $flag in
|
||||
fortify)
|
||||
if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling fortify >&2; fi
|
||||
hardeningCFlags+=('-O2' '-D_FORTIFY_SOURCE=2')
|
||||
;;
|
||||
stackprotector)
|
||||
if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling stackprotector >&2; fi
|
||||
hardeningCFlags+=('-fstack-protector-strong' '--param' 'ssp-buffer-size=4')
|
||||
;;
|
||||
pie)
|
||||
# NB: we do not use `+=` here, because PIE flags must occur before any PIC flags
|
||||
if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling CFlags -fPIE >&2; fi
|
||||
hardeningCFlags=('-fPIE' "${hardeningCFlags[@]}")
|
||||
if [[ ! (" $* " =~ " -shared " || " $* " =~ " -static ") ]]; then
|
||||
if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling LDFlags -pie >&2; fi
|
||||
hardeningCFlags=('-pie' "${hardeningCFlags[@]}")
|
||||
fi
|
||||
;;
|
||||
pic)
|
||||
if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling pic >&2; fi
|
||||
hardeningCFlags+=('-fPIC')
|
||||
;;
|
||||
strictoverflow)
|
||||
if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling strictoverflow >&2; fi
|
||||
hardeningCFlags+=('-fno-strict-overflow')
|
||||
;;
|
||||
format)
|
||||
if (( "${NIX_DEBUG:-0}" >= 1 )); then echo HARDENING: enabling format >&2; fi
|
||||
hardeningCFlags+=('-Wformat' '-Wformat-security' '-Werror=format-security')
|
||||
;;
|
||||
*)
|
||||
# Ignore unsupported. Checked in Nix that at least *some*
|
||||
# tool supports each flag.
|
||||
;;
|
||||
esac
|
||||
done
|
||||
214
pkgs/build-support/cc-wrapper/cc-wrapper.sh
Normal file
@@ -0,0 +1,214 @@
#! @shell@
|
||||
set -eu -o pipefail +o posix
|
||||
shopt -s nullglob
|
||||
|
||||
if (( "${NIX_DEBUG:-0}" >= 7 )); then
|
||||
set -x
|
||||
fi
|
||||
|
||||
path_backup="$PATH"
|
||||
|
||||
# That @-vars are substituted separately from bash evaluation makes
|
||||
# shellcheck think this, and others like it, are useless conditionals.
|
||||
# shellcheck disable=SC2157
|
||||
if [[ -n "@coreutils_bin@" && -n "@gnugrep_bin@" ]]; then
|
||||
PATH="@coreutils_bin@/bin:@gnugrep_bin@/bin"
|
||||
fi
|
||||
|
||||
source @out@/nix-support/utils.bash
|
||||
|
||||
|
||||
# Parse command line options and set several variables.
|
||||
# For instance, figure out if linker flags should be passed.
|
||||
# GCC prints annoying warnings when they are not needed.
|
||||
dontLink=0
|
||||
nonFlagArgs=0
|
||||
cc1=0
|
||||
# shellcheck disable=SC2193
|
||||
[[ "@prog@" = *++ ]] && isCxx=1 || isCxx=0
|
||||
cxxInclude=1
|
||||
cxxLibrary=1
|
||||
cInclude=1
|
||||
|
||||
expandResponseParams "$@"
|
||||
linkType=$(checkLinkType "${params[@]}")
|
||||
|
||||
declare -i n=0
|
||||
nParams=${#params[@]}
|
||||
while (( "$n" < "$nParams" )); do
|
||||
p=${params[n]}
|
||||
p2=${params[n+1]:-} # handle `p` being last one
|
||||
if [ "$p" = -c ]; then
|
||||
dontLink=1
|
||||
elif [ "$p" = -S ]; then
|
||||
dontLink=1
|
||||
elif [ "$p" = -E ]; then
|
||||
dontLink=1
|
||||
elif [ "$p" = -E ]; then
|
||||
dontLink=1
|
||||
elif [ "$p" = -M ]; then
|
||||
dontLink=1
|
||||
elif [ "$p" = -MM ]; then
|
||||
dontLink=1
|
||||
elif [[ "$p" = -x && "$p2" = *-header ]]; then
|
||||
dontLink=1
|
||||
elif [[ "$p" = -x && "$p2" = c++* && "$isCxx" = 0 ]]; then
|
||||
isCxx=1
|
||||
elif [ "$p" = -nostdlib ]; then
|
||||
cxxLibrary=0
|
||||
elif [ "$p" = -nostdinc ]; then
|
||||
cInclude=0
|
||||
cxxInclude=0
|
||||
elif [ "$p" = -nostdinc++ ]; then
|
||||
cxxInclude=0
|
||||
elif [[ "$p" != -?* ]]; then
|
||||
# A dash alone signifies standard input; it is not a flag
|
||||
nonFlagArgs=1
|
||||
elif [ "$p" = -cc1 ]; then
|
||||
cc1=1
|
||||
fi
|
||||
n+=1
|
||||
done
|
||||
|
||||
# If we pass a flag like -Wl, then gcc will call the linker unless it
|
||||
# can figure out that it has to do something else (e.g., because of a
|
||||
# "-c" flag). So if no non-flag arguments are given, don't pass any
|
||||
# linker flags. This catches cases like "gcc" (should just print
|
||||
# "gcc: no input files") and "gcc -v" (should print the version).
|
||||
if [ "$nonFlagArgs" = 0 ]; then
|
||||
dontLink=1
|
||||
fi
|
||||
|
||||
# Optionally filter out paths not refering to the store.
|
||||
if [[ "${NIX_ENFORCE_PURITY:-}" = 1 && -n "$NIX_STORE" ]]; then
|
||||
rest=()
|
||||
nParams=${#params[@]}
|
||||
declare -i n=0
|
||||
while (( "$n" < "$nParams" )); do
|
||||
p=${params[n]}
|
||||
p2=${params[n+1]:-} # handle `p` being last one
|
||||
if [ "${p:0:3}" = -L/ ] && badPath "${p:2}"; then
|
||||
skip "${p:2}"
|
||||
elif [ "$p" = -L ] && badPath "$p2"; then
|
||||
n+=1; skip "$p2"
|
||||
elif [ "${p:0:3}" = -I/ ] && badPath "${p:2}"; then
|
||||
skip "${p:2}"
|
||||
elif [ "$p" = -I ] && badPath "$p2"; then
|
||||
n+=1; skip "$p2"
|
||||
elif [ "$p" = -isystem ] && badPath "$p2"; then
|
||||
n+=1; skip "$p2"
|
||||
else
|
||||
rest+=("$p")
|
||||
fi
|
||||
n+=1
|
||||
done
|
||||
# Old bash empty array hack
|
||||
params=(${rest+"${rest[@]}"})
|
||||
fi
|
||||
|
||||
# Flirting with a layer violation here.
|
||||
if [ -z "${NIX_BINTOOLS_WRAPPER_FLAGS_SET_@suffixSalt@:-}" ]; then
|
||||
source @bintools@/nix-support/add-flags.sh
|
||||
fi
|
||||
|
||||
# Put this one second so libc ldflags take priority.
|
||||
if [ -z "${NIX_CC_WRAPPER_FLAGS_SET_@suffixSalt@:-}" ]; then
|
||||
source @out@/nix-support/add-flags.sh
|
||||
fi
|
||||
|
||||
# Clear march/mtune=native -- they bring impurity.
|
||||
if [ "$NIX_ENFORCE_NO_NATIVE_@suffixSalt@" = 1 ]; then
|
||||
rest=()
|
||||
# Old bash empty array hack
|
||||
for p in ${params+"${params[@]}"}; do
|
||||
if [[ "$p" = -m*=native ]]; then
|
||||
skip "$p"
|
||||
else
|
||||
rest+=("$p")
|
||||
fi
|
||||
done
|
||||
# Old bash empty array hack
|
||||
params=(${rest+"${rest[@]}"})
|
||||
fi
|
||||
|
||||
if [[ "$isCxx" = 1 ]]; then
|
||||
if [[ "$cxxInclude" = 1 ]]; then
|
||||
NIX_CFLAGS_COMPILE_@suffixSalt@+=" $NIX_CXXSTDLIB_COMPILE_@suffixSalt@"
|
||||
fi
|
||||
if [[ "$cxxLibrary" = 1 ]]; then
|
||||
NIX_CFLAGS_LINK_@suffixSalt@+=" $NIX_CXXSTDLIB_LINK_@suffixSalt@"
|
||||
fi
|
||||
fi
|
||||
|
||||
source @out@/nix-support/add-hardening.sh
|
||||
|
||||
# Add the flags for the C compiler proper.
|
||||
extraAfter=($NIX_CFLAGS_COMPILE_@suffixSalt@)
|
||||
extraBefore=(${hardeningCFlags[@]+"${hardeningCFlags[@]}"} $NIX_CFLAGS_COMPILE_BEFORE_@suffixSalt@)
|
||||
|
||||
if [ "$dontLink" != 1 ]; then
|
||||
|
||||
# Add the flags that should only be passed to the compiler when
|
||||
# linking.
|
||||
extraAfter+=($(filterRpathFlags "$linkType" $NIX_CFLAGS_LINK_@suffixSalt@))
|
||||
|
||||
# Add the flags that should be passed to the linker (and prevent
|
||||
# `ld-wrapper' from adding NIX_LDFLAGS_@suffixSalt@ again).
|
||||
for i in $(filterRpathFlags "$linkType" $NIX_LDFLAGS_BEFORE_@suffixSalt@); do
|
||||
extraBefore+=("-Wl,$i")
|
||||
done
|
||||
if [[ "$linkType" == dynamic && -n "$NIX_DYNAMIC_LINKER_@suffixSalt@" ]]; then
|
||||
extraBefore+=("-Wl,-dynamic-linker=$NIX_DYNAMIC_LINKER_@suffixSalt@")
|
||||
fi
|
||||
for i in $(filterRpathFlags "$linkType" $NIX_LDFLAGS_@suffixSalt@); do
|
||||
if [ "${i:0:3}" = -L/ ]; then
|
||||
extraAfter+=("$i")
|
||||
else
|
||||
extraAfter+=("-Wl,$i")
|
||||
fi
|
||||
done
|
||||
export NIX_LINK_TYPE_@suffixSalt@=$linkType
|
||||
fi
|
||||
|
||||
# As a very special hack, if the arguments are just `-v', then don't
|
||||
# add anything. This is to prevent `gcc -v' (which normally prints
|
||||
# out the version number and returns exit code 0) from printing out
|
||||
# `No input files specified' and returning exit code 1.
|
||||
if [ "$*" = -v ]; then
|
||||
extraAfter=()
|
||||
extraBefore=()
|
||||
fi
|
||||
|
||||
# clang's -cc1 mode is not compatible with most options
|
||||
# that we would pass. Rather than trying to pass only
|
||||
# options that would work, let's just remove all of them.
|
||||
if [ "$cc1" = 1 ]; then
|
||||
extraAfter=()
|
||||
extraBefore=()
|
||||
fi
|
||||
|
||||
# Optionally print debug info.
|
||||
if (( "${NIX_DEBUG:-0}" >= 1 )); then
|
||||
# Old bash workaround, see ld-wrapper for explanation.
|
||||
echo "extra flags before to @prog@:" >&2
|
||||
printf " %q\n" ${extraBefore+"${extraBefore[@]}"} >&2
|
||||
echo "original flags to @prog@:" >&2
|
||||
printf " %q\n" ${params+"${params[@]}"} >&2
|
||||
echo "extra flags after to @prog@:" >&2
|
||||
printf " %q\n" ${extraAfter+"${extraAfter[@]}"} >&2
|
||||
fi
|
||||
|
||||
PATH="$path_backup"
|
||||
# Old bash workaround, see above.
|
||||
|
||||
if (( "${NIX_CC_USE_RESPONSE_FILE:-@use_response_file_by_default@}" >= 1 )); then
|
||||
exec @prog@ @<(printf "%q\n" \
|
||||
${extraBefore+"${extraBefore[@]}"} \
|
||||
${params+"${params[@]}"} \
|
||||
${extraAfter+"${extraAfter[@]}"})
|
||||
else
|
||||
exec @prog@ \
|
||||
${extraBefore+"${extraBefore[@]}"} \
|
||||
${params+"${params[@]}"} \
|
||||
${extraAfter+"${extraAfter[@]}"}
|
||||
fi
|
||||
547
pkgs/build-support/cc-wrapper/default.nix
Normal file
@@ -0,0 +1,547 @@
# The Nixpkgs CC is not directly usable, since it doesn't know where
|
||||
# the C library and standard header files are. Therefore the compiler
|
||||
# produced by that package cannot be installed directly in a user
|
||||
# environment and used from the command line. So we use a wrapper
|
||||
# script that sets up the right environment variables so that the
|
||||
# compiler and the linker just "work".
|
||||
|
||||
{ name ? ""
|
||||
, lib
|
||||
, stdenvNoCC
|
||||
, cc ? null, libc ? null, bintools, coreutils ? null, shell ? stdenvNoCC.shell
|
||||
, gccForLibs ? null
|
||||
, zlib ? null
|
||||
, nativeTools, noLibc ? false, nativeLibc, nativePrefix ? ""
|
||||
, propagateDoc ? cc != null && cc ? man
|
||||
, extraTools ? [], extraPackages ? [], extraBuildCommands ? ""
|
||||
, nixSupport ? {}
|
||||
, isGNU ? false, isClang ? cc.isClang or false, gnugrep ? null
|
||||
, buildPackages ? {}
|
||||
, libcxx ? null
|
||||
}:
|
||||
|
||||
with lib;
|
||||
|
||||
assert nativeTools -> !propagateDoc && nativePrefix != "";
|
||||
assert !nativeTools ->
|
||||
cc != null && coreutils != null && gnugrep != null;
|
||||
assert !(nativeLibc && noLibc);
|
||||
assert (noLibc || nativeLibc) == (libc == null);
|
||||
|
||||
let
|
||||
stdenv = stdenvNoCC;
|
||||
inherit (stdenv) hostPlatform targetPlatform;
|
||||
|
||||
# Prefix for binaries. Customarily ends with a dash separator.
|
||||
#
|
||||
# TODO(@Ericson2314) Make unconditional, or optional but always true by
|
||||
# default.
|
||||
targetPrefix = lib.optionalString (targetPlatform != hostPlatform)
|
||||
(targetPlatform.config + "-");
|
||||
|
||||
ccVersion = lib.getVersion cc;
|
||||
ccName = lib.removePrefix targetPrefix (lib.getName cc);
|
||||
|
||||
libc_bin = if libc == null then null else getBin libc;
|
||||
libc_dev = if libc == null then null else getDev libc;
|
||||
libc_lib = if libc == null then null else getLib libc;
|
||||
cc_solib = getLib cc
|
||||
+ optionalString (targetPlatform != hostPlatform) "/${targetPlatform.config}";
|
||||
|
||||
# The wrapper scripts use 'cat' and 'grep', so we may need coreutils.
|
||||
coreutils_bin = if nativeTools then "" else getBin coreutils;
|
||||
|
||||
# The "suffix salt" is a arbitrary string added in the end of env vars
|
||||
# defined by cc-wrapper's hooks so that multiple cc-wrappers can be used
|
||||
# without interfering. For the moment, it is defined as the target triple,
|
||||
# adjusted to be a valid bash identifier. This should be considered an
|
||||
# unstable implementation detail, however.
|
||||
suffixSalt = replaceStrings ["-" "."] ["_" "_"] targetPlatform.config;
|
||||
|
||||
expand-response-params =
|
||||
if (buildPackages.stdenv.hasCC or false) && buildPackages.stdenv.cc != "/dev/null"
|
||||
then import ../expand-response-params { inherit (buildPackages) stdenv; }
|
||||
else "";
|
||||
|
||||
useGccForLibs = isClang
|
||||
&& libcxx == null
|
||||
&& !stdenv.targetPlatform.isDarwin
|
||||
&& !(stdenv.targetPlatform.useLLVM or false)
|
||||
&& !(stdenv.targetPlatform.useAndroidPrebuilt or false)
|
||||
&& !(stdenv.targetPlatform.isiOS or false)
|
||||
&& gccForLibs != null;
|
||||
|
||||
# older compilers (for example bootstrap's GCC 5) fail with -march=too-modern-cpu
|
||||
isGccArchSupported = arch:
|
||||
if isGNU then
|
||||
{ # Intel
|
||||
skylake = versionAtLeast ccVersion "6.0";
|
||||
skylake-avx512 = versionAtLeast ccVersion "6.0";
|
||||
cannonlake = versionAtLeast ccVersion "8.0";
|
||||
icelake-client = versionAtLeast ccVersion "8.0";
|
||||
icelake-server = versionAtLeast ccVersion "8.0";
|
||||
cascadelake = versionAtLeast ccVersion "9.0";
|
||||
cooperlake = versionAtLeast ccVersion "10.0";
|
||||
tigerlake = versionAtLeast ccVersion "10.0";
|
||||
knm = versionAtLeast ccVersion "8.0";
|
||||
# AMD
|
||||
znver1 = versionAtLeast ccVersion "6.0";
|
||||
znver2 = versionAtLeast ccVersion "9.0";
|
||||
znver3 = versionAtLeast ccVersion "11.0";
|
||||
}.${arch} or true
|
||||
else if isClang then
|
||||
{ # Intel
|
||||
cannonlake = versionAtLeast ccVersion "5.0";
|
||||
icelake-client = versionAtLeast ccVersion "7.0";
|
||||
icelake-server = versionAtLeast ccVersion "7.0";
|
||||
knm = versionAtLeast ccVersion "7.0";
|
||||
# AMD
|
||||
znver1 = versionAtLeast ccVersion "4.0";
|
||||
znver2 = versionAtLeast ccVersion "9.0";
|
||||
}.${arch} or true
|
||||
else
|
||||
false;
|
||||
|
||||
|
||||
darwinPlatformForCC = optionalString stdenv.targetPlatform.isDarwin (
|
||||
if (targetPlatform.darwinPlatform == "macos" && isGNU) then "macosx"
|
||||
else targetPlatform.darwinPlatform
|
||||
);
|
||||
|
||||
darwinMinVersion = optionalString stdenv.targetPlatform.isDarwin (
|
||||
stdenv.targetPlatform.darwinMinVersion
|
||||
);
|
||||
|
||||
darwinMinVersionVariable = optionalString stdenv.targetPlatform.isDarwin
|
||||
stdenv.targetPlatform.darwinMinVersionVariable;
|
||||
in
|
||||
|
||||
# Ensure bintools matches
|
||||
assert libc_bin == bintools.libc_bin;
|
||||
assert libc_dev == bintools.libc_dev;
|
||||
assert libc_lib == bintools.libc_lib;
|
||||
assert nativeTools == bintools.nativeTools;
|
||||
assert nativeLibc == bintools.nativeLibc;
|
||||
assert nativePrefix == bintools.nativePrefix;
|
||||
|
||||
stdenv.mkDerivation {
|
||||
pname = targetPrefix
|
||||
+ (if name != "" then name else "${ccName}-wrapper");
|
||||
version = if cc == null then null else ccVersion;
|
||||
|
||||
preferLocalBuild = true;
|
||||
|
||||
inherit cc libc_bin libc_dev libc_lib bintools coreutils_bin;
|
||||
shell = getBin shell + shell.shellPath or "";
|
||||
gnugrep_bin = if nativeTools then "" else gnugrep;
|
||||
|
||||
inherit targetPrefix suffixSalt;
|
||||
inherit darwinPlatformForCC darwinMinVersion darwinMinVersionVariable;
|
||||
|
||||
outputs = [ "out" ] ++ optionals propagateDoc [ "man" "info" ];
|
||||
|
||||
passthru = {
|
||||
# "cc" is the generic name for a C compiler, but there is no one for package
|
||||
# providing the linker and related tools. The two we use now are GNU
|
||||
# Binutils, and Apple's "cctools"; "bintools" as an attempt to find an
|
||||
# unused middle-ground name that evokes both.
|
||||
inherit bintools;
|
||||
inherit libc nativeTools nativeLibc nativePrefix isGNU isClang;
|
||||
|
||||
emacsBufferSetup = pkgs: ''
|
||||
; We should handle propagation here too
|
||||
(mapc
|
||||
(lambda (arg)
|
||||
(when (file-directory-p (concat arg "/include"))
|
||||
(setenv "NIX_CFLAGS_COMPILE_${suffixSalt}" (concat (getenv "NIX_CFLAGS_COMPILE_${suffixSalt}") " -isystem " arg "/include"))))
|
||||
'(${concatStringsSep " " (map (pkg: "\"${pkg}\"") pkgs)}))
|
||||
'';
|
||||
|
||||
inherit nixSupport;
|
||||
};
|
||||
|
||||
dontBuild = true;
|
||||
dontConfigure = true;
|
||||
enableParallelBuilding = true;
|
||||
|
||||
unpackPhase = ''
|
||||
src=$PWD
|
||||
'';
|
||||
|
||||
wrapper = ./cc-wrapper.sh;
|
||||
|
||||
installPhase =
|
||||
''
|
||||
mkdir -p $out/bin $out/nix-support
|
||||
|
||||
wrap() {
|
||||
local dst="$1"
|
||||
local wrapper="$2"
|
||||
export prog="$3"
|
||||
export use_response_file_by_default=${if isClang then "1" else "0"}
|
||||
substituteAll "$wrapper" "$out/bin/$dst"
|
||||
chmod +x "$out/bin/$dst"
|
||||
}
|
||||
''
|
||||
|
||||
+ (if nativeTools then ''
|
||||
echo ${if targetPlatform.isDarwin then cc else nativePrefix} > $out/nix-support/orig-cc
|
||||
|
||||
ccPath="${if targetPlatform.isDarwin then cc else nativePrefix}/bin"
|
||||
'' else ''
|
||||
echo $cc > $out/nix-support/orig-cc
|
||||
|
||||
ccPath="${cc}/bin"
|
||||
'')
|
||||
|
||||
# Create symlinks to everything in the bintools wrapper.
|
||||
+ ''
|
||||
for bbin in $bintools/bin/*; do
|
||||
mkdir -p "$out/bin"
|
||||
ln -s "$bbin" "$out/bin/$(basename $bbin)"
|
||||
done
|
||||
''
|
||||
|
||||
# We export environment variables pointing to the wrapped nonstandard
|
||||
# cmds, lest some lousy configure script use those to guess compiler
|
||||
# version.
|
||||
+ ''
|
||||
export named_cc=${targetPrefix}cc
|
||||
export named_cxx=${targetPrefix}c++
|
||||
|
||||
if [ -e $ccPath/${targetPrefix}gcc ]; then
|
||||
wrap ${targetPrefix}gcc $wrapper $ccPath/${targetPrefix}gcc
|
||||
ln -s ${targetPrefix}gcc $out/bin/${targetPrefix}cc
|
||||
export named_cc=${targetPrefix}gcc
|
||||
export named_cxx=${targetPrefix}g++
|
||||
elif [ -e $ccPath/clang ]; then
|
||||
wrap ${targetPrefix}clang $wrapper $ccPath/clang
|
||||
ln -s ${targetPrefix}clang $out/bin/${targetPrefix}cc
|
||||
export named_cc=${targetPrefix}clang
|
||||
export named_cxx=${targetPrefix}clang++
|
||||
fi
|
||||
|
||||
if [ -e $ccPath/${targetPrefix}g++ ]; then
|
||||
wrap ${targetPrefix}g++ $wrapper $ccPath/${targetPrefix}g++
|
||||
ln -s ${targetPrefix}g++ $out/bin/${targetPrefix}c++
|
||||
elif [ -e $ccPath/clang++ ]; then
|
||||
wrap ${targetPrefix}clang++ $wrapper $ccPath/clang++
|
||||
ln -s ${targetPrefix}clang++ $out/bin/${targetPrefix}c++
|
||||
fi
|
||||
|
||||
if [ -e $ccPath/cpp ]; then
|
||||
wrap ${targetPrefix}cpp $wrapper $ccPath/cpp
|
||||
fi
|
||||
''
|
||||
|
||||
+ optionalString cc.langAda or false ''
|
||||
wrap ${targetPrefix}gnatmake ${./gnat-wrapper.sh} $ccPath/${targetPrefix}gnatmake
|
||||
wrap ${targetPrefix}gnatbind ${./gnat-wrapper.sh} $ccPath/${targetPrefix}gnatbind
|
||||
wrap ${targetPrefix}gnatlink ${./gnat-wrapper.sh} $ccPath/${targetPrefix}gnatlink
|
||||
|
||||
# this symlink points to the unwrapped gnat's output "out". It is used by
|
||||
# our custom gprconfig compiler description to find GNAT's ada runtime. See
|
||||
# ../../development/tools/build-managers/gprbuild/{boot.nix, nixpkgs-gnat.xml}
|
||||
ln -sf ${cc} $out/nix-support/gprconfig-gnat-unwrapped
|
||||
''
|
||||
|
||||
+ optionalString cc.langD or false ''
|
||||
wrap ${targetPrefix}gdc $wrapper $ccPath/${targetPrefix}gdc
|
||||
''
|
||||
|
||||
+ optionalString cc.langFortran or false ''
|
||||
wrap ${targetPrefix}gfortran $wrapper $ccPath/${targetPrefix}gfortran
|
||||
ln -sv ${targetPrefix}gfortran $out/bin/${targetPrefix}g77
|
||||
ln -sv ${targetPrefix}gfortran $out/bin/${targetPrefix}f77
|
||||
export named_fc=${targetPrefix}gfortran
|
||||
''
|
||||
|
||||
+ optionalString cc.langJava or false ''
|
||||
wrap ${targetPrefix}gcj $wrapper $ccPath/${targetPrefix}gcj
|
||||
''
|
||||
|
||||
+ optionalString cc.langGo or false ''
|
||||
wrap ${targetPrefix}gccgo $wrapper $ccPath/${targetPrefix}gccgo
|
||||
'';
|
||||
|
||||
strictDeps = true;
|
||||
propagatedBuildInputs = [ bintools ] ++ extraTools ++ optionals cc.langD or false [ zlib ];
|
||||
depsTargetTargetPropagated = optional (libcxx != null) libcxx ++ extraPackages;
|
||||
|
||||
wrapperName = "CC_WRAPPER";
|
||||
|
||||
setupHooks = [
|
||||
../setup-hooks/role.bash
|
||||
] ++ lib.optional (cc.langC or true) ./setup-hook.sh
|
||||
++ lib.optional (cc.langFortran or false) ./fortran-hook.sh;
|
||||
|
||||
postFixup =
|
||||
# Ensure flags files exists, as some other programs cat them. (That these
|
||||
# are considered an exposed interface is a bit dubious, but fine for now.)
|
||||
''
|
||||
touch "$out/nix-support/cc-cflags"
|
||||
touch "$out/nix-support/cc-ldflags"
|
||||
''
|
||||
|
||||
# Backwards compatability for packages expecting this file, e.g. with
|
||||
# `$NIX_CC/nix-support/dynamic-linker`.
|
||||
#
|
||||
# TODO(@Ericson2314): Remove this after stable release and force
|
||||
# everyone to refer to bintools-wrapper directly.
|
||||
+ ''
|
||||
if [[ -f "$bintools/nix-support/dynamic-linker" ]]; then
|
||||
ln -s "$bintools/nix-support/dynamic-linker" "$out/nix-support"
|
||||
fi
|
||||
if [[ -f "$bintools/nix-support/dynamic-linker-m32" ]]; then
|
||||
ln -s "$bintools/nix-support/dynamic-linker-m32" "$out/nix-support"
|
||||
fi
|
||||
''
|
||||
|
||||
##
|
||||
## General Clang support
|
||||
##
|
||||
+ optionalString isClang ''
|
||||
|
||||
echo "-target ${targetPlatform.config}" >> $out/nix-support/cc-cflags
|
||||
''
|
||||
|
||||
##
|
||||
## GCC libs for non-GCC support
|
||||
##
|
||||
+ optionalString useGccForLibs ''
|
||||
|
||||
echo "-B${gccForLibs}/lib/gcc/${targetPlatform.config}/${gccForLibs.version}" >> $out/nix-support/cc-cflags
|
||||
echo "-L${gccForLibs}/lib/gcc/${targetPlatform.config}/${gccForLibs.version}" >> $out/nix-support/cc-ldflags
|
||||
echo "-L${gccForLibs.lib}/${targetPlatform.config}/lib" >> $out/nix-support/cc-ldflags
|
||||
''
|
||||
|
||||
# TODO We would like to connect this to `useGccForLibs`, but we cannot yet
|
||||
# because `libcxxStdenv` on linux still needs this. Maybe someday we'll
|
||||
# always set `useLLVM` on Darwin, and maybe also break down `useLLVM` into
|
||||
# fine-grained use flags (libgcc vs compiler-rt, ld.lld vs legacy, libc++
|
||||
# vs libstdc++, etc.) since Darwin isn't `useLLVM` on all counts. (See
|
||||
# https://clang.llvm.org/docs/Toolchain.html for all the axes one might
|
||||
# break `useLLVM` into.)
|
||||
+ optionalString (isClang
|
||||
&& targetPlatform.isLinux
|
||||
&& !(stdenv.targetPlatform.useAndroidPrebuilt or false)
|
||||
&& !(stdenv.targetPlatform.useLLVM or false)
|
||||
&& gccForLibs != null) ''
|
||||
echo "--gcc-toolchain=${gccForLibs}" >> $out/nix-support/cc-cflags
|
||||
''
|
||||
|
||||
##
|
||||
## General libc support
|
||||
##
|
||||
|
||||
# The "-B${libc_lib}/lib/" flag is a quick hack to force gcc to link
|
||||
# against the crt1.o from our own glibc, rather than the one in
|
||||
# /usr/lib. (This is only an issue when using an `impure'
|
||||
# compiler/linker, i.e., one that searches /usr/lib and so on.)
|
||||
#
|
||||
# Unfortunately, setting -B appears to override the default search
|
||||
# path. Thus, the gcc-specific "../includes-fixed" directory is
|
||||
# now longer searched and glibc's <limits.h> header fails to
|
||||
# compile, because it uses "#include_next <limits.h>" to find the
|
||||
# limits.h file in ../includes-fixed. To remedy the problem,
|
||||
# another -idirafter is necessary to add that directory again.
|
||||
+ optionalString (libc != null) (''
|
||||
touch "$out/nix-support/libc-cflags"
|
||||
touch "$out/nix-support/libc-ldflags"
|
||||
echo "-B${libc_lib}${libc.libdir or "/lib/"}" >> $out/nix-support/libc-crt1-cflags
|
||||
'' + optionalString (!(cc.langD or false)) ''
|
||||
echo "-idirafter ${libc_dev}${libc.incdir or "/include"}" >> $out/nix-support/libc-cflags
|
||||
'' + optionalString (isGNU && (!(cc.langD or false))) ''
|
||||
for dir in "${cc}"/lib/gcc/*/*/include-fixed; do
|
||||
echo '-idirafter' ''${dir} >> $out/nix-support/libc-cflags
|
||||
done
|
||||
'' + ''
|
||||
|
||||
echo "${libc_lib}" > $out/nix-support/orig-libc
|
||||
echo "${libc_dev}" > $out/nix-support/orig-libc-dev
|
||||
'')
|
||||
|
||||
##
|
||||
## General libc++ support
|
||||
##
|
||||
|
||||
# We have a libc++ directly, we have one via "smuggled" GCC, or we have one
|
||||
# bundled with the C compiler because it is GCC
|
||||
+ optionalString (libcxx != null || (useGccForLibs && gccForLibs.langCC or false) || (isGNU && cc.langCC or false)) ''
|
||||
touch "$out/nix-support/libcxx-cxxflags"
|
||||
touch "$out/nix-support/libcxx-ldflags"
|
||||
''
|
||||
+ optionalString (libcxx == null && (useGccForLibs && gccForLibs.langCC or false)) ''
|
||||
for dir in ${gccForLibs}/include/c++/*; do
|
||||
echo "-isystem $dir" >> $out/nix-support/libcxx-cxxflags
|
||||
done
|
||||
for dir in ${gccForLibs}/include/c++/*/${targetPlatform.config}; do
|
||||
echo "-isystem $dir" >> $out/nix-support/libcxx-cxxflags
|
||||
done
|
||||
''
|
||||
+ optionalString (libcxx.isLLVM or false) (''
|
||||
echo "-isystem ${lib.getDev libcxx}/include/c++/v1" >> $out/nix-support/libcxx-cxxflags
|
||||
echo "-stdlib=libc++" >> $out/nix-support/libcxx-ldflags
|
||||
'' + lib.optionalString stdenv.targetPlatform.isLinux ''
|
||||
echo "-lc++abi" >> $out/nix-support/libcxx-ldflags
|
||||
'')
|
||||
|
||||
##
|
||||
## Initial CFLAGS
|
||||
##
|
||||
|
||||
# GCC shows ${cc_solib}/lib in `gcc -print-search-dirs', but not
|
||||
# ${cc_solib}/lib64 (even though it does actually search there...)..
|
||||
# This confuses libtool. So add it to the compiler tool search
|
||||
# path explicitly.
|
||||
+ optionalString (!nativeTools) ''
|
||||
if [ -e "${cc_solib}/lib64" -a ! -L "${cc_solib}/lib64" ]; then
|
||||
ccLDFlags+=" -L${cc_solib}/lib64"
|
||||
ccCFlags+=" -B${cc_solib}/lib64"
|
||||
fi
|
||||
ccLDFlags+=" -L${cc_solib}/lib"
|
||||
ccCFlags+=" -B${cc_solib}/lib"
|
||||
|
||||
'' + optionalString cc.langAda or false ''
|
||||
touch "$out/nix-support/gnat-cflags"
|
||||
touch "$out/nix-support/gnat-ldflags"
|
||||
basePath=$(echo $cc/lib/*/*/*)
|
||||
ccCFlags+=" -B$basePath -I$basePath/adainclude"
|
||||
gnatCFlags="-I$basePath/adainclude -I$basePath/adalib"
|
||||
|
||||
echo "$gnatCFlags" >> $out/nix-support/gnat-cflags
|
||||
'' + ''
|
||||
echo "$ccLDFlags" >> $out/nix-support/cc-ldflags
|
||||
echo "$ccCFlags" >> $out/nix-support/cc-cflags
|
||||
'' + optionalString (targetPlatform.isDarwin && (libcxx != null) && (cc.isClang or false)) ''
|
||||
echo " -L${lib.getLib libcxx}/lib" >> $out/nix-support/cc-ldflags
|
||||
''
|
||||
|
||||
##
|
||||
## Man page and info support
|
||||
##
|
||||
+ optionalString propagateDoc ''
|
||||
ln -s ${cc.man} $man
|
||||
ln -s ${cc.info} $info
|
||||
'' + optionalString (cc.langD or false) ''
|
||||
echo "-B${zlib}${zlib.libdir or "/lib/"}" >> $out/nix-support/libc-cflags
|
||||
''
|
||||
|
||||
##
|
||||
## Hardening support
|
||||
##
|
||||
+ ''
|
||||
export hardening_unsupported_flags="${builtins.concatStringsSep " " (cc.hardeningUnsupportedFlags or [])}"
|
||||
''
|
||||
|
||||
# Machine flags. These are necessary to support
|
||||
|
||||
# TODO: We should make a way to support miscellaneous machine
|
||||
# flags and other gcc flags as well.
|
||||
|
||||
# Always add -march based on cpu in triple. Sometimes there is a
|
||||
# discrepency (x86_64 vs. x86-64), so we provide an "arch" arg in
|
||||
# that case.
|
||||
# TODO: aarch64-darwin has mcpu incompatible with gcc
|
||||
+ optionalString ((targetPlatform ? gcc.arch) && (isClang || !(stdenv.isDarwin && stdenv.isAarch64)) &&
|
||||
isGccArchSupported targetPlatform.gcc.arch) ''
|
||||
echo "-march=${targetPlatform.gcc.arch}" >> $out/nix-support/cc-cflags-before
|
||||
''
|
||||
|
||||
# -mcpu is not very useful. You should use mtune and march
|
||||
# instead. It’s provided here for backwards compatibility.
|
||||
# TODO: aarch64-darwin has mcpu incompatible with gcc
|
||||
+ optionalString ((targetPlatform ? gcc.cpu) && (isClang || !(stdenv.isDarwin && stdenv.isAarch64))) ''
|
||||
echo "-mcpu=${targetPlatform.gcc.cpu}" >> $out/nix-support/cc-cflags-before
|
||||
''
|
||||
|
||||
# -mfloat-abi only matters on arm32 but we set it here
|
||||
# unconditionally just in case. If the abi specifically sets hard
|
||||
# vs. soft floats we use it here.
|
||||
+ optionalString (targetPlatform ? gcc.float-abi) ''
|
||||
echo "-mfloat-abi=${targetPlatform.gcc.float-abi}" >> $out/nix-support/cc-cflags-before
|
||||
''
|
||||
+ optionalString (targetPlatform ? gcc.fpu) ''
|
||||
echo "-mfpu=${targetPlatform.gcc.fpu}" >> $out/nix-support/cc-cflags-before
|
||||
''
|
||||
+ optionalString (targetPlatform ? gcc.mode) ''
|
||||
echo "-mmode=${targetPlatform.gcc.mode}" >> $out/nix-support/cc-cflags-before
|
||||
''
|
||||
+ optionalString (targetPlatform ? gcc.thumb) ''
|
||||
echo "-m${if targetPlatform.gcc.thumb then "thumb" else "arm"}" >> $out/nix-support/cc-cflags-before
|
||||
''
|
||||
+ optionalString (targetPlatform ? gcc.tune &&
|
||||
isGccArchSupported targetPlatform.gcc.tune) ''
|
||||
echo "-mtune=${targetPlatform.gcc.tune}" >> $out/nix-support/cc-cflags-before
|
||||
''
|
||||
|
||||
# TODO: categorize these and figure out a better place for them
|
||||
+ optionalString hostPlatform.isCygwin ''
|
||||
hardening_unsupported_flags+=" pic"
|
||||
'' + optionalString targetPlatform.isMinGW ''
|
||||
hardening_unsupported_flags+=" stackprotector fortify"
|
||||
'' + optionalString targetPlatform.isAvr ''
|
||||
hardening_unsupported_flags+=" stackprotector pic"
|
||||
'' + optionalString (targetPlatform.libc == "newlib") ''
|
||||
hardening_unsupported_flags+=" stackprotector fortify pie pic"
|
||||
'' + optionalString (targetPlatform.libc == "musl" && targetPlatform.isx86_32) ''
|
||||
hardening_unsupported_flags+=" stackprotector"
|
||||
'' + optionalString targetPlatform.isNetBSD ''
|
||||
hardening_unsupported_flags+=" stackprotector fortify"
|
||||
'' + optionalString cc.langAda or false ''
|
||||
hardening_unsupported_flags+=" format stackprotector strictoverflow"
|
||||
'' + optionalString cc.langD or false ''
|
||||
hardening_unsupported_flags+=" format"
|
||||
'' + optionalString targetPlatform.isWasm ''
|
||||
hardening_unsupported_flags+=" stackprotector fortify pie pic"
|
||||
''
|
||||
|
||||
+ optionalString (libc != null && targetPlatform.isAvr) ''
|
||||
for isa in avr5 avr3 avr4 avr6 avr25 avr31 avr35 avr51 avrxmega2 avrxmega4 avrxmega5 avrxmega6 avrxmega7 tiny-stack; do
|
||||
echo "-B${getLib libc}/avr/lib/$isa" >> $out/nix-support/libc-crt1-cflags
|
||||
done
|
||||
''
|
||||
|
||||
+ optionalString stdenv.targetPlatform.isDarwin ''
|
||||
echo "-arch ${targetPlatform.darwinArch}" >> $out/nix-support/cc-cflags
|
||||
''
|
||||
|
||||
+ optionalString targetPlatform.isAndroid ''
|
||||
echo "-D__ANDROID_API__=${targetPlatform.sdkVer}" >> $out/nix-support/cc-cflags
|
||||
''
|
||||
|
||||
# There are a few tools (to name one libstdcxx5) which do not work
|
||||
# well with multi line flags, so make the flags single line again
|
||||
+ ''
|
||||
for flags in "$out/nix-support"/*flags*; do
|
||||
substituteInPlace "$flags" --replace $'\n' ' '
|
||||
done
|
||||
|
||||
substituteAll ${./add-flags.sh} $out/nix-support/add-flags.sh
|
||||
substituteAll ${./add-hardening.sh} $out/nix-support/add-hardening.sh
|
||||
substituteAll ${../wrapper-common/utils.bash} $out/nix-support/utils.bash
|
||||
''
|
||||
|
||||
##
|
||||
## Extra custom steps
|
||||
##
|
||||
+ extraBuildCommands
|
||||
+ lib.strings.concatStringsSep "; "
|
||||
(lib.attrsets.mapAttrsToList
|
||||
(name: value: "echo ${toString value} >> $out/nix-support/${name}")
|
||||
nixSupport);
|
||||
|
||||
inherit expand-response-params;
|
||||
|
||||
# for substitution in utils.bash
|
||||
expandResponseParams = "${expand-response-params}/bin/expand-response-params";
|
||||
|
||||
meta =
|
||||
let cc_ = if cc != null then cc else {}; in
|
||||
(if cc_ ? meta then removeAttrs cc.meta ["priority"] else {}) //
|
||||
{ description =
|
||||
lib.attrByPath ["meta" "description"] "System C compiler" cc_
|
||||
+ " (wrapper script)";
|
||||
priority = 10;
|
||||
};
|
||||
}
|
||||
11
pkgs/build-support/cc-wrapper/fortran-hook.sh
Normal file
@@ -0,0 +1,11 @@
getTargetRole
getTargetRoleWrapper

export FC${role_post}=@named_fc@

# If unset, assume the default hardening flags.
# These are different for fortran.
: ${NIX_HARDENING_ENABLE="stackprotector pic strictoverflow relro bindnow"}
export NIX_HARDENING_ENABLE

unset -v role_post
167
pkgs/build-support/cc-wrapper/gnat-wrapper.sh
Normal file
@@ -0,0 +1,167 @@
#! @shell@
|
||||
set -eu -o pipefail +o posix
|
||||
shopt -s nullglob
|
||||
|
||||
if (( "${NIX_DEBUG:-0}" >= 7 )); then
|
||||
set -x
|
||||
fi
|
||||
|
||||
path_backup="$PATH"
|
||||
|
||||
# That @-vars are substituted separately from bash evaluation makes
|
||||
# shellcheck think this, and others like it, are useless conditionals.
|
||||
# shellcheck disable=SC2157
|
||||
if [[ -n "@coreutils_bin@" && -n "@gnugrep_bin@" ]]; then
|
||||
PATH="@coreutils_bin@/bin:@gnugrep_bin@/bin"
|
||||
fi
|
||||
|
||||
cInclude=0
|
||||
|
||||
source @out@/nix-support/utils.bash
|
||||
|
||||
# Flirting with a layer violation here.
|
||||
if [ -z "${NIX_BINTOOLS_WRAPPER_FLAGS_SET_@suffixSalt@:-}" ]; then
|
||||
source @bintools@/nix-support/add-flags.sh
|
||||
fi
|
||||
|
||||
# Put this one second so libc ldflags take priority.
|
||||
if [ -z "${NIX_CC_WRAPPER_FLAGS_SET_@suffixSalt@:-}" ]; then
|
||||
source @out@/nix-support/add-flags.sh
|
||||
fi
|
||||
|
||||
|
||||
# Parse command line options and set several variables.
|
||||
# For instance, figure out if linker flags should be passed.
|
||||
# GCC prints annoying warnings when they are not needed.
|
||||
dontLink=0
|
||||
nonFlagArgs=0
|
||||
# shellcheck disable=SC2193
|
||||
|
||||
expandResponseParams "$@"
|
||||
declare -i n=0
|
||||
nParams=${#params[@]}
|
||||
while (( "$n" < "$nParams" )); do
|
||||
p=${params[n]}
|
||||
p2=${params[n+1]:-} # handle `p` being last one
|
||||
if [ "$p" = -c ]; then
|
||||
dontLink=1
|
||||
elif [ "$p" = -S ]; then
|
||||
dontLink=1
|
||||
elif [ "$p" = -E ]; then
|
||||
dontLink=1
|
||||
elif [ "$p" = -E ]; then
|
||||
dontLink=1
|
||||
elif [ "$p" = -M ]; then
|
||||
dontLink=1
|
||||
elif [ "$p" = -MM ]; then
|
||||
dontLink=1
|
||||
elif [[ "$p" = -x && "$p2" = *-header ]]; then
|
||||
dontLink=1
|
||||
elif [[ "$p" != -?* ]]; then
|
||||
# A dash alone signifies standard input; it is not a flag
|
||||
nonFlagArgs=1
|
||||
fi
|
||||
n+=1
|
||||
done
|
||||
|
||||
# If we pass a flag like -Wl, then gcc will call the linker unless it
|
||||
# can figure out that it has to do something else (e.g., because of a
|
||||
# "-c" flag). So if no non-flag arguments are given, don't pass any
|
||||
# linker flags. This catches cases like "gcc" (should just print
|
||||
# "gcc: no input files") and "gcc -v" (should print the version).
|
||||
if [ "$nonFlagArgs" = 0 ]; then
|
||||
dontLink=1
|
||||
fi
|
||||
|
||||
# Optionally filter out paths not referring to the store.
|
||||
if [[ "${NIX_ENFORCE_PURITY:-}" = 1 && -n "$NIX_STORE" ]]; then
|
||||
rest=()
|
||||
nParams=${#params[@]}
|
||||
declare -i n=0
|
||||
while (( "$n" < "$nParams" )); do
|
||||
p=${params[n]}
|
||||
p2=${params[n+1]:-} # handle `p` being last one
|
||||
if [ "${p:0:3}" = -L/ ] && badPath "${p:2}"; then
|
||||
skip "${p:2}"
|
||||
elif [ "$p" = -L ] && badPath "$p2"; then
|
||||
n+=1; skip "$p2"
|
||||
elif [ "${p:0:3}" = -I/ ] && badPath "${p:2}"; then
|
||||
skip "${p:2}"
|
||||
elif [ "$p" = -I ] && badPath "$p2"; then
|
||||
n+=1; skip "$p2"
|
||||
elif [ "${p:0:4}" = -aI/ ] && badPath "${p:3}"; then
|
||||
skip "${p:3}"
|
||||
elif [ "$p" = -aI ] && badPath "$p2"; then
|
||||
n+=1; skip "$p2"
|
||||
elif [ "${p:0:4}" = -aO/ ] && badPath "${p:3}"; then
|
||||
skip "${p:3}"
|
||||
elif [ "$p" = -aO ] && badPath "$p2"; then
|
||||
n+=1; skip "$p2"
|
||||
elif [ "$p" = -isystem ] && badPath "$p2"; then
|
||||
n+=1; skip "$p2"
|
||||
else
|
||||
rest+=("$p")
|
||||
fi
|
||||
n+=1
|
||||
done
|
||||
# Old bash empty array hack
|
||||
params=(${rest+"${rest[@]}"})
|
||||
fi
|
||||
|
||||
|
||||
# Clear march/mtune=native -- they bring impurity.
|
||||
if [ "$NIX_ENFORCE_NO_NATIVE_@suffixSalt@" = 1 ]; then
|
||||
rest=()
|
||||
# Old bash empty array hack
|
||||
for p in ${params+"${params[@]}"}; do
|
||||
if [[ "$p" = -m*=native ]]; then
|
||||
skip "$p"
|
||||
else
|
||||
rest+=("$p")
|
||||
fi
|
||||
done
|
||||
# Old bash empty array hack
|
||||
params=(${rest+"${rest[@]}"})
|
||||
fi
|
||||
|
||||
if [ "$(basename $0)x" = "gnatmakex" ]; then
|
||||
extraBefore=("--GNATBIND=@out@/bin/gnatbind" "--GNATLINK=@out@/bin/gnatlink")
|
||||
extraAfter=($NIX_GNATFLAGS_COMPILE_@suffixSalt@)
|
||||
fi
|
||||
|
||||
if [ "$(basename $0)x" = "gnatbindx" ]; then
|
||||
extraBefore=()
|
||||
extraAfter=($NIX_GNATFLAGS_COMPILE_@suffixSalt@)
|
||||
fi
|
||||
|
||||
if [ "$(basename $0)x" = "gnatlinkx" ]; then
|
||||
extraBefore=()
|
||||
extraAfter=("--GCC=@out@/bin/gcc")
|
||||
fi
|
||||
|
||||
# As a very special hack, if the arguments are just `-v', then don't
|
||||
# add anything. This is to prevent `gcc -v' (which normally prints
|
||||
# out the version number and returns exit code 0) from printing out
|
||||
# `No input files specified' and returning exit code 1.
|
||||
if [ "$*" = -v ]; then
|
||||
extraAfter=()
|
||||
extraBefore=()
|
||||
fi
|
||||
|
||||
# Optionally print debug info.
|
||||
if (( "${NIX_DEBUG:-0}" >= 1 )); then
|
||||
# Old bash workaround, see ld-wrapper for explanation.
|
||||
echo "extra flags before to @prog@:" >&2
|
||||
printf " %q\n" ${extraBefore+"${extraBefore[@]}"} >&2
|
||||
echo "original flags to @prog@:" >&2
|
||||
printf " %q\n" ${params+"${params[@]}"} >&2
|
||||
echo "extra flags after to @prog@:" >&2
|
||||
printf " %q\n" ${extraAfter+"${extraAfter[@]}"} >&2
|
||||
fi
|
||||
|
||||
PATH="$path_backup"
|
||||
# Old bash workaround, see above.
|
||||
exec @prog@ \
|
||||
${extraBefore+"${extraBefore[@]}"} \
|
||||
${params+"${params[@]}"} \
|
||||
${extraAfter+"${extraAfter[@]}"}
|
||||
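The NIX_DEBUG branch near the end is the easiest way to see which flags the wrapper injects. A hedged sketch of surfacing it from a derivation (the package itself is made up; the wrapper scripts read the variable directly):

  # hypothetical: print extraBefore/params/extraAfter during the build
  stdenv.mkDerivation {
    pname = "ada-example";
    version = "0.1";
    src = ./.;
    NIX_DEBUG = "1";
  }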
120
pkgs/build-support/cc-wrapper/setup-hook.sh
Normal file
|
|
@ -0,0 +1,120 @@
|
|||
# CC Wrapper hygiene
|
||||
#
|
||||
# For at least cross compilation, we need to depend on multiple cc-wrappers at
|
||||
# once---specifically up to one per sort of dependency. This follows from having
|
||||
# different tools targeting different platforms, and different flags for those
|
||||
# tools. For example:
|
||||
#
|
||||
# # Flags for compiling (whether or not linking) C code for the...
|
||||
# NIX_CFLAGS_COMPILE_FOR_BUILD # ...build platform
|
||||
# NIX_CFLAGS_COMPILE # ...host platform
|
||||
# NIX_CFLAGS_COMPILE_FOR_TARGET # ...target platform
|
||||
#
|
||||
# Notice that these platforms are the 3 *relative* to the package using
|
||||
# cc-wrapper, not absolute like `x86_64-pc-linux-gnu`.
|
||||
#
|
||||
# The simplest solution would be to have separate cc-wrappers per (3 intended
|
||||
# use-cases * n absolute concrete platforms). For the use-case axis, we would
|
||||
# @-splice in 'BUILD_' '' 'TARGET_' to use the right environment variables when
|
||||
# building the cc-wrapper, and likewise prefix the binaries' names so they didn't
|
||||
# clobber each other on the PATH. But the need for 3x cc-wrappers, along with
|
||||
# non-standard name prefixes, is annoying and liable to break packages' build
|
||||
# systems.
|
||||
#
|
||||
# Instead, we opt to have just one cc-wrapper per absolute platform. Matching
|
||||
# convention, the binaries' names can just be prefixed with their target
|
||||
# platform. On the other hand, that means packages will depend on not just
|
||||
# multiple cc-wrappers, but the exact same cc-wrapper derivation multiple ways.
|
||||
# That means the exact same cc-wrapper derivation must be able to avoid
|
||||
# conflicting with itself, despite the fact that `setup-hook.sh`, the `addCvars`
|
||||
# function, and `add-flags.sh` are all communicating with each other with
|
||||
# environment variables. Yuck.
|
||||
#
|
||||
# The basic strategy is:
|
||||
#
|
||||
# - Everyone exclusively *adds information* to relative-platform-specific
|
||||
# environment variables, like `NIX_CFLAGS_COMPILE_FOR_TARGET`, to communicate
|
||||
# with the wrapped binaries.
|
||||
#
|
||||
# - The wrapped binaries will exclusively *read* cc-wrapper-derivation-specific
|
||||
#   environment variables distinguished with `suffixSalt`, like
|
||||
# `NIX_CFLAGS_COMPILE_@suffixSalt@`.
|
||||
#
|
||||
# - `add-flags`, beyond its old task of reading extra flags stuck inside the
|
||||
# cc-wrapper derivation, will convert the relative-platform-specific
|
||||
# variables to cc-wrapper-derivation-specific variables. This conversion is
|
||||
# the only time all but one of the cc-wrapper-derivation-specific variables
|
||||
# are set.
|
||||
#
|
||||
# This ensures the flow of information is exclusive from
|
||||
# relative-platform-specific variables to cc-wrapper-derivation-specific
|
||||
# variables. This allows us to support the general case of a many--many relation
|
||||
# between relative platforms and cc-wrapper derivations.
|
||||
#
|
||||
# For more details, read the individual files where the mechanisms used to
|
||||
# accomplish this will be individually documented.
|
||||
|
||||
# Skip setup hook if we're neither a build-time dep, nor, temporarily, doing a
|
||||
# native compile.
|
||||
#
|
||||
# TODO(@Ericson2314): No native exception
|
||||
[[ -z ${strictDeps-} ]] || (( "$hostOffset" < 0 )) || return 0
|
||||
|
||||
# It's fine that any other cc-wrapper will redefine this. Bash functions close
|
||||
# over no state, and there's no @-substitutions within, so any redefined
|
||||
# function is guaranteed to be exactly the same.
|
||||
ccWrapper_addCVars () {
|
||||
# See ../setup-hooks/role.bash
|
||||
local role_post
|
||||
getHostRoleEnvHook
|
||||
|
||||
if [ -d "$1/include" ]; then
|
||||
export NIX_CFLAGS_COMPILE${role_post}+=" -isystem $1/include"
|
||||
fi
|
||||
|
||||
if [ -d "$1/Library/Frameworks" ]; then
|
||||
export NIX_CFLAGS_COMPILE${role_post}+=" -iframework $1/Library/Frameworks"
|
||||
fi
|
||||
}
|
||||
|
||||
# See ../setup-hooks/role.bash
|
||||
getTargetRole
|
||||
getTargetRoleWrapper
|
||||
|
||||
# We use the `targetOffset` to choose the right env hook to accumulate the right
|
||||
# sort of deps (those with that offset).
|
||||
addEnvHooks "$targetOffset" ccWrapper_addCVars
|
||||
|
||||
# Note 1: these come *after* $out in the PATH (see setup.sh).
|
||||
# Note 2: phase separation makes this look useless to shellcheck.
|
||||
|
||||
# shellcheck disable=SC2157
|
||||
if [ -n "@cc@" ]; then
|
||||
addToSearchPath _PATH @cc@/bin
|
||||
fi
|
||||
|
||||
# shellcheck disable=SC2157
|
||||
if [ -n "@libc_bin@" ]; then
|
||||
addToSearchPath _PATH @libc_bin@/bin
|
||||
fi
|
||||
|
||||
# shellcheck disable=SC2157
|
||||
if [ -n "@coreutils_bin@" ]; then
|
||||
addToSearchPath _PATH @coreutils_bin@/bin
|
||||
fi
|
||||
|
||||
# Export tool environment variables so various build systems use the right ones.
|
||||
|
||||
export NIX_CC${role_post}=@out@
|
||||
|
||||
export CC${role_post}=@named_cc@
|
||||
export CXX${role_post}=@named_cxx@
|
||||
|
||||
# If unset, assume the default hardening flags.
|
||||
: ${NIX_HARDENING_ENABLE="fortify stackprotector pic strictoverflow format relro bindnow"}
|
||||
export NIX_HARDENING_ENABLE
|
||||
|
||||
# No local scope in sourced file
|
||||
unset -v role_post
|
||||
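In day-to-day packaging only the relative-platform variables are touched; the suffixSalt forms remain an implementation detail of the wrapper. A hedged sketch for a plain native build, where the un-suffixed variable is the right one:

  # hypothetical consumer of the hook
  stdenv.mkDerivation {
    pname = "needs-extra-define";
    version = "0.1";
    src = ./.;
    NIX_CFLAGS_COMPILE = "-DUSE_LEGACY_API=1";   # folded in via add-flags.sh
  }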
36
pkgs/build-support/closure-info.nix
Normal file
|
|
@ -0,0 +1,36 @@
|
|||
# This derivation builds two files containing information about the
|
||||
# closure of 'rootPaths': $out/store-paths contains the paths in the
|
||||
# closure, and $out/registration contains a file suitable for use with
|
||||
# "nix-store --load-db" and "nix-store --register-validity
|
||||
# --hash-given".
|
||||
|
||||
{ stdenv, buildPackages }:
|
||||
|
||||
{ rootPaths }:
|
||||
|
||||
assert builtins.langVersion >= 5;
|
||||
|
||||
stdenv.mkDerivation {
|
||||
name = "closure-info";
|
||||
|
||||
__structuredAttrs = true;
|
||||
|
||||
exportReferencesGraph.closure = rootPaths;
|
||||
|
||||
preferLocalBuild = true;
|
||||
|
||||
PATH = "${buildPackages.coreutils}/bin:${buildPackages.jq}/bin";
|
||||
|
||||
builder = builtins.toFile "builder"
|
||||
''
|
||||
. .attrs.sh
|
||||
|
||||
out=''${outputs[out]}
|
||||
|
||||
mkdir $out
|
||||
|
||||
jq -r ".closure | map(.narSize) | add" < .attrs.json > $out/total-nar-size
|
||||
jq -r '.closure | map([.path, .narHash, .narSize, "", (.references | length)] + .references) | add | map("\(.)\n") | add' < .attrs.json | head -n -1 > $out/registration
|
||||
jq -r .closure[].path < .attrs.json > $out/store-paths
|
||||
'';
|
||||
}
|
||||
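A hedged usage sketch for the builder above, exposed in nixpkgs as pkgs.closureInfo; the consuming derivation is made up:

  # hypothetical: bake the closure of hello into an installer payload
  let
    closure = pkgs.closureInfo { rootPaths = [ pkgs.hello ]; };
  in pkgs.runCommand "installer-payload" { } ''
    mkdir -p $out
    cp ${closure}/store-paths $out/
    cp ${closure}/registration $out/   # later: nix-store --load-db < registration
  ''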
132
pkgs/build-support/coq/default.nix
Normal file
|
|
@ -0,0 +1,132 @@
|
|||
{ lib, stdenv, coqPackages, coq, which, fetchzip }@args:
|
||||
let lib = import ./extra-lib.nix {inherit (args) lib;}; in
|
||||
with builtins; with lib;
|
||||
let
|
||||
isGitHubDomain = d: match "^github.*" d != null;
|
||||
isGitLabDomain = d: match "^gitlab.*" d != null;
|
||||
in
|
||||
{ pname,
|
||||
version ? null,
|
||||
fetcher ? null,
|
||||
owner ? "coq-community",
|
||||
domain ? "github.com",
|
||||
repo ? pname,
|
||||
defaultVersion ? null,
|
||||
releaseRev ? (v: v),
|
||||
displayVersion ? {},
|
||||
release ? {},
|
||||
buildInputs ? [],
|
||||
nativeBuildInputs ? [],
|
||||
extraBuildInputs ? [],
|
||||
extraNativeBuildInputs ? [],
|
||||
overrideBuildInputs ? [],
|
||||
overrideNativeBuildInputs ? [],
|
||||
namePrefix ? [ "coq" ],
|
||||
enableParallelBuilding ? true,
|
||||
extraInstallFlags ? [],
|
||||
setCOQBIN ? true,
|
||||
mlPlugin ? false,
|
||||
useMelquiondRemake ? null,
|
||||
dropAttrs ? [],
|
||||
keepAttrs ? [],
|
||||
dropDerivationAttrs ? [],
|
||||
useDune2ifVersion ? (x: false),
|
||||
useDune2 ? false,
|
||||
opam-name ? (concatStringsSep "-" (namePrefix ++ [ pname ])),
|
||||
...
|
||||
}@args:
|
||||
let
|
||||
args-to-remove = foldl (flip remove) ([
|
||||
"version" "fetcher" "repo" "owner" "domain" "releaseRev"
|
||||
"displayVersion" "defaultVersion" "useMelquiondRemake"
|
||||
"release"
|
||||
"buildInputs" "nativeBuildInputs"
|
||||
"extraBuildInputs" "extraNativeBuildInputs"
|
||||
"overrideBuildInputs" "overrideNativeBuildInputs"
|
||||
"namePrefix"
|
||||
"meta" "useDune2ifVersion" "useDune2" "opam-name"
|
||||
"extraInstallFlags" "setCOQBIN" "mlPlugin"
|
||||
"dropAttrs" "dropDerivationAttrs" "keepAttrs" ] ++ dropAttrs) keepAttrs;
|
||||
fetch = import ../coq/meta-fetch/default.nix
|
||||
{ inherit lib stdenv fetchzip; } ({
|
||||
inherit release releaseRev;
|
||||
location = { inherit domain owner repo; };
|
||||
} // optionalAttrs (args?fetcher) {inherit fetcher;});
|
||||
fetched = fetch (if !isNull version then version else defaultVersion);
|
||||
display-pkg = n: sep: v:
|
||||
let d = displayVersion.${n} or (if sep == "" then ".." else true); in
|
||||
n + optionalString (v != "" && v != null) (switch d [
|
||||
{ case = true; out = sep + v; }
|
||||
{ case = "."; out = sep + versions.major v; }
|
||||
{ case = ".."; out = sep + versions.majorMinor v; }
|
||||
{ case = "..."; out = sep + versions.majorMinorPatch v; }
|
||||
{ case = isFunction; out = optionalString (d v != "") (sep + d v); }
|
||||
{ case = isString; out = optionalString (d != "") (sep + d); }
|
||||
] "") + optionalString (v == null) "-broken";
|
||||
append-version = p: n: p + display-pkg n "" coqPackages.${n}.version + "-";
|
||||
prefix-name = foldl append-version "" namePrefix;
|
||||
useDune2 = args.useDune2 or (useDune2ifVersion fetched.version);
|
||||
coqlib-flags = switch coq.coq-version [
|
||||
{ case = v: versions.isLe "8.6" v && v != "dev" ;
|
||||
out = [ "COQLIB=$(out)/lib/coq/${coq.coq-version}/" ]; }
|
||||
] [ "COQLIBINSTALL=$(out)/lib/coq/${coq.coq-version}/user-contrib"
|
||||
"COQPLUGININSTALL=$(OCAMLFIND_DESTDIR)" ];
|
||||
docdir-flags = switch coq.coq-version [
|
||||
{ case = v: versions.isLe "8.6" v && v != "dev";
|
||||
out = [ "DOCDIR=$(out)/share/coq/${coq.coq-version}/" ]; }
|
||||
] [ "COQDOCINSTALL=$(out)/share/coq/${coq.coq-version}/user-contrib" ];
|
||||
in
|
||||
|
||||
stdenv.mkDerivation (removeAttrs ({
|
||||
|
||||
name = prefix-name + (display-pkg pname "-" fetched.version);
|
||||
|
||||
inherit (fetched) version src;
|
||||
|
||||
nativeBuildInputs = args.overrideNativeBuildInputs
|
||||
or ([ which coq.ocamlPackages.findlib ]
|
||||
++ optional useDune2 coq.ocamlPackages.dune_2
|
||||
++ optional (useDune2 || mlPlugin) coq.ocaml
|
||||
++ (args.nativeBuildInputs or []) ++ extraNativeBuildInputs);
|
||||
buildInputs = args.overrideBuildInputs
|
||||
or ([ coq ] ++ (args.buildInputs or []) ++ extraBuildInputs);
|
||||
inherit enableParallelBuilding;
|
||||
|
||||
meta = ({ platforms = coq.meta.platforms; } //
|
||||
(switch domain [{
|
||||
case = pred.union isGitHubDomain isGitLabDomain;
|
||||
out = { homepage = "https://${domain}/${owner}/${repo}"; };
|
||||
}] {}) //
|
||||
optionalAttrs (fetched.broken or false) { coqFilter = true; broken = true; }) //
|
||||
(args.meta or {}) ;
|
||||
|
||||
}
|
||||
// (optionalAttrs setCOQBIN { COQBIN = "${coq}/bin/"; })
|
||||
// (optionalAttrs (!args?installPhase && !args?useMelquiondRemake) {
|
||||
installFlags =
|
||||
[ "DESTDIR=$(out)" ] ++ coqlib-flags ++ docdir-flags ++
|
||||
extraInstallFlags;
|
||||
})
|
||||
// (optionalAttrs useDune2 {
|
||||
buildPhase = ''
|
||||
runHook preBuild
|
||||
dune build -p ${opam-name} ''${enableParallelBuilding:+-j $NIX_BUILD_CORES}
|
||||
runHook postBuild
|
||||
'';
|
||||
installPhase = ''
|
||||
runHook preInstall
|
||||
dune install ${opam-name} --prefix=$out
|
||||
mv $out/lib/coq $out/lib/TEMPORARY
|
||||
mkdir $out/lib/coq/
|
||||
mv $out/lib/TEMPORARY $out/lib/coq/${coq.coq-version}
|
||||
runHook postInstall
|
||||
'';
|
||||
})
|
||||
// (optionalAttrs (args?useMelquiondRemake) rec {
|
||||
COQUSERCONTRIB = "$out/lib/coq/${coq.coq-version}/user-contrib";
|
||||
preConfigurePhases = "autoconf";
|
||||
configureFlags = [ "--libdir=${COQUSERCONTRIB}/${useMelquiondRemake.logpath or ""}" ];
|
||||
buildPhase = "./remake -j$NIX_BUILD_CORES";
|
||||
installPhase = "./remake install";
|
||||
})
|
||||
// (removeAttrs args args-to-remove)) dropDerivationAttrs)
|
||||
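A hedged usage sketch for this builder, which nixpkgs exposes as coqPackages.mkCoqDerivation; the package, version and hash are placeholders:

  # hypothetical Coq library built with the generic builder
  mkCoqDerivation {
    pname = "paramcoq";
    owner = "coq-community";
    defaultVersion = "1.1.3";
    release."1.1.3".sha256 = lib.fakeSha256;   # replace after the first fetch attempt
    releaseRev = v: "v${v}";
  }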
145
pkgs/build-support/coq/extra-lib.nix
Normal file
|
|
@ -0,0 +1,145 @@
|
|||
{ lib }:
|
||||
with builtins; with lib; recursiveUpdate lib (rec {
|
||||
|
||||
versions =
|
||||
let
|
||||
truncate = n: v: concatStringsSep "." (take n (splitVersion v));
|
||||
opTruncate = op: v0: v: let n = length (splitVersion v0); in
|
||||
op (truncate n v) (truncate n v0);
|
||||
in rec {
|
||||
|
||||
/* Get string of the first n parts of a version string.
|
||||
|
||||
Example:
|
||||
- truncate 2 "1.2.3-stuff"
|
||||
=> "1.2"
|
||||
|
||||
- truncate 4 "1.2.3-stuff"
|
||||
=> "1.2.3.stuff"
|
||||
*/
|
||||
|
||||
inherit truncate;
|
||||
|
||||
/* Get string of the first three parts (major, minor and patch)
|
||||
of a version string.
|
||||
|
||||
Example:
|
||||
majorMinorPatch "1.2.3-stuff"
|
||||
=> "1.2.3"
|
||||
*/
|
||||
majorMinorPatch = truncate 3;
|
||||
|
||||
/* Version comparison predicates,
|
||||
- isGe v0 v <-> v is greater than or equal to v0 [*]
|
||||
- isLe v0 v <-> v is less than or equal to v0 [*]
|
||||
- isGt v0 v <-> v is strictly greater than v0 [*]
|
||||
- isLt v0 v <-> v is strictly less than v0 [*]
|
||||
- isEq v0 v <-> v is equal to v0 [*]
|
||||
- range low high v <-> v is between low and high [**]
|
||||
|
||||
[*] truncating v to the same number of digits as v0
|
||||
[**] truncating v to low for the lower bound and high for the upper bound
|
||||
|
||||
Examples:
|
||||
- isGe "8.10" "8.10.1"
|
||||
=> true
|
||||
- isLe "8.10" "8.10.1"
|
||||
=> true
|
||||
- isGt "8.10" "8.10.1"
|
||||
=> false
|
||||
- isGt "8.10.0" "8.10.1"
|
||||
=> true
|
||||
- isEq "8.10" "8.10.1"
|
||||
=> true
|
||||
- range "8.10" "8.11" "8.11.1"
|
||||
=> true
|
||||
- range "8.10" "8.11+" "8.11.0"
|
||||
=> false
|
||||
- range "8.10" "8.11+" "8.11+beta1"
|
||||
=> false
|
||||
|
||||
*/
|
||||
isGe = opTruncate versionAtLeast;
|
||||
isGt = opTruncate (flip versionOlder);
|
||||
isLe = opTruncate (flip versionAtLeast);
|
||||
isLt = opTruncate versionOlder;
|
||||
isEq = opTruncate pred.equal;
|
||||
range = low: high: pred.inter (versions.isGe low) (versions.isLe high);
|
||||
};
|
||||
|
||||
/* Returns a list of lists, splitting it using a predicate.
|
||||
This is analogous to builtins.split sep list,
|
||||
with a predicate as a separator and a list instead of a string.
|
||||
|
||||
Type: splitList :: (a -> bool) -> [a] -> [[a]]
|
||||
|
||||
Example:
|
||||
splitList (x: x == "x") [ "y" "x" "z" "t" ]
|
||||
=> [ [ "y" ] "x" [ "z" "t" ] ]
|
||||
*/
|
||||
splitList = pred: l: # put in file lists
|
||||
let loop = (vv: v: l: if l == [] then vv ++ [v]
|
||||
else let hd = head l; tl = tail l; in
|
||||
if pred hd then loop (vv ++ [ v hd ]) [] tl else loop vv (v ++ [hd]) tl);
|
||||
in loop [] [] l;
|
||||
|
||||
pred = {
|
||||
/* Predicate intersection, union, and complement */
|
||||
inter = p: q: x: p x && q x;
|
||||
union = p: q: x: p x || q x;
|
||||
compl = p: x: ! p x;
|
||||
true = p: true;
|
||||
false = p: false;
|
||||
|
||||
/* predicate "being equal to y" */
|
||||
equal = y: x: x == y;
|
||||
};
|
||||
|
||||
/* Emulate a "switch - case" construct,
|
||||
instead of relying on `if then else if ...` */
|
||||
/* Usage:
|
||||
```nix
|
||||
switch-if [
|
||||
if-clause-1
|
||||
..
|
||||
if-clause-k
|
||||
] default-out
|
||||
```
|
||||
where an if-clause has the form `{ cond = b; out = r; }`
|
||||
and the `out` of the first clause whose `b` is true is returned */
|
||||
|
||||
switch-if = c: d: (findFirst (getAttr "cond") {} c).out or d;
|
||||
|
||||
/* Usage:
|
||||
```nix
|
||||
switch x [
|
||||
simple-clause-1
|
||||
..
|
||||
simple-clause-k
|
||||
] default-out
|
||||
```
|
||||
where a simple-clause has the form `{ case = p; out = r; }`
|
||||
and the `out` of the first clause whose `p x` is true is returned
|
||||
or
|
||||
```nix
|
||||
switch [ x1 .. xn ] [
|
||||
complex-clause-1
|
||||
..
|
||||
complex-clause-k
|
||||
] default-out
|
||||
```
|
||||
where a complex-clause is either a simple-clause
|
||||
or has the form { cases = [ p1 .. pn ]; out = r; }
|
||||
in which case the first clause where all `pi x` hold is taken
|
||||
|
||||
if the variables p are not functions,
|
||||
they are converted to the predicate `equal p`
|
||||
if out is missing the default-out is taken */
|
||||
|
||||
switch = var: clauses: default: with pred; let
|
||||
compare = f: if isFunction f then f else equal f;
|
||||
combine = cl: var:
|
||||
if cl?case then compare cl.case var
|
||||
else all (equal true) (zipListsWith compare cl.cases var); in
|
||||
switch-if (map (cl: { cond = combine cl var; inherit (cl) out; }) clauses) default;
|
||||
})
|
||||
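A short worked example of the `switch` helper, assuming the attributes above are in scope (for instance via `with import ./extra-lib.nix { inherit lib; };`); the strings are illustrative and mirror how coqlib-flags is computed in default.nix:

  switch "8.13.2" [
    { case = versions.isLe "8.6";         out = "legacy layout"; }
    { case = versions.range "8.7" "8.13"; out = "user-contrib layout"; }
  ] "unknown"
  # => "user-contrib layout": the first predicate fails, the second matches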
69
pkgs/build-support/coq/meta-fetch/default.nix
Normal file
|
|
@ -0,0 +1,69 @@
|
|||
{ lib, stdenv, fetchzip }@args:
|
||||
let lib' = lib; in
|
||||
let lib = import ../extra-lib.nix {lib = lib';}; in
|
||||
with builtins; with lib;
|
||||
let
|
||||
default-fetcher = {domain ? "github.com", owner ? "", repo, rev, name ? "source", sha256 ? null, ...}@args:
|
||||
let ext = if args?sha256 then "zip" else "tar.gz";
|
||||
fmt = if args?sha256 then "zip" else "tarball";
|
||||
pr = match "^#(.*)$" rev;
|
||||
url = switch-if [
|
||||
{ cond = isNull pr && !isNull (match "^github.*" domain);
|
||||
out = "https://${domain}/${owner}/${repo}/archive/${rev}.${ext}"; }
|
||||
{ cond = !isNull pr && !isNull (match "^github.*" domain);
|
||||
out = "https://api.${domain}/repos/${owner}/${repo}/${fmt}/pull/${head pr}/head"; }
|
||||
{ cond = isNull pr && !isNull (match "^gitlab.*" domain);
|
||||
out = "https://${domain}/${owner}/${repo}/-/archive/${rev}/${repo}-${rev}.${ext}"; }
|
||||
{ cond = !isNull (match "(www.)?mpi-sws.org" domain);
|
||||
out = "https://www.mpi-sws.org/~${owner}/${repo}/download/${repo}-${rev}.${ext}";}
|
||||
] (throw "meta-fetch: no fetcher found for domain ${domain} on ${rev}");
|
||||
fetch = x: if args?sha256 then fetchzip (x // { inherit sha256; }) else fetchTarball x;
|
||||
in fetch { inherit url ; };
|
||||
in
|
||||
{
|
||||
fetcher ? default-fetcher,
|
||||
location,
|
||||
release ? {},
|
||||
releaseRev ? (v: v),
|
||||
}:
|
||||
let isVersion = x: isString x && match "^/.*" x == null && release?${x};
|
||||
shortVersion = x: if (isString x && match "^/.*" x == null)
|
||||
then findFirst (v: versions.majorMinor v == x) null
|
||||
(sort versionAtLeast (attrNames release))
|
||||
else null;
|
||||
isShortVersion = x: shortVersion x != null;
|
||||
isPathString = x: isString x && match "^/.*" x != null && pathExists x; in
|
||||
arg:
|
||||
switch arg [
|
||||
{ case = isNull; out = { version = "broken"; src = ""; broken = true; }; }
|
||||
{ case = isPathString; out = { version = "dev"; src = arg; }; }
|
||||
{ case = pred.union isVersion isShortVersion;
|
||||
out = let v = if isVersion arg then arg else shortVersion arg; in
|
||||
let
|
||||
given-sha256 = release.${v}.sha256 or "";
|
||||
sha256 = if given-sha256 == "" then lib.fakeSha256 else given-sha256;
|
||||
rv = release.${v} // { inherit sha256; }; in
|
||||
{
|
||||
version = rv.version or v;
|
||||
src = rv.src or fetcher (location // { rev = releaseRev v; } // rv);
|
||||
};
|
||||
}
|
||||
{ case = isString;
|
||||
out = let
|
||||
splitted = filter isString (split ":" arg);
|
||||
rev = last splitted;
|
||||
has-owner = length splitted > 1;
|
||||
version = "dev"; in {
|
||||
inherit version;
|
||||
src = fetcher (location // { inherit rev; } //
|
||||
(optionalAttrs has-owner { owner = head splitted; }));
|
||||
}; }
|
||||
{ case = isAttrs;
|
||||
out = {
|
||||
version = arg.version or "dev";
|
||||
src = (arg.fetcher or fetcher) (location // (arg.location or {})); }; }
|
||||
{ case = isPath;
|
||||
out = {
|
||||
version = "dev" ;
|
||||
src = builtins.path {path = arg; name = location.name or "source";}; }; }
|
||||
] (throw "not a valid source description")
|
||||
25
pkgs/build-support/dhall/directory-to-nix.nix
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
{ dhallPackages, dhallPackageToNix}:
|
||||
|
||||
# `dhallDirectoryToNix` is a utility function to take a directory of Dhall files
|
||||
# and read them in as a Nix expression.
|
||||
#
|
||||
# This function is similar to `dhallToNix`, but takes a Nixpkgs Dhall package
|
||||
# as input instead of raw Dhall code.
|
||||
#
|
||||
# Note that this uses "import from derivation" (IFD), meaning that Nix will
|
||||
# perform a build during the evaluation phase if you use this
|
||||
# `dhallDirectoryToNix` utility. It is not possible to use
|
||||
# `dhallDirectoryToNix` in Nixpkgs, since the Nixpkgs Hydra doesn't allow IFD.
|
||||
|
||||
{ src
|
||||
, # The file to import, relative to the src root directory
|
||||
file ? "package.dhall"
|
||||
}@args:
|
||||
|
||||
let
|
||||
generatedPkg = dhallPackages.generateDhallDirectoryPackage args;
|
||||
|
||||
builtPkg = dhallPackages.callPackage generatedPkg { };
|
||||
|
||||
in
|
||||
dhallPackageToNix builtPkg
|
||||
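A hedged usage sketch; the directory and file name are placeholders, and the evaluator must permit import-from-derivation for this to evaluate:

  # hypothetical: read ./config/package.dhall into a Nix value
  cfg = dhallDirectoryToNix {
    src = ./config;
    file = "package.dhall";
  };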
36
pkgs/build-support/dhall/package-to-nix.nix
Normal file
|
|
@ -0,0 +1,36 @@
|
|||
|
||||
# `dhallPackageToNix` is a utility function to take a Nixpkgs Dhall package
|
||||
# (created with a function like `dhallPackages.buildDhallDirectoryPackage`)
|
||||
# and read it in as a Nix expression.
|
||||
#
|
||||
# This function is similar to `dhallToNix`, but takes a Nixpkgs Dhall package
|
||||
# as input instead of raw Dhall code.
|
||||
#
|
||||
# Note that this uses "import from derivation" (IFD), meaning that Nix will
|
||||
# perform a build during the evaluation phase if you use this
|
||||
# `dhallPackageToNix` utility. It is not possible to use `dhallPackageToNix`
|
||||
# in Nixpkgs, since the Nixpkgs Hydra doesn't allow IFD.
|
||||
|
||||
{ stdenv, dhall-nix }:
|
||||
|
||||
dhallPackage:
|
||||
let
|
||||
drv = stdenv.mkDerivation {
|
||||
name = "dhall-compiled-package.nix";
|
||||
|
||||
buildCommand = ''
|
||||
# Dhall requires that the cache is writable, even if it is never written to.
|
||||
# We copy the cache from the input package to the current directory and
|
||||
# set the cache as writable.
|
||||
cp -r "${dhallPackage}/.cache" ./
|
||||
export XDG_CACHE_HOME=$PWD/.cache
|
||||
chmod -R +w ./.cache
|
||||
|
||||
dhall-to-nix <<< "${dhallPackage}/binary.dhall" > $out
|
||||
'';
|
||||
|
||||
nativeBuildInputs = [ dhall-nix ];
|
||||
};
|
||||
|
||||
in
|
||||
import drv
|
||||
38
pkgs/build-support/dhall/to-nix.nix
Normal file
|
|
@ -0,0 +1,38 @@
|
|||
/* `dhallToNix` is a utility function to convert expressions in the Dhall
|
||||
configuration language to their corresponding Nix expressions.
|
||||
|
||||
Example:
|
||||
dhallToNix "{ foo = 1, bar = True }"
|
||||
=> { foo = 1; bar = true; }
|
||||
dhallToNix "λ(x : Bool) → x == False"
|
||||
=> x : x == false
|
||||
dhallToNix "λ(x : Bool) → x == False" false
|
||||
=> true
|
||||
|
||||
See https://hackage.haskell.org/package/dhall-nix/docs/Dhall-Nix.html for
|
||||
a longer tutorial
|
||||
|
||||
Note that this uses "import from derivation", meaning that Nix will perform
|
||||
a build during the evaluation phase if you use this `dhallToNix` utility
|
||||
*/
|
||||
{ stdenv, dhall-nix, writeText }:
|
||||
|
||||
let
|
||||
dhallToNix = code :
|
||||
let
|
||||
file = writeText "dhall-expression" code;
|
||||
|
||||
drv = stdenv.mkDerivation {
|
||||
name = "dhall-compiled.nix";
|
||||
|
||||
buildCommand = ''
|
||||
dhall-to-nix <<< "${file}" > $out
|
||||
'';
|
||||
|
||||
buildInputs = [ dhall-nix ];
|
||||
};
|
||||
|
||||
in
|
||||
import drv;
|
||||
in
|
||||
dhallToNix
|
||||
997
pkgs/build-support/docker/default.nix
Normal file
|
|
@ -0,0 +1,997 @@
|
|||
{ bashInteractive
|
||||
, buildPackages
|
||||
, cacert
|
||||
, callPackage
|
||||
, closureInfo
|
||||
, coreutils
|
||||
, e2fsprogs
|
||||
, fakechroot
|
||||
, fakeNss
|
||||
, fakeroot
|
||||
, go
|
||||
, jq
|
||||
, jshon
|
||||
, lib
|
||||
, makeWrapper
|
||||
, moreutils
|
||||
, nix
|
||||
, nixosTests
|
||||
, pigz
|
||||
, rsync
|
||||
, runCommand
|
||||
, runtimeShell
|
||||
, shadow
|
||||
, skopeo
|
||||
, storeDir ? builtins.storeDir
|
||||
, substituteAll
|
||||
, symlinkJoin
|
||||
, tarsum
|
||||
, util-linux
|
||||
, vmTools
|
||||
, writeReferencesToFile
|
||||
, writeScript
|
||||
, writeText
|
||||
, writeTextDir
|
||||
, writePython3
|
||||
}:
|
||||
|
||||
let
|
||||
inherit (lib)
|
||||
optionals
|
||||
optionalString
|
||||
;
|
||||
|
||||
inherit (lib)
|
||||
escapeShellArgs
|
||||
toList
|
||||
;
|
||||
|
||||
mkDbExtraCommand = contents:
|
||||
let
|
||||
contentsList = if builtins.isList contents then contents else [ contents ];
|
||||
in
|
||||
''
|
||||
echo "Generating the nix database..."
|
||||
echo "Warning: only the database of the deepest Nix layer is loaded."
|
||||
echo " If you want to use nix commands in the container, it would"
|
||||
echo " be better to only have one layer that contains a nix store."
|
||||
|
||||
export NIX_REMOTE=local?root=$PWD
|
||||
# A user is required by nix
|
||||
# https://github.com/NixOS/nix/blob/9348f9291e5d9e4ba3c4347ea1b235640f54fd79/src/libutil/util.cc#L478
|
||||
export USER=nobody
|
||||
${buildPackages.nix}/bin/nix-store --load-db < ${closureInfo {rootPaths = contentsList;}}/registration
|
||||
|
||||
mkdir -p nix/var/nix/gcroots/docker/
|
||||
for i in ${lib.concatStringsSep " " contentsList}; do
|
||||
ln -s $i nix/var/nix/gcroots/docker/$(basename $i)
|
||||
done;
|
||||
'';
|
||||
|
||||
# The OCI Image specification recommends that configurations use values listed
|
||||
# in the Go Language document for GOARCH.
|
||||
# Reference: https://github.com/opencontainers/image-spec/blob/master/config.md#properties
|
||||
# For the mapping from Nixpkgs system parameters to GOARCH, we can reuse the
|
||||
# mapping from the go package.
|
||||
defaultArch = go.GOARCH;
|
||||
|
||||
in
|
||||
rec {
|
||||
examples = callPackage ./examples.nix {
|
||||
inherit buildImage buildLayeredImage fakeNss pullImage shadowSetup buildImageWithNixDb;
|
||||
};
|
||||
|
||||
tests = {
|
||||
inherit (nixosTests)
|
||||
docker-tools
|
||||
docker-tools-overlay
|
||||
# requires remote builder
|
||||
# docker-tools-cross
|
||||
;
|
||||
};
|
||||
|
||||
pullImage =
|
||||
let
|
||||
fixName = name: builtins.replaceStrings [ "/" ":" ] [ "-" "-" ] name;
|
||||
in
|
||||
{ imageName
|
||||
# To find the digest of an image, you can use skopeo:
|
||||
# see doc/functions.xml
|
||||
, imageDigest
|
||||
, sha256
|
||||
, os ? "linux"
|
||||
, arch ? defaultArch
|
||||
|
||||
# This is used to set name to the pulled image
|
||||
, finalImageName ? imageName
|
||||
# This is used to set a tag on the pulled image
|
||||
, finalImageTag ? "latest"
|
||||
# This is used to disable TLS certificate verification, allowing access to http registries on (hopefully) trusted networks
|
||||
, tlsVerify ? true
|
||||
|
||||
, name ? fixName "docker-image-${finalImageName}-${finalImageTag}.tar"
|
||||
}:
|
||||
|
||||
runCommand name
|
||||
{
|
||||
inherit imageDigest;
|
||||
imageName = finalImageName;
|
||||
imageTag = finalImageTag;
|
||||
impureEnvVars = lib.fetchers.proxyImpureEnvVars;
|
||||
outputHashMode = "flat";
|
||||
outputHashAlgo = "sha256";
|
||||
outputHash = sha256;
|
||||
|
||||
nativeBuildInputs = [ skopeo ];
|
||||
SSL_CERT_FILE = "${cacert.out}/etc/ssl/certs/ca-bundle.crt";
|
||||
|
||||
sourceURL = "docker://${imageName}@${imageDigest}";
|
||||
destNameTag = "${finalImageName}:${finalImageTag}";
|
||||
} ''
|
||||
skopeo \
|
||||
--insecure-policy \
|
||||
--tmpdir=$TMPDIR \
|
||||
--override-os ${os} \
|
||||
--override-arch ${arch} \
|
||||
copy \
|
||||
--src-tls-verify=${lib.boolToString tlsVerify} \
|
||||
"$sourceURL" "docker-archive://$out:$destNameTag" \
|
||||
| cat # pipe through cat to force-disable progress bar
|
||||
'';
|
||||
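# A hedged usage sketch for pullImage; the digest and hash below are
# placeholders that would normally come from `skopeo inspect` and a first
# (failing) build respectively:
#
#   dockerTools.pullImage {
#     imageName = "library/alpine";
#     imageDigest = "sha256:<digest from skopeo inspect>";
#     sha256 = lib.fakeSha256;        # replace with the reported hash
#     finalImageTag = "3.14";
#   }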
|
||||
# We need to sum layer.tar, not a directory, hence tarsum instead of nix-hash.
|
||||
# And we cannot untar it, because then we cannot preserve permissions etc.
|
||||
inherit tarsum; # pkgs.dockerTools.tarsum
|
||||
|
||||
# buildEnv creates symlinks to dirs, which is hard to edit inside the overlay VM
|
||||
mergeDrvs =
|
||||
{ derivations
|
||||
, onlyDeps ? false
|
||||
}:
|
||||
runCommand "merge-drvs"
|
||||
{
|
||||
inherit derivations onlyDeps;
|
||||
} ''
|
||||
if [[ -n "$onlyDeps" ]]; then
|
||||
echo $derivations > $out
|
||||
exit 0
|
||||
fi
|
||||
|
||||
mkdir $out
|
||||
for derivation in $derivations; do
|
||||
echo "Merging $derivation..."
|
||||
if [[ -d "$derivation" ]]; then
|
||||
# If it's a directory, copy all of its contents into $out.
|
||||
cp -drf --preserve=mode -f $derivation/* $out/
|
||||
else
|
||||
# Otherwise treat the derivation as a tarball and extract it
|
||||
# into $out.
|
||||
tar -C $out -xpf $derivation || true
|
||||
fi
|
||||
done
|
||||
'';
|
||||
|
||||
# Helper for setting up the base files for managing users and
|
||||
# groups, only if such files don't exist already. It is suitable for
|
||||
# being used in a runAsRoot script.
|
||||
shadowSetup = ''
|
||||
export PATH=${shadow}/bin:$PATH
|
||||
mkdir -p /etc/pam.d
|
||||
if [[ ! -f /etc/passwd ]]; then
|
||||
echo "root:x:0:0::/root:${runtimeShell}" > /etc/passwd
|
||||
echo "root:!x:::::::" > /etc/shadow
|
||||
fi
|
||||
if [[ ! -f /etc/group ]]; then
|
||||
echo "root:x:0:" > /etc/group
|
||||
echo "root:x::" > /etc/gshadow
|
||||
fi
|
||||
if [[ ! -f /etc/pam.d/other ]]; then
|
||||
cat > /etc/pam.d/other <<EOF
|
||||
account sufficient pam_unix.so
|
||||
auth sufficient pam_rootok.so
|
||||
password requisite pam_unix.so nullok sha512
|
||||
session required pam_unix.so
|
||||
EOF
|
||||
fi
|
||||
if [[ ! -f /etc/login.defs ]]; then
|
||||
touch /etc/login.defs
|
||||
fi
|
||||
'';
|
||||
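# A hedged sketch of the usual pairing of shadowSetup with runAsRoot in
# buildImage (user, group and image names are made up):
#
#   dockerTools.buildImage {
#     name = "service-image";
#     runAsRoot = ''
#       ${dockerTools.shadowSetup}
#       groupadd -r svc
#       useradd -r -g svc svc
#     '';
#   }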
|
||||
# Run commands in a virtual machine.
|
||||
runWithOverlay =
|
||||
{ name
|
||||
, fromImage ? null
|
||||
, fromImageName ? null
|
||||
, fromImageTag ? null
|
||||
, diskSize ? 1024
|
||||
, preMount ? ""
|
||||
, postMount ? ""
|
||||
, postUmount ? ""
|
||||
}:
|
||||
vmTools.runInLinuxVM (
|
||||
runCommand name
|
||||
{
|
||||
preVM = vmTools.createEmptyImage {
|
||||
size = diskSize;
|
||||
fullName = "docker-run-disk";
|
||||
destination = "./image";
|
||||
};
|
||||
inherit fromImage fromImageName fromImageTag;
|
||||
|
||||
nativeBuildInputs = [ util-linux e2fsprogs jshon rsync jq ];
|
||||
} ''
|
||||
mkdir disk
|
||||
mkfs /dev/${vmTools.hd}
|
||||
mount /dev/${vmTools.hd} disk
|
||||
cd disk
|
||||
|
||||
if [[ -n "$fromImage" ]]; then
|
||||
echo "Unpacking base image..."
|
||||
mkdir image
|
||||
tar -C image -xpf "$fromImage"
|
||||
|
||||
if [[ -n "$fromImageName" ]] && [[ -n "$fromImageTag" ]]; then
|
||||
parentID="$(
|
||||
cat "image/manifest.json" |
|
||||
jq -r '.[] | select(.RepoTags | contains([$desiredTag])) | rtrimstr(".json")' \
|
||||
--arg desiredTag "$fromImageName:$fromImageTag"
|
||||
)"
|
||||
else
|
||||
echo "From-image name or tag wasn't set. Reading the first ID."
|
||||
parentID="$(cat "image/manifest.json" | jq -r '.[0].Config | rtrimstr(".json")')"
|
||||
fi
|
||||
|
||||
cat ./image/manifest.json | jq -r '.[0].Layers | .[]' > layer-list
|
||||
else
|
||||
touch layer-list
|
||||
fi
|
||||
|
||||
# Unpack all of the parent layers into the image.
|
||||
lowerdir=""
|
||||
extractionID=0
|
||||
for layerTar in $(cat layer-list); do
|
||||
echo "Unpacking layer $layerTar"
|
||||
extractionID=$((extractionID + 1))
|
||||
|
||||
mkdir -p image/$extractionID/layer
|
||||
tar -C image/$extractionID/layer -xpf image/$layerTar
|
||||
rm image/$layerTar
|
||||
|
||||
find image/$extractionID/layer -name ".wh.*" -exec bash -c 'name="$(basename {}|sed "s/^.wh.//")"; mknod "$(dirname {})/$name" c 0 0; rm {}' \;
|
||||
|
||||
# Get the next lower directory and continue the loop.
|
||||
lowerdir=image/$extractionID/layer''${lowerdir:+:}$lowerdir
|
||||
done
|
||||
|
||||
mkdir work
|
||||
mkdir layer
|
||||
mkdir mnt
|
||||
|
||||
${lib.optionalString (preMount != "") ''
|
||||
# Execute pre-mount steps
|
||||
echo "Executing pre-mount steps..."
|
||||
${preMount}
|
||||
''}
|
||||
|
||||
if [ -n "$lowerdir" ]; then
|
||||
mount -t overlay overlay -olowerdir=$lowerdir,workdir=work,upperdir=layer mnt
|
||||
else
|
||||
mount --bind layer mnt
|
||||
fi
|
||||
|
||||
${lib.optionalString (postMount != "") ''
|
||||
# Execute post-mount steps
|
||||
echo "Executing post-mount steps..."
|
||||
${postMount}
|
||||
''}
|
||||
|
||||
umount mnt
|
||||
|
||||
(
|
||||
cd layer
|
||||
cmd='name="$(basename {})"; touch "$(dirname {})/.wh.$name"; rm "{}"'
|
||||
find . -type c -exec bash -c "$cmd" \;
|
||||
)
|
||||
|
||||
${postUmount}
|
||||
'');
|
||||
|
||||
exportImage = { name ? fromImage.name, fromImage, fromImageName ? null, fromImageTag ? null, diskSize ? 1024 }:
|
||||
runWithOverlay {
|
||||
inherit name fromImage fromImageName fromImageTag diskSize;
|
||||
|
||||
postMount = ''
|
||||
echo "Packing raw image..."
|
||||
tar -C mnt --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" -cf $out/layer.tar .
|
||||
'';
|
||||
|
||||
postUmount = ''
|
||||
mv $out/layer.tar .
|
||||
rm -rf $out
|
||||
mv layer.tar $out
|
||||
'';
|
||||
};
|
||||
|
||||
# Create an executable shell script which has the coreutils in its
|
||||
# PATH. Since root scripts are executed in a blank environment, even
|
||||
# things like `ls` or `echo` will be missing.
|
||||
shellScript = name: text:
|
||||
writeScript name ''
|
||||
#!${runtimeShell}
|
||||
set -e
|
||||
export PATH=${coreutils}/bin:/bin
|
||||
${text}
|
||||
'';
|
||||
|
||||
# Create a "layer" (set of files).
|
||||
mkPureLayer =
|
||||
{
|
||||
# Name of the layer
|
||||
name
|
||||
, # JSON containing configuration and metadata for this layer.
|
||||
baseJson
|
||||
, # Files to add to the layer.
|
||||
contents ? null
|
||||
, # When copying the contents into the image, preserve symlinks to
|
||||
# directories (see `rsync -K`). Otherwise, transform those symlinks
|
||||
# into directories.
|
||||
keepContentsDirlinks ? false
|
||||
, # Additional commands to run on the layer before it is tar'd up.
|
||||
extraCommands ? ""
|
||||
, uid ? 0
|
||||
, gid ? 0
|
||||
}:
|
||||
runCommand "docker-layer-${name}"
|
||||
{
|
||||
inherit baseJson contents extraCommands;
|
||||
nativeBuildInputs = [ jshon rsync tarsum ];
|
||||
}
|
||||
''
|
||||
mkdir layer
|
||||
if [[ -n "$contents" ]]; then
|
||||
echo "Adding contents..."
|
||||
for item in $contents; do
|
||||
echo "Adding $item"
|
||||
rsync -a${if keepContentsDirlinks then "K" else "k"} --chown=0:0 $item/ layer/
|
||||
done
|
||||
else
|
||||
echo "No contents to add to layer."
|
||||
fi
|
||||
|
||||
chmod ug+w layer
|
||||
|
||||
if [[ -n "$extraCommands" ]]; then
|
||||
(cd layer; eval "$extraCommands")
|
||||
fi
|
||||
|
||||
# Tar up the layer and throw it into 'layer.tar'.
|
||||
echo "Packing layer..."
|
||||
mkdir $out
|
||||
tarhash=$(tar -C layer --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=${toString uid} --group=${toString gid} -cf - . | tee -p $out/layer.tar | tarsum)
|
||||
|
||||
# Add a 'checksum' field to the JSON, with the value set to the
|
||||
# checksum of the tarball.
|
||||
cat ${baseJson} | jshon -s "$tarhash" -i checksum > $out/json
|
||||
|
||||
# Indicate to docker that we're using schema version 1.0.
|
||||
echo -n "1.0" > $out/VERSION
|
||||
|
||||
echo "Finished building layer '${name}'"
|
||||
'';
|
||||
|
||||
# Make a "root" layer; required if we need to execute commands as a
|
||||
# privileged user on the image. The commands themselves will be
|
||||
# performed in a virtual machine sandbox.
|
||||
mkRootLayer =
|
||||
{
|
||||
# Name of the image.
|
||||
name
|
||||
, # Script to run as root. Bash.
|
||||
runAsRoot
|
||||
, # Files to add to the layer. If null, an empty layer will be created.
|
||||
contents ? null
|
||||
, # When copying the contents into the image, preserve symlinks to
|
||||
# directories (see `rsync -K`). Otherwise, transform those symlinks
|
||||
# into directories.
|
||||
keepContentsDirlinks ? false
|
||||
, # JSON containing configuration and metadata for this layer.
|
||||
baseJson
|
||||
, # Existing image onto which to append the new layer.
|
||||
fromImage ? null
|
||||
, # Name of the image we're appending onto.
|
||||
fromImageName ? null
|
||||
, # Tag of the image we're appending onto.
|
||||
fromImageTag ? null
|
||||
, # How much disk to allocate for the temporary virtual machine.
|
||||
diskSize ? 1024
|
||||
, # Commands (bash) to run on the layer; these do not require sudo.
|
||||
extraCommands ? ""
|
||||
}:
|
||||
# Generate an executable script from the `runAsRoot` text.
|
||||
let
|
||||
runAsRootScript = shellScript "run-as-root.sh" runAsRoot;
|
||||
extraCommandsScript = shellScript "extra-commands.sh" extraCommands;
|
||||
in
|
||||
runWithOverlay {
|
||||
name = "docker-layer-${name}";
|
||||
|
||||
inherit fromImage fromImageName fromImageTag diskSize;
|
||||
|
||||
preMount = lib.optionalString (contents != null && contents != [ ]) ''
|
||||
echo "Adding contents..."
|
||||
for item in ${escapeShellArgs (map (c: "${c}") (toList contents))}; do
|
||||
echo "Adding $item..."
|
||||
rsync -a${if keepContentsDirlinks then "K" else "k"} --chown=0:0 $item/ layer/
|
||||
done
|
||||
|
||||
chmod ug+w layer
|
||||
'';
|
||||
|
||||
postMount = ''
|
||||
mkdir -p mnt/{dev,proc,sys} mnt${storeDir}
|
||||
|
||||
# Mount /dev, /sys and the nix store as shared folders.
|
||||
mount --rbind /dev mnt/dev
|
||||
mount --rbind /sys mnt/sys
|
||||
mount --rbind ${storeDir} mnt${storeDir}
|
||||
|
||||
# Execute the run as root script. See 'man unshare' for
|
||||
# details on what's going on here; basically this command
|
||||
# means that the runAsRootScript will be executed in a nearly
|
||||
# completely isolated environment.
|
||||
#
|
||||
# Ideally we would use --mount-proc=mnt/proc or similar, but this
|
||||
# doesn't work. The workaround is to setup proc after unshare.
|
||||
# See: https://github.com/karelzak/util-linux/issues/648
|
||||
unshare -imnpuf --mount-proc sh -c 'mount --rbind /proc mnt/proc && chroot mnt ${runAsRootScript}'
|
||||
|
||||
# Unmount directories and remove them.
|
||||
umount -R mnt/dev mnt/sys mnt${storeDir}
|
||||
rmdir --ignore-fail-on-non-empty \
|
||||
mnt/dev mnt/proc mnt/sys mnt${storeDir} \
|
||||
mnt$(dirname ${storeDir})
|
||||
'';
|
||||
|
||||
postUmount = ''
|
||||
(cd layer; ${extraCommandsScript})
|
||||
|
||||
echo "Packing layer..."
|
||||
mkdir -p $out
|
||||
tarhash=$(tar -C layer --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" -cf - . |
|
||||
tee -p $out/layer.tar |
|
||||
${tarsum}/bin/tarsum)
|
||||
|
||||
cat ${baseJson} | jshon -s "$tarhash" -i checksum > $out/json
|
||||
# Indicate to docker that we're using schema version 1.0.
|
||||
echo -n "1.0" > $out/VERSION
|
||||
|
||||
echo "Finished building layer '${name}'"
|
||||
'';
|
||||
};
|
||||
|
||||
buildLayeredImage = { name, ... }@args:
|
||||
let
|
||||
stream = streamLayeredImage args;
|
||||
in
|
||||
runCommand "${baseNameOf name}.tar.gz"
|
||||
{
|
||||
inherit (stream) imageName;
|
||||
passthru = { inherit (stream) imageTag; };
|
||||
nativeBuildInputs = [ pigz ];
|
||||
} "${stream} | pigz -nT > $out";
|
||||
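# A hedged usage sketch for buildLayeredImage (contents and config are
# illustrative; streamLayeredImage takes the same arguments and emits the
# tarball on stdout instead):
#
#   dockerTools.buildLayeredImage {
#     name = "hello-layered";
#     tag = "latest";
#     contents = [ pkgs.hello ];
#     config.Cmd = [ "/bin/hello" ];
#     maxLayers = 50;
#   }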
|
||||
# 1. extract the base image
|
||||
# 2. create the layer
|
||||
# 3. add layer deps to the layer itself, diffing with the base image
|
||||
# 4. compute the layer id
|
||||
# 5. put the layer in the image
|
||||
# 6. repack the image
|
||||
buildImage =
|
||||
args@{
|
||||
# Image name.
|
||||
name
|
||||
, # Image tag, when null then the nix output hash will be used.
|
||||
tag ? null
|
||||
, # Parent image, to append to.
|
||||
fromImage ? null
|
||||
, # Name of the parent image; will be read from the image otherwise.
|
||||
fromImageName ? null
|
||||
, # Tag of the parent image; will be read from the image otherwise.
|
||||
fromImageTag ? null
|
||||
, # Files to put on the image (a nix store path or list of paths).
|
||||
contents ? null
|
||||
, # When copying the contents into the image, preserve symlinks to
|
||||
# directories (see `rsync -K`). Otherwise, transform those symlinks
|
||||
# into directories.
|
||||
keepContentsDirlinks ? false
|
||||
, # Docker config; e.g. what command to run on the container.
|
||||
config ? null
|
||||
, # Optional bash script to run on the files prior to fixturizing the layer.
|
||||
extraCommands ? ""
|
||||
, uid ? 0
|
||||
, gid ? 0
|
||||
, # Optional bash script to run as root on the image when provisioning.
|
||||
runAsRoot ? null
|
||||
, # Size of the virtual machine disk to provision when building the image.
|
||||
diskSize ? 1024
|
||||
, # Time of creation of the image.
|
||||
created ? "1970-01-01T00:00:01Z"
|
||||
,
|
||||
}:
|
||||
|
||||
let
|
||||
baseName = baseNameOf name;
|
||||
|
||||
# Create a JSON blob of the configuration. Set the date to unix zero.
|
||||
baseJson =
|
||||
let
|
||||
pure = writeText "${baseName}-config.json" (builtins.toJSON {
|
||||
inherit created config;
|
||||
architecture = defaultArch;
|
||||
os = "linux";
|
||||
});
|
||||
impure = runCommand "${baseName}-config.json"
|
||||
{ nativeBuildInputs = [ jq ]; }
|
||||
''
|
||||
jq ".created = \"$(TZ=utc date --iso-8601="seconds")\"" ${pure} > $out
|
||||
'';
|
||||
in
|
||||
if created == "now" then impure else pure;
|
||||
|
||||
layer =
|
||||
if runAsRoot == null
|
||||
then
|
||||
mkPureLayer
|
||||
{
|
||||
name = baseName;
|
||||
inherit baseJson contents keepContentsDirlinks extraCommands uid gid;
|
||||
} else
|
||||
mkRootLayer {
|
||||
name = baseName;
|
||||
inherit baseJson fromImage fromImageName fromImageTag
|
||||
contents keepContentsDirlinks runAsRoot diskSize
|
||||
extraCommands;
|
||||
};
|
||||
result = runCommand "docker-image-${baseName}.tar.gz"
|
||||
{
|
||||
nativeBuildInputs = [ jshon pigz jq moreutils ];
|
||||
# Image name must be lowercase
|
||||
imageName = lib.toLower name;
|
||||
imageTag = if tag == null then "" else tag;
|
||||
inherit fromImage baseJson;
|
||||
layerClosure = writeReferencesToFile layer;
|
||||
passthru.buildArgs = args;
|
||||
passthru.layer = layer;
|
||||
passthru.imageTag =
|
||||
if tag != null
|
||||
then tag
|
||||
else
|
||||
lib.head (lib.strings.splitString "-" (baseNameOf result.outPath));
|
||||
} ''
|
||||
${lib.optionalString (tag == null) ''
|
||||
outName="$(basename "$out")"
|
||||
outHash=$(echo "$outName" | cut -d - -f 1)
|
||||
|
||||
imageTag=$outHash
|
||||
''}
|
||||
|
||||
# Print tar contents:
|
||||
# 1: Interpreted as relative to the root directory
|
||||
# 2: With no trailing slashes on directories
|
||||
# This is useful for ensuring that the output matches the
|
||||
# values generated by the "find" command
|
||||
ls_tar() {
|
||||
for f in $(tar -tf $1 | xargs realpath -ms --relative-to=.); do
|
||||
if [[ "$f" != "." ]]; then
|
||||
echo "/$f"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
mkdir image
|
||||
touch baseFiles
|
||||
baseEnvs='[]'
|
||||
if [[ -n "$fromImage" ]]; then
|
||||
echo "Unpacking base image..."
|
||||
tar -C image -xpf "$fromImage"
|
||||
|
||||
# Store the layers and the environment variables from the base image
|
||||
cat ./image/manifest.json | jq -r '.[0].Layers | .[]' > layer-list
|
||||
configName="$(cat ./image/manifest.json | jq -r '.[0].Config')"
|
||||
baseEnvs="$(cat "./image/$configName" | jq '.config.Env // []')"
|
||||
|
||||
# Extract the parentID from the manifest
|
||||
if [[ -n "$fromImageName" ]] && [[ -n "$fromImageTag" ]]; then
|
||||
parentID="$(
|
||||
cat "image/manifest.json" |
|
||||
jq -r '.[] | select(.RepoTags | contains([$desiredTag])) | rtrimstr(".json")' \
|
||||
--arg desiredTag "$fromImageName:$fromImageTag"
|
||||
)"
|
||||
else
|
||||
echo "From-image name or tag wasn't set. Reading the first ID."
|
||||
parentID="$(cat "image/manifest.json" | jq -r '.[0].Config | rtrimstr(".json")')"
|
||||
fi
|
||||
|
||||
# Otherwise do not import the base image configuration and manifest
|
||||
chmod a+w image image/*.json
|
||||
rm -f image/*.json
|
||||
|
||||
for l in image/*/layer.tar; do
|
||||
ls_tar $l >> baseFiles
|
||||
done
|
||||
else
|
||||
touch layer-list
|
||||
fi
|
||||
|
||||
chmod -R ug+rw image
|
||||
|
||||
mkdir temp
|
||||
cp ${layer}/* temp/
|
||||
chmod ug+w temp/*
|
||||
|
||||
for dep in $(cat $layerClosure); do
|
||||
find $dep >> layerFiles
|
||||
done
|
||||
|
||||
echo "Adding layer..."
|
||||
# Record the contents of the tarball with ls_tar.
|
||||
ls_tar temp/layer.tar >> baseFiles
|
||||
|
||||
# Append nix/store directory to the layer so that when the layer is loaded in the
|
||||
# image /nix/store has read permissions for non-root users.
|
||||
# nix/store is added only if the layer has /nix/store paths in it.
|
||||
if [ $(wc -l < $layerClosure) -gt 1 ] && [ $(grep -c -e "^/nix/store$" baseFiles) -eq 0 ]; then
|
||||
mkdir -p nix/store
|
||||
chmod -R 555 nix
|
||||
echo "./nix" >> layerFiles
|
||||
echo "./nix/store" >> layerFiles
|
||||
fi
|
||||
|
||||
# Get the files in the new layer which were *not* present in
|
||||
# the old layer, and record them as newFiles.
|
||||
comm <(sort -n baseFiles|uniq) \
|
||||
<(sort -n layerFiles|uniq|grep -v ${layer}) -1 -3 > newFiles
|
||||
# Append the new files to the layer.
|
||||
tar -rpf temp/layer.tar --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" \
|
||||
--owner=0 --group=0 --no-recursion --verbatim-files-from --files-from newFiles
|
||||
|
||||
echo "Adding meta..."
|
||||
|
||||
# If we have a parentID, add it to the json metadata.
|
||||
if [[ -n "$parentID" ]]; then
|
||||
cat temp/json | jshon -s "$parentID" -i parent > tmpjson
|
||||
mv tmpjson temp/json
|
||||
fi
|
||||
|
||||
# Take the sha256 sum of the generated json and use it as the layer ID.
|
||||
# Compute the size and add it to the json under the 'Size' field.
|
||||
layerID=$(sha256sum temp/json|cut -d ' ' -f 1)
|
||||
size=$(stat --printf="%s" temp/layer.tar)
|
||||
cat temp/json | jshon -s "$layerID" -i id -n $size -i Size > tmpjson
|
||||
mv tmpjson temp/json
|
||||
|
||||
# Use the temp folder we've been working on to create a new image.
|
||||
mv temp image/$layerID
|
||||
|
||||
# Add the new layer ID to the end of the layer list
|
||||
(
|
||||
cat layer-list
|
||||
# originally this used `sed -i "1i$layerID" layer-list`, but
|
||||
# would fail if layer-list was completely empty.
|
||||
echo "$layerID/layer.tar"
|
||||
) | sponge layer-list
|
||||
|
||||
# Create image json and image manifest
|
||||
imageJson=$(cat ${baseJson} | jq '.config.Env = $baseenv + .config.Env' --argjson baseenv "$baseEnvs")
|
||||
imageJson=$(echo "$imageJson" | jq ". + {\"rootfs\": {\"diff_ids\": [], \"type\": \"layers\"}}")
|
||||
manifestJson=$(jq -n "[{\"RepoTags\":[\"$imageName:$imageTag\"]}]")
|
||||
|
||||
for layerTar in $(cat ./layer-list); do
|
||||
layerChecksum=$(sha256sum image/$layerTar | cut -d ' ' -f1)
|
||||
imageJson=$(echo "$imageJson" | jq ".history |= . + [{\"created\": \"$(jq -r .created ${baseJson})\"}]")
|
||||
# diff_ids order is from the bottom-most to top-most layer
|
||||
imageJson=$(echo "$imageJson" | jq ".rootfs.diff_ids |= . + [\"sha256:$layerChecksum\"]")
|
||||
manifestJson=$(echo "$manifestJson" | jq ".[0].Layers |= . + [\"$layerTar\"]")
|
||||
done
|
||||
|
||||
imageJsonChecksum=$(echo "$imageJson" | sha256sum | cut -d ' ' -f1)
|
||||
echo "$imageJson" > "image/$imageJsonChecksum.json"
|
||||
manifestJson=$(echo "$manifestJson" | jq ".[0].Config = \"$imageJsonChecksum.json\"")
|
||||
echo "$manifestJson" > image/manifest.json
|
||||
|
||||
# Store the json under the name image/repositories.
|
||||
jshon -n object \
|
||||
-n object -s "$layerID" -i "$imageTag" \
|
||||
-i "$imageName" > image/repositories
|
||||
|
||||
# Make the image read-only.
|
||||
chmod -R a-w image
|
||||
|
||||
echo "Cooking the image..."
|
||||
tar -C image --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=0 --group=0 --xform s:'^./':: -c . | pigz -nT > $out
|
||||
|
||||
echo "Finished."
|
||||
'';
|
||||
|
||||
in
|
||||
result;
|
||||
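# A hedged usage sketch for buildImage (values are illustrative; supplying
# runAsRoot routes the build through the VM-backed mkRootLayer path above):
#
#   dockerTools.buildImage {
#     name = "redis-image";
#     tag = "latest";
#     contents = [ pkgs.redis ];
#     runAsRoot = ''
#       mkdir -p /data
#     '';
#     config = { Cmd = [ "/bin/redis-server" ]; WorkingDir = "/data"; };
#   }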
|
||||
# Merge the tarballs of images built with buildImage into a single
|
||||
# tarball that contains all images. Running `docker load` on the resulting
|
||||
# tarball will load the images into the docker daemon.
|
||||
mergeImages = images: runCommand "merge-docker-images"
|
||||
{
|
||||
inherit images;
|
||||
nativeBuildInputs = [ pigz jq ];
|
||||
} ''
|
||||
mkdir image inputs
|
||||
# Extract images
|
||||
repos=()
|
||||
manifests=()
|
||||
for item in $images; do
|
||||
name=$(basename $item)
|
||||
mkdir inputs/$name
|
||||
tar -I pigz -xf $item -C inputs/$name
|
||||
if [ -f inputs/$name/repositories ]; then
|
||||
repos+=(inputs/$name/repositories)
|
||||
fi
|
||||
if [ -f inputs/$name/manifest.json ]; then
|
||||
manifests+=(inputs/$name/manifest.json)
|
||||
fi
|
||||
done
|
||||
# Copy all layers from input images to output image directory
|
||||
cp -R --no-clobber inputs/*/* image/
|
||||
# Merge repositories objects and manifests
|
||||
jq -s add "''${repos[@]}" > repositories
|
||||
jq -s add "''${manifests[@]}" > manifest.json
|
||||
# Replace output image repositories and manifest with merged versions
|
||||
mv repositories image/repositories
|
||||
mv manifest.json image/manifest.json
|
||||
# Create tarball and gzip
|
||||
tar -C image --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=0 --group=0 --xform s:'^./':: -c . | pigz -nT > $out
|
||||
'';
|
||||
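# A hedged usage sketch for mergeImages; both inputs are illustrative
# buildImage results, and `docker load` on the merged tarball imports both:
#
#   dockerTools.mergeImages [
#     (dockerTools.buildImage { name = "a"; contents = [ pkgs.hello ]; })
#     (dockerTools.buildImage { name = "b"; contents = [ pkgs.cowsay ]; })
#   ]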
|
||||
|
||||
# Provide a /etc/passwd and /etc/group that contain root and nobody.
|
||||
# Useful when packaging binaries that insist on using nss to look up
|
||||
# username/groups (like nginx).
|
||||
# /bin/sh is fine to not exist, and provided by another shim.
|
||||
inherit fakeNss; # alias
|
||||
|
||||
# This provides a /usr/bin/env, for shell scripts using the
|
||||
# "#!/usr/bin/env executable" shebang.
|
||||
usrBinEnv = runCommand "usr-bin-env" { } ''
|
||||
mkdir -p $out/usr/bin
|
||||
ln -s ${coreutils}/bin/env $out/usr/bin
|
||||
'';
|
||||
|
||||
# This provides /bin/sh, pointing to bashInteractive.
|
||||
binSh = runCommand "bin-sh" { } ''
|
||||
mkdir -p $out/bin
|
||||
ln -s ${bashInteractive}/bin/bash $out/bin/sh
|
||||
'';
|
||||
|
||||
# Build an image and populate its nix database with the provided
|
||||
# contents. The main purpose is to be able to use nix commands in
|
||||
# the container.
|
||||
# Be careful since this doesn't work well with multilayer.
|
||||
buildImageWithNixDb = args@{ contents ? null, extraCommands ? "", ... }: (
|
||||
buildImage (args // {
|
||||
extraCommands = (mkDbExtraCommand contents) + extraCommands;
|
||||
})
|
||||
);
|
||||
|
||||
buildLayeredImageWithNixDb = args@{ contents ? null, extraCommands ? "", ... }: (
|
||||
buildLayeredImage (args // {
|
||||
extraCommands = (mkDbExtraCommand contents) + extraCommands;
|
||||
})
|
||||
);
|
||||
|
||||
streamLayeredImage =
|
||||
{
|
||||
# Image Name
|
||||
name
|
||||
, # Image tag, the Nix's output hash will be used if null
|
||||
tag ? null
|
||||
, # Parent image, to append to.
|
||||
fromImage ? null
|
||||
, # Files to put on the image (a nix store path or list of paths).
|
||||
contents ? [ ]
|
||||
, # Docker config; e.g. what command to run on the container.
|
||||
config ? { }
|
||||
, # Time of creation of the image. Passing "now" will make the
|
||||
# created date be the time of building.
|
||||
created ? "1970-01-01T00:00:01Z"
|
||||
, # Optional bash script to run on the files prior to fixturizing the layer.
|
||||
extraCommands ? ""
|
||||
, # Optional bash script to run inside fakeroot environment.
|
||||
# Could be used for changing ownership of files in customisation layer.
|
||||
fakeRootCommands ? ""
|
||||
, # Whether to run fakeRootCommands in fakechroot as well, so that they
|
||||
# appear to run inside the image, but have access to the normal Nix store.
|
||||
# Perhaps this could be enabled by default on pkgs.stdenv.buildPlatform.isLinux
|
||||
enableFakechroot ? false
|
||||
, # We pick 100 to ensure there is plenty of room for extension. I
|
||||
# believe the actual maximum is 128.
|
||||
maxLayers ? 100
|
||||
, # Whether to include store paths in the image. You generally want to leave
|
||||
# this on, but tooling may disable this to insert the store paths more
|
||||
# efficiently via other means, such as bind mounting the host store.
|
||||
includeStorePaths ? true
|
||||
, # Passthru arguments for the underlying derivation.
|
||||
passthru ? {}
|
||||
,
|
||||
}:
|
||||
assert
|
||||
(lib.assertMsg (maxLayers > 1)
"the maxLayers argument of the dockerTools.buildLayeredImage function must be greater than 1 (current value: ${toString maxLayers})");
|
||||
let
|
||||
baseName = baseNameOf name;
|
||||
|
||||
streamScript = writePython3 "stream" { } ./stream_layered_image.py;
|
||||
baseJson = writeText "${baseName}-base.json" (builtins.toJSON {
|
||||
inherit config;
|
||||
architecture = defaultArch;
|
||||
os = "linux";
|
||||
});
|
||||
|
||||
contentsList = if builtins.isList contents then contents else [ contents ];
|
||||
|
||||
# We store the customisation layer as a tarball, to make sure that
|
||||
# things like permissions set on 'extraCommands' are not overridden
|
||||
# by Nix. Then we precompute the sha256 for performance.
|
||||
customisationLayer = symlinkJoin {
|
||||
name = "${baseName}-customisation-layer";
|
||||
paths = contentsList;
|
||||
inherit extraCommands fakeRootCommands;
|
||||
nativeBuildInputs = [
|
||||
fakeroot
|
||||
] ++ optionals enableFakechroot [
|
||||
fakechroot
|
||||
# for chroot
|
||||
coreutils
|
||||
# fakechroot needs getopt, which is provided by util-linux
|
||||
util-linux
|
||||
];
|
||||
postBuild = ''
|
||||
mv $out old_out
|
||||
(cd old_out; eval "$extraCommands" )
|
||||
|
||||
mkdir $out
|
||||
${optionalString enableFakechroot ''
|
||||
export FAKECHROOT_EXCLUDE_PATH=/dev:/proc:/sys:${builtins.storeDir}:$out/layer.tar
|
||||
''}
|
||||
${optionalString enableFakechroot ''fakechroot chroot $PWD/old_out ''}fakeroot bash -c '
|
||||
source $stdenv/setup
|
||||
${optionalString (!enableFakechroot) ''cd old_out''}
|
||||
eval "$fakeRootCommands"
|
||||
tar \
|
||||
--sort name \
|
||||
--numeric-owner --mtime "@$SOURCE_DATE_EPOCH" \
|
||||
--hard-dereference \
|
||||
-cf $out/layer.tar .
|
||||
'
|
||||
|
||||
sha256sum $out/layer.tar \
|
||||
| cut -f 1 -d ' ' \
|
||||
> $out/checksum
|
||||
'';
|
||||
};
|
||||
|
||||
closureRoots = lib.optionals includeStorePaths /* normally true */ (
|
||||
[ baseJson customisationLayer ]
|
||||
);
|
||||
overallClosure = writeText "closure" (lib.concatStringsSep " " closureRoots);
|
||||
|
||||
# These derivations are only created as implementation details of docker-tools,
|
||||
# so they'll be excluded from the created images.
|
||||
unnecessaryDrvs = [ baseJson overallClosure customisationLayer ];
|
||||
|
||||
conf = runCommand "${baseName}-conf.json"
|
||||
{
|
||||
inherit fromImage maxLayers created;
|
||||
imageName = lib.toLower name;
|
||||
passthru.imageTag =
|
||||
if tag != null
|
||||
then tag
|
||||
else
|
||||
lib.head (lib.strings.splitString "-" (baseNameOf conf.outPath));
|
||||
paths = buildPackages.referencesByPopularity overallClosure;
|
||||
nativeBuildInputs = [ jq ];
|
||||
} ''
|
||||
${if (tag == null) then ''
|
||||
outName="$(basename "$out")"
|
||||
outHash=$(echo "$outName" | cut -d - -f 1)
|
||||
|
||||
imageTag=$outHash
|
||||
'' else ''
|
||||
imageTag="${tag}"
|
||||
''}
|
||||
|
||||
# convert "created" to iso format
|
||||
if [[ "$created" != "now" ]]; then
|
||||
created="$(date -Iseconds -d "$created")"
|
||||
fi
|
||||
|
||||
paths() {
|
||||
cat $paths ${lib.concatMapStringsSep " "
|
||||
(path: "| (grep -v ${path} || true)")
|
||||
unnecessaryDrvs}
|
||||
}
|
||||
|
||||
# Compute the number of layers that are already used by a potential
|
||||
# 'fromImage' as well as the customization layer. Ensure that there is
|
||||
# still at least one layer available to store the image contents.
|
||||
usedLayers=0
|
||||
|
||||
# subtract number of base image layers
|
||||
if [[ -n "$fromImage" ]]; then
|
||||
(( usedLayers += $(tar -xOf "$fromImage" manifest.json | jq '.[0].Layers | length') ))
|
||||
fi
|
||||
|
||||
# one layer will be taken up by the customisation layer
|
||||
(( usedLayers += 1 ))
|
||||
|
||||
if ! (( $usedLayers < $maxLayers )); then
|
||||
echo >&2 "Error: $usedLayers layers are needed to store 'fromImage' and" \
|
||||
"'extraCommands', but only maxLayers=$maxLayers were" \
|
||||
"allowed. At least 1 layer is required to store contents."
|
||||
exit 1
|
||||
fi
|
||||
availableLayers=$(( maxLayers - usedLayers ))
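# Worked example (illustrative): with maxLayers=100 and a 'fromImage' whose
# manifest lists 5 layers, usedLayers becomes 6 (5 base layers plus the
# customisation layer), leaving availableLayers=94 for the store paths.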
|
||||
|
||||
# Create $maxLayers worth of Docker Layers, one layer per store path
|
||||
# unless there are more paths than $maxLayers. In that case, create
|
||||
# $maxLayers-1 for the most popular layers, and smush the remaining
|
||||
# store paths into one final layer.
|
||||
#
|
||||
# The following code is fiddly w.r.t. ensuring every layer is
|
||||
# created, and that no paths are missed. If you change the
|
||||
# following lines, double-check that your code behaves properly
|
||||
# when the number of layers equals:
|
||||
# maxLayers-1, maxLayers, and maxLayers+1, 0
|
||||
store_layers="$(
|
||||
paths |
|
||||
jq -sR '
|
||||
rtrimstr("\n") | split("\n")
|
||||
| (.[:$maxLayers-1] | map([.])) + [ .[$maxLayers-1:] ]
|
||||
| map(select(length > 0))
|
||||
' \
|
||||
--argjson maxLayers "$availableLayers"
|
||||
)"
|
||||
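# Illustrative example: with availableLayers=3 and popularity-sorted paths
# p1..p5, the jq expression above yields [["p1"],["p2"],["p3","p4","p5"]]:
# one layer per path for the first two, and the remainder smushed into the
# final layer.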
|
||||
cat ${baseJson} | jq '
|
||||
. + {
"store_dir": $store_dir,
"from_image": $from_image,
"store_layers": $store_layers,
"customisation_layer": $customisation_layer,
|
||||
"repo_tag": $repo_tag,
|
||||
"created": $created
|
||||
}
|
||||
' --arg store_dir "${storeDir}" \
|
||||
--argjson from_image ${if fromImage == null then "null" else "'\"${fromImage}\"'"} \
|
||||
--argjson store_layers "$store_layers" \
|
||||
--arg customisation_layer ${customisationLayer} \
|
||||
--arg repo_tag "$imageName:$imageTag" \
|
||||
--arg created "$created" |
|
||||
tee $out
|
||||
'';
|
||||
result = runCommand "stream-${baseName}"
|
||||
{
|
||||
inherit (conf) imageName;
|
||||
passthru = passthru // {
|
||||
inherit (conf) imageTag;
|
||||
|
||||
# Distinguish tarballs and exes at the Nix level so functions that
|
||||
# take images can know in advance how the image is supposed to be used.
|
||||
isExe = true;
|
||||
};
|
||||
nativeBuildInputs = [ makeWrapper ];
|
||||
} ''
|
||||
makeWrapper ${streamScript} $out --add-flags ${conf}
|
||||
'';
|
||||
in
|
||||
result;
|
||||
}
|
||||
40
pkgs/build-support/docker/detjson.py
Normal file
|
|
@ -0,0 +1,40 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Deterministic layer json: https://github.com/docker/hub-feedback/issues/488
|
||||
|
||||
import sys
|
||||
reload(sys)
|
||||
sys.setdefaultencoding('UTF8')
|
||||
import json
|
||||
|
||||
# If any of the keys below are equal to a certain value
|
||||
# then we can delete it because it's the default value
|
||||
SAFEDELS = {
|
||||
"Size": 0,
|
||||
"config": {
|
||||
"ExposedPorts": None,
|
||||
"MacAddress": "",
|
||||
"NetworkDisabled": False,
|
||||
"PortSpecs": None,
|
||||
"VolumeDriver": ""
|
||||
}
|
||||
}
|
||||
SAFEDELS["container_config"] = SAFEDELS["config"]
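# Illustrative example: a layer JSON of
#   {"Size": 0, "config": {"MacAddress": "", "Cmd": ["sh"]}}
# becomes
#   {"config": {"Cmd": ["sh"]}}
# after makedet() below, since "Size": 0 and "MacAddress": "" equal the
# defaults listed above and are therefore dropped.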
|
||||
|
||||
def makedet(j, safedels):
|
||||
for k,v in safedels.items():
|
||||
if k not in j:
|
||||
continue
|
||||
if type(v) == dict:
|
||||
makedet(j[k], v)
|
||||
elif j[k] == v:
|
||||
del j[k]
|
||||
|
||||
def main():
|
||||
j = json.load(sys.stdin)
|
||||
makedet(j, SAFEDELS)
|
||||
json.dump(j, sys.stdout, sort_keys=True)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
613
pkgs/build-support/docker/examples.nix
Normal file
|
|
@ -0,0 +1,613 @@
|
|||
# Examples of using the docker tools to build packages.
|
||||
#
|
||||
# This file defines several docker images. In order to use an image,
|
||||
# build its derivation with `nix-build`, and then load the result with
|
||||
# `docker load`. For example:
|
||||
#
|
||||
# $ nix-build '<nixpkgs>' -A dockerTools.examples.redis
|
||||
# $ docker load < result
|
||||
|
||||
{ pkgs, buildImage, buildLayeredImage, fakeNss, pullImage, shadowSetup, buildImageWithNixDb, pkgsCross }:
|
||||
|
||||
rec {
|
||||
# 1. basic example
|
||||
bash = buildImage {
|
||||
name = "bash";
|
||||
tag = "latest";
|
||||
contents = pkgs.bashInteractive;
|
||||
};
|
||||
|
||||
# 2. service example, layered on another image
|
||||
redis = buildImage {
|
||||
name = "redis";
|
||||
tag = "latest";
|
||||
|
||||
# for example's sake, we can layer redis on top of bash or debian
|
||||
fromImage = bash;
|
||||
# fromImage = debian;
|
||||
|
||||
contents = pkgs.redis;
|
||||
runAsRoot = ''
|
||||
mkdir -p /data
|
||||
'';
|
||||
|
||||
config = {
|
||||
Cmd = [ "/bin/redis-server" ];
|
||||
WorkingDir = "/data";
|
||||
Volumes = {
|
||||
"/data" = {};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# 3. another service example
|
||||
nginx = let
|
||||
nginxPort = "80";
|
||||
nginxConf = pkgs.writeText "nginx.conf" ''
|
||||
user nobody nobody;
|
||||
daemon off;
|
||||
error_log /dev/stdout info;
|
||||
pid /dev/null;
|
||||
events {}
|
||||
http {
|
||||
access_log /dev/stdout;
|
||||
server {
|
||||
listen ${nginxPort};
|
||||
index index.html;
|
||||
location / {
|
||||
root ${nginxWebRoot};
|
||||
}
|
||||
}
|
||||
}
|
||||
'';
|
||||
nginxWebRoot = pkgs.writeTextDir "index.html" ''
|
||||
<html><body><h1>Hello from NGINX</h1></body></html>
|
||||
'';
|
||||
in
|
||||
buildLayeredImage {
|
||||
name = "nginx-container";
|
||||
tag = "latest";
|
||||
contents = [
|
||||
fakeNss
|
||||
pkgs.nginx
|
||||
];
|
||||
|
||||
extraCommands = ''
|
||||
# nginx still tries to read this directory even if error_log
|
||||
# directive is specifying another file :/
|
||||
mkdir -p var/log/nginx
|
||||
mkdir -p var/cache/nginx
|
||||
'';
|
||||
|
||||
config = {
|
||||
Cmd = [ "nginx" "-c" nginxConf ];
|
||||
ExposedPorts = {
|
||||
"${nginxPort}/tcp" = {};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# 4. example of pulling an image. could be used as a base for other images
|
||||
nixFromDockerHub = pullImage {
|
||||
imageName = "nixos/nix";
|
||||
imageDigest = "sha256:85299d86263a3059cf19f419f9d286cc9f06d3c13146a8ebbb21b3437f598357";
|
||||
sha256 = "19fw0n3wmddahzr20mhdqv6jkjn1kanh6n2mrr08ai53dr8ph5n7";
|
||||
finalImageTag = "2.2.1";
|
||||
finalImageName = "nix";
|
||||
};
|
||||
# Same example, but re-fetches every time the fetcher implementation changes.
|
||||
# NOTE: Only use this for testing, or you'd be wasting a lot of time, network and space.
|
||||
testNixFromDockerHub = pkgs.testers.invalidateFetcherByDrvHash pullImage {
|
||||
imageName = "nixos/nix";
|
||||
imageDigest = "sha256:85299d86263a3059cf19f419f9d286cc9f06d3c13146a8ebbb21b3437f598357";
|
||||
sha256 = "19fw0n3wmddahzr20mhdqv6jkjn1kanh6n2mrr08ai53dr8ph5n7";
|
||||
finalImageTag = "2.2.1";
|
||||
finalImageName = "nix";
|
||||
};
|
||||
|
||||
# 5. example of multiple contents, emacs and vi happily coexisting
|
||||
editors = buildImage {
|
||||
name = "editors";
|
||||
contents = [
|
||||
pkgs.coreutils
|
||||
pkgs.bash
|
||||
pkgs.emacs
|
||||
pkgs.vim
|
||||
pkgs.nano
|
||||
];
|
||||
};
|
||||
|
||||
# 6. nix example to play with the container nix store
|
||||
# docker run -it --rm nix nix-store -qR $(nix-build '<nixpkgs>' -A nix)
|
||||
nix = buildImageWithNixDb {
|
||||
name = "nix";
|
||||
tag = "latest";
|
||||
contents = [
|
||||
# nix-store uses cat program to display results as specified by
|
||||
# the image env variable NIX_PAGER.
|
||||
pkgs.coreutils
|
||||
pkgs.nix
|
||||
pkgs.bash
|
||||
];
|
||||
config = {
|
||||
Env = [
|
||||
"NIX_PAGER=cat"
|
||||
# A user is required by nix
|
||||
# https://github.com/NixOS/nix/blob/9348f9291e5d9e4ba3c4347ea1b235640f54fd79/src/libutil/util.cc#L478
|
||||
"USER=nobody"
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
# 7. example of adding something on top of an image pull by our
|
||||
# dockerTools chain.
|
||||
onTopOfPulledImage = buildImage {
|
||||
name = "onTopOfPulledImage";
|
||||
tag = "latest";
|
||||
fromImage = nixFromDockerHub;
|
||||
contents = [ pkgs.hello ];
|
||||
};
|
||||
|
||||
# 8. regression test for erroneous use of eval and string expansion.
|
||||
# See issue #34779 and PR #40947 for details.
|
||||
runAsRootExtraCommands = pkgs.dockerTools.buildImage {
|
||||
name = "runAsRootExtraCommands";
|
||||
tag = "latest";
|
||||
contents = [ pkgs.coreutils ];
|
||||
# The parens here are to create problematic bash to embed and eval. In case
|
||||
# this is *embedded* into the script (with nix expansion) the initial quotes
|
||||
# will close the string and the following parens are unexpected
|
||||
runAsRoot = ''echo "(runAsRoot)" > runAsRoot'';
|
||||
extraCommands = ''echo "(extraCommand)" > extraCommands'';
|
||||
};
|
||||
|
||||
# 9. Ensure that setting created to now results in a date which
|
||||
# isn't the epoch + 1
|
||||
unstableDate = pkgs.dockerTools.buildImage {
|
||||
name = "unstable-date";
|
||||
tag = "latest";
|
||||
contents = [ pkgs.coreutils ];
|
||||
created = "now";
|
||||
};
|
||||
|
||||
# 10. Create a layered image
|
||||
layered-image = pkgs.dockerTools.buildLayeredImage {
|
||||
name = "layered-image";
|
||||
tag = "latest";
|
||||
extraCommands = ''echo "(extraCommand)" > extraCommands'';
|
||||
config.Cmd = [ "${pkgs.hello}/bin/hello" ];
|
||||
contents = [ pkgs.hello pkgs.bash pkgs.coreutils ];
|
||||
};
|
||||
|
||||
# 11. Create an image on top of a layered image
|
||||
layered-on-top = pkgs.dockerTools.buildImage {
|
||||
name = "layered-on-top";
|
||||
tag = "latest";
|
||||
fromImage = layered-image;
|
||||
extraCommands = ''
|
||||
mkdir ./example-output
|
||||
chmod 777 ./example-output
|
||||
'';
|
||||
config = {
|
||||
Env = [ "PATH=${pkgs.coreutils}/bin/" ];
|
||||
WorkingDir = "/example-output";
|
||||
Cmd = [
|
||||
"${pkgs.bash}/bin/bash" "-c" "echo hello > foo; cat foo"
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
# 12 Create a layered image on top of a layered image
|
||||
layered-on-top-layered = pkgs.dockerTools.buildLayeredImage {
|
||||
name = "layered-on-top-layered";
|
||||
tag = "latest";
|
||||
fromImage = layered-image;
|
||||
extraCommands = ''
|
||||
mkdir ./example-output
|
||||
chmod 777 ./example-output
|
||||
'';
|
||||
config = {
|
||||
Env = [ "PATH=${pkgs.coreutils}/bin/" ];
|
||||
WorkingDir = "/example-output";
|
||||
Cmd = [
|
||||
"${pkgs.bash}/bin/bash" "-c" "echo hello > foo; cat foo"
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
# 13. example of running something as root on top of a parent image
|
||||
# Regression test related to PR #52109
|
||||
runAsRootParentImage = buildImage {
|
||||
name = "runAsRootParentImage";
|
||||
tag = "latest";
|
||||
runAsRoot = "touch /example-file";
|
||||
fromImage = bash;
|
||||
};
|
||||
|
||||
# 14. example of 3-layer images. This image is used to verify that the
|
||||
# order of layers is correct.
|
||||
# It allows validating that
|
||||
# - the layers of the parent are below
|
||||
# - the order of the parent layers is preserved at image build time
|
||||
# (this is why there are 3 images)
|
||||
layersOrder = let
|
||||
l1 = pkgs.dockerTools.buildImage {
|
||||
name = "l1";
|
||||
tag = "latest";
|
||||
extraCommands = ''
|
||||
mkdir -p tmp
|
||||
echo layer1 > tmp/layer1
|
||||
echo layer1 > tmp/layer2
|
||||
echo layer1 > tmp/layer3
|
||||
'';
|
||||
};
|
||||
l2 = pkgs.dockerTools.buildImage {
|
||||
name = "l2";
|
||||
fromImage = l1;
|
||||
tag = "latest";
|
||||
extraCommands = ''
|
||||
mkdir -p tmp
|
||||
echo layer2 > tmp/layer2
|
||||
echo layer2 > tmp/layer3
|
||||
'';
|
||||
};
|
||||
in pkgs.dockerTools.buildImage {
|
||||
name = "l3";
|
||||
fromImage = l2;
|
||||
tag = "latest";
|
||||
contents = [ pkgs.coreutils ];
|
||||
extraCommands = ''
|
||||
mkdir -p tmp
|
||||
echo layer3 > tmp/layer3
|
||||
'';
|
||||
};
|
||||
|
||||
# 15. Environment variable inheritance.
|
||||
# Child image should inherit the parent's environment variables,
|
||||
# optionally overriding them.
|
||||
environmentVariablesParent = pkgs.dockerTools.buildImage {
|
||||
name = "parent";
|
||||
tag = "latest";
|
||||
config = {
|
||||
Env = [
|
||||
"FROM_PARENT=true"
|
||||
"LAST_LAYER=parent"
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
environmentVariables = pkgs.dockerTools.buildImage {
|
||||
name = "child";
|
||||
fromImage = environmentVariablesParent;
|
||||
tag = "latest";
|
||||
contents = [ pkgs.coreutils ];
|
||||
config = {
|
||||
Env = [
|
||||
"FROM_CHILD=true"
|
||||
"LAST_LAYER=child"
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
environmentVariablesLayered = pkgs.dockerTools.buildLayeredImage {
|
||||
name = "child";
|
||||
fromImage = environmentVariablesParent;
|
||||
tag = "latest";
|
||||
contents = [ pkgs.coreutils ];
|
||||
config = {
|
||||
Env = [
|
||||
"FROM_CHILD=true"
|
||||
"LAST_LAYER=child"
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
# 16. Create another layered image, for comparing layers with image 10.
|
||||
another-layered-image = pkgs.dockerTools.buildLayeredImage {
|
||||
name = "another-layered-image";
|
||||
tag = "latest";
|
||||
config.Cmd = [ "${pkgs.hello}/bin/hello" ];
|
||||
};
|
||||
|
||||
# 17. Create a layered image with only 2 layers
|
||||
two-layered-image = pkgs.dockerTools.buildLayeredImage {
|
||||
name = "two-layered-image";
|
||||
tag = "latest";
|
||||
config.Cmd = [ "${pkgs.hello}/bin/hello" ];
|
||||
contents = [ pkgs.bash pkgs.hello ];
|
||||
maxLayers = 2;
|
||||
};
|
||||
|
||||
# 18. Create a layered image with more packages than max layers.
|
||||
# coreutils and hello are part of the same layer
|
||||
bulk-layer = pkgs.dockerTools.buildLayeredImage {
|
||||
name = "bulk-layer";
|
||||
tag = "latest";
|
||||
contents = with pkgs; [
|
||||
coreutils hello
|
||||
];
|
||||
maxLayers = 2;
|
||||
};
|
||||
|
||||
# 19. Create a layered image with a base image and more packages than max
|
||||
# layers. coreutils and hello are part of the same layer
|
||||
layered-bulk-layer = pkgs.dockerTools.buildLayeredImage {
|
||||
name = "layered-bulk-layer";
|
||||
tag = "latest";
|
||||
fromImage = two-layered-image;
|
||||
contents = with pkgs; [
|
||||
coreutils hello
|
||||
];
|
||||
maxLayers = 4;
|
||||
};
|
||||
|
||||
# 20. Create a "layered" image without nix store layers. This is not
|
||||
# recommended, but can be useful for base images in rare cases.
|
||||
no-store-paths = pkgs.dockerTools.buildLayeredImage {
|
||||
name = "no-store-paths";
|
||||
tag = "latest";
|
||||
extraCommands = ''
|
||||
# This removes sharing of busybox and is not recommended. We do this
|
||||
# to make the example suitable as a test case with working binaries.
|
||||
cp -r ${pkgs.pkgsStatic.busybox}/* .
|
||||
|
||||
# This is a "build" dependency that will not appear in the image
|
||||
${pkgs.hello}/bin/hello
|
||||
'';
|
||||
};
|
||||
|
||||
nixLayered = pkgs.dockerTools.buildLayeredImageWithNixDb {
|
||||
name = "nix-layered";
|
||||
tag = "latest";
|
||||
contents = [
|
||||
# nix-store uses cat program to display results as specified by
|
||||
# the image env variable NIX_PAGER.
|
||||
pkgs.coreutils
|
||||
pkgs.nix
|
||||
pkgs.bash
|
||||
];
|
||||
config = {
|
||||
Env = [
|
||||
"NIX_PAGER=cat"
|
||||
# A user is required by nix
|
||||
# https://github.com/NixOS/nix/blob/9348f9291e5d9e4ba3c4347ea1b235640f54fd79/src/libutil/util.cc#L478
|
||||
"USER=nobody"
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
# 21. Support files in the store on buildLayeredImage
|
||||
# See: https://github.com/NixOS/nixpkgs/pull/91084#issuecomment-653496223
|
||||
filesInStore = pkgs.dockerTools.buildLayeredImageWithNixDb {
|
||||
name = "file-in-store";
|
||||
tag = "latest";
|
||||
contents = [
|
||||
pkgs.coreutils
|
||||
pkgs.nix
|
||||
(pkgs.writeScriptBin "myscript" ''
|
||||
#!${pkgs.runtimeShell}
|
||||
cat ${pkgs.writeText "somefile" "some data"}
|
||||
'')
|
||||
];
|
||||
config = {
|
||||
Cmd = [ "myscript" ];
|
||||
# For some reason 'nix-store --verify' requires this environment variable
|
||||
Env = [ "USER=root" ];
|
||||
};
|
||||
};
|
||||
|
||||
# 22. Ensure that setting created to now results in a date which
|
||||
# isn't the epoch + 1 for layered images.
|
||||
unstableDateLayered = pkgs.dockerTools.buildLayeredImage {
|
||||
name = "unstable-date-layered";
|
||||
tag = "latest";
|
||||
contents = [ pkgs.coreutils ];
|
||||
created = "now";
|
||||
};
|
||||
|
||||
# 23. Ensure that layers are unpacked in the correct order before the
|
||||
# runAsRoot script is executed.
|
||||
layersUnpackOrder =
|
||||
let
|
||||
layerOnTopOf = parent: layerName:
|
||||
pkgs.dockerTools.buildImage {
|
||||
name = "layers-unpack-order-${layerName}";
|
||||
tag = "latest";
|
||||
fromImage = parent;
|
||||
contents = [ pkgs.coreutils ];
|
||||
runAsRoot = ''
|
||||
#!${pkgs.runtimeShell}
|
||||
echo -n "${layerName}" >> /layer-order
|
||||
'';
|
||||
};
|
||||
# When executing the runAsRoot script while building layer C, if layer B is
|
||||
# not unpacked on top of layer A, the contents of /layer-order will not be
|
||||
# "ABC".
|
||||
layerA = layerOnTopOf null "a";
|
||||
layerB = layerOnTopOf layerA "b";
|
||||
layerC = layerOnTopOf layerB "c";
|
||||
in layerC;
|
||||
|
||||
# buildImage without explicit tag
|
||||
bashNoTag = pkgs.dockerTools.buildImage {
|
||||
name = "bash-no-tag";
|
||||
contents = pkgs.bashInteractive;
|
||||
};
|
||||
|
||||
# buildLayeredImage without explicit tag
|
||||
bashNoTagLayered = pkgs.dockerTools.buildLayeredImage {
|
||||
name = "bash-no-tag-layered";
|
||||
contents = pkgs.bashInteractive;
|
||||
};
|
||||
|
||||
# buildImage without explicit tag
|
||||
bashNoTagStreamLayered = pkgs.dockerTools.streamLayeredImage {
|
||||
name = "bash-no-tag-stream-layered";
|
||||
contents = pkgs.bashInteractive;
|
||||
};
|
||||
|
||||
# buildLayeredImage with non-root user
|
||||
bashLayeredWithUser =
|
||||
let
|
||||
nonRootShadowSetup = { user, uid, gid ? uid }: with pkgs; [
|
||||
(
|
||||
writeTextDir "etc/shadow" ''
|
||||
root:!x:::::::
|
||||
${user}:!:::::::
|
||||
''
|
||||
)
|
||||
(
|
||||
writeTextDir "etc/passwd" ''
|
||||
root:x:0:0::/root:${runtimeShell}
|
||||
${user}:x:${toString uid}:${toString gid}::/home/${user}:
|
||||
''
|
||||
)
|
||||
(
|
||||
writeTextDir "etc/group" ''
|
||||
root:x:0:
|
||||
${user}:x:${toString gid}:
|
||||
''
|
||||
)
|
||||
(
|
||||
writeTextDir "etc/gshadow" ''
|
||||
root:x::
|
||||
${user}:x::
|
||||
''
|
||||
)
|
||||
];
|
||||
in
|
||||
pkgs.dockerTools.buildLayeredImage {
|
||||
name = "bash-layered-with-user";
|
||||
tag = "latest";
|
||||
contents = [ pkgs.bash pkgs.coreutils ] ++ nonRootShadowSetup { uid = 999; user = "somebody"; };
|
||||
};
|
||||
|
||||
# basic example, with cross compilation
|
||||
cross = let
|
||||
# Cross compile for x86_64 if on aarch64
|
||||
crossPkgs =
|
||||
if pkgs.stdenv.hostPlatform.system == "aarch64-linux" then pkgsCross.gnu64
|
||||
else pkgsCross.aarch64-multiplatform;
|
||||
in crossPkgs.dockerTools.buildImage {
|
||||
name = "hello-cross";
|
||||
tag = "latest";
|
||||
contents = crossPkgs.hello;
|
||||
};
|
||||
|
||||
# layered image where a store path is itself a symlink
|
||||
layeredStoreSymlink =
|
||||
let
|
||||
target = pkgs.writeTextDir "dir/target" "Content doesn't matter.";
|
||||
symlink = pkgs.runCommand "symlink" {} "ln -s ${target} $out";
|
||||
in
|
||||
pkgs.dockerTools.buildLayeredImage {
|
||||
name = "layeredstoresymlink";
|
||||
tag = "latest";
|
||||
contents = [ pkgs.bash symlink ];
|
||||
} // { passthru = { inherit symlink; }; };
|
||||
|
||||
# image with registry/ prefix
|
||||
prefixedImage = pkgs.dockerTools.buildImage {
|
||||
name = "registry-1.docker.io/image";
|
||||
tag = "latest";
|
||||
config.Cmd = [ "${pkgs.hello}/bin/hello" ];
|
||||
};
|
||||
|
||||
# layered image with registry/ prefix
|
||||
prefixedLayeredImage = pkgs.dockerTools.buildLayeredImage {
|
||||
name = "registry-1.docker.io/layered-image";
|
||||
tag = "latest";
|
||||
config.Cmd = [ "${pkgs.hello}/bin/hello" ];
|
||||
};
|
||||
|
||||
# layered image with files owned by a user other than root
|
||||
layeredImageWithFakeRootCommands = pkgs.dockerTools.buildLayeredImage {
|
||||
name = "layered-image-with-fake-root-commands";
|
||||
tag = "latest";
|
||||
contents = [
|
||||
pkgs.pkgsStatic.busybox
|
||||
];
|
||||
fakeRootCommands = ''
|
||||
mkdir -p ./home/jane
|
||||
chown 1000 ./home/jane
|
||||
ln -s ${pkgs.hello.overrideAttrs (o: {
|
||||
# A unique `hello` to make sure that it isn't included via another mechanism by accident.
|
||||
configureFlags = o.configureFlags or "" + " --program-prefix=layeredImageWithFakeRootCommands-";
|
||||
doCheck = false;
|
||||
})} ./hello
|
||||
'';
|
||||
};
|
||||
|
||||
# tarball consisting of both bash and redis images
|
||||
mergedBashAndRedis = pkgs.dockerTools.mergeImages [
|
||||
bash
|
||||
redis
|
||||
];
|
||||
|
||||
# tarball consisting of bash (without tag) and redis images
|
||||
mergedBashNoTagAndRedis = pkgs.dockerTools.mergeImages [
|
||||
bashNoTag
|
||||
redis
|
||||
];
|
||||
|
||||
# tarball consisting of bash and layered image with different owner of the
|
||||
# /home/jane directory
|
||||
mergedBashFakeRoot = pkgs.dockerTools.mergeImages [
|
||||
bash
|
||||
layeredImageWithFakeRootCommands
|
||||
];
|
||||
|
||||
helloOnRoot = pkgs.dockerTools.streamLayeredImage {
|
||||
name = "hello";
|
||||
tag = "latest";
|
||||
contents = [
|
||||
(pkgs.buildEnv {
|
||||
name = "hello-root";
|
||||
paths = [ pkgs.hello ];
|
||||
})
|
||||
];
|
||||
config.Cmd = [ "hello" ];
|
||||
};
|
||||
|
||||
helloOnRootNoStore = pkgs.dockerTools.streamLayeredImage {
|
||||
name = "hello";
|
||||
tag = "latest";
|
||||
contents = [
|
||||
(pkgs.buildEnv {
|
||||
name = "hello-root";
|
||||
paths = [ pkgs.hello ];
|
||||
})
|
||||
];
|
||||
config.Cmd = [ "hello" ];
|
||||
includeStorePaths = false;
|
||||
};
|
||||
|
||||
# Example export of the bash image
|
||||
exportBash = pkgs.dockerTools.exportImage { fromImage = bash; };
|
||||
|
||||
imageViaFakeChroot = pkgs.dockerTools.streamLayeredImage {
|
||||
name = "image-via-fake-chroot";
|
||||
tag = "latest";
|
||||
config.Cmd = [ "hello" ];
|
||||
enableFakechroot = true;
|
||||
# Crucially, instead of a relative path, this creates /bin, which is
|
||||
# intercepted by fakechroot.
|
||||
# This functionality is not available on darwin as of 2021.
|
||||
fakeRootCommands = ''
|
||||
mkdir /bin
|
||||
ln -s ${pkgs.hello}/bin/hello /bin/hello
|
||||
'';
|
||||
};
|
||||
|
||||
build-image-with-path = buildImage {
|
||||
name = "build-image-with-path";
|
||||
tag = "latest";
|
||||
contents = [ pkgs.bashInteractive ./test-dummy ];
|
||||
};
|
||||
|
||||
layered-image-with-path = pkgs.dockerTools.streamLayeredImage {
|
||||
name = "layered-image-with-path";
|
||||
tag = "latest";
|
||||
contents = [ pkgs.bashInteractive ./test-dummy ];
|
||||
};
|
||||
}
|
||||
173
pkgs/build-support/docker/nix-prefetch-docker
Executable file
|
|
@ -0,0 +1,173 @@
|
|||
#! /usr/bin/env bash
|
||||
|
||||
set -e -o pipefail
|
||||
|
||||
os=
|
||||
arch=
|
||||
imageName=
|
||||
imageTag=
|
||||
imageDigest=
|
||||
finalImageName=
|
||||
finalImageTag=
|
||||
hashType=$NIX_HASH_ALGO
|
||||
hashFormat=$hashFormat
|
||||
format=nix
|
||||
|
||||
usage(){
|
||||
echo >&2 "syntax: nix-prefetch-docker [options] [IMAGE_NAME [IMAGE_TAG|IMAGE_DIGEST]]
|
||||
|
||||
Options:
|
||||
--os os OS to fetch image for
|
||||
--arch arch               Arch to fetch image for
|
||||
--image-name name Name of the image to fetch
|
||||
--image-tag tag Image tag
|
||||
--image-digest digest Image digest
|
||||
--final-image-name name Desired name of the image
|
||||
--final-image-tag tag Desired image tag
|
||||
--json Output result in json format instead of nix
|
||||
--quiet Only print the final result
|
||||
"
|
||||
exit 1
|
||||
}
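# Example invocation (illustrative):
#   nix-prefetch-docker --image-name nixos/nix --image-tag 2.2.1
# This resolves the digest with skopeo, copies the image into the Nix store
# and prints an attribute set suitable for dockerTools.pullImage.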
|
||||
|
||||
get_image_digest(){
|
||||
local imageName=$1
|
||||
local imageTag=$2
|
||||
|
||||
if test -z "$imageTag"; then
|
||||
imageTag="latest"
|
||||
fi
|
||||
|
||||
skopeo --insecure-policy --tmpdir=$TMPDIR inspect "docker://$imageName:$imageTag" | jq '.Digest' -r
|
||||
}
|
||||
|
||||
get_name() {
|
||||
local imageName=$1
|
||||
local imageTag=$2
|
||||
|
||||
echo "docker-image-$(echo "$imageName:$imageTag" | tr '/:' '-').tar"
|
||||
}
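# e.g. get_name nixos/nix 2.2.1 -> "docker-image-nixos-nix-2.2.1.tar"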
|
||||
|
||||
argi=0
|
||||
argfun=""
|
||||
for arg; do
|
||||
if test -z "$argfun"; then
|
||||
case $arg in
|
||||
--os) argfun=set_os;;
|
||||
--arch) argfun=set_arch;;
|
||||
--image-name) argfun=set_imageName;;
|
||||
--image-tag) argfun=set_imageTag;;
|
||||
--image-digest) argfun=set_imageDigest;;
|
||||
--final-image-name) argfun=set_finalImageName;;
|
||||
--final-image-tag) argfun=set_finalImageTag;;
|
||||
--quiet) QUIET=true;;
|
||||
--json) format=json;;
|
||||
--help) usage; exit;;
|
||||
*)
|
||||
: $((++argi))
|
||||
case $argi in
|
||||
1) imageName=$arg;;
|
||||
2) [[ $arg == *"sha256"* ]] && imageDigest=$arg || imageTag=$arg;;
|
||||
*) exit 1;;
|
||||
esac
|
||||
;;
|
||||
esac
|
||||
else
|
||||
case $argfun in
|
||||
set_*)
|
||||
var=${argfun#set_}
|
||||
eval $var=$arg
|
||||
;;
|
||||
esac
|
||||
argfun=""
|
||||
fi
|
||||
done
|
||||
|
||||
if test -z "$imageName"; then
|
||||
usage
|
||||
fi
|
||||
|
||||
if test -z "$os"; then
|
||||
os=linux
|
||||
fi
|
||||
|
||||
if test -z "$arch"; then
|
||||
arch=amd64
|
||||
fi
|
||||
|
||||
if test -z "$hashType"; then
|
||||
hashType=sha256
|
||||
fi
|
||||
|
||||
if test -z "$hashFormat"; then
|
||||
hashFormat=base32
|
||||
fi
|
||||
|
||||
if test -z "$finalImageName"; then
|
||||
finalImageName="$imageName"
|
||||
fi
|
||||
|
||||
if test -z "$finalImageTag"; then
|
||||
if test -z "$imageTag"; then
|
||||
finalImageTag="latest"
|
||||
else
|
||||
finalImageTag="$imageTag"
|
||||
fi
|
||||
fi
|
||||
|
||||
if test -z "$imageDigest"; then
|
||||
imageDigest=$(get_image_digest $imageName $imageTag)
|
||||
fi
|
||||
|
||||
sourceUrl="docker://$imageName@$imageDigest"
|
||||
|
||||
tmpPath="$(mktemp -d "${TMPDIR:-/tmp}/skopeo-copy-tmp-XXXXXXXX")"
|
||||
trap "rm -rf \"$tmpPath\"" EXIT
|
||||
|
||||
tmpFile="$tmpPath/$(get_name $finalImageName $finalImageTag)"
|
||||
|
||||
if test -z "$QUIET"; then
|
||||
skopeo --insecure-policy --tmpdir=$TMPDIR --override-os ${os} --override-arch ${arch} copy "$sourceUrl" "docker-archive://$tmpFile:$finalImageName:$finalImageTag" >&2
|
||||
else
|
||||
skopeo --insecure-policy --tmpdir=$TMPDIR --override-os ${os} --override-arch ${arch} copy "$sourceUrl" "docker-archive://$tmpFile:$finalImageName:$finalImageTag" > /dev/null
|
||||
fi
|
||||
|
||||
# Compute the hash.
|
||||
imageHash=$(nix-hash --flat --type $hashType --base32 "$tmpFile")
|
||||
|
||||
# Add the downloaded file to Nix store.
|
||||
finalPath=$(nix-store --add-fixed "$hashType" "$tmpFile")
|
||||
|
||||
if test -z "$QUIET"; then
|
||||
echo "-> ImageName: $imageName" >&2
|
||||
echo "-> ImageDigest: $imageDigest" >&2
|
||||
echo "-> FinalImageName: $finalImageName" >&2
|
||||
echo "-> FinalImageTag: $finalImageTag" >&2
|
||||
echo "-> ImagePath: $finalPath" >&2
|
||||
echo "-> ImageHash: $imageHash" >&2
|
||||
fi
|
||||
|
||||
if [ "$format" == "nix" ]; then
|
||||
cat <<EOF
|
||||
{
|
||||
imageName = "$imageName";
|
||||
imageDigest = "$imageDigest";
|
||||
sha256 = "$imageHash";
|
||||
finalImageName = "$finalImageName";
|
||||
finalImageTag = "$finalImageTag";
|
||||
}
|
||||
EOF
|
||||
|
||||
else
|
||||
|
||||
cat <<EOF
|
||||
{
|
||||
"imageName": "$imageName",
|
||||
"imageDigest": "$imageDigest",
|
||||
"sha256": "$imageHash",
|
||||
"finalImageName": "$finalImageName",
|
||||
"finalImageTag": "$finalImageTag"
|
||||
}
|
||||
EOF
|
||||
|
||||
fi
|
||||
24
pkgs/build-support/docker/nix-prefetch-docker.nix
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
{ lib, stdenv, makeWrapper, nix, skopeo, jq }:
|
||||
|
||||
stdenv.mkDerivation {
|
||||
name = "nix-prefetch-docker";
|
||||
|
||||
nativeBuildInputs = [ makeWrapper ];
|
||||
|
||||
dontUnpack = true;
|
||||
|
||||
installPhase = ''
|
||||
install -vD ${./nix-prefetch-docker} $out/bin/$name;
|
||||
wrapProgram $out/bin/$name \
|
||||
--prefix PATH : ${lib.makeBinPath [ nix skopeo jq ]} \
|
||||
--set HOME /homeless-shelter
|
||||
'';
|
||||
|
||||
preferLocalBuild = true;
|
||||
|
||||
meta = with lib; {
|
||||
description = "Script used to obtain source hashes for dockerTools.pullImage";
|
||||
maintainers = with maintainers; [ offline ];
|
||||
platforms = platforms.unix;
|
||||
};
|
||||
}
|
||||
391
pkgs/build-support/docker/stream_layered_image.py
Normal file
|
|
@ -0,0 +1,391 @@
|
|||
"""
|
||||
This script generates a Docker image from a set of store paths. Uses
|
||||
Docker Image Specification v1.2 as reference [1].
|
||||
|
||||
It expects a JSON file with the following properties and writes the
|
||||
image as an uncompressed tarball to stdout:
|
||||
|
||||
* "architecture", "config", "os", "created", "repo_tag" correspond to
|
||||
the fields with the same name on the image spec [2].
|
||||
* "created" can be "now".
|
||||
* "created" is also used as mtime for files added to the image.
|
||||
* "store_layers" is a list of layers in ascending order, where each
|
||||
layer is the list of store paths to include in that layer.
|
||||
|
||||
The main challenge for this script is to create the final image in a
|
||||
streaming fashion, without dumping any intermediate data to disk
|
||||
for performance.
|
||||
|
||||
A docker image has each layer's contents archived as a separate tarball,
|
||||
and they later all get enveloped into a single big tarball in a
|
||||
content-addressed fashion. However, because of how the "tar" format works,
|
||||
we have to know about the name (which includes the checksum in our
|
||||
case) and the size of the tarball before we can start adding it to the
|
||||
outer tarball. We achieve that by creating the layer tarballs twice;
|
||||
on the first iteration we calculate the file size and the checksum,
|
||||
and on the second one we actually stream the contents. The 'add_layer_dir'
|
||||
function does all this.
|
||||
|
||||
[1]: https://github.com/moby/moby/blob/master/image/spec/v1.2.md
|
||||
[2]: https://github.com/moby/moby/blob/4fb59c20a4fb54f944fe170d0ff1d00eb4a24d6f/image/spec/v1.2.md#image-json-field-descriptions
|
||||
""" # noqa: E501
|
||||
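# An illustrative example of the JSON configuration this script consumes
# (store paths are hypothetical placeholders):
#
# {
#   "architecture": "amd64",
#   "os": "linux",
#   "config": {"Cmd": ["/nix/store/...-hello/bin/hello"]},
#   "created": "1970-01-01T00:00:01Z",
#   "repo_tag": "hello:latest",
#   "store_dir": "/nix/store",
#   "from_image": null,
#   "store_layers": [["/nix/store/...-hello"], ["/nix/store/...-glibc"]],
#   "customisation_layer": "/nix/store/...-customisation-layer"
# }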
|
||||
|
||||
import io
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import json
|
||||
import hashlib
|
||||
import pathlib
|
||||
import tarfile
|
||||
import itertools
|
||||
import threading
|
||||
from datetime import datetime, timezone
|
||||
from collections import namedtuple
|
||||
|
||||
|
||||
def archive_paths_to(obj, paths, mtime):
|
||||
"""
|
||||
Writes the given store paths as a tar file to the given stream.
|
||||
|
||||
obj: Stream to write to. Should have a 'write' method.
|
||||
paths: List of store paths.
mtime: 'mtime' of the added files. Should be an integer representing a POSIX time.
|
||||
"""
|
||||
|
||||
# gettarinfo makes the paths relative, this makes them
|
||||
# absolute again
|
||||
def append_root(ti):
|
||||
ti.name = "/" + ti.name
|
||||
return ti
|
||||
|
||||
def apply_filters(ti):
|
||||
ti.mtime = mtime
|
||||
ti.uid = 0
|
||||
ti.gid = 0
|
||||
ti.uname = "root"
|
||||
ti.gname = "root"
|
||||
return ti
|
||||
|
||||
def nix_root(ti):
|
||||
ti.mode = 0o0555 # r-xr-xr-x
|
||||
return ti
|
||||
|
||||
def dir(path):
|
||||
ti = tarfile.TarInfo(path)
|
||||
ti.type = tarfile.DIRTYPE
|
||||
return ti
|
||||
|
||||
with tarfile.open(fileobj=obj, mode="w|") as tar:
|
||||
# To be consistent with the docker utilities, we need to have
|
||||
# these directories first when building layer tarballs.
|
||||
tar.addfile(apply_filters(nix_root(dir("/nix"))))
|
||||
tar.addfile(apply_filters(nix_root(dir("/nix/store"))))
|
||||
|
||||
for path in paths:
|
||||
path = pathlib.Path(path)
|
||||
if path.is_symlink():
|
||||
files = [path]
|
||||
else:
|
||||
files = itertools.chain([path], path.rglob("*"))
|
||||
|
||||
for filename in sorted(files):
|
||||
ti = append_root(tar.gettarinfo(filename))
|
||||
|
||||
# copy hardlinks as regular files
|
||||
if ti.islnk():
|
||||
ti.type = tarfile.REGTYPE
|
||||
ti.linkname = ""
|
||||
ti.size = filename.stat().st_size
|
||||
|
||||
ti = apply_filters(ti)
|
||||
if ti.isfile():
|
||||
with open(filename, "rb") as f:
|
||||
tar.addfile(ti, f)
|
||||
else:
|
||||
tar.addfile(ti)
|
||||
|
||||
|
||||
class ExtractChecksum:
|
||||
"""
|
||||
A writable stream which only calculates the final file size and
|
||||
sha256sum, while discarding the actual contents.
|
||||
"""
|
||||
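# Illustrative use: passing an ExtractChecksum instance to archive_paths_to()
# runs the archiving once purely to learn the tarball's sha256 and size, so
# that the real bytes can be streamed on a second pass (see add_layer_dir).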
|
||||
def __init__(self):
|
||||
self._digest = hashlib.sha256()
|
||||
self._size = 0
|
||||
|
||||
def write(self, data):
|
||||
self._digest.update(data)
|
||||
self._size += len(data)
|
||||
|
||||
def extract(self):
|
||||
"""
|
||||
Returns: Hex-encoded sha256sum and size as a tuple.
|
||||
"""
|
||||
return (self._digest.hexdigest(), self._size)
|
||||
|
||||
|
||||
FromImage = namedtuple("FromImage", ["tar", "manifest_json", "image_json"])
|
||||
# Some metadata for a layer
|
||||
LayerInfo = namedtuple("LayerInfo", ["size", "checksum", "path", "paths"])
|
||||
|
||||
|
||||
def load_from_image(from_image_str):
|
||||
"""
|
||||
Loads the given base image, if any.
|
||||
|
||||
from_image_str: Path to the base image archive.
|
||||
|
||||
Returns: A 'FromImage' object with references to the loaded base image,
|
||||
or 'None' if no base image was provided.
|
||||
"""
|
||||
if from_image_str is None:
|
||||
return None
|
||||
|
||||
base_tar = tarfile.open(from_image_str)
|
||||
|
||||
manifest_json_tarinfo = base_tar.getmember("manifest.json")
|
||||
with base_tar.extractfile(manifest_json_tarinfo) as f:
|
||||
manifest_json = json.load(f)
|
||||
|
||||
image_json_tarinfo = base_tar.getmember(manifest_json[0]["Config"])
|
||||
with base_tar.extractfile(image_json_tarinfo) as f:
|
||||
image_json = json.load(f)
|
||||
|
||||
return FromImage(base_tar, manifest_json, image_json)
|
||||
|
||||
|
||||
def add_base_layers(tar, from_image):
|
||||
"""
|
||||
Adds the layers from the given base image to the final image.
|
||||
|
||||
tar: 'tarfile.TarFile' object for new layers to be added to.
|
||||
from_image: 'FromImage' object with references to the loaded base image.
|
||||
"""
|
||||
if from_image is None:
|
||||
print("No 'fromImage' provided", file=sys.stderr)
|
||||
return []
|
||||
|
||||
layers = from_image.manifest_json[0]["Layers"]
|
||||
checksums = from_image.image_json["rootfs"]["diff_ids"]
|
||||
layers_checksums = zip(layers, checksums)
|
||||
|
||||
for num, (layer, checksum) in enumerate(layers_checksums, start=1):
|
||||
layer_tarinfo = from_image.tar.getmember(layer)
|
||||
checksum = re.sub(r"^sha256:", "", checksum)
|
||||
|
||||
tar.addfile(layer_tarinfo, from_image.tar.extractfile(layer_tarinfo))
|
||||
path = layer_tarinfo.path
|
||||
size = layer_tarinfo.size
|
||||
|
||||
print("Adding base layer", num, "from", path, file=sys.stderr)
|
||||
yield LayerInfo(size=size, checksum=checksum, path=path, paths=[path])
|
||||
|
||||
from_image.tar.close()
|
||||
|
||||
|
||||
def overlay_base_config(from_image, final_config):
|
||||
"""
|
||||
Overlays the final image 'config' JSON on top of selected defaults from the
|
||||
base image 'config' JSON.
|
||||
|
||||
from_image: 'FromImage' object with references to the loaded base image.
|
||||
final_config: 'dict' object of the final image 'config' JSON.
|
||||
"""
|
||||
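# Illustrative example: a base image Env of ["PATH=/a", "FOO=1"] combined
# with a final Env of ["FOO=2"] resolves to ["PATH=/a", "FOO=2"]: entries
# are deduplicated by variable name and later (final) entries win.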
if from_image is None:
|
||||
return final_config
|
||||
|
||||
base_config = from_image.image_json["config"]
|
||||
|
||||
# Preserve environment from base image
|
||||
final_env = base_config.get("Env", []) + final_config.get("Env", [])
|
||||
if final_env:
|
||||
# Resolve duplicates (last one wins) and format back as list
|
||||
resolved_env = {entry.split("=", 1)[0]: entry for entry in final_env}
|
||||
final_config["Env"] = list(resolved_env.values())
|
||||
return final_config
|
||||
|
||||
|
||||
def add_layer_dir(tar, paths, store_dir, mtime):
|
||||
"""
|
||||
Appends given store paths to a TarFile object as a new layer.
|
||||
|
||||
tar: 'tarfile.TarFile' object for the new layer to be added to.
|
||||
paths: List of store paths.
|
||||
store_dir: the root directory of the nix store
|
||||
mtime: 'mtime' of the added files and the layer tarball.
|
||||
Should be an integer representing a POSIX time.
|
||||
|
||||
Returns: A 'LayerInfo' object containing some metadata of
|
||||
the layer added.
|
||||
"""
|
||||
|
||||
invalid_paths = [i for i in paths if not i.startswith(store_dir)]
|
||||
assert len(invalid_paths) == 0, \
|
||||
f"Expecting absolute paths from {store_dir}, but got: {invalid_paths}"
|
||||
|
||||
# First, calculate the tarball checksum and the size.
|
||||
extract_checksum = ExtractChecksum()
|
||||
archive_paths_to(
|
||||
extract_checksum,
|
||||
paths,
|
||||
mtime=mtime,
|
||||
)
|
||||
(checksum, size) = extract_checksum.extract()
|
||||
|
||||
path = f"{checksum}/layer.tar"
|
||||
layer_tarinfo = tarfile.TarInfo(path)
|
||||
layer_tarinfo.size = size
|
||||
layer_tarinfo.mtime = mtime
|
||||
|
||||
# Then actually stream the contents to the outer tarball.
|
||||
read_fd, write_fd = os.pipe()
|
||||
with open(read_fd, "rb") as read, open(write_fd, "wb") as write:
|
||||
def producer():
|
||||
archive_paths_to(
|
||||
write,
|
||||
paths,
|
||||
mtime=mtime,
|
||||
)
|
||||
write.close()
|
||||
|
||||
# Closing the write end of the fifo also closes the read end,
|
||||
# so we don't need to wait until this thread is finished.
|
||||
#
|
||||
# Any exception from the thread will get printed by the default
|
||||
# exception handler, and the 'addfile' call will fail since it
|
||||
# won't be able to read the required number of bytes.
|
||||
threading.Thread(target=producer).start()
|
||||
tar.addfile(layer_tarinfo, read)
|
||||
|
||||
return LayerInfo(size=size, checksum=checksum, path=path, paths=paths)
|
||||
|
||||
|
||||
def add_customisation_layer(target_tar, customisation_layer, mtime):
|
||||
"""
|
||||
Adds the customisation layer as a new layer. This layer is structured
|
||||
differently; the given store path has the 'layer.tar' and the corresponding
|
||||
sha256sum ready.
|
||||
|
||||
target_tar: 'tarfile.TarFile' object for the new layer to be added to.
|
||||
customisation_layer: Path containing the layer archive.
|
||||
mtime: 'mtime' of the added layer tarball.
|
||||
"""
|
||||
|
||||
checksum_path = os.path.join(customisation_layer, "checksum")
|
||||
with open(checksum_path) as f:
|
||||
checksum = f.read().strip()
|
||||
assert len(checksum) == 64, f"Invalid sha256 at {checksum_path}."
|
||||
|
||||
layer_path = os.path.join(customisation_layer, "layer.tar")
|
||||
|
||||
path = f"{checksum}/layer.tar"
|
||||
tarinfo = target_tar.gettarinfo(layer_path)
|
||||
tarinfo.name = path
|
||||
tarinfo.mtime = mtime
|
||||
|
||||
with open(layer_path, "rb") as f:
|
||||
target_tar.addfile(tarinfo, f)
|
||||
|
||||
return LayerInfo(
|
||||
size=None,
|
||||
checksum=checksum,
|
||||
path=path,
|
||||
paths=[customisation_layer]
|
||||
)
|
||||
|
||||
|
||||
def add_bytes(tar, path, content, mtime):
|
||||
"""
|
||||
Adds a file to the tarball with given path and contents.
|
||||
|
||||
tar: 'tarfile.TarFile' object.
|
||||
path: Path of the file as a string.
|
||||
content: Contents of the file.
|
||||
mtime: 'mtime' of the file. Should be an integer representing a POSIX time.
|
||||
"""
|
||||
assert type(content) is bytes
|
||||
|
||||
ti = tarfile.TarInfo(path)
|
||||
ti.size = len(content)
|
||||
ti.mtime = mtime
|
||||
tar.addfile(ti, io.BytesIO(content))
|
||||
|
||||
|
||||
def main():
|
||||
with open(sys.argv[1], "r") as f:
|
||||
conf = json.load(f)
|
||||
|
||||
created = (
|
||||
datetime.now(tz=timezone.utc)
|
||||
if conf["created"] == "now"
|
||||
else datetime.fromisoformat(conf["created"])
|
||||
)
|
||||
mtime = int(created.timestamp())
|
||||
store_dir = conf["store_dir"]
|
||||
|
||||
from_image = load_from_image(conf["from_image"])
|
||||
|
||||
with tarfile.open(mode="w|", fileobj=sys.stdout.buffer) as tar:
|
||||
layers = []
|
||||
layers.extend(add_base_layers(tar, from_image))
|
||||
|
||||
start = len(layers) + 1
|
||||
for num, store_layer in enumerate(conf["store_layers"], start=start):
|
||||
print("Creating layer", num, "from paths:", store_layer,
|
||||
file=sys.stderr)
|
||||
info = add_layer_dir(tar, store_layer, store_dir, mtime=mtime)
|
||||
layers.append(info)
|
||||
|
||||
print("Creating layer", len(layers) + 1, "with customisation...",
|
||||
file=sys.stderr)
|
||||
layers.append(
|
||||
add_customisation_layer(
|
||||
tar,
|
||||
conf["customisation_layer"],
|
||||
mtime=mtime
|
||||
)
|
||||
)
|
||||
|
||||
print("Adding manifests...", file=sys.stderr)
|
||||
|
||||
image_json = {
|
||||
"created": datetime.isoformat(created),
|
||||
"architecture": conf["architecture"],
|
||||
"os": "linux",
|
||||
"config": overlay_base_config(from_image, conf["config"]),
|
||||
"rootfs": {
|
||||
"diff_ids": [f"sha256:{layer.checksum}" for layer in layers],
|
||||
"type": "layers",
|
||||
},
|
||||
"history": [
|
||||
{
|
||||
"created": datetime.isoformat(created),
|
||||
"comment": f"store paths: {layer.paths}"
|
||||
}
|
||||
for layer in layers
|
||||
],
|
||||
}
|
||||
|
||||
image_json = json.dumps(image_json, indent=4).encode("utf-8")
|
||||
image_json_checksum = hashlib.sha256(image_json).hexdigest()
|
||||
image_json_path = f"{image_json_checksum}.json"
|
||||
add_bytes(tar, image_json_path, image_json, mtime=mtime)
|
||||
|
||||
manifest_json = [
|
||||
{
|
||||
"Config": image_json_path,
|
||||
"RepoTags": [conf["repo_tag"]],
|
||||
"Layers": [layer.path for layer in layers],
|
||||
}
|
||||
]
|
||||
manifest_json = json.dumps(manifest_json, indent=4).encode("utf-8")
|
||||
add_bytes(tar, "manifest.json", manifest_json, mtime=mtime)
|
||||
|
||||
print("Done.", file=sys.stderr)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
24
pkgs/build-support/docker/tarsum.go
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"github.com/docker/docker/pkg/tarsum"
|
||||
)
|
||||
|
||||
func main() {
|
||||
ts, err := tarsum.NewTarSum(os.Stdin, true, tarsum.Version1)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if _, err = io.Copy(ioutil.Discard, ts); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Println(ts.Sum(nil))
|
||||
}
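// Illustrative usage: `tar -c some-dir | tarsum` reads the archive from
// stdin and prints its TarSum checksum (e.g. "tarsum.v1+sha256:<hex>").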
|
||||
42
pkgs/build-support/docker/tarsum.nix
Normal file
|
|
@ -0,0 +1,42 @@
|
|||
{ stdenv, go, docker, nixosTests }:
|
||||
|
||||
stdenv.mkDerivation {
|
||||
name = "tarsum";
|
||||
|
||||
nativeBuildInputs = [ go ];
|
||||
disallowedReferences = [ go ];
|
||||
|
||||
dontUnpack = true;
|
||||
|
||||
CGO_ENABLED = 0;
|
||||
GOFLAGS = "-trimpath";
|
||||
GO111MODULE = "off";
|
||||
|
||||
buildPhase = ''
|
||||
runHook preBuild
|
||||
mkdir tarsum
|
||||
cd tarsum
|
||||
cp ${./tarsum.go} tarsum.go
|
||||
export GOPATH=$(pwd)
|
||||
export GOCACHE="$TMPDIR/go-cache"
|
||||
mkdir -p src/github.com/docker/docker/pkg
|
||||
ln -sT ${docker.moby-src}/pkg/tarsum src/github.com/docker/docker/pkg/tarsum
|
||||
go build
|
||||
runHook postBuild
|
||||
'';
|
||||
|
||||
installPhase = ''
|
||||
runHook preInstall
|
||||
mkdir -p $out/bin
|
||||
cp tarsum $out/bin/
|
||||
runHook postInstall
|
||||
'';
|
||||
|
||||
passthru = {
|
||||
tests = {
|
||||
dockerTools = nixosTests.docker-tools;
|
||||
};
|
||||
};
|
||||
|
||||
meta.platforms = go.meta.platforms;
|
||||
}
|
||||
1
pkgs/build-support/docker/test-dummy/hello.txt
Normal file
|
|
@ -0,0 +1 @@
|
|||
Hello there!
|
||||
146
pkgs/build-support/dotnet/build-dotnet-module/default.nix
Normal file
|
|
@ -0,0 +1,146 @@
|
|||
{ lib, stdenvNoCC, linkFarmFromDrvs, callPackage, nuget-to-nix, writeScript, makeWrapper, fetchurl, xml2, dotnetCorePackages, dotnetPackages, mkNugetSource, mkNugetDeps, cacert }:
|
||||
|
||||
{ name ? "${args.pname}-${args.version}"
|
||||
, pname ? name
|
||||
, enableParallelBuilding ? true
|
||||
, doCheck ? false
|
||||
# Flags to pass to `makeWrapper`. This is done to avoid double wrapping.
|
||||
, makeWrapperArgs ? []
|
||||
|
||||
# Flags to pass to `dotnet restore`.
|
||||
, dotnetRestoreFlags ? []
|
||||
# Flags to pass to `dotnet build`.
|
||||
, dotnetBuildFlags ? []
|
||||
# Flags to pass to `dotnet test`, if running tests is enabled.
|
||||
, dotnetTestFlags ? []
|
||||
# Flags to pass to `dotnet install`.
|
||||
, dotnetInstallFlags ? []
|
||||
# Flags to pass to `dotnet pack`.
|
||||
, dotnetPackFlags ? []
|
||||
# Flags to pass to dotnet in all phases.
|
||||
, dotnetFlags ? []
|
||||
|
||||
# The path to publish the project to. When unset, the directory "$out/lib/$pname" is used.
|
||||
, installPath ? null
|
||||
# The binaries that should get installed to `$out/bin`, relative to `$out/lib/$pname/`. These get wrapped accordingly.
|
||||
# Unfortunately, dotnet has no method for doing this automatically.
|
||||
# If unset, all executables in the projects root will get installed. This may cause bloat!
|
||||
, executables ? null
|
||||
# Packs a project as a `nupkg`, and installs it to `$out/share`. If set to `true`, the derivation can be used as a dependency for another dotnet project by adding it to `projectReferences`.
|
||||
, packNupkg ? false
|
||||
# The package's project file, which contains instructions on how to compile it. This can be an array of multiple project files as well.
|
||||
, projectFile ? null
|
||||
# The NuGet dependency file. This locks all NuGet dependency versions, as otherwise they cannot be deterministically fetched.
|
||||
# This can be generated by running the `passthru.fetch-deps` script.
|
||||
, nugetDeps ? null
|
||||
# A list of derivations containing nupkg packages for local project references.
|
||||
# Referenced derivations can be built with `buildDotnetModule` with `packNupkg=true` flag.
|
||||
# Since we are sharing them as nugets they must be added to csproj/fsproj files as `PackageReference` as well.
|
||||
# For example, your project has a local dependency:
|
||||
# <ProjectReference Include="../foo/bar.fsproj" />
|
||||
# To enable discovery through `projectReferences` you would need to add a line:
|
||||
# <ProjectReference Include="../foo/bar.fsproj" />
|
||||
# <PackageReference Include="bar" Version="*" Condition=" '$(ContinuousIntegrationBuild)'=='true' "/>
|
||||
, projectReferences ? []
|
||||
# Libraries that need to be available at runtime should be passed through this.
|
||||
# These get wrapped into `LD_LIBRARY_PATH`.
|
||||
, runtimeDeps ? []
|
||||
|
||||
# Tests to disable. This gets passed to `dotnet test --filter "FullyQualifiedName!={}"`, to ensure compatibility with all frameworks.
|
||||
# See https://docs.microsoft.com/en-us/dotnet/core/tools/dotnet-test#filter-option-details for more details.
|
||||
, disabledTests ? []
|
||||
# The project file to run unit tests against. This is usually referenced in the regular project file, but sometimes it needs to be manually set.
|
||||
# It gets restored and built, but not installed. You may need to regenerate your nuget lockfile after setting this.
|
||||
, testProjectFile ? ""
|
||||
|
||||
# The type of build to perform. This is passed to `dotnet` with the `--configuration` flag. Possible values are `Release`, `Debug`, etc.
|
||||
, buildType ? "Release"
|
||||
# The dotnet SDK to use.
|
||||
, dotnet-sdk ? dotnetCorePackages.sdk_6_0
|
||||
# The dotnet runtime to use.
|
||||
, dotnet-runtime ? dotnetCorePackages.runtime_6_0
|
||||
# The dotnet SDK to run tests against. This can differ from the SDK the package is compiled against.
|
||||
, dotnet-test-sdk ? dotnet-sdk
|
||||
, ... } @ args:
|
||||
|
||||
assert projectFile == null -> throw "Defining the `projectFile` attribute is required. This is usually a `.csproj` or `.sln` file.";
|
||||
|
||||
# TODO: Automatically generate a dependency file when a lockfile is present.
|
||||
# This file is unfortunately almost never present, as Microsoft recommends not pushing it to upstream repositories.
|
||||
assert nugetDeps == null -> throw "Defining the `nugetDeps` attribute is required, as to lock the NuGet dependencies. This file can be generated by running the `passthru.fetch-deps` script.";
|
||||
|
||||
let
|
||||
inherit (callPackage ./hooks {
|
||||
inherit dotnet-sdk dotnet-test-sdk disabledTests nuget-source dotnet-runtime runtimeDeps buildType;
|
||||
}) dotnetConfigureHook dotnetBuildHook dotnetCheckHook dotnetInstallHook dotnetFixupHook;
|
||||
|
||||
localDeps = if (projectReferences != [])
|
||||
then linkFarmFromDrvs "${name}-project-references" projectReferences
|
||||
else null;
|
||||
|
||||
_nugetDeps = mkNugetDeps { inherit name; nugetDeps = import nugetDeps; };
|
||||
|
||||
nuget-source = mkNugetSource {
|
||||
name = "${name}-nuget-source";
|
||||
description = "A Nuget source with the dependencies for ${name}";
|
||||
deps = [ _nugetDeps ] ++ lib.optional (localDeps != null) localDeps;
|
||||
};
|
||||
|
||||
in stdenvNoCC.mkDerivation (args // {
|
||||
nativeBuildInputs = args.nativeBuildInputs or [] ++ [
|
||||
dotnetConfigureHook
|
||||
dotnetBuildHook
|
||||
dotnetCheckHook
|
||||
dotnetInstallHook
|
||||
dotnetFixupHook
|
||||
|
||||
dotnet-sdk
|
||||
cacert
|
||||
makeWrapper
|
||||
];
|
||||
|
||||
# Stripping breaks the executable
|
||||
dontStrip = args.dontStrip or true;
|
||||
|
||||
# gappsWrapperArgs gets included when wrapping for dotnet, so as to avoid double wrapping
|
||||
dontWrapGApps = args.dontWrapGApps or true;
|
||||
|
||||
passthru = {
|
||||
inherit nuget-source;
|
||||
|
||||
fetch-deps = writeScript "fetch-${pname}-deps" ''
|
||||
set -euo pipefail
|
||||
cd "$(dirname "''${BASH_SOURCE[0]}")"
|
||||
|
||||
export HOME=$(mktemp -d)
|
||||
deps_file="/tmp/${pname}-deps.nix"
|
||||
|
||||
store_src="${args.src}"
|
||||
src="$(mktemp -d /tmp/${pname}.XXX)"
|
||||
cp -rT "$store_src" "$src"
|
||||
chmod -R +w "$src"
|
||||
|
||||
trap "rm -rf $src $HOME" EXIT
|
||||
pushd "$src"
|
||||
|
||||
export DOTNET_NOLOGO=1
|
||||
export DOTNET_CLI_TELEMETRY_OPTOUT=1
|
||||
|
||||
mkdir -p "$HOME/nuget_pkgs"
|
||||
|
||||
for project in "${lib.concatStringsSep "\" \"" ((lib.toList projectFile) ++ lib.optionals (testProjectFile != "") (lib.toList testProjectFile))}"; do
|
||||
${dotnet-sdk}/bin/dotnet restore "$project" \
|
||||
${lib.optionalString (!enableParallelBuilding) "--disable-parallel"} \
|
||||
-p:ContinuousIntegrationBuild=true \
|
||||
-p:Deterministic=true \
|
||||
--packages "$HOME/nuget_pkgs" \
|
||||
${lib.optionalString (dotnetRestoreFlags != []) (builtins.toString dotnetRestoreFlags)} \
|
||||
${lib.optionalString (dotnetFlags != []) (builtins.toString dotnetFlags)}
|
||||
done
|
||||
|
||||
echo "Writing lockfile..."
|
||||
${nuget-to-nix}/bin/nuget-to-nix "$HOME/nuget_pkgs" > "$deps_file"
|
||||
echo "Successfully wrote lockfile to: $deps_file"
|
||||
'';
|
||||
} // args.passthru or {};
|
||||
})
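# A minimal usage sketch (illustrative; all attribute values are hypothetical):
#
#   buildDotnetModule rec {
#     pname = "someApp";
#     version = "0.1.0";
#     src = ./.;
#     projectFile = "src/someApp.csproj";
#     nugetDeps = ./deps.nix;  # generated by running the passthru.fetch-deps script
#     dotnet-sdk = dotnetCorePackages.sdk_6_0;
#     dotnet-runtime = dotnetCorePackages.runtime_6_0;
#   }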
|
||||
|
|
@ -0,0 +1,62 @@
|
|||
{ lib
|
||||
, callPackage
|
||||
, makeSetupHook
|
||||
, makeWrapper
|
||||
, dotnet-sdk
|
||||
, dotnet-test-sdk
|
||||
, disabledTests
|
||||
, nuget-source
|
||||
, dotnet-runtime
|
||||
, runtimeDeps
|
||||
, buildType
|
||||
}:
|
||||
|
||||
{
|
||||
dotnetConfigureHook = callPackage ({ }:
|
||||
makeSetupHook {
|
||||
name = "dotnet-configure-hook";
|
||||
deps = [ dotnet-sdk nuget-source ];
|
||||
substitutions = {
|
||||
nugetSource = nuget-source;
|
||||
};
|
||||
} ./dotnet-configure-hook.sh) { };
|
||||
|
||||
dotnetBuildHook = callPackage ({ }:
|
||||
makeSetupHook {
|
||||
name = "dotnet-build-hook";
|
||||
deps = [ dotnet-sdk ];
|
||||
substitutions = {
|
||||
inherit buildType;
|
||||
};
|
||||
} ./dotnet-build-hook.sh) { };
|
||||
|
||||
dotnetCheckHook = callPackage ({ }:
|
||||
makeSetupHook {
|
||||
name = "dotnet-check-hook";
|
||||
deps = [ dotnet-test-sdk ];
|
||||
substitutions = {
|
||||
inherit buildType;
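# For example (illustrative test names), disabledTests = [ "Foo.Test1" "Bar.Test2" ]
# yields "Foo.Test1&FullyQualifiedName!=Bar.Test2"; the check hook prefixes it
# with "--filter FullyQualifiedName!=", giving the full dotnet test filter
# "FullyQualifiedName!=Foo.Test1&FullyQualifiedName!=Bar.Test2".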
|
||||
disabledTests = lib.optionalString (disabledTests != [])
|
||||
(lib.concatStringsSep "&FullyQualifiedName!=" disabledTests);
|
||||
};
|
||||
} ./dotnet-check-hook.sh) { };
|
||||
|
||||
dotnetInstallHook = callPackage ({ }:
|
||||
makeSetupHook {
|
||||
name = "dotnet-install-hook";
|
||||
deps = [ dotnet-sdk ];
|
||||
substitutions = {
|
||||
inherit buildType;
|
||||
};
|
||||
} ./dotnet-install-hook.sh) { };
|
||||
|
||||
dotnetFixupHook = callPackage ({ }:
|
||||
makeSetupHook {
|
||||
name = "dotnet-fixup-hook";
|
||||
deps = [ dotnet-runtime makeWrapper ];
|
||||
substitutions = {
|
||||
dotnetRuntime = dotnet-runtime;
|
||||
runtimeDeps = lib.makeLibraryPath runtimeDeps;
|
||||
};
|
||||
} ./dotnet-fixup-hook.sh) { };
|
||||
}
|
||||
|
|
@ -0,0 +1,42 @@
|
|||
# inherit arguments from derivation
|
||||
dotnetBuildFlags=( ${dotnetBuildFlags[@]-} )
|
||||
|
||||
dotnetBuildHook() {
|
||||
echo "Executing dotnetBuildHook"
|
||||
|
||||
runHook preBuild
|
||||
|
||||
if [ "${enableParallelBuilding-}" ]; then
|
||||
maxCpuFlag="$NIX_BUILD_CORES"
|
||||
parallelBuildFlag="true"
|
||||
else
|
||||
maxCpuFlag="1"
|
||||
parallelBuildFlag="false"
|
||||
fi
|
||||
|
||||
if [ "${version-}" ]; then
|
||||
versionFlag="-p:Version=${version-}"
|
||||
fi
|
||||
|
||||
for project in ${projectFile[@]} ${testProjectFile[@]}; do
|
||||
env \
|
||||
dotnet build "$project" \
|
||||
-maxcpucount:$maxCpuFlag \
|
||||
-p:BuildInParallel=$parallelBuildFlag \
|
||||
-p:ContinuousIntegrationBuild=true \
|
||||
-p:Deterministic=true \
|
||||
--configuration "@buildType@" \
|
||||
--no-restore \
|
||||
${versionFlag-} \
|
||||
${dotnetBuildFlags[@]} \
|
||||
${dotnetFlags[@]}
|
||||
done
|
||||
|
||||
runHook postBuild
|
||||
|
||||
echo "Finished dotnetBuildHook"
|
||||
}
|
||||
|
||||
if [[ -z "${dontDotnetBuild-}" && -z "${buildPhase-}" ]]; then
|
||||
buildPhase=dotnetBuildHook
|
||||
fi
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
# inherit arguments from derivation
|
||||
dotnetTestFlags=( ${dotnetTestFlags[@]-} )
|
||||
|
||||
dotnetCheckHook() {
|
||||
echo "Executing dotnetCheckHook"
|
||||
|
||||
runHook preCheck
|
||||
|
||||
if [ "${disabledTests-}" ]; then
|
||||
disabledTestsFlag="--filter FullyQualifiedName!=@disabledTests@"
|
||||
fi
|
||||
|
||||
for project in ${testProjectFile[@]}; do
|
||||
env \
|
||||
dotnet test "$project" \
|
||||
-maxcpucount:$maxCpuFlag \
|
||||
-p:ContinuousIntegrationBuild=true \
|
||||
-p:Deterministic=true \
|
||||
--configuration "@buildType@" \
|
||||
--no-build \
|
||||
--logger "console;verbosity=normal" \
|
||||
${disabledTestsFlag-} \
|
||||
"${dotnetTestFlags[@]}" \
|
||||
"${dotnetFlags[@]}"
|
||||
done
|
||||
|
||||
runHook postCheck
|
||||
|
||||
echo "Finished dotnetCheckHook"
|
||||
}
|
||||
|
||||
if [[ -z "${dontDotnetCheck-}" && -z "${checkPhase-}" ]]; then
|
||||
checkPhase=dotnetCheckHook
|
||||
fi
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
declare -a projectFile testProjectFile
|
||||
|
||||
# inherit arguments from derivation
|
||||
dotnetFlags=( ${dotnetFlags[@]-} )
|
||||
dotnetRestoreFlags=( ${dotnetRestoreFlags[@]-} )
|
||||
|
||||
dotnetConfigureHook() {
|
||||
echo "Executing dotnetConfigureHook"
|
||||
|
||||
runHook preConfigure
|
||||
|
||||
if [ -z "${enableParallelBuilding-}" ]; then
|
||||
parallelFlag="--disable-parallel"
|
||||
fi
|
||||
|
||||
for project in ${projectFile[@]} ${testProjectFile[@]}; do
|
||||
env \
|
||||
dotnet restore "$project" \
|
||||
-p:ContinuousIntegrationBuild=true \
|
||||
-p:Deterministic=true \
|
||||
--source "@nugetSource@/lib" \
|
||||
${parallelFlag-} \
|
||||
${dotnetRestoreFlags[@]} \
|
||||
${dotnetFlags[@]}
|
||||
done
|
||||
|
||||
runHook postConfigure
|
||||
|
||||
echo "Finished dotnetConfigureHook"
|
||||
}
|
||||
|
||||
if [[ -z "${dontDotnetConfigure-}" && -z "${configurePhase-}" ]]; then
|
||||
configurePhase=dotnetConfigureHook
|
||||
fi
|
||||
|
|
@ -0,0 +1,43 @@
|
|||
# Inherit arguments from the derivation
|
||||
makeWrapperArgs=( ${makeWrapperArgs-} )
|
||||
|
||||
# First argument is the executable you want to wrap,
|
||||
# the second is the destination for the wrapper.
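# For example (hypothetical paths):
#   wrapDotnetProgram "$out/lib/$pname/MyApp" "$out/bin/myapp"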
|
||||
wrapDotnetProgram() {
|
||||
makeWrapper "$1" "$2" \
|
||||
--set "DOTNET_ROOT" "@dotnetRuntime@" \
|
||||
--suffix "LD_LIBRARY_PATH" : "@runtimeDeps@" \
|
||||
"${gappsWrapperArgs[@]}" \
|
||||
"${makeWrapperArgs[@]}"
|
||||
|
||||
echo "Installed wrapper to: "$2""
|
||||
}
|
||||
|
||||
dotnetFixupHook() {
|
||||
echo "Executing dotnetFixupPhase"
|
||||
|
||||
if [ "${executables}" ]; then
|
||||
for executable in ${executables[@]}; do
|
||||
execPath="$out/lib/${pname}/$executable"
|
||||
|
||||
if [[ -f "$execPath" && -x "$execPath" ]]; then
|
||||
wrapDotnetProgram "$execPath" "$out/bin/$(basename "$executable")"
|
||||
else
|
||||
echo "Specified binary \"$executable\" is either not an executable, or does not exist!"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
else
|
||||
for executable in $out/lib/${pname}/*; do
|
||||
if [[ -f "$executable" && -x "$executable" && "$executable" != *"dll"* ]]; then
|
||||
wrapDotnetProgram "$executable" "$out/bin/$(basename "$executable")"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
echo "Finished dotnetFixupPhase"
|
||||
}
|
||||
|
||||
if [[ -z "${dontDotnetFixup-}" ]]; then
|
||||
preFixupPhases+=" dotnetFixupHook"
|
||||
fi
|
||||
|
|
@ -0,0 +1,43 @@
|
|||
# inherit arguments from derivation
|
||||
dotnetInstallFlags=( ${dotnetInstallFlags[@]-} )
|
||||
|
||||
dotnetInstallHook() {
|
||||
echo "Executing dotnetInstallHook"
|
||||
|
||||
runHook preInstall
|
||||
|
||||
for project in ${projectFile[@]}; do
|
||||
env \
|
||||
dotnet publish "$project" \
|
||||
-p:ContinuousIntegrationBuild=true \
|
||||
-p:Deterministic=true \
|
||||
--output "$out/lib/${pname}" \
|
||||
--configuration "@buildType@" \
|
||||
--no-build \
|
||||
--no-self-contained \
|
||||
${dotnetInstallFlags[@]} \
|
||||
${dotnetFlags[@]}
|
||||
done
|
||||
|
||||
if [[ "${packNupkg-}" ]]; then
|
||||
for project in ${projectFile[@]}; do
|
||||
env \
|
||||
dotnet pack "$project" \
|
||||
-p:ContinuousIntegrationBuild=true \
|
||||
-p:Deterministic=true \
|
||||
--output "$out/share" \
|
||||
--configuration "@buildType@" \
|
||||
--no-build \
|
||||
${dotnetPackFlags[@]} \
|
||||
${dotnetFlags[@]}
|
||||
done
|
||||
fi
|
||||
|
||||
runHook postInstall
|
||||
|
||||
echo "Finished dotnetInstallHook"
|
||||
}
|
||||
|
||||
if [[ -z "${dontDotnetInstall-}" && -z "${installPhase-}" ]]; then
|
||||
installPhase=dotnetInstallHook
|
||||
fi
|
||||
116
pkgs/build-support/dotnet/build-dotnet-package/default.nix
Normal file
|
|
@ -0,0 +1,116 @@
|
|||
{ stdenv, lib, makeWrapper, pkg-config, mono, dotnetbuildhelpers }:
|
||||
|
||||
attrsOrig @
|
||||
{ pname
|
||||
, version
|
||||
, nativeBuildInputs ? []
|
||||
, xBuildFiles ? [ ]
|
||||
, xBuildFlags ? [ "/p:Configuration=Release" ]
|
||||
, outputFiles ? [ "bin/Release/*" ]
|
||||
, dllFiles ? [ "*.dll" ]
|
||||
, exeFiles ? [ "*.exe" ]
|
||||
# Additional arguments to pass to the makeWrapper function, which wraps
|
||||
# generated binaries.
|
||||
, makeWrapperArgs ? [ ]
|
||||
, ... }:
|
||||
let
|
||||
arrayToShell = (a: toString (map (lib.escape (lib.stringToCharacters "\\ ';$`()|<>\t") ) a));
|
||||
|
||||
attrs = {
|
||||
inherit pname version;
|
||||
|
||||
nativeBuildInputs = [
|
||||
pkg-config
|
||||
makeWrapper
|
||||
dotnetbuildhelpers
|
||||
mono
|
||||
] ++ nativeBuildInputs;
|
||||
|
||||
configurePhase = ''
|
||||
runHook preConfigure
|
||||
|
||||
[ -z "''${dontPlacateNuget-}" ] && placate-nuget.sh
|
||||
[ -z "''${dontPlacatePaket-}" ] && placate-paket.sh
|
||||
[ -z "''${dontPatchFSharpTargets-}" ] && patch-fsharp-targets.sh
|
||||
|
||||
runHook postConfigure
|
||||
'';
|
||||
|
||||
buildPhase = ''
|
||||
runHook preBuild
|
||||
|
||||
echo Building dotNET packages...
|
||||
|
||||
# Probably needs to be moved to fsharp
|
||||
if pkg-config FSharp.Core
|
||||
then
|
||||
export FSharpTargetsPath="$(dirname $(pkg-config FSharp.Core --variable=Libraries))/Microsoft.FSharp.Targets"
|
||||
fi
|
||||
|
||||
ran=""
|
||||
for xBuildFile in ${arrayToShell xBuildFiles} ''${xBuildFilesExtra}
|
||||
do
|
||||
ran="yes"
|
||||
xbuild ${arrayToShell xBuildFlags} ''${xBuildFlagsArray} $xBuildFile
|
||||
done
|
||||
|
||||
[ -z "$ran" ] && xbuild ${arrayToShell xBuildFlags} ''${xBuildFlagsArray}
|
||||
|
||||
runHook postBuild
|
||||
'';
|
||||
|
||||
dontStrip = true;
|
||||
|
||||
installPhase = ''
|
||||
runHook preInstall
|
||||
|
||||
target="$out/lib/dotnet/${pname}"
|
||||
mkdir -p "$target"
|
||||
|
||||
cp -rv ${arrayToShell outputFiles} "''${outputFilesArray[@]}" "$target"
|
||||
|
||||
if [ -z "''${dontRemoveDuplicatedDlls-}" ]
|
||||
then
|
||||
pushd "$out"
|
||||
remove-duplicated-dlls.sh
|
||||
popd
|
||||
fi
|
||||
|
||||
set -f
|
||||
for dllPattern in ${arrayToShell dllFiles} ''${dllFilesArray[@]}
|
||||
do
|
||||
set +f
|
||||
for dll in "$target"/$dllPattern
|
||||
do
|
||||
[ -f "$dll" ] || continue
|
||||
if pkg-config $(basename -s .dll "$dll")
|
||||
then
|
||||
echo "$dll already exported by a buildInputs, not re-exporting"
|
||||
else
|
||||
create-pkg-config-for-dll.sh "$out/lib/pkgconfig" "$dll"
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
||||
set -f
|
||||
for exePattern in ${arrayToShell exeFiles} ''${exeFilesArray[@]}
|
||||
do
|
||||
set +f
|
||||
for exe in "$target"/$exePattern
|
||||
do
|
||||
[ -f "$exe" ] || continue
|
||||
mkdir -p "$out"/bin
|
||||
commandName="$(basename -s .exe "$(echo "$exe" | tr "[A-Z]" "[a-z]")")"
|
||||
makeWrapper \
|
||||
"${mono}/bin/mono" \
|
||||
"$out"/bin/"$commandName" \
|
||||
--add-flags "\"$exe\"" \
|
||||
''${makeWrapperArgs}
|
||||
done
|
||||
done
|
||||
|
||||
runHook postInstall
|
||||
'';
|
||||
};
|
||||
in
|
||||
stdenv.mkDerivation (attrs // (builtins.removeAttrs attrsOrig [ "nativeBuildInputs" ] ))
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
targetDir="$1"
|
||||
dllFullPath="$2"
|
||||
|
||||
dllVersion="$(monodis --assembly "$dllFullPath" | grep ^Version: | cut -f 2 -d : | xargs)"
|
||||
[ -z "$dllVersion" ] && echo "Defaulting dllVersion to 0.0.0" && dllVersion="0.0.0"
|
||||
dllFileName="$(basename $dllFullPath)"
|
||||
dllRootName="$(basename -s .dll $dllFileName)"
|
||||
targetPcFile="$targetDir"/"$dllRootName".pc
|
||||
|
||||
mkdir -p "$targetDir"
|
||||
|
||||
cat > $targetPcFile << EOF
|
||||
Libraries=$dllFullPath
|
||||
|
||||
Name: $dllRootName
|
||||
Description: $dllRootName
|
||||
Version: $dllVersion
|
||||
Libs: -r:$dllFileName
|
||||
EOF
|
||||
|
||||
echo "Created $targetPcFile"
|
||||
18
pkgs/build-support/dotnet/dotnetbuildhelpers/default.nix
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
{ runCommand, mono, pkg-config }:
|
||||
runCommand
|
||||
"dotnetbuildhelpers"
|
||||
{ preferLocalBuild = true; }
|
||||
''
|
||||
target="$out/bin"
|
||||
mkdir -p "$target"
|
||||
|
||||
for script in ${./create-pkg-config-for-dll.sh} ${./patch-fsharp-targets.sh} ${./remove-duplicated-dlls.sh} ${./placate-nuget.sh} ${./placate-paket.sh}
|
||||
do
|
||||
scriptName="$(basename "$script" | cut -f 2- -d -)"
|
||||
cp -v "$script" "$target"/"$scriptName"
|
||||
chmod 755 "$target"/"$scriptName"
|
||||
patchShebangs "$target"/"$scriptName"
|
||||
substituteInPlace "$target"/"$scriptName" --replace pkg-config ${pkg-config}/bin/${pkg-config.targetPrefix}pkg-config
|
||||
substituteInPlace "$target"/"$scriptName" --replace monodis ${mono}/bin/monodis
|
||||
done
|
||||
''
|
||||
20
pkgs/build-support/dotnet/dotnetbuildhelpers/patch-fsharp-targets.sh
Executable file
|
|
@ -0,0 +1,20 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Some project files look for F# targets in $(FSharpTargetsPath)
|
||||
# so it's a good idea to add something like this to your ~/.bash_profile:
|
||||
|
||||
# export FSharpTargetsPath=$(dirname $(which fsharpc))/../lib/mono/4.0/Microsoft.FSharp.Targets
|
||||
|
||||
# In build scripts, you would add something like this:
|
||||
|
||||
# export FSharpTargetsPath="${fsharp}/lib/mono/4.0/Microsoft.FSharp.Targets"
|
||||
|
||||
# However, some project files look for F# targets in the main Mono directory. When that happens
|
||||
# patch the project files using this script so they will look in $(FSharpTargetsPath) instead.
|
||||
|
||||
echo "Patching F# targets in fsproj files..."
|
||||
|
||||
find -iname \*.fsproj -print -exec \
|
||||
sed --in-place=.bak \
|
||||
-e 's,<FSharpTargetsPath>\([^<]*\)</FSharpTargetsPath>,<FSharpTargetsPath Condition="Exists('\'\\1\'')">\1</FSharpTargetsPath>,'g \
|
||||
{} \;
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
echo Placating Nuget in nuget.targets
|
||||
find -iname nuget.targets -print -exec sed --in-place=bak -e 's,mono --runtime[^<]*,true NUGET PLACATED BY buildDotnetPackage,g' {} \;
|
||||
|
||||
echo Just to be sure, replacing Nuget executables by empty files.
|
||||
find . -iname nuget.exe \! -size 0 -exec mv -v {} {}.bak \; -exec touch {} \;
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
echo Placating Paket in paket.targets
|
||||
find -iname paket.targets -print -exec sed --in-place=bak -e 's,mono --runtime[^<]*,true PAKET PLACATED BY buildDotnetPackage,g' {} \;
|
||||
|
||||
echo Just to be sure, replacing Paket executables by empty files.
|
||||
find . -iname paket\*.exe \! -size 0 -exec mv -v {} {}.bak \; -exec touch {} \;
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
IFS="
|
||||
"
|
||||
|
||||
for dll in $(find -iname \*.dll)
|
||||
do
|
||||
baseName="$(basename "$dll" | sed "s/.dll$//i")"
|
||||
if pkg-config "$baseName"
|
||||
then
|
||||
candidateDll="$(pkg-config "$baseName" --variable=Libraries)"
|
||||
|
||||
if diff "$dll" "$candidateDll" >/dev/null
|
||||
then
|
||||
echo "$dll is identical to $candidateDll. Substituting..."
|
||||
rm -vf "$dll"
|
||||
ln -sv "$candidateDll" "$dll"
|
||||
else
|
||||
echo "$dll and $candidateDll share the same name but have different contents, leaving alone."
|
||||
fi
|
||||
fi
|
||||
done
|
||||
20
pkgs/build-support/dotnet/dotnetenv/Wrapper/Wrapper.sln
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
|
||||
Microsoft Visual Studio Solution File, Format Version 11.00
|
||||
# Visual Studio 2010
|
||||
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Wrapper", "Wrapper\Wrapper.csproj", "{D01B3597-E85E-42F4-940A-EF5AE712942F}"
|
||||
EndProject
|
||||
Global
|
||||
GlobalSection(SolutionConfigurationPlatforms) = preSolution
|
||||
Debug|x86 = Debug|x86
|
||||
Release|x86 = Release|x86
|
||||
EndGlobalSection
|
||||
GlobalSection(ProjectConfigurationPlatforms) = postSolution
|
||||
{D01B3597-E85E-42F4-940A-EF5AE712942F}.Debug|x86.ActiveCfg = Debug|x86
|
||||
{D01B3597-E85E-42F4-940A-EF5AE712942F}.Debug|x86.Build.0 = Debug|x86
|
||||
{D01B3597-E85E-42F4-940A-EF5AE712942F}.Release|x86.ActiveCfg = Release|x86
|
||||
{D01B3597-E85E-42F4-940A-EF5AE712942F}.Release|x86.Build.0 = Release|x86
|
||||
EndGlobalSection
|
||||
GlobalSection(SolutionProperties) = preSolution
|
||||
HideSolutionNode = FALSE
|
||||
EndGlobalSection
|
||||
EndGlobal
|
||||
|
|
@ -0,0 +1,36 @@
|
|||
using System.Reflection;
|
||||
using System.Runtime.CompilerServices;
|
||||
using System.Runtime.InteropServices;
|
||||
|
||||
// General Information about an assembly is controlled through the following
|
||||
// set of attributes. Change these attribute values to modify the information
|
||||
// associated with an assembly.
|
||||
[assembly: AssemblyTitle("Wrapper")]
|
||||
[assembly: AssemblyDescription("")]
|
||||
[assembly: AssemblyConfiguration("")]
|
||||
[assembly: AssemblyCompany("Philips Healthcare")]
|
||||
[assembly: AssemblyProduct("Wrapper")]
|
||||
[assembly: AssemblyCopyright("Copyright © Philips Healthcare 2011")]
|
||||
[assembly: AssemblyTrademark("")]
|
||||
[assembly: AssemblyCulture("")]
|
||||
|
||||
// Setting ComVisible to false makes the types in this assembly not visible
|
||||
// to COM components. If you need to access a type in this assembly from
|
||||
// COM, set the ComVisible attribute to true on that type.
|
||||
[assembly: ComVisible(false)]
|
||||
|
||||
// The following GUID is for the ID of the typelib if this project is exposed to COM
|
||||
[assembly: Guid("2045ce22-78c7-4cd6-ad0a-9367f8a49738")]
|
||||
|
||||
// Version information for an assembly consists of the following four values:
|
||||
//
|
||||
// Major Version
|
||||
// Minor Version
|
||||
// Build Number
|
||||
// Revision
|
||||
//
|
||||
// You can specify all the values or you can default the Build and Revision Numbers
|
||||
// by using the '*' as shown below:
|
||||
// [assembly: AssemblyVersion("1.0.*")]
|
||||
[assembly: AssemblyVersion("1.0.0.0")]
|
||||
[assembly: AssemblyFileVersion("1.0.0.0")]
|
||||
|
|
@ -0,0 +1,66 @@
|
|||
using System;
|
||||
using System.Reflection;
|
||||
using System.IO;
|
||||
|
||||
namespace @NAMESPACE@Wrapper
|
||||
{
|
||||
class @MAINCLASSNAME@Wrapper
|
||||
{
|
||||
private String[] AssemblySearchPaths = { @ASSEMBLYSEARCHPATH@ };
|
||||
|
||||
private String ExePath = @"@EXEPATH@";
|
||||
|
||||
private String MainClassName = "@NAMESPACE@.@MAINCLASSNAME@";
|
||||
|
||||
private Assembly exeAssembly;
|
||||
|
||||
public @MAINCLASSNAME@Wrapper(string[] args)
|
||||
{
|
||||
// Attach the resolve event handler to the AppDomain so that missing library assemblies will be searched
|
||||
AppDomain currentDomain = AppDomain.CurrentDomain;
|
||||
currentDomain.AssemblyResolve += new ResolveEventHandler(MyResolveEventHandler);
|
||||
|
||||
// Dynamically load the executable assembly
|
||||
exeAssembly = Assembly.LoadFrom(ExePath);
|
||||
|
||||
// Lookup the main class
|
||||
Type mainClass = exeAssembly.GetType(MainClassName);
|
||||
|
||||
// Lookup the main method
|
||||
MethodInfo mainMethod = mainClass.GetMethod("Main");
|
||||
|
||||
// Invoke the main method
|
||||
mainMethod.Invoke(this, new Object[] {args});
|
||||
}
|
||||
|
||||
static void Main(string[] args)
|
||||
{
|
||||
new @MAINCLASSNAME@Wrapper(args);
|
||||
}
|
||||
|
||||
private Assembly MyResolveEventHandler(object sender, ResolveEventArgs args)
|
||||
{
|
||||
// This handler is called only when the common language runtime tries to bind to the assembly and fails.
|
||||
|
||||
Assembly MyAssembly;
|
||||
String assemblyPath = "";
|
||||
String requestedAssemblyName = args.Name.Substring(0, args.Name.IndexOf(","));
|
||||
|
||||
// Search for the right path of the library assembly
|
||||
foreach (String currentAssemblyPath in AssemblySearchPaths)
|
||||
{
|
||||
assemblyPath = currentAssemblyPath + "/" + requestedAssemblyName + ".dll";
|
||||
|
||||
if (File.Exists(assemblyPath))
|
||||
break;
|
||||
}
|
||||
|
||||
// Load the assembly from the specified path.
|
||||
MyAssembly = Assembly.LoadFrom(assemblyPath);
|
||||
|
||||
// Return the loaded assembly.
|
||||
return MyAssembly;
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,57 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<Project ToolsVersion="4.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<PropertyGroup>
|
||||
<Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
|
||||
<Platform Condition=" '$(Platform)' == '' ">x86</Platform>
|
||||
<ProductVersion>8.0.30703</ProductVersion>
|
||||
<SchemaVersion>2.0</SchemaVersion>
|
||||
<ProjectGuid>{D01B3597-E85E-42F4-940A-EF5AE712942F}</ProjectGuid>
|
||||
<OutputType>Exe</OutputType>
|
||||
<AppDesignerFolder>Properties</AppDesignerFolder>
|
||||
<RootNamespace>@ROOTNAMESPACE@</RootNamespace>
|
||||
<AssemblyName>@ASSEMBLYNAME@</AssemblyName>
|
||||
<TargetFrameworkVersion>v4.0</TargetFrameworkVersion>
|
||||
<TargetFrameworkProfile>Client</TargetFrameworkProfile>
|
||||
<FileAlignment>512</FileAlignment>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|x86' ">
|
||||
<PlatformTarget>x86</PlatformTarget>
|
||||
<DebugSymbols>true</DebugSymbols>
|
||||
<DebugType>full</DebugType>
|
||||
<Optimize>false</Optimize>
|
||||
<OutputPath>bin\Debug\</OutputPath>
|
||||
<DefineConstants>DEBUG;TRACE</DefineConstants>
|
||||
<ErrorReport>prompt</ErrorReport>
|
||||
<WarningLevel>4</WarningLevel>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|x86' ">
|
||||
<PlatformTarget>x86</PlatformTarget>
|
||||
<DebugType>pdbonly</DebugType>
|
||||
<Optimize>true</Optimize>
|
||||
<OutputPath>bin\Release\</OutputPath>
|
||||
<DefineConstants>TRACE</DefineConstants>
|
||||
<ErrorReport>prompt</ErrorReport>
|
||||
<WarningLevel>4</WarningLevel>
|
||||
</PropertyGroup>
|
||||
<ItemGroup>
|
||||
<Reference Include="System" />
|
||||
<Reference Include="System.Core" />
|
||||
<Reference Include="System.Xml.Linq" />
|
||||
<Reference Include="System.Data.DataSetExtensions" />
|
||||
<Reference Include="Microsoft.CSharp" />
|
||||
<Reference Include="System.Data" />
|
||||
<Reference Include="System.Xml" />
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<Compile Include="Wrapper.cs" />
|
||||
<Compile Include="Properties\AssemblyInfo.cs" />
|
||||
</ItemGroup>
|
||||
<Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
|
||||
<!-- To modify your build process, add your task inside one of the targets below and uncomment it.
|
||||
Other similar extension points exist, see Microsoft.Common.targets.
|
||||
<Target Name="BeforeBuild">
|
||||
</Target>
|
||||
<Target Name="AfterBuild">
|
||||
</Target>
|
||||
-->
|
||||
</Project>
|
||||
85
pkgs/build-support/dotnet/dotnetenv/build-solution.nix
Normal file
|
|
@ -0,0 +1,85 @@
|
|||
{ lib, stdenv, dotnetfx }:
|
||||
{ name
|
||||
, src
|
||||
, baseDir ? "."
|
||||
, slnFile
|
||||
, targets ? "ReBuild"
|
||||
, verbosity ? "detailed"
|
||||
, options ? "/p:Configuration=Debug;Platform=Win32"
|
||||
, assemblyInputs ? []
|
||||
, preBuild ? ""
|
||||
, modifyPublicMain ? false
|
||||
, mainClassFile ? null
|
||||
}:
|
||||
|
||||
assert modifyPublicMain -> mainClassFile != null;
|
||||
|
||||
stdenv.mkDerivation {
|
||||
inherit name src;
|
||||
|
||||
buildInputs = [ dotnetfx ];
|
||||
|
||||
preConfigure = ''
|
||||
cd ${baseDir}
|
||||
'';
|
||||
|
||||
preBuild = ''
|
||||
${lib.optionalString modifyPublicMain ''
|
||||
sed -i -e "s|static void Main|public static void Main|" ${mainClassFile}
|
||||
''}
|
||||
${preBuild}
|
||||
'';
|
||||
|
||||
installPhase = ''
|
||||
addDeps()
|
||||
{
|
||||
if [ -f $1/nix-support/dotnet-assemblies ]
|
||||
then
|
||||
for i in $(cat $1/nix-support/dotnet-assemblies)
|
||||
do
|
||||
windowsPath=$(cygpath --windows $i)
|
||||
assemblySearchPaths="$assemblySearchPaths;$windowsPath"
|
||||
|
||||
addDeps $i
|
||||
done
|
||||
fi
|
||||
}
|
||||
|
||||
for i in ${toString assemblyInputs}
|
||||
do
|
||||
windowsPath=$(cygpath --windows $i)
|
||||
echo "Using assembly path: $windowsPath"
|
||||
|
||||
if [ "$assemblySearchPaths" = "" ]
|
||||
then
|
||||
assemblySearchPaths="$windowsPath"
|
||||
else
|
||||
assemblySearchPaths="$assemblySearchPaths;$windowsPath"
|
||||
fi
|
||||
|
||||
addDeps $i
|
||||
done
|
||||
|
||||
echo "Assembly search paths are: $assemblySearchPaths"
|
||||
|
||||
if [ "$assemblySearchPaths" != "" ]
|
||||
then
|
||||
echo "Using assembly search paths args: $assemblySearchPathsArg"
|
||||
export AssemblySearchPaths=$assemblySearchPaths
|
||||
fi
|
||||
|
||||
mkdir -p $out
|
||||
MSBuild.exe ${toString slnFile} /nologo /t:${targets} /p:IntermediateOutputPath=$(cygpath --windows $out)\\ /p:OutputPath=$(cygpath --windows $out)\\ /verbosity:${verbosity} ${options}
|
||||
|
||||
# Because .NET assemblies store strings as UTF-16 internally, we cannot detect
# hashes. Therefore a text file containing the proper paths is created.
# We can also use this file to propagate transitive dependencies.
|
||||
|
||||
mkdir -p $out/nix-support
|
||||
|
||||
for i in ${toString assemblyInputs}
|
||||
do
|
||||
echo $i >> $out/nix-support/dotnet-assemblies
|
||||
done
|
||||
'';
|
||||
}
|
||||
17
pkgs/build-support/dotnet/dotnetenv/default.nix
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
{ lib, stdenv, dotnetfx }:
|
||||
|
||||
let dotnetenv =
|
||||
{
|
||||
buildSolution = import ./build-solution.nix {
|
||||
inherit lib stdenv;
|
||||
dotnetfx = dotnetfx.pkg;
|
||||
};
|
||||
|
||||
buildWrapper = import ./wrapper.nix {
|
||||
inherit dotnetenv;
|
||||
};
|
||||
|
||||
inherit (dotnetfx) assembly20Path wcfPath referenceAssembly30Path referenceAssembly35Path;
|
||||
};
|
||||
in
|
||||
dotnetenv
|
||||
64
pkgs/build-support/dotnet/dotnetenv/wrapper.nix
Normal file
|
|
@ -0,0 +1,64 @@
|
|||
{dotnetenv}:
|
||||
|
||||
{ name
|
||||
, src
|
||||
, baseDir ? "."
|
||||
, slnFile
|
||||
, targets ? "ReBuild"
|
||||
, verbosity ? "detailed"
|
||||
, options ? "/p:Configuration=Debug;Platform=Win32"
|
||||
, assemblyInputs ? []
|
||||
, preBuild ? ""
|
||||
, namespace
|
||||
, mainClassName
|
||||
, mainClassFile
|
||||
, modifyPublicMain ? true
|
||||
}:
|
||||
|
||||
let
|
||||
application = dotnetenv.buildSolution {
|
||||
inherit name src baseDir slnFile targets verbosity;
|
||||
inherit options assemblyInputs preBuild;
|
||||
inherit modifyPublicMain mainClassFile;
|
||||
};
|
||||
in
|
||||
dotnetenv.buildSolution {
|
||||
name = "${name}-wrapper";
|
||||
src = ./Wrapper;
|
||||
slnFile = "Wrapper.sln";
|
||||
assemblyInputs = [ application ];
|
||||
preBuild = ''
|
||||
addRuntimeDeps()
|
||||
{
|
||||
if [ -f $1/nix-support/dotnet-assemblies ]
|
||||
then
|
||||
for i in $(cat $1/nix-support/dotnet-assemblies)
|
||||
do
|
||||
windowsPath=$(cygpath --windows $i | sed 's|\\|\\\\|g')
|
||||
assemblySearchArray="$assemblySearchArray @\"$windowsPath\""
|
||||
|
||||
addRuntimeDeps $i
|
||||
done
|
||||
fi
|
||||
}
|
||||
|
||||
export exePath=$(cygpath --windows $(find ${application} -name \*.exe) | sed 's|\\|\\\\|g')
|
||||
|
||||
# Generate assemblySearchPaths string array contents
|
||||
for path in ${toString assemblyInputs}
|
||||
do
|
||||
assemblySearchArray="$assemblySearchArray @\"$(cygpath --windows $path | sed 's|\\|\\\\|g')\", "
|
||||
addRuntimeDeps $path
|
||||
done
|
||||
|
||||
sed -e "s|@ROOTNAMESPACE@|${namespace}Wrapper|" \
|
||||
-e "s|@ASSEMBLYNAME@|${namespace}|" \
|
||||
Wrapper/Wrapper.csproj.in > Wrapper/Wrapper.csproj
|
||||
|
||||
sed -e "s|@NAMESPACE@|${namespace}|g" \
|
||||
-e "s|@MAINCLASSNAME@|${mainClassName}|g" \
|
||||
-e "s|@EXEPATH@|$exePath|g" \
|
||||
-e "s|@ASSEMBLYSEARCHPATH@|$assemblySearchArray|" \
|
||||
Wrapper/Wrapper.cs.in > Wrapper/Wrapper.cs
|
||||
'';
|
||||
}
|
||||
43
pkgs/build-support/dotnet/fetchnuget/default.nix
Normal file
|
|
@ -0,0 +1,43 @@
|
|||
{ fetchurl, buildDotnetPackage, unzip }:
|
||||
|
||||
attrs @
|
||||
{ pname
|
||||
, version
|
||||
, url ? "https://www.nuget.org/api/v2/package/${pname}/${version}"
|
||||
, sha256 ? ""
|
||||
, md5 ? ""
|
||||
, ...
|
||||
}:
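# A minimal sketch of a call site (package name, version and hash are
# illustrative only):
#   fetchnuget {
#     pname = "Example.Package";
#     version = "1.2.3";
#     sha256 = "0000000000000000000000000000000000000000000000000000";
#   }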
|
||||
if md5 != "" then
|
||||
throw "fetchnuget does not support md5 anymore, please use sha256"
|
||||
else
|
||||
buildDotnetPackage ({
|
||||
src = fetchurl {
|
||||
inherit url sha256;
|
||||
name = "${pname}.${version}.zip";
|
||||
};
|
||||
|
||||
sourceRoot = ".";
|
||||
|
||||
nativeBuildInputs = [ unzip ];
|
||||
|
||||
dontBuild = true;
|
||||
|
||||
preInstall = ''
|
||||
function traverseRename () {
|
||||
for e in *
|
||||
do
|
||||
t="$(echo "$e" | sed -e "s/%20/\ /g" -e "s/%2B/+/g")"
|
||||
[ "$t" != "$e" ] && mv -vn "$e" "$t"
|
||||
if [ -d "$t" ]
|
||||
then
|
||||
cd "$t"
|
||||
traverseRename
|
||||
cd ..
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
traverseRename
|
||||
'';
|
||||
} // attrs)
|
||||
9
pkgs/build-support/dotnet/make-nuget-deps/default.nix
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
{ linkFarmFromDrvs, fetchurl }:
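# `nugetDeps` is expected to be a function over `fetchNuGet` returning a list,
# i.e. the shape emitted by nuget-to-nix (names and hash illustrative):
#   { fetchNuGet }: [
#     (fetchNuGet { pname = "Example.Package"; version = "1.2.3"; sha256 = "..."; })
#   ]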
|
||||
{ name, nugetDeps }:
|
||||
linkFarmFromDrvs "${name}-nuget-deps" (nugetDeps {
|
||||
fetchNuGet = { pname, version, sha256 }: fetchurl {
|
||||
name = "${pname}-${version}.nupkg";
|
||||
url = "https://www.nuget.org/api/v2/package/${pname}/${version}";
|
||||
inherit sha256;
|
||||
};
|
||||
})
|
||||
38
pkgs/build-support/dotnet/make-nuget-source/default.nix
Normal file
|
|
@ -0,0 +1,38 @@
|
|||
{ dotnetPackages, lib, xml2, stdenvNoCC }:
|
||||
|
||||
{ name
|
||||
, description ? ""
|
||||
, deps ? []
|
||||
}:
|
||||
|
||||
let
|
||||
nuget-source = stdenvNoCC.mkDerivation rec {
|
||||
inherit name;
|
||||
|
||||
meta.description = description;
|
||||
nativeBuildInputs = [ dotnetPackages.Nuget xml2 ];
|
||||
|
||||
buildCommand = ''
|
||||
export HOME=$(mktemp -d)
|
||||
mkdir -p $out/{lib,share}
|
||||
|
||||
${lib.concatMapStringsSep "\n" (dep: ''
|
||||
nuget init "${dep}" "$out/lib"
|
||||
'') deps}
|
||||
|
||||
# Generates a list of all licenses' spdx ids, if available.
|
||||
# Note that this currently ignores any license provided in plain text (e.g. "LICENSE.txt")
|
||||
find "$out/lib" -name "*.nuspec" -exec sh -c \
|
||||
"NUSPEC=\$(xml2 < {}) && echo "\$NUSPEC" | grep license/@type=expression | tr -s \ '\n' | grep "license=" | cut -d'=' -f2" \
|
||||
\; | sort -u > $out/share/licenses
|
||||
'';
|
||||
} // { # We need data from `$out` for `meta`, so we have to use overrides as to not hit infinite recursion.
|
||||
meta.license = let
|
||||
depLicenses = lib.splitString "\n" (builtins.readFile "${nuget-source}/share/licenses");
|
||||
in (lib.flatten (lib.forEach depLicenses (spdx:
|
||||
if (spdx != "")
|
||||
then lib.getLicenseFromSpdxId spdx
|
||||
else []
|
||||
)));
|
||||
};
|
||||
in nuget-source
|
||||
27
pkgs/build-support/dotnet/nuget-to-nix/default.nix
Normal file
|
|
@ -0,0 +1,27 @@
|
|||
{ lib
|
||||
, runCommandLocal
|
||||
, runtimeShell
|
||||
, substituteAll
|
||||
, nix
|
||||
, coreutils
|
||||
, findutils
|
||||
, gnused
|
||||
}:
|
||||
|
||||
runCommandLocal "nuget-to-nix" {
|
||||
script = substituteAll {
|
||||
src = ./nuget-to-nix.sh;
|
||||
inherit runtimeShell;
|
||||
|
||||
binPath = lib.makeBinPath [
|
||||
nix
|
||||
coreutils
|
||||
findutils
|
||||
gnused
|
||||
];
|
||||
};
|
||||
|
||||
meta.description = "Convert a nuget packages directory to a lockfile for buildDotnetModule";
|
||||
} ''
|
||||
install -Dm755 $script $out/bin/nuget-to-nix
|
||||
''
|
||||
29
pkgs/build-support/dotnet/nuget-to-nix/nuget-to-nix.sh
Executable file
|
|
@ -0,0 +1,29 @@
|
|||
#!@runtimeShell@
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
export PATH="@binPath@"
|
||||
|
||||
if [ $# -eq 0 ]; then
|
||||
>&2 echo "Usage: $0 [packages directory] > deps.nix"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
pkgs=$1
|
||||
tmpfile=$(mktemp /tmp/nuget-to-nix.XXXXXX)
|
||||
trap "rm -f ${tmpfile}" EXIT
|
||||
|
||||
echo "{ fetchNuGet }: ["
|
||||
|
||||
while read pkg_spec; do
|
||||
{ read pkg_name; read pkg_version; } < <(
|
||||
# Build version part should be ignored: `3.0.0-beta2.20059.3+77df2220` -> `3.0.0-beta2.20059.3`
|
||||
sed -nE 's/.*<id>([^<]*).*/\1/p; s/.*<version>([^<+]*).*/\1/p' "$pkg_spec")
|
||||
pkg_sha256="$(nix-hash --type sha256 --flat --base32 "$(dirname "$pkg_spec")"/*.nupkg)"
|
||||
|
||||
echo " (fetchNuGet { pname = \"$pkg_name\"; version = \"$pkg_version\"; sha256 = \"$pkg_sha256\"; })" >> ${tmpfile}
|
||||
done < <(find $1 -name '*.nuspec')
|
||||
|
||||
LC_ALL=C sort --ignore-case ${tmpfile}
|
||||
|
||||
echo "]"
|
||||
79
pkgs/build-support/emacs/buffer.nix
Normal file
|
|
@ -0,0 +1,79 @@
|
|||
# Functions to build elisp files to locally configure emacs buffers.
|
||||
# See https://github.com/shlevy/nix-buffer
|
||||
|
||||
{ lib, writeText, inherit-local }:
|
||||
|
||||
rec {
|
||||
withPackages = pkgs': let
|
||||
pkgs = builtins.filter (x: x != null) pkgs';
|
||||
extras = map (x: x.emacsBufferSetup pkgs) (builtins.filter (builtins.hasAttr "emacsBufferSetup") pkgs);
|
||||
in writeText "dir-locals.el" ''
|
||||
(require 'inherit-local "${inherit-local}/share/emacs/site-lisp/elpa/inherit-local-${inherit-local.version}/inherit-local.elc")
|
||||
|
||||
; Only set up nixpkgs buffer handling when we have some buffers active
|
||||
(defvar nixpkgs--buffer-count 0)
|
||||
(when (eq nixpkgs--buffer-count 0)
|
||||
(make-variable-buffer-local 'nixpkgs--is-nixpkgs-buffer)
|
||||
; When generating a new temporary buffer (one whose name starts with a space), do inherit-local inheritance and make it a nixpkgs buffer
|
||||
(defun nixpkgs--around-generate (orig name)
|
||||
(if (and nixpkgs--is-nixpkgs-buffer (eq (aref name 0) ?\s))
|
||||
(let ((buf (funcall orig name)))
|
||||
(progn
|
||||
(inherit-local-inherit-child buf)
|
||||
(with-current-buffer buf
|
||||
(setq nixpkgs--buffer-count (1+ nixpkgs--buffer-count))
|
||||
(add-hook 'kill-buffer-hook 'nixpkgs--decrement-buffer-count nil t)))
|
||||
buf)
|
||||
(funcall orig name)))
|
||||
(advice-add 'generate-new-buffer :around #'nixpkgs--around-generate)
|
||||
; When we have no more nixpkgs buffers, tear down the buffer handling
|
||||
(defun nixpkgs--decrement-buffer-count ()
|
||||
(setq nixpkgs--buffer-count (1- nixpkgs--buffer-count))
|
||||
(when (eq nixpkgs--buffer-count 0)
|
||||
(advice-remove 'generate-new-buffer #'nixpkgs--around-generate)
|
||||
(fmakunbound 'nixpkgs--around-generate)
|
||||
(fmakunbound 'nixpkgs--decrement-buffer-count))))
|
||||
(setq nixpkgs--buffer-count (1+ nixpkgs--buffer-count))
|
||||
(add-hook 'kill-buffer-hook 'nixpkgs--decrement-buffer-count nil t)
|
||||
|
||||
; Add packages to PATH and exec-path
|
||||
(make-local-variable 'process-environment)
|
||||
(put 'process-environment 'permanent-local t)
|
||||
(inherit-local 'process-environment)
|
||||
; setenv modifies in place, so copy the environment first
|
||||
(setq process-environment (copy-tree process-environment))
|
||||
(setenv "PATH" (concat "${lib.makeSearchPath "bin" pkgs}:" (getenv "PATH")))
|
||||
(inherit-local-permanent exec-path (append '(${builtins.concatStringsSep " " (map (p: "\"${p}/bin\"") pkgs)}) exec-path))
|
||||
|
||||
(inherit-local-permanent eshell-path-env (concat "${lib.makeSearchPath "bin" pkgs}:" eshell-path-env))
|
||||
|
||||
(setq nixpkgs--is-nixpkgs-buffer t)
|
||||
(inherit-local 'nixpkgs--is-nixpkgs-buffer)
|
||||
|
||||
${lib.concatStringsSep "\n" extras}
|
||||
'';
|
||||
# nix-buffer function for a project with a bunch of haskell packages
|
||||
# in one directory
|
||||
haskellMonoRepo = { project-root # The monorepo root
|
||||
, haskellPackages # The composed haskell packages set that contains all of the packages
|
||||
}: { root }:
|
||||
let # The haskell paths.
|
||||
haskell-paths = lib.filesystem.haskellPathsInDir project-root;
|
||||
# Find the haskell package that the 'root' is in, if any.
|
||||
haskell-path-parent =
|
||||
let filtered = builtins.filter (name:
|
||||
lib.hasPrefix (toString (project-root + "/${name}")) (toString root)
|
||||
) (builtins.attrNames haskell-paths);
|
||||
in
|
||||
if filtered == [] then null else builtins.head filtered;
|
||||
# We're in the directory of a haskell package
|
||||
is-haskell-package = haskell-path-parent != null;
|
||||
haskell-package = haskellPackages.${haskell-path-parent};
|
||||
# GHC environment with all needed deps for the haskell package
|
||||
haskell-package-env =
|
||||
builtins.head haskell-package.env.nativeBuildInputs;
|
||||
in
|
||||
if is-haskell-package
|
||||
then withPackages [ haskell-package-env ]
|
||||
else {};
|
||||
}
|
||||
41
pkgs/build-support/emacs/elpa.nix
Normal file
|
|
@ -0,0 +1,41 @@
|
|||
# builder for Emacs packages built for package.el
|
||||
|
||||
{ lib, stdenv, emacs, texinfo, writeText, gcc }:
|
||||
|
||||
with lib;
|
||||
|
||||
{ pname
|
||||
, version
|
||||
, src
|
||||
, meta ? {}
|
||||
, ...
|
||||
}@args:
|
||||
|
||||
let
|
||||
|
||||
defaultMeta = {
|
||||
homepage = args.src.meta.homepage or "https://elpa.gnu.org/packages/${pname}.html";
|
||||
};
|
||||
|
||||
in
|
||||
|
||||
import ./generic.nix { inherit lib stdenv emacs texinfo writeText gcc; } ({
|
||||
|
||||
dontUnpack = true;
|
||||
|
||||
installPhase = ''
|
||||
runHook preInstall
|
||||
|
||||
emacs --batch -Q -l ${./elpa2nix.el} \
|
||||
-f elpa2nix-install-package \
|
||||
"$src" "$out/share/emacs/site-lisp/elpa"
|
||||
|
||||
runHook postInstall
|
||||
'';
|
||||
|
||||
meta = defaultMeta // meta;
|
||||
}
|
||||
|
||||
// removeAttrs args [ "files" "fileSpecs"
|
||||
"meta"
|
||||
])
|
||||
33
pkgs/build-support/emacs/elpa2nix.el
Normal file
|
|
@ -0,0 +1,33 @@
|
|||
(require 'package)
|
||||
(package-initialize)
|
||||
|
||||
(defun elpa2nix-install-package ()
|
||||
(if (not noninteractive)
|
||||
(error "`elpa2nix-install-package' is to be used only with -batch"))
|
||||
(pcase command-line-args-left
|
||||
(`(,archive ,elpa)
|
||||
(progn (setq package-user-dir elpa)
|
||||
(elpa2nix-install-file archive)))))
|
||||
|
||||
(defun elpa2nix-install-from-buffer ()
|
||||
"Install a package from the current buffer."
|
||||
(let ((pkg-desc (if (derived-mode-p 'tar-mode)
|
||||
(package-tar-file-info)
|
||||
(package-buffer-info))))
|
||||
;; Install the package itself.
|
||||
(package-unpack pkg-desc)
|
||||
pkg-desc))
|
||||
|
||||
(defun elpa2nix-install-file (file)
|
||||
"Install a package from a file.
|
||||
The file can either be a tar file or an Emacs Lisp file."
|
||||
(let ((is-tar (string-match "\\.tar\\'" file)))
|
||||
(with-temp-buffer
|
||||
(if is-tar
|
||||
(insert-file-contents-literally file)
|
||||
(insert-file-contents file))
|
||||
(when is-tar (tar-mode))
|
||||
(elpa2nix-install-from-buffer))))
|
||||
|
||||
;; Allow installing package tarfiles larger than 10MB
|
||||
(setq large-file-warning-threshold nil)
|
||||
34
pkgs/build-support/emacs/emacs-funcs.sh
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
addToEmacsLoadPath() {
|
||||
local lispDir="$1"
|
||||
if [[ -d $lispDir && ${EMACSLOADPATH-} != *"$lispDir":* ]] ; then
|
||||
# It turns out that the trailing : is actually required
|
||||
# see https://www.gnu.org/software/emacs/manual/html_node/elisp/Library-Search.html
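# (an empty element in EMACSLOADPATH stands for Emacs' default load-path, so
# the trailing "$lispDir:" form keeps the built-in directories reachable)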
|
||||
export EMACSLOADPATH="$lispDir:${EMACSLOADPATH-}"
|
||||
fi
|
||||
}
|
||||
|
||||
addToEmacsNativeLoadPath() {
|
||||
local nativeDir="$1"
|
||||
if [[ -d $nativeDir && ${EMACSNATIVELOADPATH-} != *"$nativeDir":* ]]; then
|
||||
export EMACSNATIVELOADPATH="$nativeDir:${EMACSNATIVELOADPATH-}"
|
||||
fi
|
||||
}
|
||||
|
||||
addEmacsVars () {
|
||||
addToEmacsLoadPath "$1/share/emacs/site-lisp"
|
||||
|
||||
if [ -n "${addEmacsNativeLoadPath:-}" ]; then
|
||||
addToEmacsNativeLoadPath "$1/share/emacs/native-lisp"
|
||||
fi
|
||||
|
||||
# Add sub paths to the Emacs load path if it is a directory
|
||||
# containing .el files. This is necessary to build some packages,
|
||||
# e.g., using trivialBuild.
|
||||
for lispDir in \
|
||||
"$1/share/emacs/site-lisp/"* \
|
||||
"$1/share/emacs/site-lisp/elpa/"*; do
|
||||
if [[ -d $lispDir && "$(echo "$lispDir"/*.el)" ]] ; then
|
||||
addToEmacsLoadPath "$lispDir"
|
||||
fi
|
||||
done
|
||||
}
|
||||
95
pkgs/build-support/emacs/generic.nix
Normal file
|
|
@ -0,0 +1,95 @@
|
|||
# generic builder for Emacs packages
|
||||
|
||||
{ lib, stdenv, emacs, texinfo, writeText, gcc, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
{ pname
|
||||
, version ? null
|
||||
|
||||
, buildInputs ? []
|
||||
, packageRequires ? []
|
||||
|
||||
, meta ? {}
|
||||
|
||||
, ...
|
||||
}@args:
|
||||
|
||||
let
|
||||
|
||||
defaultMeta = {
|
||||
broken = false;
|
||||
platforms = emacs.meta.platforms;
|
||||
} // optionalAttrs ((args.src.meta.homepage or "") != "") {
|
||||
homepage = args.src.meta.homepage;
|
||||
};
|
||||
|
||||
in
|
||||
|
||||
stdenv.mkDerivation ({
|
||||
name = "emacs-${pname}${optionalString (version != null) "-${version}"}";
|
||||
|
||||
unpackCmd = ''
|
||||
case "$curSrc" in
|
||||
*.el)
|
||||
# keep original source filename without the hash
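# (store file names are <32-char hash>-<name>, so dropping 33 characters
# removes the hash plus the separating dash)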
|
||||
local filename=$(basename "$curSrc")
|
||||
filename="''${filename:33}"
|
||||
cp $curSrc $filename
|
||||
chmod +w $filename
|
||||
sourceRoot="."
|
||||
;;
|
||||
*)
|
||||
_defaultUnpack "$curSrc"
|
||||
;;
|
||||
esac
|
||||
'';
|
||||
|
||||
buildInputs = [emacs texinfo] ++ packageRequires ++ buildInputs;
|
||||
propagatedBuildInputs = packageRequires;
|
||||
propagatedUserEnvPkgs = packageRequires;
|
||||
|
||||
setupHook = writeText "setup-hook.sh" ''
|
||||
source ${./emacs-funcs.sh}
|
||||
|
||||
if [[ ! -v emacsHookDone ]]; then
|
||||
emacsHookDone=1
|
||||
|
||||
# If this is for a wrapper derivation, emacs and the dependencies are all
|
||||
# run-time dependencies. If this is for precompiling packages into bytecode,
|
||||
# emacs is a compile-time dependency of the package.
|
||||
addEnvHooks "$hostOffset" addEmacsVars
|
||||
addEnvHooks "$targetOffset" addEmacsVars
|
||||
fi
|
||||
'';
|
||||
|
||||
doCheck = false;
|
||||
|
||||
meta = defaultMeta // meta;
|
||||
}
|
||||
|
||||
// lib.optionalAttrs (emacs.nativeComp or false) {
|
||||
|
||||
LIBRARY_PATH = "${lib.getLib stdenv.cc.libc}/lib";
|
||||
|
||||
nativeBuildInputs = [ gcc ];
|
||||
|
||||
addEmacsNativeLoadPath = true;
|
||||
|
||||
postInstall = ''
|
||||
# Besides adding the output directory to the native load path, make sure
|
||||
# the current package's elisp files are in the load path, otherwise
|
||||
# (require 'file-b) from file-a.el in the same package will fail.
|
||||
mkdir -p $out/share/emacs/native-lisp
|
||||
source ${./emacs-funcs.sh}
|
||||
addEmacsVars "$out"
|
||||
|
||||
find $out/share/emacs -type f -name '*.el' -print0 \
|
||||
| xargs -0 -n 1 -I {} -P $NIX_BUILD_CORES sh -c \
|
||||
"emacs --batch --eval '(setq large-file-warning-threshold nil)' -f batch-native-compile {} || true"
|
||||
'';
|
||||
}
|
||||
|
||||
// removeAttrs args [ "buildInputs" "packageRequires"
|
||||
"meta"
|
||||
])
|
||||
97
pkgs/build-support/emacs/melpa.nix
Normal file
|
|
@ -0,0 +1,97 @@
|
|||
# builder for Emacs packages built for package.el
|
||||
# using MELPA package-build.el
|
||||
|
||||
{ lib, stdenv, fetchFromGitHub, emacs, texinfo, writeText, gcc }:
|
||||
|
||||
with lib;
|
||||
|
||||
{ /*
|
||||
pname: Nix package name without special symbols and without version or
|
||||
"emacs-" prefix.
|
||||
*/
|
||||
pname
|
||||
/*
|
||||
ename: Original Emacs package name, possibly containing special symbols.
|
||||
*/
|
||||
, ename ? null
|
||||
, version
|
||||
, recipe
|
||||
, meta ? {}
|
||||
, ...
|
||||
}@args:
|
||||
|
||||
let
|
||||
|
||||
defaultMeta = {
|
||||
homepage = args.src.meta.homepage or "https://melpa.org/#/${pname}";
|
||||
};
|
||||
|
||||
in
|
||||
|
||||
import ./generic.nix { inherit lib stdenv emacs texinfo writeText gcc; } ({
|
||||
|
||||
ename =
|
||||
if ename == null
|
||||
then pname
|
||||
else ename;
|
||||
|
||||
packageBuild = fetchFromGitHub {
|
||||
owner = "melpa";
|
||||
repo = "package-build";
|
||||
rev = "35017a2d87376c70c3239f48bdbac7efca85aa10";
|
||||
sha256 = "07hdmam85452v4r2vaabj1qfyami1hgbh0jgj9dcwbkpr0y1gvqj";
|
||||
};
|
||||
|
||||
elpa2nix = ./elpa2nix.el;
|
||||
melpa2nix = ./melpa2nix.el;
|
||||
|
||||
preUnpack = ''
|
||||
mkdir -p "$NIX_BUILD_TOP/recipes"
|
||||
if [ -n "$recipe" ]; then
|
||||
cp "$recipe" "$NIX_BUILD_TOP/recipes/$ename"
|
||||
fi
|
||||
|
||||
ln -s "$packageBuild" "$NIX_BUILD_TOP/package-build"
|
||||
|
||||
mkdir -p "$NIX_BUILD_TOP/packages"
|
||||
'';
|
||||
|
||||
postUnpack = ''
|
||||
mkdir -p "$NIX_BUILD_TOP/working"
|
||||
ln -s "$NIX_BUILD_TOP/$sourceRoot" "$NIX_BUILD_TOP/working/$ename"
|
||||
'';
|
||||
|
||||
buildPhase = ''
|
||||
runHook preBuild
|
||||
|
||||
cd "$NIX_BUILD_TOP"
|
||||
|
||||
emacs --batch -Q \
|
||||
-L "$NIX_BUILD_TOP/package-build" \
|
||||
-l "$melpa2nix" \
|
||||
-f melpa2nix-build-package \
|
||||
$ename $version $commit
|
||||
|
||||
runHook postBuild
|
||||
'';
|
||||
|
||||
installPhase = ''
|
||||
runHook preInstall
|
||||
|
||||
archive="$NIX_BUILD_TOP/packages/$ename-$version.el"
|
||||
if [ ! -f "$archive" ]; then
|
||||
archive="$NIX_BUILD_TOP/packages/$ename-$version.tar"
|
||||
fi
|
||||
|
||||
emacs --batch -Q \
|
||||
-l "$elpa2nix" \
|
||||
-f elpa2nix-install-package \
|
||||
"$archive" "$out/share/emacs/site-lisp/elpa"
|
||||
|
||||
runHook postInstall
|
||||
'';
|
||||
|
||||
meta = defaultMeta // meta;
|
||||
}
|
||||
|
||||
// removeAttrs args [ "meta" ])
|
||||
32
pkgs/build-support/emacs/melpa2nix.el
Normal file
|
|
@ -0,0 +1,32 @@
|
|||
(require 'package)
|
||||
(package-initialize)
|
||||
|
||||
(require 'package-recipe)
|
||||
(require 'package-build)
|
||||
|
||||
(setq package-build-working-dir (expand-file-name "working/"))
|
||||
(setq package-build-archive-dir (expand-file-name "packages/"))
|
||||
(setq package-build-recipes-dir (expand-file-name "recipes/"))
|
||||
|
||||
;; Allow installing package tarfiles larger than 10MB
|
||||
(setq large-file-warning-threshold nil)
|
||||
|
||||
(defun melpa2nix-build-package-1 (rcp version commit)
|
||||
(let ((source-dir (package-recipe--working-tree rcp)))
|
||||
(unwind-protect
|
||||
(let ((files (package-build-expand-files-spec rcp t)))
|
||||
(cond
|
||||
((= (length files) 1)
|
||||
(package-build--build-single-file-package
|
||||
rcp version commit files source-dir))
|
||||
((> (length files) 1)
|
||||
(package-build--build-multi-file-package
|
||||
rcp version commit files source-dir))
|
||||
(t (error "Unable to find files matching recipe patterns")))))))
|
||||
|
||||
(defun melpa2nix-build-package ()
|
||||
(if (not noninteractive)
|
||||
(error "`melpa2nix-build-package' is to be used only with -batch"))
|
||||
(pcase command-line-args-left
|
||||
(`(,package ,version ,commit)
|
||||
(melpa2nix-build-package-1 (package-recipe-lookup package) version commit))))
|
||||
6
pkgs/build-support/emacs/mk-wrapper-subdirs.el
Normal file
|
|
@ -0,0 +1,6 @@
|
|||
(defmacro mk-subdirs-expr (path)
|
||||
`(setq load-path
|
||||
(delete-dups (append '(,path)
|
||||
',(let ((default-directory path))
|
||||
(normal-top-level-add-subdirs-to-load-path))
|
||||
load-path))))
|
||||
29
pkgs/build-support/emacs/trivial.nix
Normal file
|
|
@ -0,0 +1,29 @@
|
|||
# trivial builder for Emacs packages
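#
# A hedged usage sketch (names illustrative); package sets typically expose
# this builder as `trivialBuild`:
#   emacsPackages.trivialBuild {
#     pname = "my-config";
#     version = "0.1";
#     src = ./my-config.el;
#   }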
|
||||
|
||||
{ callPackage, lib, ... }@envargs:
|
||||
|
||||
with lib;
|
||||
|
||||
args:
|
||||
|
||||
callPackage ./generic.nix envargs ({
|
||||
buildPhase = ''
|
||||
runHook preBuild
|
||||
|
||||
emacs -L . --batch -f batch-byte-compile *.el
|
||||
|
||||
runHook postBuild
|
||||
'';
|
||||
|
||||
installPhase = ''
|
||||
runHook preInstall
|
||||
|
||||
LISPDIR=$out/share/emacs/site-lisp
|
||||
install -d $LISPDIR
|
||||
install *.el *.elc $LISPDIR
|
||||
|
||||
runHook postInstall
|
||||
'';
|
||||
}
|
||||
|
||||
// args)
|
||||
241
pkgs/build-support/emacs/wrapper.nix
Normal file
|
|
@ -0,0 +1,241 @@
|
|||
/*
|
||||
|
||||
# Usage
|
||||
|
||||
`emacs.pkgs.withPackages` takes a single argument: a function from a package
|
||||
set to a list of packages (the packages that will be available in
|
||||
Emacs). For example,
|
||||
```
|
||||
emacs.pkgs.withPackages (epkgs: [ epkgs.evil epkgs.magit ])
|
||||
```
|
||||
All the packages in the list should come from the provided package
|
||||
set. It is possible to add any package to the list, but the provided
|
||||
set is guaranteed to have consistent dependencies and be built with
|
||||
the correct version of Emacs.
|
||||
|
||||
# Overriding
|
||||
|
||||
`emacs.pkgs.withPackages` inherits the package set which contains it, so the
|
||||
correct way to override the provided package set is to override the
|
||||
set which contains `emacs.pkgs.withPackages`. For example, to override
|
||||
`emacs.pkgs.emacs.pkgs.withPackages`,
|
||||
```
|
||||
let customEmacsPackages =
|
||||
emacs.pkgs.overrideScope' (self: super: {
|
||||
# use a custom version of emacs
|
||||
emacs = ...;
|
||||
# use the unstable MELPA version of magit
|
||||
magit = self.melpaPackages.magit;
|
||||
});
|
||||
in customEmacsPackages.withPackages (epkgs: [ epkgs.evil epkgs.magit ])
|
||||
```
|
||||
|
||||
*/
|
||||
|
||||
{ lib, lndir, makeWrapper, runCommand, gcc }: self:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
|
||||
inherit (self) emacs;
|
||||
|
||||
nativeComp = emacs.nativeComp or false;
|
||||
|
||||
in
|
||||
|
||||
packagesFun: # packages explicitly requested by the user
|
||||
|
||||
let
|
||||
explicitRequires =
|
||||
if lib.isFunction packagesFun
|
||||
then packagesFun self
|
||||
else packagesFun;
|
||||
in
|
||||
|
||||
runCommand
|
||||
(appendToName "with-packages" emacs).name
|
||||
{
|
||||
nativeBuildInputs = [ emacs lndir makeWrapper ];
|
||||
inherit emacs explicitRequires;
|
||||
|
||||
preferLocalBuild = true;
|
||||
allowSubstitutes = false;
|
||||
|
||||
# Store all paths we want to add to emacs here, so that we only need to add
|
||||
# one path to the load lists
|
||||
deps = runCommand "emacs-packages-deps"
|
||||
{
|
||||
inherit explicitRequires lndir emacs;
|
||||
nativeBuildInputs = lib.optional nativeComp gcc;
|
||||
}
|
||||
''
|
||||
findInputsOld() {
|
||||
local pkg="$1"; shift
|
||||
local var="$1"; shift
|
||||
local propagatedBuildInputsFiles=("$@")
|
||||
|
||||
# TODO(@Ericson2314): Restore using associative array once Darwin
|
||||
# nix-shell doesn't use impure bash. This should replace the O(n)
|
||||
# case with an O(1) hash map lookup, assuming bash is implemented
|
||||
# well :D.
|
||||
local varSlice="$var[*]"
|
||||
# ''${..-} to hack around old bash empty array problem
|
||||
case "''${!varSlice-}" in
|
||||
*" $pkg "*) return 0 ;;
|
||||
esac
|
||||
unset -v varSlice
|
||||
|
||||
eval "$var"'+=("$pkg")'
|
||||
|
||||
if ! [ -e "$pkg" ]; then
|
||||
echo "build input $pkg does not exist" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
local file
|
||||
for file in "''${propagatedBuildInputsFiles[@]}"; do
|
||||
file="$pkg/nix-support/$file"
|
||||
[[ -f "$file" ]] || continue
|
||||
|
||||
local pkgNext
|
||||
for pkgNext in $(< "$file"); do
|
||||
findInputsOld "$pkgNext" "$var" "''${propagatedBuildInputsFiles[@]}"
|
||||
done
|
||||
done
|
||||
}
|
||||
mkdir -p $out/bin
|
||||
mkdir -p $out/share/emacs/site-lisp
|
||||
${optionalString nativeComp ''
|
||||
mkdir -p $out/share/emacs/native-lisp
|
||||
''}
|
||||
|
||||
local requires
|
||||
for pkg in $explicitRequires; do
|
||||
findInputsOld $pkg requires propagated-user-env-packages
|
||||
done
|
||||
# requires now holds all requested packages and their transitive dependencies
|
||||
|
||||
linkPath() {
|
||||
local pkg=$1
|
||||
local origin_path=$2
|
||||
local dest_path=$3
|
||||
|
||||
# Add the path to the search path list, but only if it exists
|
||||
if [[ -d "$pkg/$origin_path" ]]; then
|
||||
$lndir/bin/lndir -silent "$pkg/$origin_path" "$out/$dest_path"
|
||||
fi
|
||||
}
|
||||
|
||||
linkEmacsPackage() {
|
||||
linkPath "$1" "bin" "bin"
|
||||
linkPath "$1" "share/emacs/site-lisp" "share/emacs/site-lisp"
|
||||
${optionalString nativeComp ''
|
||||
linkPath "$1" "share/emacs/native-lisp" "share/emacs/native-lisp"
|
||||
''}
|
||||
}
|
||||
|
||||
siteAutoloads="$out/share/emacs/site-lisp/nix-generated-autoload.el"
|
||||
touch $siteAutoloads
|
||||
|
||||
    # Iterate over the array of inputs (avoiding nix's own interpolation)
    for pkg in "''${requires[@]}"; do
      linkEmacsPackage $pkg
      find $pkg -name "*-autoloads.el" \
        -exec echo \(load \"{}\" \'noerror \'nomessage\) \; >> $siteAutoloads
    done
    echo "(provide 'nix-generated-autoload)" >> $siteAutoloads

    siteStart="$out/share/emacs/site-lisp/site-start.el"
    siteStartByteCompiled="$siteStart"c
    subdirs="$out/share/emacs/site-lisp/subdirs.el"
    subdirsByteCompiled="$subdirs"c

    # A dependency may have brought along the original siteStart or subdirs;
    # delete them and create our own.
    # Begin the new site-start.el by loading the original, which sets some
    # NixOS-specific paths. Paths are searched in the reverse of the order
    # they are specified in, so user and system profile paths are searched last.
    #
    # NOTE: Avoid displaying messages early at startup by binding
    # inhibit-message to t. This prevents the Emacs GUI from showing up
    # prematurely. The messages are still logged to the *Messages* buffer.
    rm -f $siteStart $siteStartByteCompiled $subdirs $subdirsByteCompiled
    cat >"$siteStart" <<EOF
(let ((inhibit-message t))
  (load-file "$emacs/share/emacs/site-lisp/site-start.el"))
(add-to-list 'load-path "$out/share/emacs/site-lisp")
(add-to-list 'exec-path "$out/bin")
${optionalString nativeComp ''
  (add-to-list 'native-comp-eln-load-path "$out/share/emacs/native-lisp/")
''}
EOF

    # Generate a subdirs.el that statically adds all subdirectories to load-path.
    $emacs/bin/emacs \
      --batch \
      --load ${./mk-wrapper-subdirs.el} \
      --eval "(prin1 (macroexpand-1 '(mk-subdirs-expr \"$out/share/emacs/site-lisp\")))" \
      > "$subdirs"

    # Byte-compiling improves start-up time only slightly, but costs nothing.
    $emacs/bin/emacs --batch -f batch-byte-compile "$siteStart" "$subdirs" "$siteAutoloads"

    ${optionalString nativeComp ''
      $emacs/bin/emacs --batch \
        --eval "(add-to-list 'native-comp-eln-load-path \"$out/share/emacs/native-lisp/\")" \
        -f batch-native-compile "$siteStart" "$subdirs" "$siteAutoloads"
    ''}
  '';

  inherit (emacs) meta;
}
''
  mkdir -p "$out/bin"

  # Wrap emacs and friends so they find our site-start.el before the original.
  for prog in $emacs/bin/*; do # */
    local progname=$(basename "$prog")
    local autoloadExpression=""
    rm -f "$out/bin/$progname"
    if [[ $progname == emacs ]]; then
      # progs other than "emacs" do not understand the `-l` switches
      autoloadExpression="-l cl-loaddefs -l nix-generated-autoload"
    fi

    substitute ${./wrapper.sh} $out/bin/$progname \
      --subst-var-by bash ${emacs.stdenv.shell} \
      --subst-var-by wrapperSiteLisp "$deps/share/emacs/site-lisp" \
      --subst-var-by wrapperSiteLispNative "$deps/share/emacs/native-lisp:" \
      --subst-var autoloadExpression \
      --subst-var prog
    chmod +x $out/bin/$progname
  done

  # Wrap the macOS app. It has to pick up resources and metadata
  # to be recognized as an "app".
  if [ -d "$emacs/Applications/Emacs.app" ]; then
    mkdir -p $out/Applications/Emacs.app/Contents/MacOS
    cp -r $emacs/Applications/Emacs.app/Contents/Info.plist \
          $emacs/Applications/Emacs.app/Contents/PkgInfo \
          $emacs/Applications/Emacs.app/Contents/Resources \
          $out/Applications/Emacs.app/Contents

    substitute ${./wrapper.sh} $out/Applications/Emacs.app/Contents/MacOS/Emacs \
      --subst-var-by bash ${emacs.stdenv.shell} \
      --subst-var-by wrapperSiteLisp "$deps/share/emacs/site-lisp" \
      --subst-var-by wrapperSiteLispNative "$deps/share/emacs/native-lisp:" \
      --subst-var-by autoloadExpression "-l cl-loaddefs -l nix-generated-autoload" \
      --subst-var-by prog "$emacs/Applications/Emacs.app/Contents/MacOS/Emacs"
    chmod +x $out/Applications/Emacs.app/Contents/MacOS/Emacs
  fi

  mkdir -p $out/share
  # Link icons and desktop files into place
  for dir in applications icons info man emacs; do
    ln -s $emacs/share/$dir $out/share/$dir
  done
''
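Usage sketch (not part of this diff; the `emacsWithPackages` attribute and the package names are assumptions about how the wrapper built above is normally consumed):

  # Illustrative only: an Emacs whose wrapper and generated site-start.el
  # know about the selected packages.
  myEmacs = emacsWithPackages (epkgs: [ epkgs.magit epkgs.use-package ]);

The resulting bin/emacs then finds the generated site-lisp and autoloads without the user having to set EMACSLOADPATH.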
47
pkgs/build-support/emacs/wrapper.sh
Normal file
@ -0,0 +1,47 @@
#!@bash@

# Splice our site-lisp directories into the (native) load path environment
# variables, then exec the real Emacs binary.
IFS=:

newLoadPath=()
newNativeLoadPath=()
added=
addedNative=  # separate flag so the two loops do not interfere with each other

if [[ -n $EMACSLOADPATH ]]
then
  # Insert @wrapperSiteLisp@ right before the first empty entry, which Emacs
  # expands to its built-in default load-path, and keep that empty entry.
  while read -rd: entry
  do
    if [[ -z $entry && -z $added ]]
    then
      newLoadPath+=(@wrapperSiteLisp@)
      added=1
    fi
    newLoadPath+=("$entry")
  done <<< "$EMACSLOADPATH:"
else
  newLoadPath+=(@wrapperSiteLisp@)
  newLoadPath+=("")
fi

if [[ -n $EMACSNATIVELOADPATH ]]
then
  while read -rd: entry
  do
    if [[ -z $entry && -z $addedNative ]]
    then
      newNativeLoadPath+=(@wrapperSiteLispNative@)
      addedNative=1
    fi
    newNativeLoadPath+=("$entry")
  done <<< "$EMACSNATIVELOADPATH:"
else
  newNativeLoadPath+=(@wrapperSiteLispNative@)
  newNativeLoadPath+=("")
fi

export EMACSLOADPATH="${newLoadPath[*]}"
export emacsWithPackages_siteLisp=@wrapperSiteLisp@

export EMACSNATIVELOADPATH="${newNativeLoadPath[*]}"
export emacsWithPackages_siteLispNative=@wrapperSiteLispNative@

exec @prog@ @autoloadExpression@ "$@"
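A worked example of the splicing above, with a hypothetical starting value: launching the wrapper with EMACSLOADPATH set to /home/user/elisp::/extra yields /home/user/elisp:@wrapperSiteLisp@::/extra inside the wrapped Emacs. The bundled site-lisp directory is inserted just before the first empty entry, and the empty entry itself is preserved because Emacs expands it to its built-in default load-path; EMACSNATIVELOADPATH is handled the same way.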
21
pkgs/build-support/expand-response-params/default.nix
Normal file
@ -0,0 +1,21 @@
{ stdenv }:

stdenv.mkDerivation {
  name = "expand-response-params";
  src = ./expand-response-params.c;
  strictDeps = true;
  enableParallelBuilding = true;
  # Work around "stdenv-darwin-boot-2 is not allowed to refer to path
  # /nix/store/...-expand-response-params.c"
  unpackPhase = ''
    cp "$src" expand-response-params.c
    src=$PWD
  '';
  buildPhase = ''
    NIX_CC_USE_RESPONSE_FILE=0 "$CC" -std=c99 -O3 -o "expand-response-params" expand-response-params.c
  '';
  installPhase = ''
    mkdir -p $prefix/bin
    mv expand-response-params $prefix/bin/
  '';
}
84
pkgs/build-support/expand-response-params/expand-response-params.c
Normal file
@ -0,0 +1,84 @@
#include <assert.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { char *data; size_t len, cap; } String;

void resize(String *s, size_t len) {
  s->len = len;
  if (s->cap < s->len) {
    s->cap = s->len * 2;
    s->data = (char *)realloc(s->data, s->cap);
    assert(s->data);
  }
}

void append(String *s, const char *data, size_t len) {
  resize(s, s->len + len);
  memcpy(s->data + s->len - len, data, len);
}

typedef enum { space = 0, other = 1, backslash = 2, apostrophe = 3, quotation_mark = 4 } CharClass;
typedef enum { outside, unq, unq_esc, sq, sq_esc, dq, dq_esc } State;

// current State -> CharClass -> next State
const State transitions[][5] = {
    [outside] = {outside, unq, unq_esc, sq, dq},
    [unq]     = {outside, unq, unq_esc, sq, dq},
    [unq_esc] = {unq, unq, unq, unq, unq},
    [sq]      = {sq, sq, sq_esc, unq, sq},
    [sq_esc]  = {sq, sq, sq, sq, sq},
    [dq]      = {dq, dq, dq_esc, dq, unq},
    [dq_esc]  = {dq, dq, dq, dq, dq},
};

CharClass charClass(int c) {
  return c == '\\' ? backslash : c == '\'' ? apostrophe : c == '"' ? quotation_mark :
         isspace(c) ? space : other;
}

// expandArg writes NULL-terminated expansions of `arg', a NULL-terminated
// string, to stdout. If arg does not begin with `@' or does not refer to a
// file, it is written as is. Otherwise the contents of the file are
// recursively expanded. On unexpected EOF in malformed response files an
// incomplete final argument is written, even if it is empty, to parse like GCC.
void expandArg(String *arg) {
  FILE *f;
  if (arg->data[0] != '@' || !(f = fopen(&arg->data[1], "r"))) {
    fwrite(arg->data, 1, arg->len, stdout);
    return;
  }

  resize(arg, 0);
  State cur = outside;
  int c;
  do {
    c = fgetc(f);
    State next = transitions[cur][charClass(c)];
    if ((cur == unq && next == outside) || (cur != outside && c == EOF)) {
      append(arg, "", 1);
      expandArg(arg);
      resize(arg, 0);
    } else if (cur == unq_esc || cur == sq_esc || cur == dq_esc ||
               (cur == outside ? next == unq : cur == next)) {
      char s = c;
      append(arg, &s, 1);
    }
    cur = next;
  } while (c != EOF);

  fclose(f);
}

int main(int argc, char **argv) {
  String arg = { 0 };
  while (*++argv) {
    resize(&arg, 0);
    append(&arg, *argv, strlen(*argv) + 1);
    expandArg(&arg);
  }
  free(arg.data);
  return EXIT_SUCCESS;
}
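Usage sketch (not part of this diff; the `expand-response-params` attribute, the runCommand wrapper and the response-file contents are assumptions for illustration — only the binary name comes from the installPhase above):

  # Illustrative only: write a GCC-style response file and expand it at build
  # time. The tool prints each expanded argument NUL-terminated on stdout, so
  # tr is used here only to make the result readable.
  runCommand "expand-response-params-demo"
    { nativeBuildInputs = [ expand-response-params ]; } ''
      printf '%s\n' "-O2 -I/include '-DGREETING=\"hello world\"'" > args.rsp
      expand-response-params @args.rsp | tr '\0' '\n' > $out
    ''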
24
pkgs/build-support/fake-nss/default.nix
Normal file
@ -0,0 +1,24 @@
# Provide a /etc/passwd and /etc/group that contain root and nobody.
# Useful when packaging binaries that insist on using nss to look up
# user names and groups (like nginx).
# It is fine for /bin/sh not to exist; it is provided by another shim.
{ symlinkJoin, writeTextDir, runCommand }:
symlinkJoin {
  name = "fake-nss";
  paths = [
    (writeTextDir "etc/passwd" ''
      root:x:0:0:root user:/var/empty:/bin/sh
      nobody:x:65534:65534:nobody:/var/empty:/bin/sh
    '')
    (writeTextDir "etc/group" ''
      root:x:0:
      nobody:x:65534:
    '')
    (writeTextDir "etc/nsswitch.conf" ''
      hosts: files dns
    '')
    (runCommand "var-empty" { } ''
      mkdir -p $out/var/empty
    '')
  ];
}
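Usage sketch (not part of this diff; the `fakeNss` attribute name and the dockerTools call are assumptions about how this derivation is typically consumed):

  # Illustrative only: give a minimal container image /etc/passwd, /etc/group
  # and /etc/nsswitch.conf entries for root and nobody.
  dockerTools.buildImage {
    name = "fake-nss-demo";
    contents = [ fakeNss ];
  }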
9
pkgs/build-support/fetchbitbucket/default.nix
Normal file
@ -0,0 +1,9 @@
{ fetchzip }:

{ owner, repo, rev, name ? "source"
, ... # For hash agility
}@args: fetchzip ({
  inherit name;
  url = "https://bitbucket.org/${owner}/${repo}/get/${rev}.tar.gz";
  meta.homepage = "https://bitbucket.org/${owner}/${repo}/";
} // removeAttrs args [ "owner" "repo" "rev" ]) // { inherit rev; }
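Usage sketch (not part of this diff; `fetchFromBitbucket` is the name this fetcher is commonly exposed under, and owner, repo, rev and hash below are placeholders):

  # Illustrative only: pin a Bitbucket repository as a source.
  src = fetchFromBitbucket {
    owner = "someowner";
    repo = "somerepo";
    rev = "v1.2.3";
    sha256 = lib.fakeSha256; # replace with the hash reported on the first build
  };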
28
pkgs/build-support/fetchbower/default.nix
Normal file
@ -0,0 +1,28 @@
{ stdenvNoCC, lib, bower2nix, cacert }:
let
  bowerVersion = version:
    let
      components = lib.splitString "#" version;
      hash = lib.last components;
      ver = if builtins.length components == 1 then (cleanName version) else hash;
    in ver;

  cleanName = name: lib.replaceStrings ["/" ":"] ["-" "-"] name;

  fetchbower = name: version: target: outputHash: stdenvNoCC.mkDerivation {
    name = "${cleanName name}-${bowerVersion version}";
    buildCommand = ''
      fetch-bower --quiet --out=$PWD/out "${name}" "${target}" "${version}"
      # In some cases, the result of fetchBower is different depending
      # on the output directory (e.g. if the bower package contains
      # symlinks). So use a local output directory before copying to
      # $out.
      cp -R out $out
    '';
    outputHashMode = "recursive";
    outputHashAlgo = "sha256";
    inherit outputHash;
    nativeBuildInputs = [ bower2nix cacert ];
  };

in fetchbower
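Usage sketch (not part of this diff; in practice these calls are emitted by bower2nix into a generated bower-packages.nix, and the name, version and hash below are placeholders):

  # Illustrative only: fetchbower takes name, version, target and output hash
  # positionally.
  jqueryBower = fetchbower "jquery" "3.6.0" "3.6.0"
    "0000000000000000000000000000000000000000000000000000";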
9
pkgs/build-support/fetchbzr/builder.sh
Normal file
@ -0,0 +1,9 @@
source "$stdenv/setup"
|
||||
|
||||
header "exporting \`$url' (revision $rev) into \`$out'"
|
||||
|
||||
# Perform a lightweight checkout so that we don't end up importing
|
||||
# all the repository's history.
|
||||
BZR_LOG=/dev/null bzr -Ossl.cert_reqs=none export -r "$rev" --format=dir "$out" "$url"
|
||||
|
||||
stopNest
|
||||
15
pkgs/build-support/fetchbzr/default.nix
Normal file
@ -0,0 +1,15 @@
{ stdenvNoCC, breezy }:
{ url, rev, sha256 }:

stdenvNoCC.mkDerivation {
  name = "bzr-export";

  builder = ./builder.sh;
  nativeBuildInputs = [ breezy ];

  outputHashAlgo = "sha256";
  outputHashMode = "recursive";
  outputHash = sha256;

  inherit url rev;
}
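Usage sketch (not part of this diff; the URL, revision and hash are placeholders — only the argument names come from the function header above):

  # Illustrative only: export a fixed revision of a Bazaar branch.
  src = fetchbzr {
    url = "https://code.launchpad.net/someproject/trunk"; # hypothetical branch
    rev = "42";
    sha256 = lib.fakeSha256; # replace with the hash reported on the first build
  };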
Some files were not shown because too many files have changed in this diff.