uboot: (firmwareOdroidC2/C4) don't invoke patch tool, use patches = [] instead

https://github.com/NixOS/nixpkgs/blob/master/pkgs/stdenv/generic/setup.sh#L948
The generic stdenv setup hook (linked above) applies everything in the `patches` list automatically during patchPhase, so invoking the patch tool by hand is unnecessary.

Signed-off-by: Anton Arapov <anton@deadbeef.mx>
This commit is contained in:
Anton Arapov 2021-04-03 12:58:10 +02:00 committed by Alan Daniels
commit 56de2bcd43
30691 changed files with 3076956 additions and 0 deletions

View file

@ -0,0 +1,127 @@
From 7d58e303159b2fb343af9a1ec4512238efa147c7 Mon Sep 17 00:00:00 2001
From: Eelco Dolstra <edolstra@gmail.com>
Date: Mon, 6 Aug 2018 17:15:04 +0200
Subject: [PATCH] TransferManager: Allow setting a content-encoding for S3 uploads
--- a/aws-cpp-sdk-transfer/include/aws/transfer/TransferHandle.h
+++ b/aws-cpp-sdk-transfer/include/aws/transfer/TransferHandle.h
@@ -297,6 +297,14 @@ namespace Aws
* Content type of the object being transferred
*/
inline void SetContentType(const Aws::String& value) { std::lock_guard<std::mutex> locker(m_getterSetterLock); m_contentType = value; }
+ /**
+ * Content encoding of the object being transferred
+ */
+ inline const Aws::String GetContentEncoding() const { std::lock_guard<std::mutex> locker(m_getterSetterLock); return m_contentEncoding; }
+ /**
+ * Content type of the object being transferred
+ */
+ inline void SetContentEncoding(const Aws::String& value) { std::lock_guard<std::mutex> locker(m_getterSetterLock); m_contentEncoding = value; }
/**
* In case of an upload, this is the metadata that was placed on the object when it was uploaded.
* In the case of a download, this is the object metadata from the GetObject operation.
@@ -383,6 +391,7 @@ namespace Aws
Aws::String m_key;
Aws::String m_fileName;
Aws::String m_contentType;
+ Aws::String m_contentEncoding;
Aws::String m_versionId;
Aws::Map<Aws::String, Aws::String> m_metadata;
TransferStatus m_status;
--- a/aws-cpp-sdk-transfer/include/aws/transfer/TransferManager.h
+++ b/aws-cpp-sdk-transfer/include/aws/transfer/TransferManager.h
@@ -154,7 +154,8 @@ namespace Aws
const Aws::String& keyName,
const Aws::String& contentType,
const Aws::Map<Aws::String, Aws::String>& metadata,
- const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr);
+ const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr,
+ const Aws::String& contentEncoding = "");
/**
* Downloads the contents of bucketName/keyName in S3 to the file specified by writeToFile. This will perform a GetObject operation.
@@ -246,7 +247,8 @@ namespace Aws
const Aws::Map<Aws::String,
Aws::String>& metadata,
const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context,
- const Aws::String& fileName = "");
+ const Aws::String& fileName = "",
+ const Aws::String& contentEncoding = "");
/**
* Submits the actual task to task schecduler
@@ -262,7 +264,8 @@ namespace Aws
const Aws::String& keyName,
const Aws::String& contentType,
const Aws::Map<Aws::String, Aws::String>& metadata,
- const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context);
+ const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context,
+ const Aws::String& contentEncoding);
/**
* Uploads the contents of file, to bucketName/keyName in S3. contentType and metadata will be added to the object. If the object is larger than the configured bufferSize,
--- a/aws-cpp-sdk-transfer/source/transfer/TransferManager.cpp
+++ b/aws-cpp-sdk-transfer/source/transfer/TransferManager.cpp
@@ -87,9 +87,10 @@ namespace Aws
const Aws::String& bucketName,
const Aws::String& keyName, const Aws::String& contentType,
const Aws::Map<Aws::String, Aws::String>& metadata,
- const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context)
+ const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context,
+ const Aws::String& contentEncoding)
{
- return this->DoUploadFile(fileStream, bucketName, keyName, contentType, metadata, context);
+ return this->DoUploadFile(fileStream, bucketName, keyName, contentType, metadata, context, contentEncoding);
}
std::shared_ptr<TransferHandle> TransferManager::DownloadFile(const Aws::String& bucketName,
@@ -286,6 +287,9 @@ namespace Aws
createMultipartRequest.WithKey(handle->GetKey());
createMultipartRequest.WithMetadata(handle->GetMetadata());
+ if (handle->GetContentEncoding() != "")
+ createMultipartRequest.WithContentEncoding(handle->GetContentEncoding());
+
auto createMultipartResponse = m_transferConfig.s3Client->CreateMultipartUpload(createMultipartRequest);
if (createMultipartResponse.IsSuccess())
{
@@ -441,6 +445,9 @@ namespace Aws
putObjectRequest.SetContentType(handle->GetContentType());
+ if (handle->GetContentEncoding() != "")
+ putObjectRequest.SetContentEncoding(handle->GetContentEncoding());
+
auto buffer = m_bufferManager.Acquire();
auto lengthToWrite = (std::min)(m_transferConfig.bufferSize, handle->GetBytesTotalSize());
@@ -1140,12 +1147,15 @@ namespace Aws
const Aws::String& contentType,
const Aws::Map<Aws::String, Aws::String>& metadata,
const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context,
- const Aws::String& fileName)
+ const Aws::String& fileName,
+ const Aws::String& contentEncoding)
{
auto handle = Aws::MakeShared<TransferHandle>(CLASS_TAG, bucketName, keyName, 0, fileName);
handle->SetContentType(contentType);
handle->SetMetadata(metadata);
handle->SetContext(context);
+ if (contentEncoding != "")
+ handle->SetContentEncoding(contentEncoding);
if (!fileStream->good())
{
@@ -1213,9 +1223,10 @@ namespace Aws
const Aws::String& keyName,
const Aws::String& contentType,
const Aws::Map<Aws::String, Aws::String>& metadata,
- const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context)
+ const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context,
+ const Aws::String& contentEncoding)
{
- auto handle = CreateUploadFileHandle(fileStream.get(), bucketName, keyName, contentType, metadata, context);
+ auto handle = CreateUploadFileHandle(fileStream.get(), bucketName, keyName, contentType, metadata, context, "", contentEncoding);
return SubmitUpload(handle, fileStream);
}

View file

@ -0,0 +1,45 @@
diff --git a/pthread_stop_world.c b/pthread_stop_world.c
index 4b2c429..1fb4c52 100644
--- a/pthread_stop_world.c
+++ b/pthread_stop_world.c
@@ -673,6 +673,8 @@ GC_INNER void GC_push_all_stacks(void)
struct GC_traced_stack_sect_s *traced_stack_sect;
pthread_t self = pthread_self();
word total_size = 0;
+ size_t stack_limit;
+ pthread_attr_t pattr;
if (!EXPECT(GC_thr_initialized, TRUE))
GC_thr_init();
@@ -722,6 +724,31 @@ GC_INNER void GC_push_all_stacks(void)
hi = p->altstack + p->altstack_size;
/* FIXME: Need to scan the normal stack too, but how ? */
/* FIXME: Assume stack grows down */
+ } else {
+ if (pthread_getattr_np(p->id, &pattr)) {
+ ABORT("GC_push_all_stacks: pthread_getattr_np failed!");
+ }
+ if (pthread_attr_getstacksize(&pattr, &stack_limit)) {
+ ABORT("GC_push_all_stacks: pthread_attr_getstacksize failed!");
+ }
+ if (pthread_attr_destroy(&pattr)) {
+ ABORT("GC_push_all_stacks: pthread_attr_destroy failed!");
+ }
+ // When a thread goes into a coroutine, we lose its original sp until
+ // control flow returns to the thread.
+ // While in the coroutine, the sp points outside the thread stack,
+ // so we can detect this and push the entire thread stack instead,
+ // as an approximation.
+ // We assume that the coroutine has similarly added its entire stack.
+ // This could be made accurate by cooperating with the application
+ // via new functions and/or callbacks.
+ #ifndef STACK_GROWS_UP
+ if (lo >= hi || lo < hi - stack_limit) { // sp outside stack
+ lo = hi - stack_limit;
+ }
+ #else
+ #error "STACK_GROWS_UP not supported in boost_coroutine2 (as of june 2021), so we don't support it in Nix."
+ #endif
}
GC_push_all_stack_sections(lo, hi, traced_stack_sect);
# ifdef STACK_GROWS_UP

View file

@ -0,0 +1,36 @@
From 3884f7a69a57d8ecfcbcaae476ec2ff53ffbd549 Mon Sep 17 00:00:00 2001
From: Robert Hensing <robert@roberthensing.nl>
Date: Thu, 11 Nov 2021 11:03:21 +0100
Subject: [PATCH] Install nlohmann_json headers
These headers are included by the libexpr, libfetchers, libstore
and libutil headers.
Considering that these are vendored sources, Nix should expose them,
as it is not a good idea for reverse dependencies to rely on a
potentially different source that can go out of sync.
---
Makefile | 1 +
src/nlohmann/local.mk | 2 ++
2 files changed, 3 insertions(+)
create mode 100644 src/nlohmann/local.mk
diff --git a/Makefile b/Makefile
index 5040d288485..e6ce50cbdb7 100644
--- a/Makefile
+++ b/Makefile
@@ -10,6 +10,7 @@ makefiles = \
src/libexpr/local.mk \
src/libcmd/local.mk \
src/nix/local.mk \
+ src/nlohmann/local.mk \
src/resolve-system-dependencies/local.mk \
scripts/local.mk \
misc/bash/local.mk \
diff --git a/src/nlohmann/local.mk b/src/nlohmann/local.mk
new file mode 100644
index 00000000000..63c427e000e
--- /dev/null
+++ b/src/nlohmann/local.mk
@@ -0,0 +1,2 @@
+$(foreach i, $(wildcard src/nlohmann/*.hpp), \
+ $(eval $(call install-file-in, $(i), $(includedir)/nlohmann, 0644)))