From: "Nguyễn Thái Ngọc Duy" <pclouds@gmail.com>
To: git@vger.kernel.org
Cc: "Junio C Hamano" <gitster@pobox.com>,
"Nguyễn Thái Ngọc Duy" <pclouds@gmail.com>
Subject: [PATCH 2/7] Add more large blob test cases
Date: Wed, 7 Mar 2012 17:54:16 +0700 [thread overview]
Message-ID: <1331117661-19378-3-git-send-email-pclouds@gmail.com> (raw)
In-Reply-To: <1331117661-19378-1-git-send-email-pclouds@gmail.com>
New test cases list commands that should work when memory is
limited. All memory allocation functions (*) learn to reject any
allocation larger than $GIT_ALLOC_LIMIT if set.
(*) Not exactly all. Some places do not use x* functions, but call
malloc/calloc directly, notably diff-delta. These code paths should
never be run on large blobs.
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
---
t/t1050-large.sh | 38 ++++++++++++++++++++++++++++++++++++--
wrapper.c | 27 ++++++++++++++++++++++++---
2 files changed, 60 insertions(+), 5 deletions(-)
diff --git a/t/t1050-large.sh b/t/t1050-large.sh
index 29d6024..ded66b3 100755
--- a/t/t1050-large.sh
+++ b/t/t1050-large.sh
@@ -6,11 +6,15 @@ test_description='adding and checking out large blobs'
. ./test-lib.sh
test_expect_success setup '
- git config core.bigfilethreshold 200k &&
+ # clone does not allow us to pass core.bigfilethreshold to
+ # new repos, so set core.bigfilethreshold globally
+ git config --global core.bigfilethreshold 200k &&
echo X | dd of=large1 bs=1k seek=2000 &&
echo X | dd of=large2 bs=1k seek=2000 &&
echo X | dd of=large3 bs=1k seek=2000 &&
- echo Y | dd of=huge bs=1k seek=2500
+ echo Y | dd of=huge bs=1k seek=2500 &&
+ GIT_ALLOC_LIMIT=1500 &&
+ export GIT_ALLOC_LIMIT
'
test_expect_success 'add a large file or two' '
@@ -100,4 +104,34 @@ test_expect_success 'packsize limit' '
)
'
+test_expect_success 'diff --raw' '
+ git commit -q -m initial &&
+ echo modified >>large1 &&
+ git add large1 &&
+ git commit -q -m modified &&
+ git diff --raw HEAD^
+'
+
+test_expect_success 'hash-object' '
+ git hash-object large1
+'
+
+test_expect_failure 'cat-file a large file' '
+ git cat-file blob :large1 >/dev/null
+'
+
+test_expect_failure 'cat-file a large file from a tag' '
+ git tag -m largefile largefiletag :large1 &&
+ git cat-file blob largefiletag >/dev/null
+'
+
+test_expect_failure 'git-show a large file' '
+ git show :large1 >/dev/null
+
+'
+
+test_expect_failure 'repack' '
+ git repack -ad
+'
+
test_done
diff --git a/wrapper.c b/wrapper.c
index 85f09df..6ccd059 100644
--- a/wrapper.c
+++ b/wrapper.c
@@ -9,6 +9,18 @@ static void do_nothing(size_t size)
static void (*try_to_free_routine)(size_t size) = do_nothing;
+static void memory_limit_check(size_t size)
+{
+ static int limit = -1;
+ if (limit == -1) {
+ const char *env = getenv("GIT_ALLOC_LIMIT");
+ limit = env ? atoi(env) * 1024 : 0;
+ }
+ if (limit && size > limit)
+ die("attempting to allocate %"PRIuMAX" over limit %d",
+ (intmax_t)size, limit);
+}
+
try_to_free_t set_try_to_free_routine(try_to_free_t routine)
{
try_to_free_t old = try_to_free_routine;
@@ -32,7 +44,10 @@ char *xstrdup(const char *str)
void *xmalloc(size_t size)
{
- void *ret = malloc(size);
+ void *ret;
+
+ memory_limit_check(size);
+ ret = malloc(size);
if (!ret && !size)
ret = malloc(1);
if (!ret) {
@@ -79,7 +94,10 @@ char *xstrndup(const char *str, size_t len)
void *xrealloc(void *ptr, size_t size)
{
- void *ret = realloc(ptr, size);
+ void *ret;
+
+ memory_limit_check(size);
+ ret = realloc(ptr, size);
if (!ret && !size)
ret = realloc(ptr, 1);
if (!ret) {
@@ -95,7 +113,10 @@ void *xrealloc(void *ptr, size_t size)
void *xcalloc(size_t nmemb, size_t size)
{
- void *ret = calloc(nmemb, size);
+ void *ret;
+
+ memory_limit_check(size * nmemb);
+ ret = calloc(nmemb, size);
if (!ret && (!nmemb || !size))
ret = calloc(1, 1);
if (!ret) {
--
1.7.8.36.g69ee2
next prev parent reply other threads:[~2012-03-07 10:53 UTC|newest]
Thread overview: 13+ messages / expand[flat|nested] mbox.gz Atom feed top
2012-03-06 7:15 What's cooking in git.git (Mar 2012, #03; Mon, 5) Junio C Hamano
2012-03-06 9:40 ` Nguyen Thai Ngoc Duy
2012-03-06 18:23 ` Junio C Hamano
2012-03-07 10:54 ` [PATCH 0/7] nd/stream-more updates Nguyễn Thái Ngọc Duy
2012-03-07 10:54 ` [PATCH 1/7] streaming: make streaming-write-entry to be more reusable Nguyễn Thái Ngọc Duy
2012-03-07 10:54 ` Nguyễn Thái Ngọc Duy [this message]
2012-03-07 10:54 ` [PATCH 3/7] cat-file: use streaming API to print blobs Nguyễn Thái Ngọc Duy
2012-03-07 10:54 ` [PATCH 4/7] parse_object: avoid putting whole blob in core Nguyễn Thái Ngọc Duy
2012-03-07 10:54 ` [PATCH 5/7] show: use streaming API for showing blobs Nguyễn Thái Ngọc Duy
2012-03-07 10:54 ` [PATCH 6/7] fsck: use streaming API for writing lost-found blobs Nguyễn Thái Ngọc Duy
2012-03-07 10:54 ` [PATCH 7/7] update-server-info: respect core.bigfilethreshold Nguyễn Thái Ngọc Duy
2012-03-07 17:13 ` [PATCH 0/7] nd/stream-more updates Junio C Hamano
2012-03-07 6:28 ` In preparation for 1.7.10-rc0 Junio C Hamano
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
List information: http://vger.kernel.org/majordomo-info.html
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1331117661-19378-3-git-send-email-pclouds@gmail.com \
--to=pclouds@gmail.com \
--cc=git@vger.kernel.org \
--cc=gitster@pobox.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
Code repositories for project(s) associated with this public inbox
https://80x24.org/mirrors/git.git
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).