[PATCH] malloc: push down the memset for huge pages
- From: Joern Engel <joern at purestorage dot com>
- To: "GNU C. Library" <libc-alpha at sourceware dot org>
- Cc: Siddhesh Poyarekar <siddhesh dot poyarekar at gmail dot com>, Joern Engel <joern at purestorage dot org>
- Date: Mon, 25 Jan 2016 16:24:43 -0800
- Subject: [PATCH] malloc: push down the memset for huge pages
- References: <1453767942-19369-1-git-send-email-joern at purestorage dot com>
From: Joern Engel <joern@purestorage.org>
mmap tends to return memory that is not HEAP_MAX_SIZE-aligned, so malloc
maps twice the required size and trims off the unaligned parts. The
huge-page path memsets the entire mapping before the trim, clearing twice
as much memory as necessary. Fix that by pushing the memset down into
new_heap(), after the trim, so only the retained heap gets cleared.
JIRA: PURE-27597
---
tpc/malloc2.13/arena.ch | 14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)
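
Notes (not part of the commit): below is a minimal standalone sketch of the
over-map-and-trim alignment trick the commit message describes, with the
clearing pushed down so it touches only the block we keep. The names here
(alloc_aligned, ALIGN) are made up for illustration and do not appear in
arena.ch; also, fresh anonymous mappings are already zero-filled by the
kernel, so the memset below only marks where the clearing moves to.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define ALIGN (1UL << 20)	/* stand-in for HEAP_MAX_SIZE */

/* Map 2*align bytes, keep the aligned middle, clear only what we keep. */
static void *alloc_aligned(size_t align)
{
	char *p1, *p2;

	p1 = mmap(NULL, align << 1, PROT_READ | PROT_WRITE,
		  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
	if (p1 == MAP_FAILED)
		return NULL;

	/* The doubled mapping must contain an aligned block of the
	   requested size; round up to find it. */
	p2 = (char *)(((uintptr_t)p1 + (align - 1)) & ~(align - 1));

	/* Trim the unaligned head and tail. */
	if (p2 > p1)
		munmap(p1, p2 - p1);
	munmap(p2 + align, align - (p2 - p1));

	/* Clearing happens after the trim, so it touches align bytes
	   instead of the 2*align bytes we originally mapped. */
	memset(p2, 0, align);
	return p2;
}

int main(void)
{
	void *p = alloc_aligned(ALIGN);
	printf("aligned block at %p\n", p);
	return 0;
}

In the pre-patch code the equivalent of the memset sat right after the
first mmap and covered the full doubled mapping; moving it below the
munmap calls halves the clearing work, which is the point of the patch.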
diff --git a/tpc/malloc2.13/arena.ch b/tpc/malloc2.13/arena.ch
index 372dc7ced2b9..09bdf0fd26b5 100644
--- a/tpc/malloc2.13/arena.ch
+++ b/tpc/malloc2.13/arena.ch
@@ -687,7 +687,7 @@ dump_heap(heap) heap_info *heap;
    multiple threads, but only one will succeed.  */
 static char *aligned_heap_area;
 
-static void *mmap_for_heap(void *addr, size_t length)
+static void *mmap_for_heap(void *addr, size_t length, int *must_clear)
 {
 	int prot = PROT_READ | PROT_WRITE;
 	int flags = MAP_PRIVATE;
@@ -695,9 +695,10 @@ static void *mmap_for_heap(void *addr, size_t length)
 
 	ret = MMAP(addr, length, prot, flags | MAP_HUGETLB);
 	if (ret != MAP_FAILED) {
-		memset(ret, 0, length);
+		*must_clear = 1;
 		return ret;
 	}
+	*must_clear = 0;
 	return MMAP(addr, length, prot, flags | MAP_NORESERVE);
 }
 
@@ -709,6 +710,7 @@ static heap_info *new_heap(size_t size, size_t top_pad)
 	char *p1, *p2;
 	unsigned long ul;
 	heap_info *h;
+	int must_clear;
 
 	if (size + top_pad < HEAP_MIN_SIZE)
 		size = HEAP_MIN_SIZE;
@@ -726,7 +728,7 @@ static heap_info *new_heap(size_t size, size_t top_pad)
 	   anyway). */
 	p2 = MAP_FAILED;
 	if (aligned_heap_area) {
-		p2 = mmap_for_heap(aligned_heap_area, HEAP_MAX_SIZE);
+		p2 = mmap_for_heap(aligned_heap_area, HEAP_MAX_SIZE, &must_clear);
 		aligned_heap_area = NULL;
 		if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE - 1))) {
 			munmap(p2, HEAP_MAX_SIZE);
@@ -734,7 +736,7 @@ static heap_info *new_heap(size_t size, size_t top_pad)
 		}
 	}
 	if (p2 == MAP_FAILED) {
-		p1 = mmap_for_heap(0, HEAP_MAX_SIZE << 1);
+		p1 = mmap_for_heap(0, HEAP_MAX_SIZE << 1, &must_clear);
 		if (p1 != MAP_FAILED) {
 			p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE - 1))
 				      & ~(HEAP_MAX_SIZE - 1));
@@ -747,7 +749,7 @@ static heap_info *new_heap(size_t size, size_t top_pad)
 		} else {
 			/* Try to take the chance that an allocation of only HEAP_MAX_SIZE
 			   is already aligned. */
-			p2 = mmap_for_heap(0, HEAP_MAX_SIZE);
+			p2 = mmap_for_heap(0, HEAP_MAX_SIZE, &must_clear);
 			if (p2 == MAP_FAILED)
 				return 0;
 			if ((unsigned long)p2 & (HEAP_MAX_SIZE - 1)) {
@@ -756,6 +758,8 @@ static heap_info *new_heap(size_t size, size_t top_pad)
 			}
 		}
 	}
+	if (must_clear)
+		memset(p2, 0, HEAP_MAX_SIZE);
 	h = (heap_info *) p2;
 	h->size = size;
 	h->mprotect_size = size;
--
2.7.0.rc3