This is the mail archive of the
libc-alpha@sourceware.org
mailing list for the glibc project.
[PATCH] malloc: kill mprotect
- From: Joern Engel <joern at purestorage dot com>
- To: "GNU C. Library" <libc-alpha at sourceware dot org>
- Cc: Siddhesh Poyarekar <siddhesh dot poyarekar at gmail dot com>, Joern Engel <joern at purestorage dot org>
- Date: Mon, 25 Jan 2016 16:24:40 -0800
- Subject: [PATCH] malloc: kill mprotect
- Authentication-results: sourceware.org; auth=none
- References: <1453767942-19369-1-git-send-email-joern at purestorage dot com>
From: Joern Engel <joern@purestorage.org>
The only reason to use mprotect is to reduce the "commit charge", i.e.
the amount of memory we are charged for when using overcommit_memory=2
(don't overcommit). We want to overcommit, so there is zero benefit to
us from using mprotect. But the cost is significant.
In a microbenchmark I got a 9x speed improvement from this change. How
much of that remains for real workloads is unclear, but a typical pattern was to
call mprotect for 4k chunks, which took the mmap_sem writeable each
time, so the contention on mmap_sem should be noticeably reduced.
shrink_heap() used to call mmap(PROT_NONE) for our build parameters.
Change that to calling madvise(MADV_DONTNEED) instead, as mmap(PROT_NONE)
would now cause a segfault and mmap(PROT_READ|PROT_WRITE) would be an
expensive no-op.
JIRA: PURE-27597
---
tpc/malloc2.13/arena.ch | 30 ++++--------------------------
1 file changed, 4 insertions(+), 26 deletions(-)
diff --git a/tpc/malloc2.13/arena.ch b/tpc/malloc2.13/arena.ch
index 387476db2a68..3e778f3f96f7 100644
--- a/tpc/malloc2.13/arena.ch
+++ b/tpc/malloc2.13/arena.ch
@@ -719,7 +719,7 @@ new_heap(size, top_pad) size_t size, top_pad;
anyway). */
p2 = MAP_FAILED;
if(aligned_heap_area) {
- p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
+ p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_NORESERVE);
aligned_heap_area = NULL;
if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
@@ -728,7 +728,7 @@ new_heap(size, top_pad) size_t size, top_pad;
}
}
if(p2 == MAP_FAILED) {
- p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE,
+ p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_NORESERVE);
if(p1 != MAP_FAILED) {
p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
@@ -742,7 +742,7 @@ new_heap(size, top_pad) size_t size, top_pad;
} else {
/* Try to take the chance that an allocation of only HEAP_MAX_SIZE
is already aligned. */
- p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
+ p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_NORESERVE);
if(p2 == MAP_FAILED)
return 0;
if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
@@ -751,10 +751,6 @@ new_heap(size, top_pad) size_t size, top_pad;
}
}
}
- if(mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
- munmap(p2, HEAP_MAX_SIZE);
- return 0;
- }
h = (heap_info *)p2;
h->size = size;
h->mprotect_size = size;
@@ -780,10 +776,6 @@ grow_heap(h, diff) heap_info *h; long diff;
if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
return -1;
if((unsigned long) new_size > h->mprotect_size) {
- if (mprotect((char *)h + h->mprotect_size,
- (unsigned long) new_size - h->mprotect_size,
- PROT_READ|PROT_WRITE) != 0)
- return -2;
h->mprotect_size = new_size;
}
@@ -807,21 +799,7 @@ shrink_heap(h, diff) heap_info *h; long diff;
return -1;
/* Try to re-map the extra heap space freshly to save memory, and
make it inaccessible. */
-#ifdef _LIBC
- if (__builtin_expect (__libc_enable_secure, 0))
-#else
- if (1)
-#endif
- {
- if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
- MAP_PRIVATE|MAP_FIXED) == (char *) MAP_FAILED)
- return -2;
- h->mprotect_size = new_size;
- }
-#ifdef _LIBC
- else
- madvise ((char *)h + new_size, diff, MADV_DONTNEED);
-#endif
+ madvise ((char *)h + new_size, diff, MADV_DONTNEED);
/*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
h->size = new_size;
--
2.7.0.rc3