This is the mail archive of the libc-hacker@sources.redhat.com mailing list for the glibc project.
Note that libc-hacker is a closed list. You may look at the archives of this list, but subscription and posting are not open.
Hi!

For details see:
http://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=118574

If mmap addresses grow down (e.g. exec-shield has this property: sbrk
grows bottom up and mmap areas grow top down), but malloc is more or
less the only user of mmap for some time, new_heap will get already
HEAP_MAX_SIZE-aligned chunks. Say:
0xb7500000
0xb7300000
0xb7100000
0xb6f00000
and it always munmaps the second 1MB. This leads to bad fragmentation.
The following (still untested) patch shouldn't IMHO be very costly and
should fix up this situation. I think it doesn't need any locking;
worst case we'll try to mmap the same address (without MAP_FIXED) more
than once.

2004-03-18  Jakub Jelinek  <jakub@redhat.com>

	* malloc/arena.c (aligned_heap_area): New variable.
	(new_heap): If aligned_heap_area != NULL, attempt to use that
	first.  If HEAP_MAX_SIZE << 1 area is already HEAP_MAX_SIZE
	bytes aligned, remember the second half in aligned_heap_area.
	(delete_heap): Clear aligned_heap_area if deleting the area
	right before aligned_heap_area.

--- libc/malloc/arena.c.jj	2003-07-15 17:04:36.000000000 +0200
+++ libc/malloc/arena.c	2004-03-18 10:57:21.034171230 +0100
@@ -550,6 +550,16 @@ dump_heap(heap) heap_info *heap;
 
 #endif /* MALLOC_DEBUG > 1 */
 
+/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
+   addresses as opposed to increasing, new_heap would badly fragment the
+   address space.  In that case remember the second HEAP_MAX_SIZE part
+   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
+   call (if it is already aligned) and try to reuse it next time.  We need
+   no locking for it, as kernel ensures the atomicity for us - worst case
+   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
+   multiple threads, but only one will succeed.  */
+static char *aligned_heap_area;
+
 /* Create a new heap.  size is automatically rounded up to a multiple
    of the page size. */
 
@@ -580,21 +590,38 @@ new_heap(size, top_pad) size_t size, top
      No swap space needs to be reserved for the following large
      mapping (on Linux, this is the case for all non-writable mappings
      anyway). */
-  p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
-  if(p1 != MAP_FAILED) {
-    p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1)) & ~(HEAP_MAX_SIZE-1));
-    ul = p2 - p1;
-    munmap(p1, ul);
-    munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
-  } else {
-    /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
-       is already aligned. */
-    p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
-    if(p2 == MAP_FAILED)
-      return 0;
-    if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
+  p2 = MAP_FAILED;
+  if(aligned_heap_area) {
+    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
+		      MAP_PRIVATE|MAP_NORESERVE);
+    aligned_heap_area = NULL;
+    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
       munmap(p2, HEAP_MAX_SIZE);
-      return 0;
+      p2 = MAP_FAILED;
+    }
+  }
+  if(p2 == MAP_FAILED) {
+    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE,
+		      MAP_PRIVATE|MAP_NORESERVE);
+    if(p1 != MAP_FAILED) {
+      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
+		    & ~(HEAP_MAX_SIZE-1));
+      ul = p2 - p1;
+      if (ul)
+	munmap(p1, ul);
+      else
+	aligned_heap_area = p2 + HEAP_MAX_SIZE;
+      munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
+    } else {
+      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
+	 is already aligned. */
+      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
+      if(p2 == MAP_FAILED)
+	return 0;
+      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
+	munmap(p2, HEAP_MAX_SIZE);
+	return 0;
+      }
    }
  }
  if(mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
@@ -644,7 +671,12 @@ grow_heap(h, diff) heap_info *h; long di
 
 /* Delete a heap. */
 
-#define delete_heap(heap) munmap((char*)(heap), HEAP_MAX_SIZE)
+#define delete_heap(heap) \
+  do { \
+    if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area) \
+      aligned_heap_area = NULL; \
+    munmap((char*)(heap), HEAP_MAX_SIZE); \
+  } while (0)
 
 static int
 internal_function

	Jakub
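[Editor's note: to make the scenario above concrete, here is a minimal
standalone sketch, not part of the original mail or of the patch.  It
performs the same mmap/trim sequence new_heap used before the patch and
prints the addresses it gets back.  It assumes a Linux kernel that hands
out mmap addresses top-down (as under exec-shield), substitutes plain
mmap with MAP_ANONYMOUS for arena.c's MMAP macro, and hard-codes
HEAP_MAX_SIZE to the 1MB used on 32-bit targets of that era.]

#include <stdio.h>
#include <sys/mman.h>

#define HEAP_MAX_SIZE (1024UL * 1024UL)

int
main (void)
{
  int i;
  for (i = 0; i < 4; i++) {
    char *p1 = (char *) mmap (0, HEAP_MAX_SIZE << 1, PROT_NONE,
			      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
			      -1, 0);
    if (p1 == MAP_FAILED) {
      perror ("mmap");
      return 1;
    }
    /* Same trimming new_heap does: keep one HEAP_MAX_SIZE-aligned part
       and unmap the rest.  */
    char *p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
			 & ~(HEAP_MAX_SIZE - 1));
    unsigned long ul = p2 - p1;
    printf ("p1 = %p (%s)\n", p1,
	    ul ? "unaligned, both ends trimmed"
	       : "already aligned, second 1MB munmapped");
    if (ul)
      munmap (p1, ul);
    munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
  }
  return 0;
}

[On such a kernel every p1 comes back already aligned, so each iteration
munmaps the upper 1MB and leaves a 1MB hole below the previous heap;
this is the fragmentation the patch avoids by remembering that upper
half in aligned_heap_area and passing it as the hint to the next mmap.]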