This is the mail archive of the libc-alpha@sourceware.org mailing list for the glibc project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

[PATCH] malloc: allow a discontinuous main arena to free memory properly


From: he hongjun <he.hongjun@zte.com.cn>

[BZ #15321] When main_arena becomes discontinuous, make it behave like the
other arenas, which can manage multiple heaps, so that freed memory can be
returned to the system.

Signed-off-by: He Hongjun <he.hongjun@zte.com.cn>
Reviewed-by: Ma giang <ma.giang@zte.com.cn>
---
 malloc/Makefile                         |   1 +
 malloc/arena.c                          |  33 ++++++++++
 malloc/malloc.c                         |  61 ++++++++++--------
 malloc/tst-malloc-discontinuous-arena.c | 111 ++++++++++++++++++++++++++++++++
 4 files changed, 178 insertions(+), 28 deletions(-)
 create mode 100644 malloc/tst-malloc-discontinuous-arena.c

diff --git a/malloc/Makefile b/malloc/Makefile
index 3fa395b..8e0de3b 100644
--- a/malloc/Makefile
+++ b/malloc/Makefile
@@ -34,6 +34,7 @@ tests := mallocbug tst-malloc tst-valloc tst-calloc tst-obstack \
 	 tst-interpose-nothread \
 	 tst-interpose-thread \
 	 tst-alloc_buffer \
+	 tst-malloc-discontinuous-arena \
 
 tests-static := \
 	 tst-interpose-static-nothread \
diff --git a/malloc/arena.c b/malloc/arena.c
index dc14fae..ab04467 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -649,6 +649,39 @@ heap_trim (heap_info *heap, size_t pad)
       set_head (top_chunk, new_size | PREV_INUSE);
       /*check_chunk(ar_ptr, top_chunk);*/
     }
+    
+    if(ar_ptr == &main_arena && heap->prev == NULL)
+    {
+      if(top_chunk == (mchunkptr)((char *)(heap + 1) + MALLOC_ALIGNMENT)) {
+        p = *(mchunkptr *)(heap + 1);
+        if(p != NULL) {
+          /* must be fencepost */
+          assert(chunksize_nomask (p) == (0|PREV_INUSE));
+          p = prev_chunk(p);
+          new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ);
+          if(!prev_inuse(p))
+            new_size += prev_size(p);
+		
+          if(new_size >= pad + MINSIZE + pagesz) {
+            ar_ptr->system_mem -= heap->size;
+            delete_heap(heap);
+            if(!prev_inuse(p)) {
+              p = prev_chunk(p);
+              unlink(ar_ptr, p, bck, fwd);
+	        }
+          }
+		
+          top(ar_ptr) = top_chunk = p;
+          set_head(top_chunk, new_size | PREV_INUSE);
+		
+          set_contiguous(ar_ptr);
+          if ((unsigned long)(chunksize(ar_ptr->top)) >= (unsigned long)(mp_.trim_threshold))
+            systrim(mp_.top_pad, ar_ptr);
+		
+          return 0;
+        }
+      }
+    }
 
   /* Uses similar logic for per-thread arenas as the main arena with systrim
      and _int_free by preserving the top pad and rounding down to the nearest
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 54e406b..5b84610 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -2418,7 +2418,7 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
   assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
 
 
-  if (av != &main_arena)
+  if ((av != &main_arena) || (av == &main_arena && !contiguous(av)))
     {
       heap_info *old_heap, *heap;
       size_t old_heap_size;
@@ -2522,35 +2522,40 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
              segregated mmap region.
            */
 
-          /* Cannot merge with old top, so add its size back in */
-          if (contiguous (av))
-            size = ALIGN_UP (size + old_size, pagesize);
-
-          /* If we are relying on mmap as backup, then use larger units */
-          if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE))
-            size = MMAP_AS_MORECORE_SIZE;
-
-          /* Don't try if size wraps around 0 */
-          if ((unsigned long) (size) > (unsigned long) (nb))
+          heap_info *heap;
+		  old_size = chunksize(old_top);
+		  old_top  = top(av);
+		  
+		  heap = new_heap(nb + (MINSIZE + sizeof(*heap) + MALLOC_ALIGNMENT), mp_.top_pad);
+		  if(!heap) {
+		    if(!tried_mmap)
+			  goto try_mmap;
+			else
             {
-              char *mbrk = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
-
-              if (mbrk != MAP_FAILED)
-                {
-                  /* We do not need, and cannot use, another sbrk call to find end */
-                  brk = mbrk;
-                  snd_brk = brk + size;
-
-                  /*
-                     Record that we no longer have a contiguous sbrk region.
-                     After the first time mmap is used as backup, we do not
-                     ever rely on contiguous space since this could incorrectly
-                     bridge regions.
-                   */
-                  set_noncontiguous (av);
-                }
+              __set_errno (ENOMEM);
+			  return 0;
             }
         }
+	  
+	  top(av) = (mchunkptr)((char *)(heap + 1) + MALLOC_ALIGNMENT);
+	  set_head(top(av), (((char*)heap + heap->size) - (char*)top(av)) | PREV_INUSE);
+	  
+	  heap->ar_ptr = av;
+	  heap->prev   = NULL;	  
+	  av->system_mem += heap->size;
+	  
+	  if(old_size != 0) {
+	    old_size -= MINSIZE;
+		set_head(chunk_at_offset(old_top, old_size + 2*SIZE_SZ), 0|PREV_INUSE);
+		*(mchunkptr*)(heap + 1) = chunk_at_offset(old_top, old_size + 2*SIZE_SZ);
+		set_head(old_top, (old_size + 2*SIZE_SZ)|PREV_INUSE);
+		set_foot(old_top, (old_size + 2*SIZE_SZ));
+	  }
+	  else {
+	    *(mchunkptr*)(heap + 1) = NULL;
+	  }
+	  set_noncontiguous(av);
+	}
 
       if (brk != (char *) (MORECORE_FAILURE))
         {
@@ -4382,7 +4387,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
       if (have_fastchunks(av))
 	malloc_consolidate(av);
 
-      if (av == &main_arena) {
+      if ((av == &main_arena ) && contiguous(av)){
 #ifndef MORECORE_CANNOT_TRIM
 	if ((unsigned long)(chunksize(av->top)) >=
 	    (unsigned long)(mp_.trim_threshold))
diff --git a/malloc/tst-malloc-discontinuous-arena.c b/malloc/tst-malloc-discontinuous-arena.c
new file mode 100644
index 0000000..a587f2b
--- /dev/null
+++ b/malloc/tst-malloc-discontinuous-arena.c
@@ -0,0 +1,111 @@
+/* Ensure that a discontinuous main arena can free memory normally.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#include <sys/mman.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <assert.h>
+
+#define MEM_SIZE 1024 * 64
+#define BLK_SIZE 1024 * 32
+#define TOP_PAD_SIZE 1024 * 128
+
+static int do_test()
+{	
+
+	int i = 0;	
+	void **mem = (void**) malloc (sizeof(void*) * MEM_SIZE * 2); 
+	
+	for (; i < MEM_SIZE; i++)
+	{
+	  mem[i] = malloc (BLK_SIZE);
+	  if (mem[i] == NULL) 
+	    {
+	      printf ("malloc(BLK_SIZE) failed.\n");
+	      return 1;
+  	  }
+	}
+
+  /* Place a fence in front of the program break to make the main arena
+     discontinuous.  Make sure we do not cover any region that has already
+     been acquired by sbrk. */
+  long page_sz = 	sysconf(_SC_PAGESIZE);
+  void *fence_addr = mem[i - 1] + TOP_PAD_SIZE + BLK_SIZE + page_sz;
+  fence_addr = (void *) ((long)fence_addr & ~(page_sz - 1)); /* page-align */
+  
+  void *tt = mmap (fence_addr, page_sz, PROT_READ | PROT_WRITE, 
+                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+  if (tt == MAP_FAILED)
+    {
+	    printf ("mmap(fence_addr, page_sz, PROT_READ | PROT_WRITE, \
+              MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) failed.\n");
+	    return 1;    
+    }
+
+  for (; i < MEM_SIZE * 2; i++)
+  {
+    mem[i] = malloc (BLK_SIZE); /* forces sbrk past the fence */
+    if (mem[i] == NULL)
+      {
+        printf ("malloc(BLK_SIZE) failed.\n");
+        return (1);
+      } 
+  }
+
+  /* Get the peak physical memory usage. */
+  struct rusage  rmax;
+  getrusage (RUSAGE_SELF, &rmax);
+
+  while (i--)
+    {
+     free (mem[i]);
+     mem[i] = NULL;	
+    }
+  free (mem);
+
+  /* Get the current physical memory usage.  It should be far less than the
+     peak.  Unfortunately, there is no portable way to get the value we need.
+     Using getrusage in a child seems the best way for the moment... */
+  int result;
+  if (fork ())
+    wait (&result);
+  else
+    {
+      struct rusage  rcurrent;
+      getrusage (RUSAGE_SELF, &rcurrent);
+      printf ("max RSS : %ld kb, RSS after free : %ld kb \n",
+              rmax.ru_maxrss, rcurrent.ru_maxrss);
+
+      if (rcurrent.ru_maxrss << 4  <  rmax.ru_maxrss) /* dropped > 16x? */
+        exit (0);
+
+      exit (1);
+    }
+
+  if (result != 0)
+    return 1;
+
+  return 0;
+}
+
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
-- 
1.8.3.1


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]