


[PATCH] malloc: Lindent new_heap


From: Joern Engel <joern@purestorage.org>

Clean up the formatting before touching the function some more:
reindent to kernel style and drop the old __STD_C/internal_function
boilerplate.

JIRA: PURE-27597
---
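(For reference: "Lindent" is the Linux kernel's scripts/Lindent
wrapper, which runs GNU indent with the kernel's settings, roughly
"indent -npro -kr -i8 -ts8 -sob -l80 -ss -ncs -cp1", so the result
below uses tabs and kernel-style braces.)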
 tpc/malloc2.13/arena.ch | 121 +++++++++++++++++++++++-------------------------
 1 file changed, 57 insertions(+), 64 deletions(-)

diff --git a/tpc/malloc2.13/arena.ch b/tpc/malloc2.13/arena.ch
index fae6c2f7ee4c..372dc7ced2b9 100644
--- a/tpc/malloc2.13/arena.ch
+++ b/tpc/malloc2.13/arena.ch
@@ -703,71 +703,64 @@ static void *mmap_for_heap(void *addr, size_t length)
 
 /* Create a new heap.  size is automatically rounded up to a multiple
    of the page size. */
-
-static heap_info *
-internal_function
-#if __STD_C
-new_heap(size_t size, size_t top_pad)
-#else
-new_heap(size, top_pad) size_t size, top_pad;
-#endif
+static heap_info *new_heap(size_t size, size_t top_pad)
 {
-  size_t page_mask = malloc_getpagesize - 1;
-  char *p1, *p2;
-  unsigned long ul;
-  heap_info *h;
-
-  if(size+top_pad < HEAP_MIN_SIZE)
-    size = HEAP_MIN_SIZE;
-  else if(size+top_pad <= HEAP_MAX_SIZE)
-    size += top_pad;
-  else if(size > HEAP_MAX_SIZE)
-    return 0;
-  else
-    size = HEAP_MAX_SIZE;
-  size = (size + page_mask) & ~page_mask;
-
-  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
-     No swap space needs to be reserved for the following large
-     mapping (on Linux, this is the case for all non-writable mappings
-     anyway). */
-  p2 = MAP_FAILED;
-  if(aligned_heap_area) {
-    p2 = mmap_for_heap(aligned_heap_area, HEAP_MAX_SIZE);
-    aligned_heap_area = NULL;
-    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
-      munmap(p2, HEAP_MAX_SIZE);
-      p2 = MAP_FAILED;
-    }
-  }
-  if(p2 == MAP_FAILED) {
-    p1 = mmap_for_heap(0, HEAP_MAX_SIZE<<1);
-    if(p1 != MAP_FAILED) {
-      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
-		    & ~(HEAP_MAX_SIZE-1));
-      ul = p2 - p1;
-      if (ul)
-	munmap(p1, ul);
-      else
-	aligned_heap_area = p2 + HEAP_MAX_SIZE;
-      munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
-    } else {
-      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
-	 is already aligned. */
-      p2 = mmap_for_heap(0, HEAP_MAX_SIZE);
-      if(p2 == MAP_FAILED)
-	return 0;
-      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
-	munmap(p2, HEAP_MAX_SIZE);
-	return 0;
-      }
-    }
-  }
-  h = (heap_info *)p2;
-  h->size = size;
-  h->mprotect_size = size;
-  THREAD_STAT(stat_n_heaps++);
-  return h;
+	size_t page_mask = malloc_getpagesize - 1;
+	char *p1, *p2;
+	unsigned long ul;
+	heap_info *h;
+
+	if (size + top_pad < HEAP_MIN_SIZE)
+		size = HEAP_MIN_SIZE;
+	else if (size + top_pad <= HEAP_MAX_SIZE)
+		size += top_pad;
+	else if (size > HEAP_MAX_SIZE)
+		return 0;
+	else
+		size = HEAP_MAX_SIZE;
+	size = (size + page_mask) & ~page_mask;
+
+	/* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
+	   No swap space needs to be reserved for the following large
+	   mapping (on Linux, this is the case for all non-writable mappings
+	   anyway). */
+	p2 = MAP_FAILED;
+	if (aligned_heap_area) {
+		p2 = mmap_for_heap(aligned_heap_area, HEAP_MAX_SIZE);
+		aligned_heap_area = NULL;
+		if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE - 1))) {
+			munmap(p2, HEAP_MAX_SIZE);
+			p2 = MAP_FAILED;
+		}
+	}
+	if (p2 == MAP_FAILED) {
+		p1 = mmap_for_heap(0, HEAP_MAX_SIZE << 1);
+		if (p1 != MAP_FAILED) {
+			p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE - 1))
+				      & ~(HEAP_MAX_SIZE - 1));
+			ul = p2 - p1;
+			if (ul)
+				munmap(p1, ul);
+			else
+				aligned_heap_area = p2 + HEAP_MAX_SIZE;
+			munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
+		} else {
+			/* Try to take the chance that an allocation of only HEAP_MAX_SIZE
+			   is already aligned. */
+			p2 = mmap_for_heap(0, HEAP_MAX_SIZE);
+			if (p2 == MAP_FAILED)
+				return 0;
+			if ((unsigned long)p2 & (HEAP_MAX_SIZE - 1)) {
+				munmap(p2, HEAP_MAX_SIZE);
+				return 0;
+			}
+		}
+	}
+	h = (heap_info *) p2;
+	h->size = size;
+	h->mprotect_size = size;
+	THREAD_STAT(stat_n_heaps++);
+	return h;
 }
 
 /* Grow a heap.  size is automatically rounded up to a
-- 
2.7.0.rc3
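
A side note on the logic this patch reindents: when no cached hint
(aligned_heap_area) is available, new_heap() obtains a
HEAP_MAX_SIZE-aligned region by mapping twice the required size and
returning the slack on both sides to the kernel.  Below is a minimal
standalone sketch of that trick, not code from the patch: the name
aligned_mmap and the exact mmap flags are illustrative
(mmap_for_heap is defined elsewhere in arena.ch), and the hint
caching is omitted for brevity.

#define _GNU_SOURCE		/* MAP_ANONYMOUS, MAP_NORESERVE */
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/* Map `size` bytes aligned to `size`; size must be a power of two.
   Mirrors the fallback path of new_heap() above. */
static void *aligned_mmap(size_t size)
{
	char *p1, *p2;
	size_t slack;

	/* Over-allocate: a mapping of 2*size bytes must contain an
	   aligned size-byte block somewhere inside it. */
	p1 = mmap(NULL, size << 1, PROT_NONE,
		  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
	if (p1 == MAP_FAILED)
		return NULL;

	/* Round p1 up to the next multiple of size. */
	p2 = (char *)(((uintptr_t)p1 + (size - 1))
		      & ~((uintptr_t)size - 1));

	/* Unmap the slack before and after the aligned block; the
	   slack is always strictly less than size. */
	slack = p2 - p1;
	if (slack)
		munmap(p1, slack);
	munmap(p2 + size, size - slack);

	return p2;
}

Mapping with PROT_NONE keeps the large reservation cheap (no swap is
reserved for non-writable mappings on Linux, as the comment in the
function notes); callers then mprotect() the slice they actually
use, which appears to be what h->mprotect_size tracks.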

