This is the mail archive of the libc-alpha@sourceware.org mailing list for the glibc project.


[PATCH] malloc: unifdef -m -DUSE_ARENAS -DHAVE_MMAP


From: Joern Engel <joern@purestorage.org>

Beyond the mechanical unifdef pass, this includes a bit of manual comment cleanup.
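
For reviewers, a minimal before/after sketch of what the unifdef pass does
(illustrative toy code, not taken from malloc.c; identifiers are only for
the example):

    /* before: both configurations kept via the preprocessor */
    #if USE_ARENAS
        arena = arena_get2(prev, size);
    #else
        arena = &main_arena;
    #endif

    /* after "unifdef -m -DUSE_ARENAS": the file is rewritten in place,
       only the taken branch survives, and the #if/#else/#endif lines
       are dropped */
        arena = arena_get2(prev, size);

The same applies to every HAVE_MMAP conditional, which is why the hunks
below consist almost entirely of deleted preprocessor lines and dead
!USE_ARENAS / !HAVE_MMAP branches.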

JIRA: PURE-27597
---
 tpc/malloc2.13/arena.h  | 28 -----------------
 tpc/malloc2.13/hooks.h  |  8 -----
 tpc/malloc2.13/malloc.c | 84 +------------------------------------------------
 3 files changed, 1 insertion(+), 119 deletions(-)

diff --git a/tpc/malloc2.13/arena.h b/tpc/malloc2.13/arena.h
index 0aaccb914d92..803d7b3bf020 100644
--- a/tpc/malloc2.13/arena.h
+++ b/tpc/malloc2.13/arena.h
@@ -98,7 +98,6 @@ static int __malloc_initialized = -1;
 
 /**************************************************************************/
 
-#if USE_ARENAS
 
 /* arena_get() acquires an arena and locks the corresponding mutex.
    First, try the one last locked successfully by this thread.  (This
@@ -141,29 +140,6 @@ static int __malloc_initialized = -1;
 #define arena_for_chunk(ptr) \
  (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
 
-#else /* !USE_ARENAS */
-
-/* There is only one arena, main_arena. */
-
-#if THREAD_STATS
-#define arena_get(ar_ptr, sz) do { \
-  ar_ptr = &main_arena; \
-  if(!mutex_trylock(&ar_ptr->mutex)) \
-    ++(ar_ptr->stat_lock_direct); \
-  else { \
-    (void)mutex_lock(&ar_ptr->mutex); \
-    ++(ar_ptr->stat_lock_wait); \
-  } \
-} while(0)
-#else
-#define arena_get(ar_ptr, sz) do { \
-  ar_ptr = &main_arena; \
-  (void)mutex_lock(&ar_ptr->mutex); \
-} while(0)
-#endif
-#define arena_for_chunk(ptr) (&main_arena)
-
-#endif /* USE_ARENAS */
 
 /**************************************************************************/
 
@@ -232,13 +208,11 @@ free_atfork(Void_t* mem, const Void_t *caller)
 
   p = mem2chunk(mem);         /* do not bother to replicate free_check here */
 
-#if HAVE_MMAP
   if (chunk_is_mmapped(p))                       /* release mmapped memory. */
   {
     munmap_chunk(p);
     return;
   }
-#endif
 
 #ifdef ATOMIC_FASTBINS
   ar_ptr = arena_for_chunk(p);
@@ -636,7 +610,6 @@ thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
 
 /* Managing heaps and arenas (for concurrent threads) */
 
-#if USE_ARENAS
 
 #if MALLOC_DEBUG > 1
 
@@ -1083,7 +1056,6 @@ arena_thread_freeres (void)
 text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
 #endif
 
-#endif /* USE_ARENAS */
 
 /*
  * Local variables:
diff --git a/tpc/malloc2.13/hooks.h b/tpc/malloc2.13/hooks.h
index 05cfafbb78ba..48f54f915275 100644
--- a/tpc/malloc2.13/hooks.h
+++ b/tpc/malloc2.13/hooks.h
@@ -249,13 +249,11 @@ free_check(Void_t* mem, const Void_t *caller)
     malloc_printerr(check_action, "free(): invalid pointer", mem);
     return;
   }
-#if HAVE_MMAP
   if (chunk_is_mmapped(p)) {
     (void)mutex_unlock(&main_arena.mutex);
     munmap_chunk(p);
     return;
   }
-#endif
 #if 0 /* Erase freed memory. */
   memset(mem, 0, chunksize(p) - (SIZE_SZ+1));
 #endif
@@ -295,7 +293,6 @@ realloc_check(Void_t* oldmem, size_t bytes, const Void_t *caller)
   checked_request2size(bytes+1, nb);
   (void)mutex_lock(&main_arena.mutex);
 
-#if HAVE_MMAP
   if (chunk_is_mmapped(oldp)) {
 #if HAVE_MREMAP
     mchunkptr newp = mremap_chunk(oldp, nb);
@@ -318,7 +315,6 @@ realloc_check(Void_t* oldmem, size_t bytes, const Void_t *caller)
       }
     }
   } else {
-#endif /* HAVE_MMAP */
     if (top_check() >= 0) {
       INTERNAL_SIZE_T nb;
       checked_request2size(bytes + 1, nb);
@@ -336,9 +332,7 @@ realloc_check(Void_t* oldmem, size_t bytes, const Void_t *caller)
 	     0, nb - (oldsize+SIZE_SZ));
     }
 #endif
-#if HAVE_MMAP
   }
-#endif
 
   /* mem2chunk_check changed the magic byte in the old chunk.
      If newmem is NULL, then the old chunk will still be used though,
@@ -414,12 +408,10 @@ free_starter(Void_t* mem, const Void_t *caller)
 
   if(!mem) return;
   p = mem2chunk(mem);
-#if HAVE_MMAP
   if (chunk_is_mmapped(p)) {
     munmap_chunk(p);
     return;
   }
-#endif
 #ifdef ATOMIC_FASTBINS
   _int_free(&main_arena, p, 1);
 #else
diff --git a/tpc/malloc2.13/malloc.c b/tpc/malloc2.13/malloc.c
index c366b6085953..4fff268316ed 100644
--- a/tpc/malloc2.13/malloc.c
+++ b/tpc/malloc2.13/malloc.c
@@ -649,23 +649,6 @@ extern Void_t*     sbrk(ptrdiff_t);
 
 
 /*
-  Define HAVE_MMAP as true to optionally make malloc() use mmap() to
-  allocate very large blocks.  These will be returned to the
-  operating system immediately after a free(). Also, if mmap
-  is available, it is used as a backup strategy in cases where
-  MORECORE fails to provide space from system.
-
-  This malloc is best tuned to work with mmap for large requests.
-  If you do not have mmap, operations involving very large chunks (1MB
-  or so) may be slower than you'd like.
-*/
-
-#ifndef HAVE_MMAP
-#define HAVE_MMAP 1
-#endif
-
-
-/*
    MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
    sbrk fails, and mmap is used as a backup (which is done only if
    HAVE_MMAP).  The value must be a multiple of page size.  This
@@ -696,17 +679,7 @@ extern Void_t*     sbrk(ptrdiff_t);
 #define HAVE_MREMAP 0
 #endif
 
-#endif /* HAVE_MMAP */
-
-/* Define USE_ARENAS to enable support for multiple `arenas'.  These
-   are allocated using mmap(), are necessary for threads and
-   occasionally useful to overcome address space limitations affecting
-   sbrk(). */
-
-#ifndef USE_ARENAS
-#define USE_ARENAS HAVE_MMAP
-#endif
-
+#endif /* HAVE_MREMAP */
 
 /*
   The system page size. To the extent possible, this malloc manages
@@ -1455,11 +1428,7 @@ int      dlposix_memalign(void **, size_t, size_t);
 #define M_MMAP_MAX             -4
 
 #ifndef DEFAULT_MMAP_MAX
-#if HAVE_MMAP
 #define DEFAULT_MMAP_MAX       (65536)
-#else
-#define DEFAULT_MMAP_MAX       (0)
-#endif
 #endif
 
 #ifdef __cplusplus
@@ -1629,7 +1598,6 @@ do {                                                                          \
 /* ------------------ MMAP support ------------------  */
 
 
-#if HAVE_MMAP
 
 #include <fcntl.h>
 #ifndef LACKS_SYS_MMAN_H
@@ -1674,7 +1642,6 @@ static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
 #endif
 
 
-#endif /* HAVE_MMAP */
 
 
 /*
@@ -2494,7 +2461,6 @@ static void do_check_chunk(struct malloc_state * av, mchunkptr p)
 
   }
   else {
-#if HAVE_MMAP
     /* address is outside main heap  */
     if (contiguous(av) && av->top != initial_top(av)) {
       assert(((char*)p) < min_address || ((char*)p) >= max_address);
@@ -2503,10 +2469,6 @@ static void do_check_chunk(struct malloc_state * av, mchunkptr p)
     assert(((p->prev_size + sz) & (mp_.pagesize-1)) == 0);
     /* mem is aligned */
     assert(aligned_OK(chunk2mem(p)));
-#else
-    /* force an appropriate assert violation if debug set */
-    assert(!chunk_is_mmapped(p));
-#endif
   }
 }
 
@@ -2836,7 +2798,6 @@ static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, struct malloc_state * av)
   bool            tried_mmap = false;
 
 
-#if HAVE_MMAP
 
   /*
     If have mmap, and the request size meets the mmap threshold, and
@@ -2920,7 +2881,6 @@ static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, struct malloc_state * av)
       }
     }
   }
-#endif
 
   /* Record incoming configuration of top */
 
@@ -3056,7 +3016,6 @@ static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, struct malloc_state * av)
     segregated mmap region.
   */
 
-#if HAVE_MMAP
     /* Cannot merge with old top, so add its size back in */
     if (contiguous(av))
       size = (size + old_size + pagemask) & ~pagemask;
@@ -3085,7 +3044,6 @@ static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, struct malloc_state * av)
 	set_noncontiguous(av);
       }
     }
-#endif
   }
 
   if (brk != (char*)(MORECORE_FAILURE)) {
@@ -3351,7 +3309,6 @@ static int sYSTRIm(size_t pad, struct malloc_state * av)
   return 0;
 }
 
-#ifdef HAVE_MMAP
 
 static void
 internal_function
@@ -3439,7 +3396,6 @@ mremap_chunk(mchunkptr p, size_t new_size)
 
 #endif /* HAVE_MREMAP */
 
-#endif /* HAVE_MMAP */
 
 /*------------------------ Public wrappers. --------------------------------*/
 
@@ -3468,7 +3424,6 @@ public_mALLOc(size_t bytes)
       victim = _int_malloc(ar_ptr, bytes);
       (void)mutex_unlock(&ar_ptr->mutex);
     } else {
-#if USE_ARENAS
       /* ... or sbrk() has failed and there is still a chance to mmap() */
       ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
       (void)mutex_unlock(&main_arena.mutex);
@@ -3476,7 +3431,6 @@ public_mALLOc(size_t bytes)
 	victim = _int_malloc(ar_ptr, bytes);
 	(void)mutex_unlock(&ar_ptr->mutex);
       }
-#endif
     }
   } else
     (void)mutex_unlock(&ar_ptr->mutex);
@@ -3506,7 +3460,6 @@ public_fREe(Void_t* mem)
 
   p = mem2chunk(mem);
 
-#if HAVE_MMAP
   if (chunk_is_mmapped(p))                       /* release mmapped memory. */
   {
     /* see if the dynamic brk/mmap threshold needs adjusting */
@@ -3520,7 +3473,6 @@ public_fREe(Void_t* mem)
     munmap_chunk(p);
     return;
   }
-#endif
 
   ar_ptr = arena_for_chunk(p);
 #ifdef ATOMIC_FASTBINS
@@ -3582,7 +3534,6 @@ public_rEALLOc(Void_t* oldmem, size_t bytes)
 
   checked_request2size(bytes, nb);
 
-#if HAVE_MMAP
   if (chunk_is_mmapped(oldp))
   {
     Void_t* newmem;
@@ -3600,7 +3551,6 @@ public_rEALLOc(Void_t* oldmem, size_t bytes)
     munmap_chunk(oldp);
     return newmem;
   }
-#endif
 
   ar_ptr = arena_for_chunk(oldp);
 #if THREAD_STATS
@@ -3688,7 +3638,6 @@ public_mEMALIGn(size_t alignment, size_t bytes)
       p = _int_memalign(ar_ptr, alignment, bytes);
       (void)mutex_unlock(&ar_ptr->mutex);
     } else {
-#if USE_ARENAS
       /* ... or sbrk() has failed and there is still a chance to mmap() */
       struct malloc_state * prev = ar_ptr->next ? ar_ptr : 0;
       (void)mutex_unlock(&ar_ptr->mutex);
@@ -3697,7 +3646,6 @@ public_mEMALIGn(size_t alignment, size_t bytes)
 	p = _int_memalign(ar_ptr, alignment, bytes);
 	(void)mutex_unlock(&ar_ptr->mutex);
       }
-#endif
     }
   } else
     (void)mutex_unlock(&ar_ptr->mutex);
@@ -3739,14 +3687,12 @@ public_vALLOc(size_t bytes)
       p = _int_memalign(ar_ptr, pagesz, bytes);
       (void)mutex_unlock(&ar_ptr->mutex);
     } else {
-#if USE_ARENAS
       /* ... or sbrk() has failed and there is still a chance to mmap() */
       ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
       if(ar_ptr) {
 	p = _int_memalign(ar_ptr, pagesz, bytes);
 	(void)mutex_unlock(&ar_ptr->mutex);
       }
-#endif
     }
   }
   assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
@@ -3785,7 +3731,6 @@ public_pVALLOc(size_t bytes)
       p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
       (void)mutex_unlock(&ar_ptr->mutex);
     } else {
-#if USE_ARENAS
       /* ... or sbrk() has failed and there is still a chance to mmap() */
       ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0,
 			  bytes + 2*pagesz + MINSIZE);
@@ -3793,7 +3738,6 @@ public_pVALLOc(size_t bytes)
 	p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
 	(void)mutex_unlock(&ar_ptr->mutex);
       }
-#endif
     }
   }
   assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
@@ -3878,7 +3822,6 @@ public_cALLOc(size_t n, size_t elem_size)
       mem = _int_malloc(&main_arena, sz);
       (void)mutex_unlock(&main_arena.mutex);
     } else {
-#if USE_ARENAS
       /* ... or sbrk() has failed and there is still a chance to mmap() */
       (void)mutex_lock(&main_arena.mutex);
       av = arena_get2(av->next ? av : 0, sz);
@@ -3887,21 +3830,18 @@ public_cALLOc(size_t n, size_t elem_size)
 	mem = _int_malloc(av, sz);
 	(void)mutex_unlock(&av->mutex);
       }
-#endif
     }
     if (mem == 0) return 0;
   }
   p = mem2chunk(mem);
 
   /* Two optional cases in which clearing not necessary */
-#if HAVE_MMAP
   if (chunk_is_mmapped (p))
     {
       if (__builtin_expect (perturb_byte, 0))
 	MALLOC_ZERO (mem, sz);
       return mem;
     }
-#endif
 
   csz = chunksize(p);
 
@@ -4877,9 +4817,7 @@ _int_free(struct malloc_state * av, mchunkptr p)
   */
 
   else {
-#if HAVE_MMAP
     munmap_chunk (p);
-#endif
   }
 }
 
@@ -5188,7 +5126,6 @@ _int_realloc(struct malloc_state * av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
   */
 
   else {
-#if HAVE_MMAP
 
 #if HAVE_MREMAP
     INTERNAL_SIZE_T offset = oldp->prev_size;
@@ -5244,12 +5181,6 @@ _int_realloc(struct malloc_state * av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
     }
     return newmem;
 
-#else
-    /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
-    check_malloc_state(av);
-    MALLOC_FAILURE_ACTION;
-    return 0;
-#endif
   }
 #endif
 }
@@ -5724,21 +5655,15 @@ void mSTATs()
     ar_ptr = ar_ptr->next;
     if(ar_ptr == &main_arena) break;
   }
-#if HAVE_MMAP
   fprintf(stderr, "Total (incl. mmap):\n");
-#else
-  fprintf(stderr, "Total:\n");
-#endif
   fprintf(stderr, "system bytes     = %14lu\n", system_b);
   fprintf(stderr, "in use bytes     = %14lu\n", in_use_b);
 #ifdef NO_THREADS
   fprintf(stderr, "max system bytes = %14lu\n", mp_.max_total_mem);
 #endif
-#if HAVE_MMAP
   fprintf(stderr, "max mmap regions = %14u\n", mp_.max_n_mmaps);
   fprintf(stderr, "max mmap bytes   = %14lu\n",
 	  (unsigned long)mp_.max_mmapped_mem);
-#endif
 #if THREAD_STATS
   fprintf(stderr, "heaps created    = %10d\n",  stat_n_heaps);
   fprintf(stderr, "locked directly  = %10ld\n", stat_lock_direct);
@@ -5789,22 +5714,15 @@ int mALLOPt(int param_number, int value)
     break;
 
   case M_MMAP_THRESHOLD:
-#if USE_ARENAS
     /* Forbid setting the threshold too high. */
     if((unsigned long)value > HEAP_MAX_SIZE/2)
       res = 0;
     else
-#endif
       mp_.mmap_threshold = value;
       mp_.no_dyn_threshold = 1;
     break;
 
   case M_MMAP_MAX:
-#if !HAVE_MMAP
-    if (value != 0)
-      res = 0;
-    else
-#endif
       mp_.n_mmaps_max = value;
       mp_.no_dyn_threshold = 1;
     break;
-- 
2.7.0.rc3

