This is the mail archive of the libc-alpha@sourceware.org mailing list for the glibc project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

[PATCH] malloc: remove __builtin_expect


From: Joern Engel <joern@purestorage.org>

The __builtin_expect macro was disabled anyway — an earlier local
"#define __builtin_expect(expr, val) (expr)" redefined it as a no-op —
so the annotations only served as obfuscation.  The generated code is
unchanged post-compilation.

JIRA: PURE-27597
---
 tpc/malloc2.13/malloc.c | 112 ++++++++++++++++++++++++------------------------
 1 file changed, 55 insertions(+), 57 deletions(-)

diff --git a/tpc/malloc2.13/malloc.c b/tpc/malloc2.13/malloc.c
index 0a71065a7b90..06e0f258ea1a 100644
--- a/tpc/malloc2.13/malloc.c
+++ b/tpc/malloc2.13/malloc.c
@@ -472,8 +472,6 @@ extern "C" {
 
 #endif /* USE_DL_PREFIX */
 
-#define __builtin_expect(expr, val)	(expr)
-
 #define fwrite(buf, size, count, fp) _IO_fwrite (buf, size, count, fp)
 
 /*
@@ -1903,13 +1901,13 @@ typedef struct malloc_chunk* mbinptr;
 #define unlink(P, BK, FD) {                                            \
   FD = P->fd;                                                          \
   BK = P->bk;                                                          \
-  if (__builtin_expect (FD->bk != P || BK->fd != P, 0))                \
+  if (FD->bk != P || BK->fd != P)                \
     malloc_printerr (check_action, "corrupted double-linked list", P); \
   else {                                                               \
     FD->bk = BK;                                                       \
     BK->fd = FD;                                                       \
     if (!in_smallbin_range (P->size)				       \
-	&& __builtin_expect (P->fd_nextsize != NULL, 0)) {	       \
+	&& P->fd_nextsize != NULL) {	       \
       assert (P->fd_nextsize->bk_nextsize == P);		       \
       assert (P->bk_nextsize->fd_nextsize == P);		       \
       if (FD->fd_nextsize == NULL) {				       \
@@ -2935,7 +2933,7 @@ static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, struct malloc_state * av)
   if (brk != (char*)(MORECORE_FAILURE)) {
     /* Call the `morecore' hook if necessary.  */
     void (*hook) (void) = force_reg (dlafter_morecore_hook);
-    if (__builtin_expect (hook != NULL, 0))
+    if (hook != NULL)
       (*hook) ();
   } else {
   /*
@@ -3073,7 +3071,7 @@ static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, struct malloc_state * av)
 	} else {
 	  /* Call the `morecore' hook if necessary.  */
 	  void (*hook) (void) = force_reg (dlafter_morecore_hook);
-	  if (__builtin_expect (hook != NULL, 0))
+	  if (hook != NULL)
 	    (*hook) ();
 	}
       }
@@ -3220,7 +3218,7 @@ static int sYSTRIm(size_t pad, struct malloc_state * av)
       MORECORE(-extra);
       /* Call the `morecore' hook if necessary.  */
       void (*hook) (void) = force_reg (dlafter_morecore_hook);
-      if (__builtin_expect (hook != NULL, 0))
+      if (hook != NULL)
 	(*hook) ();
       new_brk = (char*)(MORECORE(0));
 
@@ -3260,7 +3258,7 @@ munmap_chunk(mchunkptr p)
      page size.  But gcc does not recognize the optimization possibility
      (in the moment at least) so we combine the two values into one before
      the bit test.  */
-  if (__builtin_expect (((block | total_size) & (mp_.pagesize - 1)) != 0, 0))
+  if (((block | total_size) & (mp_.pagesize - 1)) != 0)
     {
       malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
 		       chunk2mem (p));
@@ -3351,7 +3349,7 @@ Void_t *public_mALLOc(size_t bytes)
 
 	__malloc_ptr_t(*hook) (size_t, __const __malloc_ptr_t)
 	    = force_reg(dlmalloc_hook);
-	if (__builtin_expect(hook != NULL, 0))
+	if (hook != NULL)
 		return (*hook) (bytes, RETURN_ADDRESS(0));
 
 	ar_ptr = arena_get(bytes);
@@ -3375,7 +3373,7 @@ public_fREe(Void_t* mem)
 
   void (*hook) (__malloc_ptr_t, __const __malloc_ptr_t)
     = force_reg (dlfree_hook);
-  if (__builtin_expect (hook != NULL, 0)) {
+  if (hook != NULL) {
     (*hook)(mem, RETURN_ADDRESS (0));
     return;
   }
@@ -3428,7 +3426,7 @@ public_rEALLOc(Void_t* oldmem, size_t bytes)
 
   __malloc_ptr_t (*hook) (__malloc_ptr_t, size_t, __const __malloc_ptr_t) =
     force_reg (dlrealloc_hook);
-  if (__builtin_expect (hook != NULL, 0))
+  if (hook != NULL)
     return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
 
 #if REALLOC_ZERO_BYTES_FREES
@@ -3447,8 +3445,8 @@ public_rEALLOc(Void_t* oldmem, size_t bytes)
      allocator never wrapps around at the end of the address space.
      Therefore we can exclude some size values which might appear
      here by accident or by "design" from some intruder.  */
-  if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
-      || __builtin_expect (misaligned_chunk (oldp), 0))
+  if ((uintptr_t) oldp > (uintptr_t) -oldsize
+      || misaligned_chunk (oldp))
     {
       malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
       return NULL;
@@ -3532,7 +3530,7 @@ Void_t *public_mEMALIGn(size_t alignment, size_t bytes)
 	Void_t *p;
 
 	__malloc_ptr_t(*hook) __MALLOC_PMT((size_t, size_t, __const __malloc_ptr_t)) = force_reg(dlmemalign_hook);
-	if (__builtin_expect(hook != NULL, 0))
+	if (hook != NULL)
 		return (*hook) (alignment, bytes, RETURN_ADDRESS(0));
 
 	/* If need less alignment than we give anyway, just relay to malloc */
@@ -3567,7 +3565,7 @@ Void_t *public_vALLOc(size_t bytes)
 	size_t pagesz = mp_.pagesize;
 
 	__malloc_ptr_t(*hook) __MALLOC_PMT((size_t, size_t, __const __malloc_ptr_t)) = force_reg(dlmemalign_hook);
-	if (__builtin_expect(hook != NULL, 0))
+	if (hook != NULL)
 		return (*hook) (pagesz, bytes, RETURN_ADDRESS(0));
 
 	ar_ptr = arena_get(bytes + pagesz + MINSIZE);
@@ -3598,7 +3596,7 @@ Void_t *public_pVALLOc(size_t bytes)
 	size_t rounded_bytes = (bytes + page_mask) & ~(page_mask);
 
 	__malloc_ptr_t(*hook) __MALLOC_PMT((size_t, size_t, __const __malloc_ptr_t)) = force_reg(dlmemalign_hook);
-	if (__builtin_expect(hook != NULL, 0))
+	if (hook != NULL)
 		return (*hook) (pagesz, rounded_bytes, RETURN_ADDRESS(0));
 
 	ar_ptr = arena_get(bytes + 2 * pagesz + MINSIZE);
@@ -3628,7 +3626,7 @@ Void_t *public_cALLOc(size_t n, size_t elem_size)
 	bytes = n * elem_size;
 #define HALF_INTERNAL_SIZE_T \
   (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
-	if (__builtin_expect((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
+	if ((n | elem_size) >= HALF_INTERNAL_SIZE_T) {
 		if (elem_size != 0 && bytes / elem_size != n) {
 			MALLOC_FAILURE_ACTION;
 			return 0;
@@ -3636,7 +3634,7 @@ Void_t *public_cALLOc(size_t n, size_t elem_size)
 	}
 
 	__malloc_ptr_t(*hook) __MALLOC_PMT((size_t, __const __malloc_ptr_t)) = force_reg(dlmalloc_hook);
-	if (__builtin_expect(hook != NULL, 0)) {
+	if (hook != NULL) {
 		sz = bytes;
 		mem = (*hook) (sz, RETURN_ADDRESS(0));
 		if (mem == 0)
@@ -3686,7 +3684,7 @@ Void_t *public_cALLOc(size_t n, size_t elem_size)
 
 	/* Two optional cases in which clearing not necessary */
 	if (chunk_is_mmapped(p)) {
-		if (__builtin_expect(perturb_byte, 0))
+		if (perturb_byte)
 			MALLOC_ZERO(mem, sz);
 		return mem;
 	}
@@ -3899,7 +3897,7 @@ _int_malloc(struct malloc_state * av, size_t bytes)
     victim = *fb;
 #endif
     if (victim != 0) {
-      if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
+      if (fastbin_index (chunksize (victim)) != idx)
 	{
 	  errstr = "malloc(): memory corruption (fast)";
 	errout:
@@ -3911,7 +3909,7 @@ _int_malloc(struct malloc_state * av, size_t bytes)
 #endif
       check_remalloced_chunk(av, victim, nb);
       void *p = chunk2mem(victim);
-      if (__builtin_expect (perturb_byte, 0))
+      if (perturb_byte)
 	alloc_perturb (p, bytes);
       return p;
     }
@@ -3934,7 +3932,7 @@ _int_malloc(struct malloc_state * av, size_t bytes)
 	malloc_consolidate(av);
       else {
 	bck = victim->bk;
-	if (__builtin_expect (bck->fd != victim, 0))
+	if (bck->fd != victim)
 	  {
 	    errstr = "malloc(): smallbin double linked list corrupted";
 	    goto errout;
@@ -3947,7 +3945,7 @@ _int_malloc(struct malloc_state * av, size_t bytes)
 	  victim->size |= NON_MAIN_ARENA;
 	check_malloced_chunk(av, victim, nb);
 	void *p = chunk2mem(victim);
-	if (__builtin_expect (perturb_byte, 0))
+	if (perturb_byte)
 	  alloc_perturb (p, bytes);
 	return p;
       }
@@ -3989,8 +3987,8 @@ _int_malloc(struct malloc_state * av, size_t bytes)
     int iters = 0;
     while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
       bck = victim->bk;
-      if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
-	  || __builtin_expect (victim->size > av->system_mem, 0))
+      if (victim->size <= 2 * SIZE_SZ
+	  || victim->size > av->system_mem)
 	malloc_printerr (check_action, "malloc(): memory corruption",
 			 chunk2mem (victim));
       size = chunksize(victim);
@@ -4027,7 +4025,7 @@ _int_malloc(struct malloc_state * av, size_t bytes)
 
 	check_malloced_chunk(av, victim, nb);
 	void *p = chunk2mem(victim);
-	if (__builtin_expect (perturb_byte, 0))
+	if (perturb_byte)
 	  alloc_perturb (p, bytes);
 	return p;
       }
@@ -4044,7 +4042,7 @@ _int_malloc(struct malloc_state * av, size_t bytes)
 	  victim->size |= NON_MAIN_ARENA;
 	check_malloced_chunk(av, victim, nb);
 	void *p = chunk2mem(victim);
-	if (__builtin_expect (perturb_byte, 0))
+	if (perturb_byte)
 	  alloc_perturb (p, bytes);
 	return p;
       }
@@ -4148,7 +4146,7 @@ _int_malloc(struct malloc_state * av, size_t bytes)
 	     have to perform a complete insert here.  */
 	  bck = unsorted_chunks(av);
 	  fwd = bck->fd;
-	  if (__builtin_expect (fwd->bk != bck, 0))
+	  if (fwd->bk != bck)
 	    {
 	      errstr = "malloc(): corrupted unsorted chunks";
 	      goto errout;
@@ -4169,7 +4167,7 @@ _int_malloc(struct malloc_state * av, size_t bytes)
 	}
 	check_malloced_chunk(av, victim, nb);
 	void *p = chunk2mem(victim);
-	if (__builtin_expect (perturb_byte, 0))
+	if (perturb_byte)
 	  alloc_perturb (p, bytes);
 	return p;
       }
@@ -4248,7 +4246,7 @@ _int_malloc(struct malloc_state * av, size_t bytes)
 	     have to perform a complete insert here.  */
 	  bck = unsorted_chunks(av);
 	  fwd = bck->fd;
-	  if (__builtin_expect (fwd->bk != bck, 0))
+	  if (fwd->bk != bck)
 	    {
 	      errstr = "malloc(): corrupted unsorted chunks 2";
 	      goto errout;
@@ -4273,7 +4271,7 @@ _int_malloc(struct malloc_state * av, size_t bytes)
 	}
 	check_malloced_chunk(av, victim, nb);
 	void *p = chunk2mem(victim);
-	if (__builtin_expect (perturb_byte, 0))
+	if (perturb_byte)
 	  alloc_perturb (p, bytes);
 	return p;
       }
@@ -4308,7 +4306,7 @@ _int_malloc(struct malloc_state * av, size_t bytes)
 
       check_malloced_chunk(av, victim, nb);
       void *p = chunk2mem(victim);
-      if (__builtin_expect (perturb_byte, 0))
+      if (perturb_byte)
 	alloc_perturb (p, bytes);
       return p;
     }
@@ -4343,7 +4341,7 @@ _int_malloc(struct malloc_state * av, size_t bytes)
     */
     else {
       void *p = sYSMALLOc(nb, av);
-      if (p != NULL && __builtin_expect (perturb_byte, 0))
+      if (p != NULL && perturb_byte)
 	alloc_perturb (p, bytes);
       return p;
     }
@@ -4381,8 +4379,8 @@ _int_free(struct malloc_state * av, mchunkptr p)
      allocator never wrapps around at the end of the address space.
      Therefore we can exclude some size values which might appear
      here by accident or by "design" from some intruder.  */
-  if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
-      || __builtin_expect (misaligned_chunk (p), 0))
+  if ((uintptr_t) p > (uintptr_t) -size
+      || misaligned_chunk (p))
     {
       errstr = "free(): invalid pointer";
     errout:
@@ -4394,7 +4392,7 @@ _int_free(struct malloc_state * av, mchunkptr p)
       return;
     }
   /* We know that each chunk is at least MINSIZE bytes in size.  */
-  if (__builtin_expect (size < MINSIZE, 0))
+  if (size < MINSIZE)
     {
       errstr = "free(): invalid size";
       goto errout;
@@ -4418,9 +4416,9 @@ _int_free(struct malloc_state * av, mchunkptr p)
 #endif
       ) {
 
-    if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
-	|| __builtin_expect (chunksize (chunk_at_offset (p, size))
-			     >= av->system_mem, 0))
+    if (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
+	|| chunksize (chunk_at_offset (p, size))
+			     >= av->system_mem)
       {
 #ifdef ATOMIC_FASTBINS
 	/* We might not have a lock at this point and concurrent modifications
@@ -4447,7 +4445,7 @@ _int_free(struct malloc_state * av, mchunkptr p)
 #endif
       }
 
-    if (__builtin_expect (perturb_byte, 0))
+    if (perturb_byte)
       free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
 
     set_fastchunks(av);
@@ -4462,7 +4460,7 @@ _int_free(struct malloc_state * av, mchunkptr p)
       {
 	/* Another simple check: make sure the top of the bin is not the
 	   record we are going to add (i.e., double free).  */
-	if (__builtin_expect (old == p, 0))
+	if (old == p)
 	  {
 	    errstr = "double free or corruption (fasttop)";
 	    goto errout;
@@ -4473,7 +4471,7 @@ _int_free(struct malloc_state * av, mchunkptr p)
       }
     while ((old = catomic_compare_and_exchange_val_rel (fb, p, fd)) != fd);
 
-    if (fd != NULL && __builtin_expect (old_idx != idx, 0))
+    if (fd != NULL && old_idx != idx)
       {
 	errstr = "invalid fastbin entry (free)";
 	goto errout;
@@ -4481,13 +4479,13 @@ _int_free(struct malloc_state * av, mchunkptr p)
 #else
     /* Another simple check: make sure the top of the bin is not the
        record we are going to add (i.e., double free).  */
-    if (__builtin_expect (*fb == p, 0))
+    if (*fb == p)
       {
 	errstr = "double free or corruption (fasttop)";
 	goto errout;
       }
     if (*fb != NULL
-	&& __builtin_expect (fastbin_index(chunksize(*fb)) != idx, 0))
+	&& fastbin_index(chunksize(*fb)) != idx)
       {
 	errstr = "invalid fastbin entry (free)";
 	goto errout;
@@ -4523,35 +4521,35 @@ _int_free(struct malloc_state * av, mchunkptr p)
 
     /* Lightweight tests: check whether the block is already the
        top block.  */
-    if (__builtin_expect (p == av->top, 0))
+    if (p == av->top)
       {
 	errstr = "double free or corruption (top)";
 	goto errout;
       }
     /* Or whether the next chunk is beyond the boundaries of the arena.  */
-    if (__builtin_expect (contiguous (av)
+    if (contiguous (av)
 			  && (char *) nextchunk
-			  >= ((char *) av->top + chunksize(av->top)), 0))
+			  >= ((char *) av->top + chunksize(av->top)))
       {
 	errstr = "double free or corruption (out)";
 	goto errout;
       }
     /* Or whether the block is actually not marked used.  */
-    if (__builtin_expect (!prev_inuse(nextchunk), 0))
+    if (!prev_inuse(nextchunk))
       {
 	errstr = "double free or corruption (!prev)";
 	goto errout;
       }
 
     nextsize = chunksize(nextchunk);
-    if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
-	|| __builtin_expect (nextsize >= av->system_mem, 0))
+    if (nextchunk->size <= 2 * SIZE_SZ
+	|| nextsize >= av->system_mem)
       {
 	errstr = "free(): invalid next size (normal)";
 	goto errout;
       }
 
-    if (__builtin_expect (perturb_byte, 0))
+    if (perturb_byte)
       free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
 
     /* consolidate backward */
@@ -4581,7 +4579,7 @@ _int_free(struct malloc_state * av, mchunkptr p)
 
       bck = unsorted_chunks(av);
       fwd = bck->fd;
-      if (__builtin_expect (fwd->bk != bck, 0))
+      if (fwd->bk != bck)
 	{
 	  errstr = "free(): corrupted unsorted chunks";
 	  goto errout;
@@ -4822,8 +4820,8 @@ _int_realloc(struct malloc_state * av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
   const char *errstr = NULL;
 
   /* oldmem size */
-  if (__builtin_expect (oldp->size <= 2 * SIZE_SZ, 0)
-      || __builtin_expect (oldsize >= av->system_mem, 0))
+  if (oldp->size <= 2 * SIZE_SZ
+      || oldsize >= av->system_mem)
     {
       errstr = "realloc(): invalid old size";
     errout:
@@ -4843,8 +4841,8 @@ _int_realloc(struct malloc_state * av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
 
     next = chunk_at_offset(oldp, oldsize);
     INTERNAL_SIZE_T nextsize = chunksize(next);
-    if (__builtin_expect (next->size <= 2 * SIZE_SZ, 0)
-	|| __builtin_expect (nextsize >= av->system_mem, 0))
+    if (next->size <= 2 * SIZE_SZ
+	|| nextsize >= av->system_mem)
       {
 	errstr = "realloc(): invalid next size";
 	goto errout;
@@ -5769,7 +5767,7 @@ dlposix_memalign (void **memptr, size_t alignment, size_t size)
   __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
 					__const __malloc_ptr_t)) =
     force_reg (dlmemalign_hook);
-  if (__builtin_expect (hook != NULL, 0))
+  if (hook != NULL)
     mem = (*hook)(alignment, size, RETURN_ADDRESS (0));
   else
     mem = public_mEMALIGn (alignment, size);
-- 
2.7.0.rc3


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]