This is the mail archive of the libc-alpha@sourceware.org mailing list for the glibc project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

[PATCH 1/2] malloc/malloc.c: Validate SIZE passed to aligned_alloc.


The ISO C11 standard specifies that the SIZE passed to aligned_alloc
must be an integral multiple of ALIGNMENT. Aliasing aligned_alloc to
memalign does not enforce this restriction, so introduce a new
function that performs this validation.

ChangeLog:

2013-11-07  Will Newton  <will.newton@linaro.org>

	* malloc/malloc.c (__aligned_alloc): New function.
	(__libc_memalign): Move main body of the code to
	_int_aligned_alloc and call that function.
	(_int_aligned_alloc): New function.
---
 malloc/malloc.c | 97 +++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 60 insertions(+), 37 deletions(-)

diff --git a/malloc/malloc.c b/malloc/malloc.c
index 897c43a..67ad141 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1054,6 +1054,7 @@ static void     _int_free(mstate, mchunkptr, int);
 static void*  _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
 			   INTERNAL_SIZE_T);
 static void*  _int_memalign(mstate, size_t, size_t);
+static void*  _int_aligned_alloc(size_t, size_t);
 static void*  _int_valloc(mstate, size_t);
 static void*  _int_pvalloc(mstate, size_t);
 static void malloc_printerr(int action, const char *str, void *ptr);
@@ -3000,56 +3001,34 @@ __libc_realloc(void* oldmem, size_t bytes)
 libc_hidden_def (__libc_realloc)

 void*
-__libc_memalign(size_t alignment, size_t bytes)
+__aligned_alloc(size_t alignment, size_t bytes)
 {
-  mstate ar_ptr;
-  void *p;
-
   void *(*hook) (size_t, size_t, const void *) =
     force_reg (__memalign_hook);
   if (__builtin_expect (hook != NULL, 0))
     return (*hook)(alignment, bytes, RETURN_ADDRESS (0));

-  /* If need less alignment than we give anyway, just relay to malloc */
-  if (alignment <= MALLOC_ALIGNMENT) return __libc_malloc(bytes);
-
-  /* Otherwise, ensure that it is at least a minimum chunk size */
-  if (alignment <  MINSIZE) alignment = MINSIZE;
-
-  /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
-     power of 2 and will cause overflow in the check below.  */
-  if (alignment > SIZE_MAX / 2 + 1)
+  /* Check size is integral multiple of alignment.  */
+  if (bytes % alignment != 0)
     {
       __set_errno (EINVAL);
       return 0;
     }

-  /* Check for overflow.  */
-  if (bytes > SIZE_MAX - alignment - MINSIZE)
-    {
-      __set_errno (ENOMEM);
-      return 0;
-    }
+  return _int_aligned_alloc(alignment, bytes);
+}
+weak_alias (__aligned_alloc, aligned_alloc)

-  arena_get(ar_ptr, bytes + alignment + MINSIZE);
-  if(!ar_ptr)
-    return 0;
-  p = _int_memalign(ar_ptr, alignment, bytes);
-  if(!p) {
-    LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
-    ar_ptr = arena_get_retry (ar_ptr, bytes);
-    if (__builtin_expect(ar_ptr != NULL, 1)) {
-      p = _int_memalign(ar_ptr, alignment, bytes);
-      (void)mutex_unlock(&ar_ptr->mutex);
-    }
-  } else
-    (void)mutex_unlock(&ar_ptr->mutex);
-  assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
-	 ar_ptr == arena_for_chunk(mem2chunk(p)));
-  return p;
+void*
+__libc_memalign(size_t alignment, size_t bytes)
+{
+  void *(*hook) (size_t, size_t, const void *) =
+    force_reg (__memalign_hook);
+  if (__builtin_expect (hook != NULL, 0))
+    return (*hook)(alignment, bytes, RETURN_ADDRESS (0));
+
+  return _int_aligned_alloc(alignment, bytes);
 }
-/* For ISO C11.  */
-weak_alias (__libc_memalign, aligned_alloc)
 libc_hidden_def (__libc_memalign)

 void*
@@ -4404,6 +4383,50 @@ _int_memalign(mstate av, size_t alignment, size_t bytes)
   return chunk2mem(p);
 }

+static void *
+_int_aligned_alloc(size_t alignment, size_t bytes)
+{
+  mstate ar_ptr;
+  void *p;
+
+  /* If need less alignment than we give anyway, just relay to malloc */
+  if (alignment <= MALLOC_ALIGNMENT) return __libc_malloc(bytes);
+
+  /* Otherwise, ensure that it is at least a minimum chunk size */
+  if (alignment <  MINSIZE) alignment = MINSIZE;
+
+  /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
+     power of 2 and will cause overflow in the check below.  */
+  if (alignment > SIZE_MAX / 2 + 1)
+    {
+      __set_errno (EINVAL);
+      return 0;
+    }
+
+  /* Check for overflow.  */
+  if (bytes > SIZE_MAX - alignment - MINSIZE)
+    {
+      __set_errno (ENOMEM);
+      return 0;
+    }
+
+  arena_get(ar_ptr, bytes + alignment + MINSIZE);
+  if(!ar_ptr)
+    return 0;
+  p = _int_memalign(ar_ptr, alignment, bytes);
+  if(!p) {
+    LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
+    ar_ptr = arena_get_retry (ar_ptr, bytes);
+    if (__builtin_expect(ar_ptr != NULL, 1)) {
+      p = _int_memalign(ar_ptr, alignment, bytes);
+      (void)mutex_unlock(&ar_ptr->mutex);
+    }
+  } else
+    (void)mutex_unlock(&ar_ptr->mutex);
+  assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
+	 ar_ptr == arena_for_chunk(mem2chunk(p)));
+  return p;
+}

 /*
   ------------------------------ valloc ------------------------------
-- 
1.8.1.4


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]