[PATCH 07/10] Add single-threaded path to _int_free
Siddhesh Poyarekar
siddhesh@sourceware.org
Sun Jan 1 00:00:00 GMT 2017
From: Wilco Dijkstra <wdijkstr@arm.com>
This patch adds single-threaded fast paths to _int_free and bypasses
the explicit locking for larger allocations.
* malloc/malloc.c (_int_free): Add SINGLE_THREAD_P fast paths.
(cherry picked from commit a15d53e2de4c7d83bda251469d92a3c7b49a90db)
---
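
For reviewers unfamiliar with the pattern, here is a minimal standalone
sketch of the fast path the first hunk adds.  The struct chunk, fastbin
and fastbin_push names are stand-ins for illustration, not glibc's
internal types: when the process is known to be single-threaded, the CAS
loop that pushes a freed chunk onto its fastbin can be replaced by a
plain load and store.

#include <stdatomic.h>
#include <stdbool.h>

/* Simplified stand-ins for glibc's chunk and fastbin types.  */
struct chunk { struct chunk *fd; };
typedef _Atomic (struct chunk *) fastbin;

/* Push P onto fastbin *FB.  With a single thread nothing can race on
   *FB, so a plain read-modify-write is safe; otherwise the classic CAS
   loop is needed, retrying until the head has not changed under us.  */
static void
fastbin_push (fastbin *fb, struct chunk *p, bool single_threaded)
{
  struct chunk *old = atomic_load_explicit (fb, memory_order_relaxed);
  if (single_threaded)
    {
      p->fd = old;
      atomic_store_explicit (fb, p, memory_order_relaxed);
    }
  else
    do
      /* On CAS failure OLD is reloaded with the current head.  */
      p->fd = old;
    while (!atomic_compare_exchange_weak_explicit
             (fb, &old, p, memory_order_release, memory_order_relaxed));
}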
ChangeLog | 4 ++++
malloc/malloc.c | 41 ++++++++++++++++++++++++++++-------------
2 files changed, 32 insertions(+), 13 deletions(-)
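
The second hunk takes a different route for the non-fastbin case:
rather than branching around every lock site, it forces have_lock to
true under SINGLE_THREAD_P, so the existing "if (!have_lock)" guards
become no-ops.  A minimal sketch of that pattern, using pthread_mutex_t
and a stand-in arena struct in place of glibc's mstate:

#include <pthread.h>
#include <stdbool.h>

struct arena { pthread_mutex_t mutex; /* ...bins, top chunk, etc.  */ };

/* HAVE_LOCK means the caller already holds AV->mutex.  Forcing it to
   true when we know we are single-threaded makes every subsequent
   "if (!have_lock)" lock/unlock pair collapse to nothing, without
   touching each call site individually.  */
static void
arena_modify (struct arena *av, bool have_lock, bool single_threaded)
{
  if (single_threaded)
    have_lock = true;

  if (!have_lock)
    pthread_mutex_lock (&av->mutex);

  /* ... operate on AV's shared data structures ... */

  if (!have_lock)
    pthread_mutex_unlock (&av->mutex);
}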
diff --git a/ChangeLog b/ChangeLog
index 49b720f..30e6f50 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,7 @@
+2017-10-20  Wilco Dijkstra  <wdijkstr@arm.com>
+
+	* malloc/malloc.c (_int_free): Add SINGLE_THREAD_P fast paths.
+
 2017-10-19  Wilco Dijkstra  <wdijkstr@arm.com>
 
 	* malloc/malloc.c (_int_free): Fix deadlock bug in consistency check.
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 44996e0..78676a6 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -4172,24 +4172,34 @@ _int_free (mstate av, mchunkptr p, int have_lock)
 
     /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
     mchunkptr old = *fb, old2;
-    unsigned int old_idx = ~0u;
-    do
+
+    if (SINGLE_THREAD_P)
       {
-	/* Check that the top of the bin is not the record we are going to add
-	   (i.e., double free).  */
+	/* Check that the top of the bin is not the record we are going to
+	   add (i.e., double free).  */
 	if (__builtin_expect (old == p, 0))
 	  malloc_printerr ("double free or corruption (fasttop)");
-	/* Check that size of fastbin chunk at the top is the same as
-	   size of the chunk that we are adding.  We can dereference OLD
-	   only if we have the lock, otherwise it might have already been
-	   deallocated.  See use of OLD_IDX below for the actual check.  */
-	if (have_lock && old != NULL)
-	  old_idx = fastbin_index(chunksize(old));
-	p->fd = old2 = old;
+	p->fd = old;
+	*fb = p;
       }
-    while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
+    else
+      do
+	{
+	  /* Check that the top of the bin is not the record we are going to
+	     add (i.e., double free).  */
+	  if (__builtin_expect (old == p, 0))
+	    malloc_printerr ("double free or corruption (fasttop)");
+	  p->fd = old2 = old;
+	}
+      while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
+	     != old2);
 
-    if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
+    /* Check that size of fastbin chunk at the top is the same as
+       size of the chunk that we are adding.  We can dereference OLD
+       only if we have the lock, otherwise it might have already been
+       allocated again.  */
+    if (have_lock && old != NULL
+	&& __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
       malloc_printerr ("invalid fastbin entry (free)");
   }
 
@@ -4198,6 +4208,11 @@ _int_free (mstate av, mchunkptr p, int have_lock)
   */
 
   else if (!chunk_is_mmapped(p)) {
+
+    /* If we're single-threaded, don't lock the arena.  */
+    if (SINGLE_THREAD_P)
+      have_lock = true;
+
     if (!have_lock)
       __libc_lock_lock (av->mutex);
 
--
2.7.5