/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001,2002,2003,2004,2005,2006,2007,2009,2010,2011,2012
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

#include <stdbool.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32*1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */
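
/* Illustrative note: with the usual definition of
   DEFAULT_MMAP_THRESHOLD_MAX in malloc.c (4 * 1024 * 1024 *
   sizeof (long)), HEAP_MAX_SIZE works out to 32 MB on 32-bit and
   64 MB on 64-bit configurations; the exact value depends on how
   that macro is defined for the target.  */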

#ifndef THREAD_STATS
#define THREAD_STATS 0
#endif

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed.  */

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */

typedef struct _heap_info {
  mstate ar_ptr;         /* Arena for this heap. */
  struct _heap_info *prev; /* Previous heap. */
  size_t size;           /* Current size in bytes. */
  size_t mprotect_size;  /* Size in bytes that has been mprotected
                            PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;

/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                             ? -1 : 1];
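
/* Worked example of the pad computation (illustrative; assumes pointers
   and INTERNAL_SIZE_T are both SIZE_SZ bytes wide): the members before
   the pad occupy 4 * SIZE_SZ bytes, and the prev_size/size header of a
   chunk placed right after the heap_info accounts for the extra
   2 * SIZE_SZ, so the pad must round 6 * SIZE_SZ up to a multiple of
   MALLOC_ALIGNMENT.  With SIZE_SZ == 8 and MALLOC_ALIGNMENT == 16 that
   is (-48 & 15) == 0 bytes of padding; with MALLOC_ALIGNMENT == 32 it
   would be (-48 & 31) == 16 bytes.  */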

/* Thread specific data */

static tsd_key_t arena_key;
static mutex_t list_lock = MUTEX_INITIALIZER;
#ifdef PER_THREAD
static size_t narenas = 1;
static mstate free_list;
#endif

#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif

/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;

/* Already initialized? */
int __malloc_initialized = -1;

/**************************************************************************/


/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena.  */
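
/* Sketch of the intended use from malloc.c (illustrative only, not the
   exact code there):

     mstate ar_ptr;
     void *victim;
     arena_lookup(ar_ptr);
     arena_lock(ar_ptr, bytes);
     if(!ar_ptr)
       return 0;
     victim = _int_malloc(ar_ptr, bytes);
     (void)mutex_unlock(&ar_ptr->mutex);  */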

#define arena_get(ptr, size) do { \
  arena_lookup(ptr); \
  arena_lock(ptr, size); \
} while(0)

#define arena_lookup(ptr) do { \
  void *vptr = NULL; \
  ptr = (mstate)tsd_getspecific(arena_key, vptr); \
} while(0)

#ifdef PER_THREAD
# define arena_lock(ptr, size) do { \
  if(ptr) \
    (void)mutex_lock(&ptr->mutex); \
  else \
    ptr = arena_get2(ptr, (size), NULL); \
} while(0)
#else
# define arena_lock(ptr, size) do { \
  if(ptr && !mutex_trylock(&ptr->mutex)) { \
    THREAD_STAT(++(ptr->stat_lock_direct)); \
  } else \
    ptr = arena_get2(ptr, (size), NULL); \
} while(0)
#endif

/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
 ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
#define arena_for_chunk(ptr) \
 (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
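
/* These work because every non-main heap lives in a mapping that is
   both HEAP_MAX_SIZE bytes long and HEAP_MAX_SIZE-aligned (see
   new_heap below), so masking off the low bits of any chunk address
   inside such a heap yields the heap_info at the start of the mapping.
   Chunks of the main arena live in the brk region instead, which is
   why chunk_non_main_arena() is tested first.  */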

/**************************************************************************/

/* atfork support.  */

static __malloc_ptr_t (*save_malloc_hook) (size_t __size,
                                           const __malloc_ptr_t);
static void (*save_free_hook) (__malloc_ptr_t __ptr,
                               const __malloc_ptr_t);
static void* save_arena;

#ifdef ATFORK_MEM
ATFORK_MEM;
#endif

/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use.  */

#define ATFORK_ARENA_PTR ((void*)-1)

/* The following hooks are used while the `atfork' handling mechanism
   is active.  */

static void*
malloc_atfork(size_t sz, const void *caller)
{
  void *vptr = NULL;
  void *victim;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all.  */
    if(save_malloc_hook != malloc_check) {
      return _int_malloc(&main_arena, sz);
    } else {
      if(top_check()<0)
        return 0;
      victim = _int_malloc(&main_arena, sz+1);
      return mem2mem_check(victim, sz);
    }
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again.  */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return __libc_malloc(sz);
  }
}

static void
free_atfork(void* mem, const void *caller)
{
  void *vptr = NULL;
  mstate ar_ptr;
  mchunkptr p;          /* chunk corresponding to mem */

  if (mem == 0)         /* free(0) has no effect */
    return;

  p = mem2chunk(mem);   /* do not bother to replicate free_check here */

  if (chunk_is_mmapped(p))  /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  _int_free(ar_ptr, p, vptr == ATFORK_ARENA_PTR);
}


/* Counter for number of times the list is locked by the same thread.  */
static unsigned int atfork_recursive_cntr;

/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads).  */
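
/* Intended sequence: ptmalloc_lock_all runs in the parent just before
   fork() and takes every arena mutex plus list_lock, so no other thread
   can hold them across the fork; afterwards ptmalloc_unlock_all runs in
   the parent and ptmalloc_unlock_all2 in the child to release (or, in
   the child, re-initialize) those locks.  */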

static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (mutex_trylock(&list_lock))
    {
      void *my_arena;
      tsd_getspecific(arena_key, my_arena);
      if (my_arena == ATFORK_ARENA_PTR)
        /* This is the same thread which already locks the global list.
           Just bump the counter.  */
        goto out;

      /* This thread has to wait its turn.  */
      (void)mutex_lock(&list_lock);
    }
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now.  */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
 out:
  ++atfork_recursive_cntr;
}

static void
ptmalloc_unlock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (--atfork_recursive_cntr != 0)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}

#ifdef __linux__

/* In NPTL, unlocking a mutex in the child process after a
   fork() is currently unsafe, whereas re-initializing it is safe and
   does not leak resources.  Therefore, a special atfork handler is
   installed for the child.  */

static void
ptmalloc_unlock_all2 (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#ifdef PER_THREAD
  free_list = NULL;
#endif
  for(ar_ptr = &main_arena;;) {
    mutex_init(&ar_ptr->mutex);
#ifdef PER_THREAD
    if (ar_ptr != save_arena) {
      ar_ptr->next_free = free_list;
      free_list = ar_ptr;
    }
#endif
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  mutex_init(&list_lock);
  atfork_recursive_cntr = 0;
}

#else

#define ptmalloc_unlock_all2 ptmalloc_unlock_all

#endif

/* Initialization routine. */
#include <string.h>
extern char **_environ;

static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
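
/* For example (illustrative): given an environment entry
   "MALLOC_TRIM_THRESHOLD_=128", the returned pointer references
   "TRIM_THRESHOLD_=128", i.e. the text just past the "MALLOC_" prefix,
   and *position is advanced so the next call resumes after it.  */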


#ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif

static void
ptmalloc_init (void)
{
  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from a statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (void *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
  const char *s = NULL;
  if (__builtin_expect (_environ != NULL, 1))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt(M_TOP_PAD, atoi(&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt(M_PERTURB, atoi(&envline[9]));
                }
              break;
            case 9:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt(M_MMAP_MAX, atoi(&envline[10]));
#ifdef PER_THREAD
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt(M_ARENA_MAX, atoi(&envline[10]));
#endif
                }
              break;
#ifdef PER_THREAD
            case 10:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt(M_ARENA_TEST, atoi(&envline[11]));
                }
              break;
#endif
            case 15:
              if (! __builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt(M_TRIM_THRESHOLD, atoi(&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt(M_MMAP_THRESHOLD, atoi(&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if(s && s[0]) {
    __libc_mallopt(M_CHECK_ACTION, (int)(s[0] - '0'));
    if (check_action != 0)
      __malloc_check_init();
  }
  void (*hook) (void) = force_reg (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
  __malloc_initialized = 1;
}

/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
                     ptmalloc_unlock_all2)
#endif



/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
dump_heap(heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
  ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
                  ~MALLOC_ALIGN_MASK);
  for(;;) {
    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
    if(p == top(heap->ar_ptr)) {
      fprintf(stderr, " (top)\n");
      break;
    } else if(p->size == (0|PREV_INUSE)) {
      fprintf(stderr, " (fence)\n");
      break;
    }
    fprintf(stderr, "\n");
    p = next_chunk(p);
  }
}

#endif /* MALLOC_DEBUG > 1 */

/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  We need
   no locking for it, as kernel ensures the atomicity for us - worst case
   we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
   multiple threads, but only one will succeed.  */
static char *aligned_heap_area;

/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */
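
/* Alignment strategy (as implemented below): mmap'ing 2 * HEAP_MAX_SIZE
   bytes guarantees that a HEAP_MAX_SIZE-aligned address lies somewhere
   inside the mapping; the aligned HEAP_MAX_SIZE portion is kept and the
   excess on either side is unmapped.  The leftover aligned half, when
   there is one, is remembered in aligned_heap_area for the next call.  */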

static heap_info *
internal_function
new_heap(size_t size, size_t top_pad)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if(size+top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if(size+top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if(size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if(aligned_heap_area) {
    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                      MAP_NORESERVE);
    aligned_heap_area = NULL;
    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
      __munmap(p2, HEAP_MAX_SIZE);
      p2 = MAP_FAILED;
    }
  }
  if(p2 == MAP_FAILED) {
    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_NORESERVE);
    if(p1 != MAP_FAILED) {
      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
                    & ~(HEAP_MAX_SIZE-1));
      ul = p2 - p1;
      if (ul)
        __munmap(p1, ul);
      else
        aligned_heap_area = p2 + HEAP_MAX_SIZE;
      __munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
    } else {
      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
         is already aligned. */
      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
      if(p2 == MAP_FAILED)
        return 0;
      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
        __munmap(p2, HEAP_MAX_SIZE);
        return 0;
      }
    }
  }
  if(__mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
    __munmap(p2, HEAP_MAX_SIZE);
    return 0;
  }
  h = (heap_info *)p2;
  h->size = size;
  h->mprotect_size = size;
  THREAD_STAT(stat_n_heaps++);
  return h;
}

/* Grow a heap.  diff is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap(heap_info *h, long diff)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  long new_size;

  diff = (diff + page_mask) & ~page_mask;
  new_size = (long)h->size + diff;
  if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;
  if((unsigned long) new_size > h->mprotect_size) {
    if (__mprotect((char *)h + h->mprotect_size,
                   (unsigned long) new_size - h->mprotect_size,
                   PROT_READ|PROT_WRITE) != 0)
      return -2;
    h->mprotect_size = new_size;
  }

  h->size = new_size;
  return 0;
}

/* Shrink a heap. */

static int
shrink_heap(heap_info *h, long diff)
{
  long new_size;

  new_size = (long)h->size - diff;
  if(new_size < (long)sizeof(*h))
    return -1;
  /* Try to re-map the extra heap space freshly to save memory, and
     make it inaccessible. */
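  /* In the secure case the freed tail is replaced below with a fresh
     PROT_NONE mapping, so its previous contents become unreadable and
     h->mprotect_size is lowered accordingly; otherwise MADV_DONTNEED
     merely tells the kernel it may reclaim the pages while the
     read/write mapping stays in place, which is cheaper.  */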
cbf5760e 623 if (__builtin_expect (__libc_enable_secure, 0))
cbf5760e
UD
624 {
625 if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
3b49edc0 626 MAP_FIXED) == (char *) MAP_FAILED)
cbf5760e
UD
627 return -2;
628 h->mprotect_size = new_size;
629 }
cbf5760e
UD
630 else
631 madvise ((char *)h + new_size, diff, MADV_DONTNEED);
cbf5760e
UD
632 /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
633
fa8d436c
UD
634 h->size = new_size;
635 return 0;
636}
637
638/* Delete a heap. */
639
26d550d3
UD
640#define delete_heap(heap) \
641 do { \
642 if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area) \
643 aligned_heap_area = NULL; \
3b49edc0 644 __munmap((char*)(heap), HEAP_MAX_SIZE); \
26d550d3 645 } while (0)
fa8d436c
UD
646
647static int
648internal_function
fa8d436c 649heap_trim(heap_info *heap, size_t pad)
fa8d436c
UD
650{
651 mstate ar_ptr = heap->ar_ptr;
02d46fc4 652 unsigned long pagesz = GLRO(dl_pagesize);
fa8d436c
UD
653 mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
654 heap_info *prev_heap;
655 long new_size, top_size, extra;
656
657 /* Can this heap go away completely? */
658 while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
659 prev_heap = heap->prev;
660 p = chunk_at_offset(prev_heap, prev_heap->size - (MINSIZE-2*SIZE_SZ));
661 assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
662 p = prev_chunk(p);
663 new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ);
664 assert(new_size>0 && new_size<(long)(2*MINSIZE));
665 if(!prev_inuse(p))
666 new_size += p->prev_size;
667 assert(new_size>0 && new_size<HEAP_MAX_SIZE);
668 if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
669 break;
670 ar_ptr->system_mem -= heap->size;
671 arena_mem -= heap->size;
672 delete_heap(heap);
673 heap = prev_heap;
674 if(!prev_inuse(p)) { /* consolidate backward */
675 p = prev_chunk(p);
676 unlink(p, bck, fwd);
677 }
678 assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
679 assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
680 top(ar_ptr) = top_chunk = p;
681 set_head(top_chunk, new_size | PREV_INUSE);
682 /*check_chunk(ar_ptr, top_chunk);*/
683 }
684 top_size = chunksize(top_chunk);
b9b42ee0 685 extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
fa8d436c
UD
686 if(extra < (long)pagesz)
687 return 0;
688 /* Try to shrink. */
cbf5760e 689 if(shrink_heap(heap, extra) != 0)
fa8d436c
UD
690 return 0;
691 ar_ptr->system_mem -= extra;
692 arena_mem -= extra;
693
694 /* Success. Adjust top accordingly. */
695 set_head(top_chunk, (top_size - extra) | PREV_INUSE);
696 /*check_chunk(ar_ptr, top_chunk);*/
697 return 1;
698}

/* Create a new arena with initial size "size".  */

static mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
               mp_.top_pad);
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let _int_malloc() attempt
       to deal with the large request via mmap_chunk().  */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  tsd_setspecific(arena_key, (void *)a);
  mutex_init(&a->mutex);
  (void)mutex_lock(&a->mutex);

#ifdef PER_THREAD
  (void)mutex_lock(&list_lock);
#endif

  /* Add the new arena to the global list. */
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;

#ifdef PER_THREAD
  (void)mutex_unlock(&list_lock);
#endif

  THREAD_STAT(++(a->stat_lock_loop));

  return a;
}
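
/* Note: _int_new_arena returns with the new arena's mutex already held
   and the arena installed as the calling thread's thread-specific
   arena.  It is linked into the global list by writing main_arena.next
   last, after a write barrier, so that unlocked readers walking the
   list never observe a partially initialized arena.  */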

#ifdef PER_THREAD
static mstate
get_free_list (void)
{
  mstate result = free_list;
  if (result != NULL)
    {
      (void)mutex_lock(&list_lock);
      result = free_list;
      if (result != NULL)
        free_list = result->next_free;
      (void)mutex_unlock(&list_lock);

      if (result != NULL)
        {
          (void)mutex_lock(&result->mutex);
          tsd_setspecific(arena_key, (void *)result);
          THREAD_STAT(++(result->stat_lock_loop));
        }
    }

  return result;
}
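
/* The unlocked read of free_list above is only an optimization: if the
   list looks empty we skip taking list_lock entirely, and if it looks
   non-empty we re-read it under the lock, since another thread may
   have emptied it in the meantime.  */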

/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  result = next_to_use;
  do
    {
      if (!mutex_trylock(&result->mutex))
        goto out;

      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* No arena available.  Wait for the next in line.  */
  (void)mutex_lock(&result->mutex);

 out:
  tsd_setspecific(arena_key, (void *)result);
  THREAD_STAT(++(result->stat_lock_loop));
  next_to_use = result->next;

  return result;
}
#endif

static mstate
internal_function
arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
{
  mstate a;

#ifdef PER_THREAD
  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
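      /* Concretely: while narenas_limit is still 0 the subtraction below
         wraps around to SIZE_MAX, so the test always succeeds and another
         arena gets created; once narenas_limit holds a real limit, the
         test fails as soon as narenas reaches it.  */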
      if (__builtin_expect (n <= narenas_limit - 1, 0))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__builtin_expect (a == NULL, 0))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
#else
  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

  /* Check the global, circularly linked list for available arenas. */
  bool retried = false;
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      if (retried)
        (void)mutex_unlock(&list_lock);
      THREAD_STAT(++(a->stat_lock_loop));
      tsd_setspecific(arena_key, (void *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     locks. */
  if(!retried && mutex_trylock(&list_lock)) {
    /* We will block to not run in a busy loop.  */
    (void)mutex_lock(&list_lock);

    /* Since we blocked there might be an arena available now.  */
    retried = true;
    a = a_tsd;
    goto repeat;
  }

  /* Nothing immediately available, so generate a new arena.  */
  a = _int_new_arena(size);
  (void)mutex_unlock(&list_lock);
#endif

  return a;
}

#ifdef PER_THREAD
static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
{
  void *vptr = NULL;
  mstate a = tsd_getspecific(arena_key, vptr);
  tsd_setspecific(arena_key, NULL);

  if (a != NULL)
    {
      (void)mutex_lock(&list_lock);
      a->next_free = free_list;
      free_list = a;
      (void)mutex_unlock(&list_lock);
    }
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
#endif

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */