/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <sys/param.h>
#include <dl-sysdep.h>
#include <lowlevellock.h>
#include <futex-internal.h>
#include <kernel-features.h>
#include <stack-aliasing.h>
#ifndef NEED_SEPARATE_REGISTER_STACK

/* Most architectures have exactly one stack pointer.  Some have more.  */
# define STACK_VARIABLES void *stackaddr = NULL

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr

/* How to declare the function that gets these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) allocate_stack (attr, pd, &stackaddr)

#else

/* We need two stacks.  The kernel will place them but we have to tell
   the kernel about the size of the reserved address space.  */
# define STACK_VARIABLES void *stackaddr = NULL; size_t stacksize = 0

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr, stacksize

/* How to declare the function that gets these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr, size_t stacksize

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack, size_t *stacksize

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) \
  allocate_stack (attr, pd, &stackaddr, &stacksize)

#endif
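
/* Illustrative sketch (added for clarity, not part of the original file):
   a caller such as pthread_create is expected to combine the macros above
   roughly like

     STACK_VARIABLES;
     struct pthread *pd = NULL;
     int err = ALLOCATE_STACK (attr, &pd);
     ...
     create_thread (pd, attr, ..., STACK_VARIABLES_ARGS, ...);

   so that the single-stack and separate-register-stack configurations can
   share the same call sites.  */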
/* Default alignment of stack.  */
#ifndef STACK_ALIGN
# define STACK_ALIGN __alignof__ (long double)
#endif

/* Default value for minimal stack size after allocating thread
   descriptor and guard.  */
#ifndef MINIMAL_REST_STACK
# define MINIMAL_REST_STACK 4096
#endif


/* Newer kernels have the MAP_STACK flag to indicate a mapping is used for
   a stack.  Use it when possible.  */
#ifndef MAP_STACK
# define MAP_STACK 0
#endif
/* This yields the pointer that TLS support code calls the thread pointer.  */
#if TLS_TCB_AT_TP
# define TLS_TPADJ(pd) (pd)
#elif TLS_DTV_AT_TP
# define TLS_TPADJ(pd) ((struct pthread *) ((char *) (pd) + TLS_PRE_TCB_SIZE))
#endif
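
/* Note (added for clarity): on TLS_TCB_AT_TP targets the thread pointer
   points at the thread descriptor itself, so no adjustment is needed; on
   TLS_DTV_AT_TP targets the descriptor sits TLS_PRE_TCB_SIZE bytes below
   the thread pointer, hence the offset above.  */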
/* Cache handling for not-yet free stacks.  */

/* Maximum size in bytes of the stack cache.  */
static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40 MiB by default.  */
static size_t stack_cache_actsize;

/* Mutex protecting the stack cache and the lists below.  */
static int stack_cache_lock = LLL_LOCK_INITIALIZER;

/* List of queued stack frames.  */
static LIST_HEAD (stack_cache);

/* List of the stacks in use.  */
static LIST_HEAD (stack_used);

/* We need to record what list operations we are going to do so that,
   in case of an asynchronous interruption due to a fork() call, we
   can correct for the work.  */
static uintptr_t in_flight_stack;

/* List of the threads with user provided stacks in use.  No need to
   initialize this, since it's done in __pthread_initialize_minimal.  */
list_t __stack_user __attribute__ ((nocommon));
hidden_data_def (__stack_user)


/* Check whether the stack is still used or not.  */
#define FREE_P(descr) ((descr)->tid <= 0)
static void
stack_list_del (list_t *elem)
{
  in_flight_stack = (uintptr_t) elem;

  atomic_write_barrier ();

  list_del (elem);

  atomic_write_barrier ();

  in_flight_stack = 0;
}


static void
stack_list_add (list_t *elem, list_t *list)
{
  in_flight_stack = (uintptr_t) elem | 1;

  atomic_write_barrier ();

  list_add (elem, list);

  atomic_write_barrier ();

  in_flight_stack = 0;
}
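
/* Note (added for clarity): IN_FLIGHT_STACK encodes both the element being
   manipulated and the kind of operation: the low bit is set for an add and
   clear for a delete.  If fork() interrupts one of the helpers above,
   __reclaim_stacks below uses this value to finish or undo the half-done
   list operation in the child.  */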
/* We create a doubly linked list of all cache entries.  Doubly linked
   because this allows removing entries from the end.  */


/* Get a stack frame from the cache.  We have to match by size since
   some blocks might be too small or far too large.  */
static struct pthread *
get_cached_stack (size_t *sizep, void **memp)
{
  size_t size = *sizep;
  struct pthread *result = NULL;
  list_t *entry;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Search the cache for a matching entry.  We search for the
     smallest stack which has at least the required size.  Note that
     in normal situations the size of all allocated stacks is the
     same.  At the very least there are only a few different sizes.
     Therefore this loop will exit early most of the time with an
     exact match.  */
  list_for_each (entry, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (FREE_P (curr) && curr->stackblock_size >= size)
        {
          if (curr->stackblock_size == size)
            {
              result = curr;
              break;
            }

          if (result == NULL
              || result->stackblock_size > curr->stackblock_size)
            result = curr;
        }
    }

  if (__builtin_expect (result == NULL, 0)
      /* Make sure the size difference is not too excessive.  In that
         case we do not use the block.  */
      || __builtin_expect (result->stackblock_size > 4 * size, 0))
    {
      /* Release the lock.  */
      lll_unlock (stack_cache_lock, LLL_PRIVATE);

      return NULL;
    }

  /* Don't allow setxid until cloned.  */
  result->setxid_futex = -1;

  /* Dequeue the entry.  */
  stack_list_del (&result->list);

  /* And add to the list of stacks in use.  */
  stack_list_add (&result->list, &stack_used);

  /* And decrease the cache size.  */
  stack_cache_actsize -= result->stackblock_size;

  /* Release the lock early.  */
  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  /* Report size and location of the stack to the caller.  */
  *sizep = result->stackblock_size;
  *memp = result->stackblock;

  /* Cancellation handling is back to the default.  */
  result->cancelhandling = 0;
  result->cleanup = NULL;

  /* No pending event.  */
  result->nextevent = NULL;

  result->tls_state = (struct tls_internal_t) { 0 };

  /* Clear the DTV.  */
  dtv_t *dtv = GET_DTV (TLS_TPADJ (result));
  for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
    free (dtv[1 + cnt].pointer.to_free);
  memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t));

  /* Re-initialize the TLS.  */
  _dl_allocate_tls_init (TLS_TPADJ (result));

  return result;
}
/* Free stacks until cache size is lower than LIMIT.  */
static void
free_stacks (size_t limit)
{
  /* We reduce the size of the cache.  Remove the last entries until
     the size is below the limit.  */
  list_t *entry;
  list_t *prev;

  /* Search from the end of the list.  */
  list_for_each_prev_safe (entry, prev, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (FREE_P (curr))
        {
          /* Unlink the block.  */
          stack_list_del (entry);

          /* Account for the freed memory.  */
          stack_cache_actsize -= curr->stackblock_size;

          /* Free the memory associated with the ELF TLS.  */
          _dl_deallocate_tls (TLS_TPADJ (curr), false);

          /* Remove this block.  This should never fail.  If it does
             something is really wrong.  */
          if (__munmap (curr->stackblock, curr->stackblock_size) != 0)
            abort ();

          /* Maybe we have freed enough.  */
          if (stack_cache_actsize <= limit)
            break;
        }
    }
}

/* Free all the stacks on cleanup.  */
void
__nptl_stacks_freeres (void)
{
  free_stacks (0);
}
/* Add a stack which is not used anymore to the cache.  Must be
   called with the cache lock held.  */
static inline void
__attribute ((always_inline))
queue_stack (struct pthread *stack)
{
  /* We unconditionally add the stack to the list.  The memory may
     still be in use but it will not be reused until the kernel marks
     the stack as not used anymore.  */
  stack_list_add (&stack->list, &stack_cache);

  stack_cache_actsize += stack->stackblock_size;
  if (__glibc_unlikely (stack_cache_actsize > stack_cache_maxsize))
    free_stacks (stack_cache_maxsize);
}
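
/* Worked example (added for clarity, numbers are hypothetical): with the
   default stack_cache_maxsize of 40 MiB and threads using 8 MiB stacks, at
   most five stacks of exited-and-joined threads stay cached; queueing a
   sixth pushes stack_cache_actsize over the limit and free_stacks trims the
   oldest free entries until the cache is back under 40 MiB.  */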
static int
change_stack_perm (struct pthread *pd
#ifdef NEED_SEPARATE_REGISTER_STACK
                   , size_t pagemask
#endif
                   )
{
#ifdef NEED_SEPARATE_REGISTER_STACK
  void *stack = (pd->stackblock
                 + (((((pd->stackblock_size - pd->guardsize) / 2)
                      & pagemask) + pd->guardsize) & pagemask));
  size_t len = pd->stackblock + pd->stackblock_size - stack;
#elif _STACK_GROWS_DOWN
  void *stack = pd->stackblock + pd->guardsize;
  size_t len = pd->stackblock_size - pd->guardsize;
#elif _STACK_GROWS_UP
  void *stack = pd->stackblock;
  size_t len = (uintptr_t) pd - pd->guardsize - (uintptr_t) pd->stackblock;
#else
# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
#endif
  if (__mprotect (stack, len, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
    return errno;

  return 0;
}
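
/* Note (added for clarity): NEED_SEPARATE_REGISTER_STACK is historically an
   ia64 configuration in which one mapping holds the ordinary stack in its
   lower half and the register backing store in its upper half, which is why
   the computation above starts from the middle of the block.  */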
/* Return the guard page position on allocated stack.  */
static inline char *
__attribute ((always_inline))
guard_position (void *mem, size_t size, size_t guardsize, struct pthread *pd,
                size_t pagesize_m1)
{
#ifdef NEED_SEPARATE_REGISTER_STACK
  return mem + (((size - guardsize) / 2) & ~pagesize_m1);
#elif _STACK_GROWS_DOWN
  return mem;
#elif _STACK_GROWS_UP
  return (char *) (((uintptr_t) pd - guardsize) & ~pagesize_m1);
#endif
}
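
/* Worked example (added for clarity, addresses are hypothetical): on a
   target where the stack grows down, the guard is simply the lowest part of
   the mapping.  With mem = 0x7f0000000000, size = 8 MiB and
   guardsize = 64 KiB, the guard covers [mem, mem + 64 KiB) and the usable
   stack plus TLS/TCB occupy the remaining 8 MiB - 64 KiB above it.  */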
/* Based on stack allocated with PROT_NONE, set up the required portions with
   'prot' flags based on the guard page position.  */
static inline int
setup_stack_prot (char *mem, size_t size, char *guard, size_t guardsize,
                  const int prot)
{
  char *guardend = guard + guardsize;
#if _STACK_GROWS_DOWN && !defined(NEED_SEPARATE_REGISTER_STACK)
  /* As defined at guard_position, for architectures with downward stack
     the guard page is always at the start of the allocated area.  */
  if (__mprotect (guardend, size - guardsize, prot) != 0)
    return errno;
#else
  size_t mprots1 = (uintptr_t) guard - (uintptr_t) mem;
  if (__mprotect (mem, mprots1, prot) != 0)
    return errno;
  size_t mprots2 = ((uintptr_t) mem + size) - (uintptr_t) guardend;
  if (__mprotect (guardend, mprots2, prot) != 0)
    return errno;
#endif
  return 0;
}
/* Tell the kernel that the stack memory, except for the space used by the
   TCB and a minimal working area, does not need to be preserved.  */
static __always_inline void
advise_stack_range (void *mem, size_t size, uintptr_t pd, size_t guardsize)
{
  uintptr_t sp = (uintptr_t) CURRENT_STACK_FRAME;
  size_t pagesize_m1 = __getpagesize () - 1;
#if _STACK_GROWS_DOWN && !defined(NEED_SEPARATE_REGISTER_STACK)
  size_t freesize = (sp - (uintptr_t) mem) & ~pagesize_m1;
  assert (freesize < size);
  if (freesize > PTHREAD_STACK_MIN)
    __madvise (mem, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);
#else
  /* Page aligned start of memory to free (higher than or equal
     to current sp plus the minimum stack size).  */
  uintptr_t freeblock = (sp + PTHREAD_STACK_MIN + pagesize_m1) & ~pagesize_m1;
  uintptr_t free_end = (pd - guardsize) & ~pagesize_m1;
  if (free_end > freeblock)
    {
      size_t freesize = free_end - freeblock;
      assert (freesize < size);
      __madvise ((void *) freeblock, freesize, MADV_DONTNEED);
    }
#endif
}
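
/* Worked example (added for clarity, numbers are hypothetical): with a
   downward-growing stack whose current stack pointer sits 5 MiB above mem,
   freesize is about 5 MiB, so roughly 5 MiB - PTHREAD_STACK_MIN starting at
   mem is handed to __madvise (MADV_DONTNEED); only the PTHREAD_STACK_MIN
   bytes just below the stack pointer and everything above it are left
   untouched.  */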
/* Returns a usable stack for a new thread either by allocating a
   new stack or reusing a cached stack of sufficient size.
   ATTR must be non-NULL and point to a valid pthread_attr.
   PDP must be non-NULL.  */
static int
allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
                ALLOCATE_STACK_PARMS)
{
  struct pthread *pd;
  size_t size;
  size_t pagesize_m1 = __getpagesize () - 1;

  assert (powerof2 (pagesize_m1 + 1));
  assert (TCB_ALIGNMENT >= STACK_ALIGN);
  /* Get the stack size from the attribute if it is set.  Otherwise we
     use the default we determined at start time.  */
  if (attr->stacksize != 0)
    size = attr->stacksize;
  else
    {
      lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
      size = __default_pthread_attr.internal.stacksize;
      lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
    }
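
  /* Note (added for clarity): the default stack size read above is the one
     stored in __default_pthread_attr, which is typically derived from
     RLIMIT_STACK during libc startup; a per-thread value supplied through
     pthread_attr_setstacksize takes the first branch instead.  */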
  /* Get memory for the stack.  */
  if (__glibc_unlikely (attr->flags & ATTR_FLAG_STACKADDR))
    {
      uintptr_t adj;
      char *stackaddr = (char *) attr->stackaddr;

      /* Assume the same layout as the _STACK_GROWS_DOWN case, with struct
         pthread at the top of the stack block.  Later we adjust the guard
         location and stack address to match the _STACK_GROWS_UP case.  */
      if (_STACK_GROWS_UP)
        stackaddr += attr->stacksize;

      /* If the user also specified the size of the stack make sure it
         is large enough.  */
      if (attr->stacksize != 0
          && attr->stacksize < (__static_tls_size + MINIMAL_REST_STACK))
        return EINVAL;

      /* Adjust stack size for alignment of the TLS block.  */
#if TLS_TCB_AT_TP
      adj = ((uintptr_t) stackaddr - TLS_TCB_SIZE)
            & __static_tls_align_m1;
      assert (size > adj + TLS_TCB_SIZE);
#elif TLS_DTV_AT_TP
      adj = ((uintptr_t) stackaddr - __static_tls_size)
            & __static_tls_align_m1;
      assert (size > adj);
#endif

      /* The user provided some memory.  Let's hope it matches the
         size...  We do not allocate guard pages if the user provided
         the stack.  It is the user's responsibility to do this if it
         is wanted.  */
#if TLS_TCB_AT_TP
      pd = (struct pthread *) ((uintptr_t) stackaddr
                               - TLS_TCB_SIZE - adj);
#elif TLS_DTV_AT_TP
      pd = (struct pthread *) (((uintptr_t) stackaddr
                                - __static_tls_size - adj)
                               - TLS_PRE_TCB_SIZE);
#endif

      /* The user-provided stack memory needs to be cleared.  */
      memset (pd, '\0', sizeof (struct pthread));

      /* The first TSD block is included in the TCB.  */
      pd->specific[0] = pd->specific_1stblock;

      /* Remember the stack-related values.  */
      pd->stackblock = (char *) stackaddr - size;
      pd->stackblock_size = size;

      /* This is a user-provided stack.  It will not be queued in the
         stack cache nor will the memory (except the TLS memory) be freed.  */
      pd->user_stack = true;

      /* This is at least the second thread.  */
      pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
      __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifdef NEED_DL_SYSINFO
      SETUP_THREAD_SYSINFO (pd);
#endif

      /* Don't allow setxid until cloned.  */
      pd->setxid_futex = -1;

      /* Allocate the DTV for this thread.  */
      if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
        {
          /* Something went wrong.  */
          assert (errno == ENOMEM);
          return errno;
        }

      /* Prepare to modify global data.  */
      lll_lock (stack_cache_lock, LLL_PRIVATE);

      /* And add to the list of stacks in use.  */
      list_add (&pd->list, &__stack_user);

      lll_unlock (stack_cache_lock, LLL_PRIVATE);
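
      /* Illustrative sketch (added for clarity, not part of the original
         file): this branch is reached when the application supplied its own
         stack, e.g. roughly

           void *mem = mmap (NULL, 1 << 20, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
           pthread_attr_setstack (&attr, mem, 1 << 20);

         in which case no guard page is created here and the memory is never
         queued in the stack cache.  */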
    }
  else
    {
      /* Allocate some anonymous memory.  If possible use the cache.  */
      size_t guardsize;
      size_t reported_guardsize;
      size_t reqsize;
      void *mem;
      const int prot = (PROT_READ | PROT_WRITE
                        | ((GL(dl_stack_flags) & PF_X) ? PROT_EXEC : 0));

      /* Adjust the stack size for alignment.  */
      size &= ~__static_tls_align_m1;
      assert (size != 0);

      /* Make sure the size of the stack is enough for the guard and
         possibly the thread descriptor.  On some targets there is
         a minimum guard size requirement, ARCH_MIN_GUARD_SIZE, so
         internally enforce it (unless the guard was disabled), but
         report the original guard size for backward compatibility:
         before POSIX 2008 the guardsize was specified to be one page
         by default which is observable via pthread_attr_getguardsize
         and pthread_getattr_np.  */
      guardsize = (attr->guardsize + pagesize_m1) & ~pagesize_m1;
      reported_guardsize = guardsize;
      if (guardsize > 0 && guardsize < ARCH_MIN_GUARD_SIZE)
        guardsize = ARCH_MIN_GUARD_SIZE;
      if (guardsize < attr->guardsize || size + guardsize < guardsize)
        /* Arithmetic overflow.  */
        return EINVAL;
      size += guardsize;
      if (__builtin_expect (size < ((guardsize + __static_tls_size
                                     + MINIMAL_REST_STACK + pagesize_m1)
                                    & ~pagesize_m1),
                            0))
        /* The stack is too small (or the guard too large).  */
        return EINVAL;
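
      /* Worked example (added for clarity, numbers are hypothetical): a
         request of attr->guardsize = 1 byte rounds up to one 4 KiB page and
         that is what reported_guardsize keeps; if ARCH_MIN_GUARD_SIZE is
         64 KiB on the target, the guard actually placed grows to 64 KiB
         while pthread_attr_getguardsize still reports 4 KiB.  */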
      /* Try to get a stack from the cache.  */
      reqsize = size;
      pd = get_cached_stack (&size, &mem);
      if (pd == NULL)
        {
          /* To avoid aliasing effects on a larger scale than pages we
             adjust the allocated stack size if necessary.  This way
             allocations directly following each other will not have
             aliasing problems.  */
#if MULTI_PAGE_ALIASING != 0
          if ((size % MULTI_PAGE_ALIASING) == 0)
            size += pagesize_m1 + 1;
#endif
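
          /* Note (added for clarity): MULTI_PAGE_ALIASING comes from
             <stack-aliasing.h>.  Where it is non-zero (64 KiB on some
             targets), stacks whose size is an exact multiple of it are
             grown by one page so that hot stack addresses in different
             threads do not all map to the same cache or TLB sets.  */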
          /* If a guard page is required, avoid committing memory by first
             allocating with PROT_NONE and then reserving with the required
             permission excluding the guard page.  */
          mem = __mmap (NULL, size, (guardsize == 0) ? prot : PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

          if (__glibc_unlikely (mem == MAP_FAILED))
            return errno;

          /* SIZE is guaranteed to be greater than zero.
             So we can never get a null pointer back from mmap.  */
          assert (mem != NULL);

          /* Place the thread descriptor at the end of the stack.  */
#if TLS_TCB_AT_TP
          pd = (struct pthread *) ((((uintptr_t) mem + size)
                                    - TLS_TCB_SIZE)
                                   & ~__static_tls_align_m1);
#elif TLS_DTV_AT_TP
          pd = (struct pthread *) ((((uintptr_t) mem + size
                                     - __static_tls_size)
                                    & ~__static_tls_align_m1)
                                   - TLS_PRE_TCB_SIZE);
#endif

          /* Now mprotect the required region excluding the guard area.  */
          if (__glibc_likely (guardsize > 0))
            {
              char *guard = guard_position (mem, size, guardsize, pd,
                                            pagesize_m1);
              if (setup_stack_prot (mem, size, guard, guardsize, prot) != 0)
                {
                  __munmap (mem, size);
                  return errno;
                }
            }
          /* Remember the stack-related values.  */
          pd->stackblock = mem;
          pd->stackblock_size = size;
          /* Record the guard size of the newly allocated stack to avoid
             an extra mprotect in the guard resize code below.  */
          pd->guardsize = guardsize;

          /* We allocated the first block of the thread-specific data
             array.  This address will not change for the lifetime of
             this descriptor.  */
          pd->specific[0] = pd->specific_1stblock;

          /* This is at least the second thread.  */
          pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
          __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifdef NEED_DL_SYSINFO
          SETUP_THREAD_SYSINFO (pd);
#endif

          /* Don't allow setxid until cloned.  */
          pd->setxid_futex = -1;

          /* Allocate the DTV for this thread.  */
          if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
            {
              /* Something went wrong.  */
              assert (errno == ENOMEM);

              /* Free the stack memory we just allocated.  */
              (void) __munmap (mem, size);

              return errno;
            }

          /* Prepare to modify global data.  */
          lll_lock (stack_cache_lock, LLL_PRIVATE);

          /* And add to the list of stacks in use.  */
          stack_list_add (&pd->list, &stack_used);

          lll_unlock (stack_cache_lock, LLL_PRIVATE);
          /* There might have been a race.  Another thread might have
             caused the stacks to get exec permission while this new
             stack was prepared.  Detect if this was possible and
             change the permission if necessary.  */
          if (__builtin_expect ((GL(dl_stack_flags) & PF_X) != 0
                                && (prot & PROT_EXEC) == 0, 0))
            {
              int err = change_stack_perm (pd
#ifdef NEED_SEPARATE_REGISTER_STACK
                                           , ~pagesize_m1
#endif
                                           );
              if (err != 0)
                {
                  /* Free the stack memory we just allocated.  */
                  (void) __munmap (mem, size);

                  return err;
                }
            }

          /* Note that all of the stack and the thread descriptor are
             zeroed.  This means we do not have to initialize fields
             with initial value zero.  This is specifically true for
             the 'tid' field which is always set back to zero once the
             stack is not used anymore and for the 'guardsize' field
             which will be read next.  */
        }
      /* Create or resize the guard area if necessary.  */
      if (__glibc_unlikely (guardsize > pd->guardsize))
        {
          char *guard = guard_position (mem, size, guardsize, pd,
                                        pagesize_m1);
          if (__mprotect (guard, guardsize, PROT_NONE) != 0)
            {
            mprot_error:
              lll_lock (stack_cache_lock, LLL_PRIVATE);

              /* Remove the thread from the list.  */
              stack_list_del (&pd->list);

              lll_unlock (stack_cache_lock, LLL_PRIVATE);

              /* Get rid of the TLS block we allocated.  */
              _dl_deallocate_tls (TLS_TPADJ (pd), false);

              /* Free the stack memory regardless of whether the size
                 of the cache is over the limit or not.  If this piece
                 of memory caused problems we better do not use it
                 anymore.  Uh, and we ignore possible errors.  There
                 is nothing we could do.  */
              (void) __munmap (mem, size);

              return errno;
            }

          pd->guardsize = guardsize;
        }
      else if (__builtin_expect (pd->guardsize - guardsize > size - reqsize,
                                 0))
        {
          /* The old guard area is too large.  */

#ifdef NEED_SEPARATE_REGISTER_STACK
          char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
          char *oldguard = mem + (((size - pd->guardsize) / 2) & ~pagesize_m1);

          if (oldguard < guard
              && __mprotect (oldguard, guard - oldguard, prot) != 0)
            goto mprot_error;

          if (__mprotect (guard + guardsize,
                          oldguard + pd->guardsize - guard - guardsize,
                          prot) != 0)
            goto mprot_error;
#elif _STACK_GROWS_DOWN
          if (__mprotect ((char *) mem + guardsize, pd->guardsize - guardsize,
                          prot) != 0)
            goto mprot_error;
#elif _STACK_GROWS_UP
          char *new_guard = (char *) (((uintptr_t) pd - guardsize)
                                      & ~pagesize_m1);
          char *old_guard = (char *) (((uintptr_t) pd - pd->guardsize)
                                      & ~pagesize_m1);
          /* The guard size difference might be > 0, but once rounded
             to the nearest page the size difference might be zero.  */
          if (new_guard > old_guard
              && __mprotect (old_guard, new_guard - old_guard, prot) != 0)
            goto mprot_error;
#endif

          pd->guardsize = guardsize;
        }
      /* Callers of pthread_getattr_np() need to see the size requested in
         the attribute, regardless of how large the guard actually in use
         is.  */
      pd->reported_guardsize = reported_guardsize;
    }
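
  /* Note (added for clarity): reported_guardsize is the value later
     surfaced through pthread_getattr_np and pthread_attr_getguardsize, so
     applications see exactly what they asked for even when
     ARCH_MIN_GUARD_SIZE forced a larger guard internally.  */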
  /* Initialize the lock.  We have to do this unconditionally since the
     stillborn thread could be canceled while the lock is taken.  */
  pd->lock = LLL_LOCK_INITIALIZER;

  /* The robust mutex lists also need to be initialized
     unconditionally because the cleanup for the previous stack owner
     might have happened in the kernel.  */
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                  - offsetof (pthread_mutex_t,
                                              __data.__list.__next));
  pd->robust_head.list_op_pending = NULL;
#if __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;
  /* We place the thread descriptor at the end of the stack.  */
  *pdp = pd;

#if _STACK_GROWS_DOWN
  void *stacktop;

# if TLS_TCB_AT_TP
  /* The stack begins before the TCB and the static TLS block.  */
  stacktop = ((char *) (pd + 1) - __static_tls_size);
# elif TLS_DTV_AT_TP
  stacktop = (char *) (pd - 1);
# endif

# ifdef NEED_SEPARATE_REGISTER_STACK
  *stack = pd->stackblock;
  *stacksize = stacktop - *stack;
# else
  *stack = stacktop;
# endif
#else
  *stack = pd->stackblock;
#endif

  return 0;
}
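
/* Layout sketch (added for clarity) for the common case of a downward
   growing stack with the TCB at the thread pointer:

     mem                                                      mem + size
     | guard (PROT_NONE) | usable stack ... | static TLS | struct pthread |

   The caller receives 'stacktop' (the end of the usable stack) through the
   STACK_VARIABLES machinery and the descriptor PD, which doubles as the
   TCB, through *PDP.  */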
void
__deallocate_stack (struct pthread *pd)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Remove the thread from whichever list it is on.  */
  stack_list_del (&pd->list);

  /* Not much to do.  Just free the mmap()ed memory.  Note that we do
     not reset the 'used' flag in the 'tid' field.  This is done by
     the kernel.  If no thread has been created yet this field is
     still zero.  */
  if (__glibc_likely (! pd->user_stack))
    (void) queue_stack (pd);
  else
    /* Free the memory associated with the ELF TLS.  */
    _dl_deallocate_tls (TLS_TPADJ (pd), false);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
int
__make_stacks_executable (void **stack_endp)
{
  /* First the main thread's stack.  */
  int err = _dl_make_stack_executable (stack_endp);
  if (err != 0)
    return err;

#ifdef NEED_SEPARATE_REGISTER_STACK
  const size_t pagemask = ~(__getpagesize () - 1);
#endif

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                               , pagemask
#endif
                               );
      if (err != 0)
        break;
    }

  /* Also change the permission for the currently unused stacks.  This
     might be wasted time but it is better to spend it here than to add
     a check in the fast path.  */
  if (err == 0)
    list_for_each (runp, &stack_cache)
      {
        err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                                 , pagemask
#endif
                                 );
        if (err != 0)
          break;
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return err;
}
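
/* Note (added for clarity): this is typically reached when dlopen loads an
   object whose PT_GNU_STACK program header demands an executable stack, so
   every existing thread stack, in use or cached, has to be re-mprotected
   with PROT_EXEC rather than just stacks created afterwards.  */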
/* In case of a fork() call the memory allocation in the child will be
   the same but only one thread is running.  All stacks except that of
   the one running thread are not used anymore.  We have to recycle
   them.  */
void
__reclaim_stacks (void)
{
  struct pthread *self = (struct pthread *) THREAD_SELF;

  /* No locking necessary.  The caller is the only thread running.  But
     we have to be aware that we might have interrupted a list
     operation.  */

  if (in_flight_stack != 0)
    {
      bool add_p = in_flight_stack & 1;
      list_t *elem = (list_t *) (in_flight_stack & ~(uintptr_t) 1);

      if (add_p)
        {
          /* We always add at the beginning of the list.  So in this case we
             only need to check the beginning of these lists to see if the
             pointers at the head of the list are inconsistent.  */
          list_t *l = NULL;

          if (stack_used.next->prev != &stack_used)
            l = &stack_used;
          else if (stack_cache.next->prev != &stack_cache)
            l = &stack_cache;

          if (l != NULL)
            {
              assert (l->next->prev == elem);
              elem->next = l->next;
              elem->prev = l;
              l->next = elem;
            }
        }
      else
        {
          /* We can simply always replay the delete operation.  */
          elem->next->prev = elem->prev;
          elem->prev->next = elem->next;
        }
    }
  /* Mark all stacks except the still running one as free.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp = list_entry (runp, struct pthread, list);
      if (curp != self)
        {
          /* This marks the stack as free.  */
          curp->tid = 0;

          /* Account for the size of the stack.  */
          stack_cache_actsize += curp->stackblock_size;

          if (curp->specific_used)
            {
              /* Clear the thread-specific data.  */
              memset (curp->specific_1stblock, '\0',
                      sizeof (curp->specific_1stblock));

              curp->specific_used = false;

              for (size_t cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
                if (curp->specific[cnt] != NULL)
                  {
                    memset (curp->specific[cnt], '\0',
                            sizeof (curp->specific_1stblock));

                    /* We have allocated the block which we do not
                       free here so re-set the bit.  */
                    curp->specific_used = true;
                  }
            }
        }
    }

  /* Add the stacks of all running threads to the cache.  */
  list_splice (&stack_used, &stack_cache);

  /* Remove the entry for the current thread from the cache list
     and add it to the list of running threads.  Which of the two
     lists is decided by the user_stack flag.  */
  stack_list_del (&self->list);

  /* Re-initialize the lists for all the threads.  */
  INIT_LIST_HEAD (&stack_used);
  INIT_LIST_HEAD (&__stack_user);

  if (__glibc_unlikely (THREAD_GETMEM (self, user_stack)))
    list_add (&self->list, &__stack_user);
  else
    list_add (&self->list, &stack_used);

  /* There is one thread running.  */
  __nptl_nthreads = 1;

  in_flight_stack = 0;

  /* Initialize locks.  */
  stack_cache_lock = LLL_LOCK_INITIALIZER;
  __default_pthread_attr_lock = LLL_LOCK_INITIALIZER;
}
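
/* Note (added for clarity): __reclaim_stacks is meant to run in the child
   process right after fork(), where only the forking thread exists; that is
   why it can rewrite the lists and re-initialize the locks without taking
   stack_cache_lock.  */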
static void
setxid_mark_thread (struct xid_command *cmdp, struct pthread *t)
{
  int ch;

  /* Wait until this thread is cloned.  */
  if (t->setxid_futex == -1
      && ! atomic_compare_and_exchange_bool_acq (&t->setxid_futex, -2, -1))
    do
      futex_wait_simple (&t->setxid_futex, -2, FUTEX_PRIVATE);
    while (t->setxid_futex == -2);

  /* Don't let the thread exit before the setxid handler runs.  */
  t->setxid_futex = 0;

  do
    {
      ch = t->cancelhandling;

      /* If the thread is exiting right now, ignore it.  */
      if ((ch & EXITING_BITMASK) != 0)
        {
          /* Release the futex if there is no other setxid in
             progress.  */
          if ((ch & SETXID_BITMASK) == 0)
            {
              t->setxid_futex = 1;
              futex_wake (&t->setxid_futex, 1, FUTEX_PRIVATE);
            }
          return;
        }
    }
  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
                                               ch | SETXID_BITMASK, ch));
}
static void
setxid_unmark_thread (struct xid_command *cmdp, struct pthread *t)
{
  int ch;

  do
    {
      ch = t->cancelhandling;
      if ((ch & SETXID_BITMASK) == 0)
        return;
    }
  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
                                               ch & ~SETXID_BITMASK, ch));

  /* Release the futex just in case.  */
  t->setxid_futex = 1;
  futex_wake (&t->setxid_futex, 1, FUTEX_PRIVATE);
}
static int
setxid_signal_thread (struct xid_command *cmdp, struct pthread *t)
{
  if ((t->cancelhandling & SETXID_BITMASK) == 0)
    return 0;

  int val;
  pid_t pid = __getpid ();
  val = INTERNAL_SYSCALL_CALL (tgkill, pid, t->tid, SIGSETXID);

  /* If this failed, the thread must not have started yet or has
     already exited.  */
  if (!INTERNAL_SYSCALL_ERROR_P (val))
    {
      atomic_increment (&cmdp->cntr);
      return 1;
    }

  return 0;
}
/* Check for consistency across set*id system call results.  The abort
   should not happen as long as all privilege changes happen through
   the glibc wrappers.  ERROR must be 0 (no error) or an errno
   code.  */
void
__nptl_setxid_error (struct xid_command *cmdp, int error)
{
  do
    {
      int olderror = cmdp->error;
      if (olderror == error)
        break;

      if (olderror != -1)
        {
          /* Mismatch between current and previous results.  Save the
             error value to memory so that it is not clobbered by the
             abort function and is preserved in coredumps.  */
          volatile int xid_err __attribute__ ((unused)) = error;
          abort ();
        }
    }
  while (atomic_compare_and_exchange_bool_acq (&cmdp->error, error, -1));
}
int
__nptl_setxid (struct xid_command *cmdp)
{
  int signalled;
  int result;
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  __xidcmd = cmdp;
  cmdp->cntr = 0;
  cmdp->error = -1;

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_mark_thread (cmdp, t);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_mark_thread (cmdp, t);
    }

  /* Iterate until we don't succeed in signalling anyone.  That means
     we have gotten all running threads, and their children will be
     automatically correct once started.  */
  do
    {
      signalled = 0;

      list_for_each (runp, &stack_used)
        {
          struct pthread *t = list_entry (runp, struct pthread, list);
          if (t == self)
            continue;

          signalled += setxid_signal_thread (cmdp, t);
        }

      list_for_each (runp, &__stack_user)
        {
          struct pthread *t = list_entry (runp, struct pthread, list);
          if (t == self)
            continue;

          signalled += setxid_signal_thread (cmdp, t);
        }

      int cur = cmdp->cntr;
      while (cur != 0)
        {
          futex_wait_simple ((unsigned int *) &cmdp->cntr, cur,
                             FUTEX_PRIVATE);
          cur = cmdp->cntr;
        }
    }
  while (signalled != 0);

  /* Clean up flags, so that no thread blocks during exit waiting
     for a signal which will never come.  */
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_unmark_thread (cmdp, t);
    }

  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_unmark_thread (cmdp, t);
    }

  /* This must be last, otherwise the current thread might not have
     permission to send the SIGSETXID signal to the other threads.  */
  int error = 0;
  result = INTERNAL_SYSCALL_NCS (cmdp->syscall_no, 3,
                                 cmdp->id[0], cmdp->id[1], cmdp->id[2]);
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result)))
    {
      error = INTERNAL_SYSCALL_ERRNO (result);
      __set_errno (error);
      result = -1;
    }
  __nptl_setxid_error (cmdp, error);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
  return result;
}
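
/* Note (added for clarity): this broadcast exists because the Linux kernel
   applies set*id system calls per task (per thread), while POSIX requires
   the credentials of the whole process to change; SIGSETXID is glibc's
   internal signal that makes every other thread repeat the same syscall
   before the calling thread finally performs it itself.  */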
static inline void __attribute__ ((always_inline))
init_one_static_tls (struct pthread *curp, struct link_map *map)
{
# if TLS_TCB_AT_TP
  void *dest = (char *) curp - map->l_tls_offset;
# elif TLS_DTV_AT_TP
  void *dest = (char *) curp + map->l_tls_offset + TLS_PRE_TCB_SIZE;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* Initialize the memory.  */
  memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
          '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
}
void
__pthread_init_static_tls (struct link_map *map)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
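
/* Note (added for clarity): this hook is used when an object with static
   TLS (space assigned from the surplus static TLS area) is loaded after
   threads already exist; every live thread's copy of that block must be
   initialized from the module's TLS image, hence the walk over both thread
   lists above.  */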
void
__wait_lookup_done (void)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
        continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
         scope.  First tell the thread that we are waiting and
         possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                                THREAD_GSCOPE_FLAG_WAIT,
                                                THREAD_GSCOPE_FLAG_USED))
        continue;

      do
        futex_wait_simple ((unsigned int *) gscope_flagp,
                           THREAD_GSCOPE_FLAG_WAIT, FUTEX_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
        continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
         scope.  First tell the thread that we are waiting and
         possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                                THREAD_GSCOPE_FLAG_WAIT,
                                                THREAD_GSCOPE_FLAG_USED))
        continue;

      do
        futex_wait_simple ((unsigned int *) gscope_flagp,
                           THREAD_GSCOPE_FLAG_WAIT, FUTEX_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
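
/* Note (added for clarity): this is the rendezvous with NPTL's global-scope
   (GSCOPE) protocol.  A thread performing a symbol lookup marks its
   gscope_flag as USED; before the dynamic linker may retire a search scope
   it calls this function, which flips every USED flag to WAIT and sleeps on
   a futex until each such thread signals that its lookup has finished.  */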