symbol_set_define (__libc_freeres_ptrs);
-extern __attribute__ ((weak)) void __libpthread_freeres (void);
+extern void __libpthread_freeres (void)
+#if PTHREAD_IN_LIBC && defined SHARED
+/* It is possible to call __libpthread_freeres directly in shared
+ builds with an integrated libpthread. */
+ attribute_hidden
+#else
+ __attribute__ ((weak))
+#endif
+ ;
void __libc_freeres_fn_section
__libc_freeres (void)
/* We run the resource freeing after IO cleanup. */
RUN_HOOK (__libc_subfreeres, ());
- /* Call the libpthread list of cleanup functions
- (weak-ref-and-check). */
- if (&__libpthread_freeres != NULL)
- __libpthread_freeres ();
+ call_function_static_weak (__libpthread_freeres);
#ifdef SHARED
__libc_unwind_link_freeres ();
libc_multiple_threads \
libc_pthread_init \
lowlevellock \
+ nptl-stack \
nptl_deallocate_tsd \
nptl_nthreads \
nptl_setxid \
+ nptlfreeres \
old_pthread_atfork \
old_pthread_cond_broadcast \
old_pthread_cond_destroy \
events \
libpthread-compat \
nptl-init \
- nptlfreeres \
pt-interp \
pthread_attr_getaffinity \
pthread_attr_getguardsize \
}
GLIBC_PRIVATE {
__default_pthread_attr;
- __default_pthread_attr_freeres;
__default_pthread_attr_lock;
__futex_abstimed_wait64;
__futex_abstimed_wait_cancelable64;
__lll_trylock_elision;
__lll_unlock_elision;
__mutex_aconf;
+ __nptl_deallocate_stack;
__nptl_deallocate_tsd;
__nptl_nthreads;
__nptl_setxid_sighandler;
+ __nptl_stack_list_add;
+ __nptl_stack_list_del;
__pthread_attr_copy;
__pthread_attr_destroy;
__pthread_attr_init;
}
GLIBC_PRIVATE {
- __libpthread_freeres;
__pthread_clock_gettime;
__pthread_clock_settime;
__pthread_get_minstack;
#include <lowlevellock.h>
#include <futex-internal.h>
#include <kernel-features.h>
-
+#include <nptl-stack.h>
#ifndef NEED_SEPARATE_REGISTER_STACK
# define MAP_STACK 0
#endif
-/* This yields the pointer that TLS support code calls the thread pointer. */
-#if TLS_TCB_AT_TP
-# define TLS_TPADJ(pd) (pd)
-#elif TLS_DTV_AT_TP
-# define TLS_TPADJ(pd) ((struct pthread *)((char *) (pd) + TLS_PRE_TCB_SIZE))
-#endif
-
-/* Cache handling for not-yet free stacks. */
-
-/* Maximum size in kB of cache. */
-static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40MiBi by default. */
-
-/* Check whether the stack is still used or not. */
-#define FREE_P(descr) ((descr)->tid <= 0)
-
-
-static void
-stack_list_del (list_t *elem)
-{
- GL (dl_in_flight_stack) = (uintptr_t) elem;
-
- atomic_write_barrier ();
-
- list_del (elem);
-
- atomic_write_barrier ();
-
- GL (dl_in_flight_stack) = 0;
-}
-
-
-static void
-stack_list_add (list_t *elem, list_t *list)
-{
- GL (dl_in_flight_stack) = (uintptr_t) elem | 1;
-
- atomic_write_barrier ();
-
- list_add (elem, list);
-
- atomic_write_barrier ();
-
- GL (dl_in_flight_stack) = 0;
-}
-
-
-/* We create a double linked list of all cache entries. Double linked
- because this allows removing entries from the end. */
-
-
/* Get a stack frame from the cache. We have to match by size since
some blocks might be too small or far too large. */
static struct pthread *
struct pthread *curr;
curr = list_entry (entry, struct pthread, list);
- if (FREE_P (curr) && curr->stackblock_size >= size)
+ if (__nptl_stack_in_use (curr) && curr->stackblock_size >= size)
{
if (curr->stackblock_size == size)
{
result->setxid_futex = -1;
/* Dequeue the entry. */
- stack_list_del (&result->list);
+ __nptl_stack_list_del (&result->list);
/* And add to the list of stacks in use. */
- stack_list_add (&result->list, &GL (dl_stack_used));
+ __nptl_stack_list_add (&result->list, &GL (dl_stack_used));
/* And decrease the cache size. */
GL (dl_stack_cache_actsize) -= result->stackblock_size;
return result;
}
-
-/* Free stacks until cache size is lower than LIMIT. */
-static void
-free_stacks (size_t limit)
-{
- /* We reduce the size of the cache. Remove the last entries until
- the size is below the limit. */
- list_t *entry;
- list_t *prev;
-
- /* Search from the end of the list. */
- list_for_each_prev_safe (entry, prev, &GL (dl_stack_cache))
- {
- struct pthread *curr;
-
- curr = list_entry (entry, struct pthread, list);
- if (FREE_P (curr))
- {
- /* Unlink the block. */
- stack_list_del (entry);
-
- /* Account for the freed memory. */
- GL (dl_stack_cache_actsize) -= curr->stackblock_size;
-
- /* Free the memory associated with the ELF TLS. */
- _dl_deallocate_tls (TLS_TPADJ (curr), false);
-
- /* Remove this block. This should never fail. If it does
- something is really wrong. */
- if (__munmap (curr->stackblock, curr->stackblock_size) != 0)
- abort ();
-
- /* Maybe we have freed enough. */
- if (GL (dl_stack_cache_actsize) <= limit)
- break;
- }
- }
-}
-
-/* Free all the stacks on cleanup. */
-void
-__nptl_stacks_freeres (void)
-{
- free_stacks (0);
-}
-
-/* Add a stack frame which is not used anymore to the stack. Must be
- called with the cache lock held. */
-static inline void
-__attribute ((always_inline))
-queue_stack (struct pthread *stack)
-{
- /* We unconditionally add the stack to the list. The memory may
- still be in use but it will not be reused until the kernel marks
- the stack as not used anymore. */
- stack_list_add (&stack->list, &GL (dl_stack_cache));
-
- GL (dl_stack_cache_actsize) += stack->stackblock_size;
- if (__glibc_unlikely (GL (dl_stack_cache_actsize) > stack_cache_maxsize))
- free_stacks (stack_cache_maxsize);
-}
-
/* Return the guard page position on allocated stack. */
static inline char *
__attribute ((always_inline))
lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);
/* And add to the list of stacks in use. */
- stack_list_add (&pd->list, &GL (dl_stack_used));
+ __nptl_stack_list_add (&pd->list, &GL (dl_stack_used));
lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);
/* Remove the thread from the list. */
- stack_list_del (&pd->list);
+ __nptl_stack_list_del (&pd->list);
lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
return 0;
}
-
-
-void
-__deallocate_stack (struct pthread *pd)
-{
- lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);
-
- /* Remove the thread from the list of threads with user defined
- stacks. */
- stack_list_del (&pd->list);
-
- /* Not much to do. Just free the mmap()ed memory. Note that we do
- not reset the 'used' flag in the 'tid' field. This is done by
- the kernel. If no thread has been created yet this field is
- still zero. */
- if (__glibc_likely (! pd->user_stack))
- (void) queue_stack (pd);
- else
- /* Free the memory associated with the ELF TLS. */
- _dl_deallocate_tls (TLS_TPADJ (pd), false);
-
- lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
-}
(sizeof (struct pthread) - offsetof (struct pthread, end_padding))
} __attribute ((aligned (TCB_ALIGNMENT)));
+/* This yields the pointer that TLS support code calls the thread pointer. */
+#if TLS_TCB_AT_TP
+# define TLS_TPADJ(pd) (pd)
+#elif TLS_DTV_AT_TP
+# define TLS_TPADJ(pd) ((struct pthread *)((char *) (pd) + TLS_PRE_TCB_SIZE))
+#endif
#endif /* descr.h */
--- /dev/null
+/* Stack cache management for NPTL.
+ Copyright (C) 2002-2021 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <nptl-stack.h>
+#include <ldsodefs.h>
+
+/* Maximum size of the stack cache, in bytes.  40 MiB by default.  */
+static const size_t stack_cache_maxsize = 40 * 1024 * 1024;
+
+/* Unlink the stack-list element ELEM from its list.  While the
+ unlink is in progress the element is advertised in
+ GL (dl_in_flight_stack), bracketed by write barriers around the
+ actual list_del; NOTE(review): presumably this lets code that walks
+ the stack lists (e.g. after fork) detect and repair a half-finished
+ operation -- confirm against the readers of dl_in_flight_stack.  A
+ bare pointer (low bit clear) marks a deletion; contrast
+ __nptl_stack_list_add, which sets the low bit for additions.  */
+void
+__nptl_stack_list_del (list_t *elem)
+{
+ GL (dl_in_flight_stack) = (uintptr_t) elem;
+
+ atomic_write_barrier ();
+
+ list_del (elem);
+
+ atomic_write_barrier ();
+
+ /* Unlink finished; clear the in-flight marker. */
+ GL (dl_in_flight_stack) = 0;
+}
+libc_hidden_def (__nptl_stack_list_del)
+
+/* Add the stack-list element ELEM to LIST (either &GL (dl_stack_used)
+ or &GL (dl_stack_cache), per the header).  The element is
+ advertised in GL (dl_in_flight_stack) for the duration of the
+ list_add, with the low bit set to distinguish an addition from a
+ deletion (see __nptl_stack_list_del for the bare-pointer form).  */
+void
+__nptl_stack_list_add (list_t *elem, list_t *list)
+{
+ GL (dl_in_flight_stack) = (uintptr_t) elem | 1;
+
+ atomic_write_barrier ();
+
+ list_add (elem, list);
+
+ atomic_write_barrier ();
+
+ /* Addition finished; clear the in-flight marker. */
+ GL (dl_in_flight_stack) = 0;
+}
+libc_hidden_def (__nptl_stack_list_add)
+
+/* Release cached thread stacks, starting from the tail of
+ GL (dl_stack_cache), until the accounted cache size
+ GL (dl_stack_cache_actsize) is at most LIMIT bytes; LIMIT == 0
+ therefore frees every reusable cached stack (used by
+ __libpthread_freeres).  NOTE(review): queue_stack calls this with
+ GL (dl_stack_cache_lock) held -- confirm the locking contract for
+ other callers.  */
+void
+__nptl_free_stacks (size_t limit)
+{
+ /* We reduce the size of the cache. Remove the last entries until
+ the size is below the limit. */
+ list_t *entry;
+ list_t *prev;
+
+ /* Search from the end of the list. */
+ list_for_each_prev_safe (entry, prev, &GL (dl_stack_cache))
+ {
+ struct pthread *curr;
+
+ curr = list_entry (entry, struct pthread, list);
+ /* True when the owning thread has exited (tid <= 0), i.e. the
+ block may be freed -- this test replaces the old FREE_P macro. */
+ if (__nptl_stack_in_use (curr))
+ {
+ /* Unlink the block. */
+ __nptl_stack_list_del (entry);
+
+ /* Account for the freed memory. */
+ GL (dl_stack_cache_actsize) -= curr->stackblock_size;
+
+ /* Free the memory associated with the ELF TLS. */
+ _dl_deallocate_tls (TLS_TPADJ (curr), false);
+
+ /* Remove this block. This should never fail. If it does
+ something is really wrong. */
+ if (__munmap (curr->stackblock, curr->stackblock_size) != 0)
+ abort ();
+
+ /* Maybe we have freed enough. */
+ if (GL (dl_stack_cache_actsize) <= limit)
+ break;
+ }
+ }
+}
+
+/* Add a stack frame which is not used anymore to the stack cache.
+ Must be called with the cache lock (GL (dl_stack_cache_lock))
+ held. */
+static inline void
+__attribute ((always_inline))
+queue_stack (struct pthread *stack)
+{
+ /* We unconditionally add the stack to the list. The memory may
+ still be in use but it will not be reused until the kernel marks
+ the stack as not used anymore. */
+ __nptl_stack_list_add (&stack->list, &GL (dl_stack_cache));
+
+ GL (dl_stack_cache_actsize) += stack->stackblock_size;
+ /* Trim the cache back down if this push pushed it over the limit. */
+ if (__glibc_unlikely (GL (dl_stack_cache_actsize) > stack_cache_maxsize))
+ __nptl_free_stacks (stack_cache_maxsize);
+}
+
+/* Retire the stack of thread PD: unlink it from the in-use list,
+ then either queue the libc-allocated stack block in the reuse
+ cache, or -- for a user-supplied stack -- just release the
+ thread's ELF TLS data.  Serialized by GL (dl_stack_cache_lock).  */
+void
+__nptl_deallocate_stack (struct pthread *pd)
+{
+ lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);
+
+ /* Remove the thread from the list of threads with user defined
+ stacks. */
+ __nptl_stack_list_del (&pd->list);
+
+ /* Not much to do. Just free the mmap()ed memory. Note that we do
+ not reset the 'used' flag in the 'tid' field. This is done by
+ the kernel. If no thread has been created yet this field is
+ still zero. */
+ if (__glibc_likely (! pd->user_stack))
+ (void) queue_stack (pd);
+ else
+ /* Free the memory associated with the ELF TLS. */
+ _dl_deallocate_tls (TLS_TPADJ (pd), false);
+
+ lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
+}
+libc_hidden_def (__nptl_deallocate_stack)
--- /dev/null
+/* Stack cache management for NPTL.
+ Copyright (C) 2002-2021 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#ifndef _NPTL_STACK_H
+#define _NPTL_STACK_H
+
+#include <descr.h>
+#include <list.h>
+#include <stdbool.h>
+
+/* Check whether the stack is still used or not.  NB: despite the
+ name this returns *true* when the stack is no longer in use
+ (TID <= 0; the kernel clears the TID field on thread exit).  It is
+ the direct replacement of the old FREE_P macro, so callers treat a
+ true result as "free for reuse".  NOTE(review): a name such as
+ __nptl_stack_free would read less ambiguously. */
+static inline bool
+__nptl_stack_in_use (struct pthread *pd)
+{
+ return pd->tid <= 0;
+}
+
+/* Remove the stack ELEM from its list. */
+void __nptl_stack_list_del (list_t *elem);
+libc_hidden_proto (__nptl_stack_list_del)
+
+/* Add ELEM to a stack list. LIST can be either &GL (dl_stack_used)
+ or &GL (dl_stack_cache). */
+void __nptl_stack_list_add (list_t *elem, list_t *list);
+libc_hidden_proto (__nptl_stack_list_add)
+
+/* Free allocated stack. */
+extern void __nptl_deallocate_stack (struct pthread *pd);
+libc_hidden_proto (__nptl_deallocate_stack)
+
+/* Free stacks until cache size is lower than LIMIT. */
+void __nptl_free_stacks (size_t limit) attribute_hidden;
+
+#endif /* _NPTL_STACK_H */
#include <set-hooks.h>
#include <libc-symbols.h>
#include <pthreadP.h>
+#include <nptl-stack.h>
/* Free libpthread.so resources.
Note: Caller ensures we are called only once. */
__libpthread_freeres (void)
{
call_function_static_weak (__default_pthread_attr_freeres);
- call_function_static_weak (__nptl_stacks_freeres);
+ __nptl_free_stacks (0);
}
extern int __default_pthread_attr_lock;
libc_hidden_proto (__default_pthread_attr_lock)
/* Called from __libc_freeres to deallocate the default attribute. */
-extern void __default_pthread_attr_freeres (void);
+extern void __default_pthread_attr_freeres (void) attribute_hidden;
/* Size and alignment of static TLS block. */
extern size_t __static_tls_size attribute_hidden;
descriptor is still valid. */
extern void __free_tcb (struct pthread *pd) attribute_hidden;
-/* Free allocated stack. */
-extern void __deallocate_stack (struct pthread *pd) attribute_hidden;
-
/* Change the permissions of a thread stack. Called from
_dl_make_stacks_executable and pthread_create. */
int
libc_hidden_proto (__nptl_setxid_sighandler)
extern int __nptl_setxid (struct xid_command *cmdp) attribute_hidden;
-extern void __nptl_stacks_freeres (void) attribute_hidden;
-
extern void __wait_lookup_done (void) attribute_hidden;
/* Allocates the extension space for ATTR. Returns an error code on
/* Queue the stack memory block for reuse and exit the process. The
kernel will signal via writing to the address returned by
QUEUE-STACK when the stack is available. */
- __deallocate_stack (pd);
+ __nptl_deallocate_stack (pd);
}
}
futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);
/* Free the resources. */
- __deallocate_stack (pd);
+ __nptl_deallocate_stack (pd);
}
/* We have to translate error codes. */