This is the mail archive of the
libc-alpha@sourceware.org
mailing list for the glibc project.
[PATCH 1/2] nptl: Remove __ASSUME_SET_ROBUST_LIST
- From: Adhemerval Zanella <adhemerval dot zanella at linaro dot org>
- To: libc-alpha at sourceware dot org
- Date: Tue, 18 Apr 2017 18:13:19 -0300
- Subject: [PATCH 1/2] nptl: Remove __ASSUME_SET_ROBUST_LIST
- Authentication-results: sourceware.org; auth=none
This is another patch that was in my backlog, so I am sending it again
to the list, since the first version was sent almost 5 months ago [1].
--
This patch removes the __ASSUME_SET_ROBUST_LIST usage in nptl generic
code. The set_robust_list availability is indicated by '__set_robust_list_avail',
which is now always defined. Its initial value is set to -1 and it is
set to a positive value if both __NR_set_robust_list is defined
and the syscall returns successfully.
A subsequent patch is intended to remove the Linux definitions of
__ASSUME_SET_ROBUST_LIST.
Tested on x86_64.
* nptl/nptl-init.c (set_robust_list_not_avail): Remove definition.
(__pthread_initialize_minimal_internal): Set __set_robust_list_avail
to 1 if syscall returns correctly.
(__set_robust_list_avail): Define regardless of whether
__ASSUME_SET_ROBUST_LIST is defined.
* nptl/pthreadP.h (__set_robust_list_avail): Likewise.
* nptl/pthread_create.c (START_THREAD_DEFN): Remove
__ASSUME_SET_ROBUST_LIST usage.
* nptl/pthread_mutex_init.c (__pthread_mutex_init): Likewise.
[1] https://sourceware.org/ml/libc-alpha/2016-09/msg00373.html
---
nptl/nptl-init.c | 12 +++---------
nptl/pthreadP.h | 2 --
nptl/pthread_create.c | 8 ++------
nptl/pthread_mutex_init.c | 2 --
4 files changed, 5 insertions(+), 19 deletions(-)
diff --git a/nptl/nptl-init.c b/nptl/nptl-init.c
index 2921607..c86b35e 100644
--- a/nptl/nptl-init.c
+++ b/nptl/nptl-init.c
@@ -48,14 +48,8 @@ int *__libc_multiple_threads_ptr attribute_hidden;
size_t __static_tls_size;
size_t __static_tls_align_m1;
-#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if we do not have the system call and we can use it. */
-int __set_robust_list_avail;
-# define set_robust_list_not_avail() \
- __set_robust_list_avail = -1
-#else
-# define set_robust_list_not_avail() do { } while (0)
-#endif
+int __set_robust_list_avail = -1;
#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
/* Nonzero if we do not have FUTEX_CLOCK_REALTIME. */
@@ -308,9 +302,9 @@ __pthread_initialize_minimal_internal (void)
INTERNAL_SYSCALL_DECL (err);
int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
sizeof (struct robust_list_head));
- if (INTERNAL_SYSCALL_ERROR_P (res, err))
+ if (!INTERNAL_SYSCALL_ERROR_P (res, err))
+ __set_robust_list_avail = 1;
#endif
- set_robust_list_not_avail ();
}
#ifdef __NR_futex
diff --git a/nptl/pthreadP.h b/nptl/pthreadP.h
index 7fc1e50..4125e7f 100644
--- a/nptl/pthreadP.h
+++ b/nptl/pthreadP.h
@@ -213,10 +213,8 @@ hidden_proto (__pthread_keys)
/* Number of threads running. */
extern unsigned int __nptl_nthreads attribute_hidden;
-#ifndef __ASSUME_SET_ROBUST_LIST
/* Negative if we do not have the system call and we can use it. */
extern int __set_robust_list_avail attribute_hidden;
-#endif
/* Thread Priority Protection. */
extern int __sched_fifo_min_prio attribute_hidden;
diff --git a/nptl/pthread_create.c b/nptl/pthread_create.c
index d0d7414..76b0ac4 100644
--- a/nptl/pthread_create.c
+++ b/nptl/pthread_create.c
@@ -387,18 +387,16 @@ START_THREAD_DEFN
if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2))
futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);
-#ifdef __NR_set_robust_list
-# ifndef __ASSUME_SET_ROBUST_LIST
if (__set_robust_list_avail >= 0)
-# endif
{
+#ifdef __NR_set_robust_list
INTERNAL_SYSCALL_DECL (err);
/* This call should never fail because the initial call in init.c
succeeded. */
INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
sizeof (struct robust_list_head));
- }
#endif
+ }
#ifdef SIGCANCEL
/* If the parent was running cancellation handlers while creating
@@ -508,7 +506,6 @@ START_THREAD_DEFN
the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE. */
atomic_bit_set (&pd->cancelhandling, EXITING_BIT);
-#ifndef __ASSUME_SET_ROBUST_LIST
/* If this thread has any robust mutexes locked, handle them now. */
# ifdef __PTHREAD_MUTEX_HAVE_PREV
void *robust = pd->robust_head.list;
@@ -539,7 +536,6 @@ START_THREAD_DEFN
}
while (robust != (void *) &pd->robust_head);
}
-#endif
/* Mark the memory of the stack as usable to the kernel. We free
everything except for the space used for the TCB itself. */
diff --git a/nptl/pthread_mutex_init.c b/nptl/pthread_mutex_init.c
index 138e144..18724e8 100644
--- a/nptl/pthread_mutex_init.c
+++ b/nptl/pthread_mutex_init.c
@@ -91,11 +91,9 @@ __pthread_mutex_init (pthread_mutex_t *mutex,
if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST) != 0)
{
-#ifndef __ASSUME_SET_ROBUST_LIST
if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_PSHARED) != 0
&& __set_robust_list_avail < 0)
return ENOTSUP;
-#endif
mutex->__data.__kind |= PTHREAD_MUTEX_ROBUST_NORMAL_NP;
}
--
2.7.4