Linux: set_robust_list syscall number is always available
author    Florian Weimer <fweimer@redhat.com>    Sun, 9 Feb 2020 15:38:33 +0000 (16:38 +0100)
committer Florian Weimer <fweimer@redhat.com>    Tue, 3 Mar 2020 11:49:44 +0000 (12:49 +0100)
Due to the built-in syscall tables, __NR_set_robust_list is always defined
(although the system call may not be available at run time).

Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
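
Below is a minimal, standalone sketch of the pattern this change relies on,
written against the public syscall(2) wrapper rather than glibc's internal
INTERNAL_SYSCALL_CALL machinery: because the syscall number is always
defined, availability is decided by the run-time return value, not by a
compile-time #ifdef.  The helper name set_robust_list_maybe is hypothetical
and not part of glibc.

#include <errno.h>
#include <linux/futex.h>    /* struct robust_list_head */
#include <sys/syscall.h>    /* SYS_set_robust_list */
#include <unistd.h>         /* syscall */

/* Hypothetical helper (not part of glibc): register HEAD as the calling
   thread's robust mutex list.  Returns 1 if the kernel accepted it, 0 if
   the syscall is unavailable at run time (ENOSYS), -1 on any other error.
   The syscall number itself is always defined, so no #ifdef is needed
   around this code.  */
static int
set_robust_list_maybe (struct robust_list_head *head)
{
  if (syscall (SYS_set_robust_list, head, sizeof (*head)) == 0)
    return 1;
  return errno == ENOSYS ? 0 : -1;
}

In the hunks below the same idea appears in glibc's own terms: the
#ifdef __NR_set_robust_list guards go away, and only the run-time check of
the syscall result (set_robust_list_not_avail on failure) remains.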
nptl/nptl-init.c
nptl/pthread_create.c
sysdeps/nptl/fork.c

diff --git a/nptl/nptl-init.c b/nptl/nptl-init.c
index 95ac91df154dbc31593f1c795155e7f15b545942..96b1444a01bf16e5fae7e43453b51dd1f8e44516 100644
--- a/nptl/nptl-init.c
+++ b/nptl/nptl-init.c
@@ -117,10 +117,8 @@ static
 void
 __nptl_set_robust (struct pthread *self)
 {
-#ifdef __NR_set_robust_list
   INTERNAL_SYSCALL_CALL (set_robust_list, &self->robust_head,
                         sizeof (struct robust_list_head));
-#endif
 }
 
 
@@ -240,14 +238,12 @@ __pthread_initialize_minimal_internal (void)
     pd->robust_prev = &pd->robust_head;
 #endif
     pd->robust_head.list = &pd->robust_head;
-#ifdef __NR_set_robust_list
     pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                    - offsetof (pthread_mutex_t,
                                                __data.__list.__next));
     int res = INTERNAL_SYSCALL_CALL (set_robust_list, &pd->robust_head,
                                     sizeof (struct robust_list_head));
     if (INTERNAL_SYSCALL_ERROR_P (res))
-#endif
       set_robust_list_not_avail ();
   }
 
diff --git a/nptl/pthread_create.c b/nptl/pthread_create.c
index 8614ec63f8d5340ac63eec3dbfe6457400903b86..7c752d0f99b45a48b117da4adfa0e4a8ac3c014a 100644
--- a/nptl/pthread_create.c
+++ b/nptl/pthread_create.c
@@ -389,17 +389,15 @@ START_THREAD_DEFN
   if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2))
     futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);
 
-#ifdef __NR_set_robust_list
-# ifndef __ASSUME_SET_ROBUST_LIST
+#ifndef __ASSUME_SET_ROBUST_LIST
   if (__set_robust_list_avail >= 0)
-# endif
+#endif
     {
       /* This call should never fail because the initial call in init.c
         succeeded.  */
       INTERNAL_SYSCALL_CALL (set_robust_list, &pd->robust_head,
                             sizeof (struct robust_list_head));
     }
-#endif
 
   /* If the parent was running cancellation handlers while creating
      the thread the new thread inherited the signal mask.  Reset the
diff --git a/sysdeps/nptl/fork.c b/sysdeps/nptl/fork.c
index f5cf88d68c7af36dbf8f80b1226158554acd4f99..5091a000e3854637327a00d47e600f7d08988694 100644
--- a/sysdeps/nptl/fork.c
+++ b/sysdeps/nptl/fork.c
@@ -83,7 +83,6 @@ __libc_fork (void)
       if (__fork_generation_pointer != NULL)
        *__fork_generation_pointer += __PTHREAD_ONCE_FORK_GEN_INCR;
 
-#ifdef __NR_set_robust_list
       /* Initialize the robust mutex list setting in the kernel which has
         been reset during the fork.  We do not check for errors because if
         it fails here, it must have failed at process startup as well and
@@ -94,19 +93,18 @@ __libc_fork (void)
         inherit the correct value from the parent.  We do not need to clear
         the pending operation because it must have been zero when fork was
         called.  */
-# if __PTHREAD_MUTEX_HAVE_PREV
+#if __PTHREAD_MUTEX_HAVE_PREV
       self->robust_prev = &self->robust_head;
-# endif
+#endif
       self->robust_head.list = &self->robust_head;
-# ifdef SHARED
+#ifdef SHARED
       if (__builtin_expect (__libc_pthread_functions_init, 0))
        PTHFCT_CALL (ptr_set_robust, (self));
-# else
+#else
       extern __typeof (__nptl_set_robust) __nptl_set_robust
        __attribute__((weak));
       if (__builtin_expect (__nptl_set_robust != NULL, 0))
        __nptl_set_robust (self);
-# endif
 #endif
 
       /* Reset the lock state in the multi-threaded case.  */