2006-02-12  Ulrich Drepper  <drepper@redhat.com>
+ * allocatestack.c (allocate_stack): Initialize robust_list.
+ * init.c (__pthread_initialize_minimal_internal): Likewise.
+ * descr.h (struct xid_command): Pretty printing.
+ (struct pthread): Use __pthread_list_t or __pthread_slist_t for
+ robust_list. Adjust macros.
+ * pthread_create.c (start_thread): Adjust robust_list handling.
+ * pthread_mutex_unlock.c: Don't allow unlocking from any thread
+ but the owner for all robust mutex types.
+ * sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h: Define
+ __pthread_list_t and __pthread_slist_t. Use them in pthread_mutex_t.
+ * sysdeps/pthread/pthread.h: Adjust mutex initializers.
+
* sysdeps/unix/sysv/linux/i386/not-cancel.h: Define openat_not_cancel,
openat_not_cancel_3, openat64_not_cancel, and openat64_not_cancel_3.
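
For orientation, here is a minimal stand-alone sketch of the two list-node shapes this patch introduces (the struct names mirror __pthread_list_t and __pthread_slist_t from the bits/pthreadtypes.h hunks below; the test harness itself is hypothetical):

#include <assert.h>

/* Mirrors __pthread_list_t: doubly-linked node, used where
   __PTHREAD_MUTEX_HAVE_PREV is defined (64-bit targets).  */
struct list_node
{
  struct list_node *__prev;
  struct list_node *__next;
};

/* Mirrors __pthread_slist_t: singly-linked node for 32-bit targets.  */
struct slist_node
{
  struct slist_node *__next;
};

int
main (void)
{
  /* A singly-linked node is exactly pointer-sized, so it can overlay
     the old 'struct __pthread_mutex_s *__next' member in the mutex
     union without growing pthread_mutex_t.  */
  assert (sizeof (struct slist_node) == sizeof (void *));
  return 0;
}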
-/* Copyright (C) 2002, 2003, 2004, 2005
- Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
/* The process ID is also the same as that of the caller. */
pd->pid = THREAD_GETMEM (THREAD_SELF, pid);
+ /* List of robust mutexes. */
+#ifdef __PTHREAD_MUTEX_HAVE_PREV
+ pd->robust_list.__prev = &pd->robust_list;
+#endif
+ pd->robust_list.__next = &pd->robust_list;
+
/* Allocate the DTV for this thread. */
if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
{
/* The process ID is also the same as that of the caller. */
pd->pid = THREAD_GETMEM (THREAD_SELF, pid);
+ /* List of robust mutexes. */
+#ifdef __PTHREAD_MUTEX_HAVE_PREV
+ pd->robust_list.__prev = &pd->robust_list;
+#endif
+ pd->robust_list.__next = &pd->robust_list;
+
/* Allocate the DTV for this thread. */
if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
{
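
Both allocate_stack paths (and __pthread_initialize_minimal_internal further down) initialize robust_list the same way: the links point back at the head itself, which is the empty state of a circular list. A minimal sketch of that invariant, using a stand-in struct rather than the real thread descriptor:

#include <assert.h>

struct list_node
{
  struct list_node *__prev;
  struct list_node *__next;
};

int
main (void)
{
  struct list_node robust_list;

  /* Same pattern as the hunks above.  */
  robust_list.__prev = &robust_list;
  robust_list.__next = &robust_list;

  /* The emptiness test in start_thread accordingly becomes a
     comparison against the head, not against NULL.  */
  assert (robust_list.__next == &robust_list);
  return 0;
}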
struct xid_command
{
int syscall_no;
- long id[3];
+ long int id[3];
volatile int cntr;
};
pid_t pid;
/* List of robust mutexes the thread is holding. */
- struct __pthread_mutex_s *robust_list;
-
#ifdef __PTHREAD_MUTEX_HAVE_PREV
+ __pthread_list_t robust_list;
+
# define ENQUEUE_MUTEX(mutex) \
do { \
- mutex->__data.__next = THREAD_GETMEM (THREAD_SELF, robust_list); \
- THREAD_SETMEM (THREAD_SELF, robust_list, &mutex->__data); \
- if (mutex->__data.__next != NULL) \
- mutex->__data.__next->__prev = &mutex->__data; \
- mutex->__data.__prev = NULL; \
+ __pthread_list_t *next = THREAD_GETMEM (THREAD_SELF, robust_list.__next); \
+ next->__prev = &mutex->__data.__list; \
+ mutex->__data.__list.__next = next; \
+ mutex->__data.__list.__prev = &THREAD_SELF->robust_list; \
+ THREAD_SETMEM (THREAD_SELF, robust_list.__next, &mutex->__data.__list); \
} while (0)
# define DEQUEUE_MUTEX(mutex) \
do { \
- if (mutex->__data.__prev == NULL) \
- THREAD_SETMEM (THREAD_SELF, robust_list, mutex->__data.__next); \
- else \
- mutex->__data.__prev->__next = mutex->__data.__next; \
- if (mutex->__data.__next != NULL) \
- mutex->__data.__next->__prev = mutex->__data.__prev; \
- mutex->__data.__prev = NULL; \
- mutex->__data.__next = NULL; \
+ mutex->__data.__list.__next->__prev = mutex->__data.__list.__prev; \
+ mutex->__data.__list.__prev->__next = mutex->__data.__list.__next; \
+ mutex->__data.__list.__prev = NULL; \
+ mutex->__data.__list.__next = NULL; \
} while (0)
#else
+ __pthread_slist_t robust_list;
+
# define ENQUEUE_MUTEX(mutex) \
do { \
- mutex->__data.__next = THREAD_GETMEM (THREAD_SELF, robust_list); \
- THREAD_SETMEM (THREAD_SELF, robust_list, &mutex->__data); \
+ mutex->__data.__list.__next \
+ = THREAD_GETMEM (THREAD_SELF, robust_list.__next); \
+ THREAD_SETMEM (THREAD_SELF, robust_list.__next, &mutex->__data.__list); \
} while (0)
# define DEQUEUE_MUTEX(mutex) \
do { \
- struct __pthread_mutex_s *runp = THREAD_GETMEM (THREAD_SELF, robust_list);\
- if (runp == &mutex->__data) \
+ __pthread_slist_t *runp = THREAD_GETMEM (THREAD_SELF, robust_list.__next);\
+ if (runp == &mutex->__data.__list) \
THREAD_SETMEM (THREAD_SELF, robust_list.__next, runp->__next); \
else \
{ \
- while (runp->__next != &mutex->__data) \
+ while (runp->__next != &mutex->__data.__list) \
runp = runp->__next; \
\
runp->__next = runp->__next->__next; \
- mutex->__data.__next = NULL; \
+ mutex->__data.__list.__next = NULL; \
} \
} while (0)
#endif
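
The doubly-linked variant makes both insertion and removal O(1): a mutex is unlinked through its own __prev/__next fields without walking the list. A hedged stand-alone rendering of what the __PTHREAD_MUTEX_HAVE_PREV macro bodies do, with plain functions in place of THREAD_GETMEM/THREAD_SETMEM and a local head in place of the thread descriptor:

#include <assert.h>
#include <stddef.h>

struct list_node
{
  struct list_node *__prev;
  struct list_node *__next;
};

/* Equivalent of the ENQUEUE_MUTEX body: insert NODE right after HEAD
   in the circular list.  */
static void
enqueue (struct list_node *head, struct list_node *node)
{
  struct list_node *next = head->__next;
  next->__prev = node;
  node->__next = next;
  node->__prev = head;
  head->__next = node;
}

/* Equivalent of the DEQUEUE_MUTEX body: unlink NODE using its own
   links; no traversal needed.  */
static void
dequeue (struct list_node *node)
{
  node->__next->__prev = node->__prev;
  node->__prev->__next = node->__next;
  node->__prev = NULL;
  node->__next = NULL;
}

int
main (void)
{
  struct list_node head = { &head, &head };
  struct list_node a, b;

  enqueue (&head, &a);
  enqueue (&head, &b);     /* list is now head -> b -> a -> head */
  assert (head.__next == &b && b.__next == &a && a.__next == &head);

  dequeue (&b);
  assert (head.__next == &a && a.__prev == &head);

  dequeue (&a);
  assert (head.__next == &head && head.__prev == &head);
  return 0;
}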
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
struct pthread *pd = THREAD_SELF;
INTERNAL_SYSCALL_DECL (err);
pd->pid = pd->tid = INTERNAL_SYSCALL (set_tid_address, err, 1, &pd->tid);
+#ifdef __PTHREAD_MUTEX_HAVE_PREV
+ pd->robust_list.__prev = &pd->robust_list;
+#endif
+ pd->robust_list.__next = &pd->robust_list;
THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
THREAD_SETMEM (pd, user_stack, true);
if (LLL_LOCK_INITIALIZER != 0)
atomic_bit_set (&pd->cancelhandling, EXITING_BIT);
/* If this thread has any robust mutexes locked, handle them now. */
- struct __pthread_mutex_s *robust = THREAD_GETMEM (pd, robust_list);
- if (__builtin_expect (robust != NULL, 0))
+#if __WORDSIZE == 64
+ __pthread_list_t *robust = pd->robust_list.__next;
+#else
+ __pthread_slist_t *robust = pd->robust_list.__next;
+#endif
+ if (__builtin_expect (robust != &pd->robust_list, 0))
{
do
{
- struct __pthread_mutex_s *this = robust;
+ struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
+ ((char *) robust - offsetof (struct __pthread_mutex_s, __list));
robust = robust->__next;
assert (lll_mutex_islocked (this->__lock));
--this->__nusers;
assert (this->__owner != PTHREAD_MUTEX_NOTRECOVERABLE);
this->__owner = PTHREAD_MUTEX_OWNERDEAD;
- this->__next = NULL;
+ this->__list.__next = NULL;
#ifdef __PTHREAD_MUTEX_HAVE_PREV
- this->__prev = NULL;
+ this->__list.__prev = NULL;
#endif
lll_mutex_unlock (this->__lock);
}
- while (robust != NULL);
+ while (robust != &pd->robust_list);
/* Clean up so that the thread descriptor can be reused. */
- THREAD_SETMEM (pd, robust_list, NULL);
+ pd->robust_list.__next = &pd->robust_list;
+#ifdef __PTHREAD_MUTEX_HAVE_PREV
+ pd->robust_list.__prev = &pd->robust_list;
+#endif
}
/* If the thread is detached free the TCB. */
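
The cast in the cleanup loop recovers the enclosing mutex descriptor from the embedded list node; this is the usual offsetof ("container of") idiom. A minimal sketch with hypothetical stand-in types:

#include <assert.h>
#include <stddef.h>

struct list_node
{
  struct list_node *__prev;
  struct list_node *__next;
};

/* Stand-in for struct __pthread_mutex_s: the node is embedded inside
   a larger object.  */
struct mutex_desc
{
  int __lock;
  int __owner;
  struct list_node __list;
};

int
main (void)
{
  struct mutex_desc m = { 0, 0, { NULL, NULL } };
  struct list_node *robust = &m.__list;

  /* Same arithmetic as in start_thread: step back from the embedded
     node to the start of the containing descriptor.  */
  struct mutex_desc *this = (struct mutex_desc *)
    ((char *) robust - offsetof (struct mutex_desc, __list));

  assert (this == &m);
  return 0;
}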
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
goto robust;
case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
- /* Error checking mutex. */
+ case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
+ case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
if (abs (mutex->__data.__owner) != THREAD_GETMEM (THREAD_SELF, tid)
|| ! lll_mutex_islocked (mutex->__data.__lock))
return EPERM;
- /* FALLTHROUGH */
-
- case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
- case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
/* If the previous owner died and the caller did not succeed in
making the state consistent, mark the mutex as unrecoverable
and wake all waiters. */
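
The effect of this hunk is visible through the public API: unlocking a robust mutex from a thread that does not own it now fails with EPERM for every robust type, not only the error-checking one. A sketch using the GNU robust-mutex extension of this era (pthread_mutexattr_setrobust_np and PTHREAD_MUTEX_ROBUST_NP; assumes a glibc with this patch applied, error handling elided):

#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t m;

static void *
thief (void *arg)
{
  /* This thread never acquired M, so unlocking must fail.  */
  assert (pthread_mutex_unlock (&m) == EPERM);
  return NULL;
}

int
main (void)
{
  pthread_mutexattr_t a;
  pthread_t th;

  pthread_mutexattr_init (&a);
  pthread_mutexattr_setrobust_np (&a, PTHREAD_MUTEX_ROBUST_NP);
  pthread_mutex_init (&m, &a);

  pthread_mutex_lock (&m);     /* main thread becomes the owner */
  pthread_create (&th, NULL, thief, NULL);
  pthread_join (th, NULL);

  pthread_mutex_unlock (&m);   /* the owner may still unlock */
  return 0;
}

Compile with -lpthread; before this change only the error-checking robust type performed the ownership test.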
/* Mutex initializers. */
#if __WORDSIZE == 64
# define PTHREAD_MUTEX_INITIALIZER \
- { { 0, 0, 0, 0, 0, 0, 0, 0 } }
+ { { 0, 0, 0, 0, 0, 0, { 0, 0 } } }
# ifdef __USE_GNU
# define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP \
- { { 0, 0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, 0, 0, 0 } }
+ { { 0, 0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, 0, { 0, 0 } } }
# define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP \
- { { 0, 0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, 0, 0, 0 } }
+ { { 0, 0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, 0, { 0, 0 } } }
# define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP \
- { { 0, 0, 0, 0, PTHREAD_MUTEX_ADAPTIVE_NP, 0, 0, 0 } }
+ { { 0, 0, 0, 0, PTHREAD_MUTEX_ADAPTIVE_NP, 0, { 0, 0 } } }
# endif
#else
# define PTHREAD_MUTEX_INITIALIZER \
} pthread_attr_t;
+typedef struct __pthread_internal_slist
+{
+ struct __pthread_internal_slist *__next;
+} __pthread_slist_t;
+
+
/* Data structures for mutex handling. The structure of the attribute
type is not exposed on purpose. */
typedef union
__extension__ union
{
int __spins;
- struct __pthread_mutex_s *__next;
+ __pthread_slist_t __list;
};
} __data;
char __size[__SIZEOF_PTHREAD_MUTEX_T];
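
Because __list shares the inner union with __spins, and the whole __data struct is itself wrapped in a union with __size, swapping the bare __next pointer for an embedded list node must not change the object size. A quick check one could compile against the installed headers (assumes they contain these definitions):

#include <assert.h>
#include <pthread.h>

int
main (void)
{
  /* The __size member pins the ABI size of the mutex object.  */
  assert (sizeof (pthread_mutex_t) == __SIZEOF_PTHREAD_MUTEX_T);
  return 0;
}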
} pthread_attr_t;
+#if __WORDSIZE == 64
+typedef struct __pthread_internal_list
+{
+ struct __pthread_internal_list *__prev;
+ struct __pthread_internal_list *__next;
+} __pthread_list_t;
+#else
+typedef struct __pthread_internal_slist
+{
+ struct __pthread_internal_slist *__next;
+} __pthread_slist_t;
+#endif
+
+
/* Data structures for mutex handling. The structure of the attribute
type is not exposed on purpose. */
typedef union
int __kind;
#if __WORDSIZE == 64
int __spins;
- struct __pthread_mutex_s *__next;
- struct __pthread_mutex_s *__prev;
+ __pthread_list_t __list;
# define __PTHREAD_MUTEX_HAVE_PREV 1
#else
unsigned int __nusers;
__extension__ union
{
int __spins;
- struct __pthread_mutex_s *__next;
+ __pthread_slist_t __list;
};
#endif
} __data;
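
The reason the 64-bit layout spends two pointers on __list while 32-bit keeps one is the dequeue cost: without a back pointer, removal has to walk from the head to find the predecessor, as the #else DEQUEUE_MUTEX in descr.h does. A stand-alone mirror of that walk (the macro clears __next only on the walk path; it is cleared unconditionally here for clarity):

#include <assert.h>
#include <stddef.h>

typedef struct slist
{
  struct slist *__next;
} slist_t;

static void
slist_dequeue (slist_t *head, slist_t *node)
{
  slist_t *runp = head->__next;
  if (runp == node)
    head->__next = runp->__next;
  else
    {
      /* O(n): find the predecessor of NODE.  */
      while (runp->__next != node)
	runp = runp->__next;
      runp->__next = runp->__next->__next;
    }
  node->__next = NULL;
}

int
main (void)
{
  slist_t head, a, b;

  /* Build head -> b -> a -> head, as two enqueues at the head would.  */
  head.__next = &b;
  b.__next = &a;
  a.__next = &head;

  slist_dequeue (&head, &a);
  assert (head.__next == &b && b.__next == &head);

  slist_dequeue (&head, &b);
  assert (head.__next == &head);
  return 0;
}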