1 /* thread.cc: Locking and threading module functions
2
3 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
4 2006, 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
5
6 This file is part of Cygwin.
7
8 This software is a copyrighted work licensed under the terms of the
9 Cygwin license. Please consult the file "CYGWIN_LICENSE" for
10 details. */
11
12 /* Implementation overview and caveats:
13
14 Win32 puts some constraints on what can and cannot be implemented. Where
15 possible we work around those constraints. Where we cannot work around
16 the constraints we either pretend to be conformant, or return an error
17 code.
18
19 Some caveats: PROCESS_SHARED objects, while they pretend to be process
20 shared, may not actually work. Some test cases are needed to determine
21 win32's behaviour. My suspicion is that the win32 handle needs to be
22 opened with different flags for proper operation.
23
24 R.Collins, April 2001. */
25
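/* Editor's note: a minimal, standalone application-level sketch (not part of
   this file) illustrating the PROCESS_SHARED caveat above.  It assumes only
   the standard pthread_mutexattr_setpshared()/pthread_mutex_init() entry
   points; the helper name and the error handling are illustrative. */

#include <pthread.h>
#include <stdio.h>

static int
try_process_shared_mutex (void)
{
  pthread_mutexattr_t attr;
  pthread_mutex_t mutex;
  int ret;

  pthread_mutexattr_init (&attr);
  /* Ask for a process-shared mutex; per the caveat above this request
     may be rejected.  */
  ret = pthread_mutexattr_setpshared (&attr, PTHREAD_PROCESS_SHARED);
  if (!ret)
    ret = pthread_mutex_init (&mutex, &attr);
  if (ret)
    fprintf (stderr, "process-shared mutex unavailable (error %d)\n", ret);
  else
    pthread_mutex_destroy (&mutex);
  pthread_mutexattr_destroy (&attr);
  return ret;
}
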
26 #ifdef HAVE_CONFIG_H
27 #endif
28
29 #include "winsup.h"
30 #include "miscfuncs.h"
31 #include "path.h"
32 #include <stdlib.h>
33 #include "pinfo.h"
34 #include "sigproc.h"
35 #include "perprocess.h"
36 #include "cygtls.h"
37 #include "fhandler.h"
38 #include "dtable.h"
39 #include "cygheap.h"
40 #include "ntdll.h"
41
42 extern "C" void __fp_lock_all ();
43 extern "C" void __fp_unlock_all ();
44 extern "C" int valid_sched_parameters(const struct sched_param *);
45 extern "C" int sched_set_thread_priority(HANDLE thread, int priority);
46 static inline verifyable_object_state
47 verifyable_object_isvalid (void const * objectptr, thread_magic_t magic,
48 void *static_ptr1 = NULL,
49 void *static_ptr2 = NULL,
50 void *static_ptr3 = NULL);
51
52 extern int threadsafe;
53
54 const pthread_t pthread_mutex::_new_mutex = (pthread_t) 1;
55 const pthread_t pthread_mutex::_unlocked_mutex = (pthread_t) 2;
56 const pthread_t pthread_mutex::_destroyed_mutex = (pthread_t) 3;
57
58 inline bool
59 pthread_mutex::no_owner()
60 {
61 int res;
62 if (!owner)
63 {
64 debug_printf ("NULL owner value");
65 res = 1;
66 }
67 else if (owner == _destroyed_mutex)
68 {
69 paranoid_printf ("attempt to use destroyed mutex");
70 res = 1;
71 }
72 else if (owner == _new_mutex || owner == _unlocked_mutex)
73 res = 1;
74 else
75 res = 0;
76 return res;
77 }
78
79 #undef __getreent
80 extern "C" struct _reent *
81 __getreent ()
82 {
83 return &_my_tls.local_clib;
84 }
85
86 extern "C" void
87 __cygwin_lock_init (_LOCK_T *lock)
88 {
89 *lock = _LOCK_T_INITIALIZER;
90 }
91
92 extern "C" void
93 __cygwin_lock_init_recursive (_LOCK_T *lock)
94 {
95 *lock = _LOCK_T_RECURSIVE_INITIALIZER;
96 }
97
98 extern "C" void
99 __cygwin_lock_fini (_LOCK_T *lock)
100 {
101 pthread_mutex_destroy ((pthread_mutex_t*) lock);
102 }
103
104 extern "C" void
105 __cygwin_lock_lock (_LOCK_T *lock)
106 {
107 paranoid_printf ("threadcount %d. locking", MT_INTERFACE->threadcount);
108 pthread_mutex_lock ((pthread_mutex_t*) lock);
109 }
110
111 extern "C" int
112 __cygwin_lock_trylock (_LOCK_T *lock)
113 {
114 return pthread_mutex_trylock ((pthread_mutex_t*) lock);
115 }
116
117
118 extern "C" void
119 __cygwin_lock_unlock (_LOCK_T *lock)
120 {
121 pthread_mutex_unlock ((pthread_mutex_t*) lock);
122 paranoid_printf ("threadcount %d. unlocked", MT_INTERFACE->threadcount);
123 }
124
125 static inline verifyable_object_state
126 verifyable_object_isvalid (void const *objectptr, thread_magic_t magic, void *static_ptr1,
127 void *static_ptr2, void *static_ptr3)
128 {
129 myfault efault;
130 if (efault.faulted (objectptr))
131 return INVALID_OBJECT;
132
133 verifyable_object **object = (verifyable_object **) objectptr;
134
135 if ((static_ptr1 && *object == static_ptr1) ||
136 (static_ptr2 && *object == static_ptr2) ||
137 (static_ptr3 && *object == static_ptr3))
138 return VALID_STATIC_OBJECT;
139 if ((*object)->magic != magic)
140 return INVALID_OBJECT;
141 return VALID_OBJECT;
142 }
143
144 /* static members */
145 inline bool
146 pthread_attr::is_good_object (pthread_attr_t const *attr)
147 {
148 if (verifyable_object_isvalid (attr, PTHREAD_ATTR_MAGIC) != VALID_OBJECT)
149 return false;
150 return true;
151 }
152
153 inline bool
154 pthread_condattr::is_good_object (pthread_condattr_t const *attr)
155 {
156 if (verifyable_object_isvalid (attr, PTHREAD_CONDATTR_MAGIC) != VALID_OBJECT)
157 return false;
158 return true;
159 }
160
161 inline bool
162 pthread_rwlockattr::is_good_object (pthread_rwlockattr_t const *attr)
163 {
164 if (verifyable_object_isvalid (attr, PTHREAD_RWLOCKATTR_MAGIC) != VALID_OBJECT)
165 return false;
166 return true;
167 }
168
169 inline bool
170 pthread_key::is_good_object (pthread_key_t const *key)
171 {
172 if (verifyable_object_isvalid (key, PTHREAD_KEY_MAGIC) != VALID_OBJECT)
173 return false;
174 return true;
175 }
176
177 inline bool
178 pthread_spinlock::is_good_object (pthread_spinlock_t const *mutex)
179 {
180 if (verifyable_object_isvalid (mutex, PTHREAD_SPINLOCK_MAGIC) != VALID_OBJECT)
181 return false;
182 return true;
183 }
184
185 inline bool
186 pthread_mutex::is_good_object (pthread_mutex_t const *mutex)
187 {
188 if (verifyable_object_isvalid (mutex, PTHREAD_MUTEX_MAGIC) != VALID_OBJECT)
189 return false;
190 return true;
191 }
192
193 inline bool
194 pthread_mutex::is_initializer (pthread_mutex_t const *mutex)
195 {
196 if (verifyable_object_isvalid (mutex, PTHREAD_MUTEX_MAGIC,
197 PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP,
198 PTHREAD_NORMAL_MUTEX_INITIALIZER_NP,
199 PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP) != VALID_STATIC_OBJECT)
200 return false;
201 return true;
202 }
203
204 inline bool
205 pthread_mutex::is_initializer_or_object (pthread_mutex_t const *mutex)
206 {
207 if (verifyable_object_isvalid (mutex, PTHREAD_MUTEX_MAGIC,
208 PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP,
209 PTHREAD_NORMAL_MUTEX_INITIALIZER_NP,
210 PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP) == INVALID_OBJECT)
211 return false;
212 return true;
213 }
214
215 /* FIXME: Accommodate PTHREAD_MUTEX_ERRORCHECK */
216 inline bool
217 pthread_mutex::can_be_unlocked ()
218 {
219 pthread_t self = pthread::self ();
220 /* Check if the mutex is owned by the current thread and can be unlocked.
221 * Also check for the ANONYMOUS owner to cover NORMAL mutexes as well. */
222 bool res = type == PTHREAD_MUTEX_NORMAL || no_owner ()
223 || (recursion_counter == 1 && pthread::equal (owner, self));
224 pthread_printf ("recursion_counter %d res %d", recursion_counter, res);
225 return res;
226 }
227
228 inline bool
229 pthread_mutexattr::is_good_object (pthread_mutexattr_t const * attr)
230 {
231 if (verifyable_object_isvalid (attr, PTHREAD_MUTEXATTR_MAGIC) != VALID_OBJECT)
232 return false;
233 return true;
234 }
235
236 inline bool __attribute__ ((used))
237 pthread::is_good_object (pthread_t const *thread)
238 {
239 if (verifyable_object_isvalid (thread, PTHREAD_MAGIC) != VALID_OBJECT)
240 return false;
241 return true;
242 }
243
244 /* Thread synchronisation */
245 inline bool
246 pthread_cond::is_good_object (pthread_cond_t const *cond)
247 {
248 if (verifyable_object_isvalid (cond, PTHREAD_COND_MAGIC) != VALID_OBJECT)
249 return false;
250 return true;
251 }
252
253 inline bool
254 pthread_cond::is_initializer (pthread_cond_t const *cond)
255 {
256 if (verifyable_object_isvalid (cond, PTHREAD_COND_MAGIC, PTHREAD_COND_INITIALIZER) != VALID_STATIC_OBJECT)
257 return false;
258 return true;
259 }
260
261 inline bool
262 pthread_cond::is_initializer_or_object (pthread_cond_t const *cond)
263 {
264 if (verifyable_object_isvalid (cond, PTHREAD_COND_MAGIC, PTHREAD_COND_INITIALIZER) == INVALID_OBJECT)
265 return false;
266 return true;
267 }
268
269 /* RW locks */
270 inline bool
271 pthread_rwlock::is_good_object (pthread_rwlock_t const *rwlock)
272 {
273 if (verifyable_object_isvalid (rwlock, PTHREAD_RWLOCK_MAGIC) != VALID_OBJECT)
274 return false;
275 return true;
276 }
277
278 inline bool
279 pthread_rwlock::is_initializer (pthread_rwlock_t const *rwlock)
280 {
281 if (verifyable_object_isvalid (rwlock, PTHREAD_RWLOCK_MAGIC, PTHREAD_RWLOCK_INITIALIZER) != VALID_STATIC_OBJECT)
282 return false;
283 return true;
284 }
285
286 inline bool
287 pthread_rwlock::is_initializer_or_object (pthread_rwlock_t const *rwlock)
288 {
289 if (verifyable_object_isvalid (rwlock, PTHREAD_RWLOCK_MAGIC, PTHREAD_RWLOCK_INITIALIZER) == INVALID_OBJECT)
290 return false;
291 return true;
292 }
293
294 inline bool
295 semaphore::is_good_object (sem_t const * sem)
296 {
297 if (verifyable_object_isvalid (sem, SEM_MAGIC) != VALID_OBJECT)
298 return false;
299 return true;
300 }
301
302 void
303 MTinterface::Init ()
304 {
305 pthread_mutex::init_mutex ();
306 pthread_cond::init_mutex ();
307 pthread_rwlock::init_mutex ();
308 }
309
310 void
311 MTinterface::fixup_before_fork ()
312 {
313 pthread_key::fixup_before_fork ();
314 }
315
316 /* This function is called from a single threaded process */
317 void
318 MTinterface::fixup_after_fork ()
319 {
320 pthread_key::fixup_after_fork ();
321
322 threadcount = 0;
323 pthread::init_mainthread ();
324
325 pthread::fixup_after_fork ();
326 pthread_mutex::fixup_after_fork ();
327 pthread_cond::fixup_after_fork ();
328 pthread_rwlock::fixup_after_fork ();
329 semaphore::fixup_after_fork ();
330 }
331
332 /* pthread calls */
333
334 /* static methods */
335 void
336 pthread::init_mainthread ()
337 {
338 pthread *thread = _my_tls.tid;
339 if (!thread)
340 {
341 thread = new pthread ();
342 if (!thread)
343 api_fatal ("failed to create mainthread object");
344 }
345
346 set_tls_self_pointer (thread);
347 thread->thread_id = GetCurrentThreadId ();
348 if (!DuplicateHandle (GetCurrentProcess (), GetCurrentThread (),
349 GetCurrentProcess (), &thread->win32_obj_id,
350 0, FALSE, DUPLICATE_SAME_ACCESS))
351 api_fatal ("failed to create mainthread handle");
352 if (!thread->create_cancel_event ())
353 api_fatal ("couldn't create cancel event for main thread");
354 VerifyHandle (thread->win32_obj_id);
355 thread->postcreate ();
356 }
357
358 pthread *
359 pthread::self ()
360 {
361 pthread *thread = _my_tls.tid;
362 if (!thread)
363 {
364 thread = pthread_null::get_null_pthread ();
365 set_tls_self_pointer (thread);
366 }
367 return thread;
368 }
369
370 void
371 pthread::set_tls_self_pointer (pthread *thread)
372 {
373 thread->cygtls = &_my_tls;
374 _my_tls.tid = thread;
375 }
376
377 List<pthread> pthread::threads;
378
379 /* member methods */
380 pthread::pthread ():verifyable_object (PTHREAD_MAGIC), win32_obj_id (0),
381 valid (false), suspended (false), canceled (false),
382 cancelstate (0), canceltype (0), cancel_event (0),
383 joiner (NULL), next (NULL), cleanup_stack (NULL)
384 {
385 if (this != pthread_null::get_null_pthread ())
386 threads.insert (this);
387 parent_tls = &_my_tls;
388 }
389
390 pthread::~pthread ()
391 {
392 if (win32_obj_id)
393 CloseHandle (win32_obj_id);
394 if (cancel_event)
395 CloseHandle (cancel_event);
396
397 if (this != pthread_null::get_null_pthread ())
398 threads.remove (this);
399 }
400
401 bool
402 pthread::create_cancel_event ()
403 {
404 cancel_event = ::CreateEvent (&sec_none_nih, true, false, NULL);
405 if (!cancel_event)
406 {
407 system_printf ("couldn't create cancel event, %E");
408 /* we need the event for correct behaviour */
409 return false;
410 }
411 return true;
412 }
413
414 void
415 pthread::precreate (pthread_attr *newattr)
416 {
417 pthread_mutex *verifyable_mutex_obj = &mutex;
418
419 /* already running ? */
420 if (win32_obj_id)
421 return;
422
423 if (newattr)
424 {
425 attr.joinable = newattr->joinable;
426 attr.contentionscope = newattr->contentionscope;
427 attr.inheritsched = newattr->inheritsched;
428 attr.stacksize = newattr->stacksize;
429 }
430
431 if (!pthread_mutex::is_good_object (&verifyable_mutex_obj))
432 {
433 thread_printf ("New thread object access mutex is not valid. this %p",
434 this);
435 magic = 0;
436 return;
437 }
438 /* Change the mutex type to NORMAL to speed up mutex operations */
439 mutex.set_type (PTHREAD_MUTEX_NORMAL);
440 if (!create_cancel_event ())
441 magic = 0;
442 }
443
444 bool
445 pthread::create (void *(*func) (void *), pthread_attr *newattr,
446 void *threadarg)
447 {
448 bool retval;
449
450 precreate (newattr);
451 if (!magic)
452 return false;
453
454 function = func;
455 arg = threadarg;
456
457 mutex.lock ();
458 win32_obj_id = ::CreateThread (&sec_none_nih, attr.stacksize,
459 thread_init_wrapper, this, 0, &thread_id);
460
461 if (!win32_obj_id)
462 {
463 thread_printf ("CreateThread failed: this %p, %E", this);
464 magic = 0;
465 }
466 else
467 {
468 postcreate ();
469 while (!cygtls)
470 yield ();
471 }
472 retval = magic;
473 mutex.unlock ();
474 return retval;
475 }
476
477 void
478 pthread::postcreate ()
479 {
480 valid = true;
481
482 InterlockedIncrement (&MT_INTERFACE->threadcount);
483 /* FIXME: set the priority appropriately for system contention scope */
484 if (attr.inheritsched == PTHREAD_EXPLICIT_SCHED)
485 {
486 /* FIXME: set the scheduling settings for the new thread */
487 /* sched_thread_setparam (win32_obj_id, attr.schedparam); */
488 }
489 }
490
491 void
492 pthread::exit (void *value_ptr)
493 {
494 class pthread *thread = this;
495
496 // run cleanup handlers
497 pop_all_cleanup_handlers ();
498
499 pthread_key::run_all_destructors ();
500
501 mutex.lock ();
502 // cleanup if thread is in detached state and not joined
503 if (equal (joiner, thread))
504 delete this;
505 else
506 {
507 valid = false;
508 return_ptr = value_ptr;
509 mutex.unlock ();
510 }
511
512 if (_my_tls.local_clib.__sdidinit < 0)
513 _my_tls.local_clib.__sdidinit = 0;
514 (_reclaim_reent) (_REENT);
515
516 if (InterlockedDecrement (&MT_INTERFACE->threadcount) == 0)
517 ::exit (0);
518 else
519 ExitThread (0);
520 }
521
522 int
523 pthread::cancel ()
524 {
525 class pthread *thread = this;
526 class pthread *self = pthread::self ();
527
528 mutex.lock ();
529
530 if (!valid)
531 {
532 mutex.unlock ();
533 return 0;
534 }
535
536 if (canceltype == PTHREAD_CANCEL_DEFERRED ||
537 cancelstate == PTHREAD_CANCEL_DISABLE)
538 {
539 // cancel deferred
540 mutex.unlock ();
541 canceled = true;
542 SetEvent (cancel_event);
543 return 0;
544 }
545 else if (equal (thread, self))
546 {
547 mutex.unlock ();
548 cancel_self ();
549 return 0; // Never reached
550 }
551
552 // cancel asynchronous
553 SuspendThread (win32_obj_id);
554 if (WaitForSingleObject (win32_obj_id, 0) == WAIT_TIMEOUT)
555 {
556 CONTEXT context;
557 context.ContextFlags = CONTEXT_CONTROL;
558 GetThreadContext (win32_obj_id, &context);
559 context.Eip = (DWORD) pthread::static_cancel_self;
560 SetThreadContext (win32_obj_id, &context);
561 }
562 mutex.unlock ();
563 ResumeThread (win32_obj_id);
564
565 return 0;
566 }
567
568 /* TODO: Insert pthread_testcancel into the required functions.
569
570 Here are the lists of required and optional functions per POSIX.1-2001
571 and POSIX.1-2008. A star (*) indicates that the Cygwin function already
572 is a cancellation point (aka "calls pthread_testcancel").
573
574 Required cancellation points:
575
576 accept ()
577 aio_suspend ()
578 clock_nanosleep ()
579 * close ()
580 connect ()
581 * creat ()
582 fcntl () F_SETLKW
583 fdatasync ()
584 fsync ()
585 getmsg ()
586 getpmsg ()
587 lockf () F_LOCK
588 * mq_receive ()
589 * mq_send ()
590 * mq_timedreceive ()
591 * mq_timedsend ()
592 msgrcv ()
593 msgsnd ()
594 msync ()
595 nanosleep ()
596 open ()
597 openat ()
598 * pause ()
599 poll ()
600 pread ()
601 pselect ()
602 * pthread_cond_timedwait ()
603 * pthread_cond_wait ()
604 * pthread_join ()
605 * pthread_testcancel ()
606 putmsg ()
607 putpmsg ()
608 pwrite ()
609 read ()
610 readv ()
611 recv ()
612 recvfrom ()
613 recvmsg ()
614 select ()
615 * sem_timedwait ()
616 * sem_wait ()
617 send ()
618 sendmsg ()
619 sendto ()
620 * sigpause ()
621 * sigsuspend ()
622 sigtimedwait ()
623 sigwait ()
624 sigwaitinfo ()
625 * sleep ()
626 * system ()
627 tcdrain ()
628 * usleep ()
629 * wait ()
630 * wait3()
631 waitid ()
632 * waitpid ()
633 write ()
634 writev ()
635
636 Optional cancellation points:
637
638 access ()
639 asctime ()
640 asctime_r ()
641 catclose ()
642 catgets ()
643 catopen ()
644 chmod ()
645 chown ()
646 closedir ()
647 closelog ()
648 ctermid ()
649 ctime ()
650 ctime_r ()
651 dbm_close ()
652 dbm_delete ()
653 dbm_fetch ()
654 dbm_nextkey ()
655 dbm_open ()
656 dbm_store ()
657 dlclose ()
658 dlopen ()
659 dprintf ()
660 endgrent ()
661 endhostent ()
662 endnetent ()
663 endprotoent ()
664 endpwent ()
665 endservent ()
666 endutxent ()
667 faccessat ()
668 fchmod ()
669 fchmodat ()
670 fchown ()
671 fchownat ()
672 fclose ()
673 fcntl () (any value)
674 fflush ()
675 fgetc ()
676 fgetpos ()
677 fgets ()
678 fgetwc ()
679 fgetws ()
680 fmtmsg ()
681 fopen ()
682 fpathconf ()
683 fprintf ()
684 fputc ()
685 fputs ()
686 fputwc ()
687 fputws ()
688 fread ()
689 freopen ()
690 fscanf ()
691 fseek ()
692 fseeko ()
693 fsetpos ()
694 fstat ()
695 fstatat ()
696 ftell ()
697 ftello ()
698 ftw ()
699 futimens ()
700 fwprintf ()
701 fwrite ()
702 fwscanf ()
703 getaddrinfo ()
704 getc ()
705 getc_unlocked ()
706 getchar ()
707 getchar_unlocked ()
708 getcwd ()
709 getdate ()
710 getdelim ()
711 getgrent ()
712 getgrgid ()
713 getgrgid_r ()
714 getgrnam ()
715 getgrnam_r ()
716 gethostbyaddr ()
717 gethostbyname ()
718 gethostent ()
719 gethostid ()
720 gethostname ()
721 getline ()
722 getlogin ()
723 getlogin_r ()
724 getnameinfo ()
725 getnetbyaddr ()
726 getnetbyname ()
727 getnetent ()
728 getopt () (if opterr is nonzero)
729 getprotobyname ()
730 getprotobynumber ()
731 getprotoent ()
732 getpwent ()
733 * getpwnam ()
734 * getpwnam_r ()
735 * getpwuid ()
736 * getpwuid_r ()
737 gets ()
738 getservbyname ()
739 getservbyport ()
740 getservent ()
741 getutxent ()
742 getutxid ()
743 getutxline ()
744 getwc ()
745 getwchar ()
746 getwd ()
747 glob ()
748 iconv_close ()
749 iconv_open ()
750 ioctl ()
751 link ()
752 linkat ()
753 lio_listio ()
754 localtime ()
755 localtime_r ()
756 lockf ()
757 lseek ()
758 lstat ()
759 mkdir ()
760 mkdirat ()
761 mkdtemp ()
762 mkfifo ()
763 mkfifoat ()
764 mknod ()
765 mknodat ()
766 mkstemp ()
767 mktime ()
768 nftw ()
769 opendir ()
770 openlog ()
771 pathconf ()
772 pclose ()
773 perror ()
774 popen ()
775 posix_fadvise ()
776 posix_fallocate ()
777 posix_madvise ()
778 posix_openpt ()
779 posix_spawn ()
780 posix_spawnp ()
781 posix_trace_clear ()
782 posix_trace_close ()
783 posix_trace_create ()
784 posix_trace_create_withlog ()
785 posix_trace_eventtypelist_getnext_id ()
786 posix_trace_eventtypelist_rewind ()
787 posix_trace_flush ()
788 posix_trace_get_attr ()
789 posix_trace_get_filter ()
790 posix_trace_get_status ()
791 posix_trace_getnext_event ()
792 posix_trace_open ()
793 posix_trace_rewind ()
794 posix_trace_set_filter ()
795 posix_trace_shutdown ()
796 posix_trace_timedgetnext_event ()
797 posix_typed_mem_open ()
798 printf ()
799 psiginfo ()
800 psignal ()
801 pthread_rwlock_rdlock ()
802 pthread_rwlock_timedrdlock ()
803 pthread_rwlock_timedwrlock ()
804 pthread_rwlock_wrlock ()
805 putc ()
806 putc_unlocked ()
807 putchar ()
808 putchar_unlocked ()
809 puts ()
810 pututxline ()
811 putwc ()
812 putwchar ()
813 readdir ()
814 readdir_r ()
815 readlink ()
816 readlinkat ()
817 remove ()
818 rename ()
819 renameat ()
820 rewind ()
821 rewinddir ()
822 scandir ()
823 scanf ()
824 seekdir ()
825 semop ()
826 setgrent ()
827 sethostent ()
828 setnetent ()
829 setprotoent ()
830 setpwent ()
831 setservent ()
832 setutxent ()
833 sigpause ()
834 stat ()
835 strerror ()
836 strerror_r ()
837 strftime ()
838 symlink ()
839 symlinkat ()
840 sync ()
841 syslog ()
842 tmpfile ()
843 tmpnam ()
844 ttyname ()
845 ttyname_r ()
846 tzset ()
847 ungetc ()
848 ungetwc ()
849 unlink ()
850 unlinkat ()
851 utime ()
852 utimensat ()
853 utimes ()
854 vdprintf ()
855 vfprintf ()
856 vfwprintf ()
857 vprintf ()
858 vwprintf ()
859 wcsftime ()
860 wordexp ()
861 wprintf ()
862 wscanf ()
863
864 An implementation may also mark other functions not specified in the
865 standard as cancellation points. In particular, an implementation is
866 likely to mark any nonstandard function that may block as a
867 cancellation point. */
868
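/* Editor's note: a hedged, standalone sketch (not part of this file) of the
   pattern the TODO above asks for: a blocking wrapper calls
   pthread_testcancel() on entry so that a pending deferred cancellation
   request is honoured before the call blocks.  The wrapper name and body are
   purely illustrative. */

#include <pthread.h>
#include <unistd.h>

static ssize_t
my_cancelable_read (int fd, void *buf, size_t len)
{
  /* Act on a pending deferred cancellation request before blocking.  */
  pthread_testcancel ();
  return read (fd, buf, len);
}
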
869 void
870 pthread::testcancel ()
871 {
872 if (cancelstate == PTHREAD_CANCEL_DISABLE)
873 return;
874
875 /* We check the canceled flag first. This allows calling the
876 pthread_testcancel function frequently without adding the overhead of
877 an OS call. Only if the thread is marked as canceled do we wait for
878 cancel_event to actually be set, on the off-chance that pthread_cancel
879 gets interrupted before calling SetEvent. */
880 if (canceled)
881 {
882 WaitForSingleObject (cancel_event, INFINITE);
883 cancel_self ();
884 }
885 }
886
887 void
888 pthread::static_cancel_self ()
889 {
890 pthread::self ()->cancel_self ();
891 }
892
893 DWORD
894 cancelable_wait (HANDLE object, DWORD timeout,
895 const cw_cancel_action cancel_action,
896 const enum cw_sig_wait sig_wait)
897 {
898 DWORD res;
899 DWORD num = 0;
900 HANDLE wait_objects[3];
901 pthread_t thread = pthread::self ();
902
903 /* Do not change the wait order.
904 The object must have higher priority than the cancel event,
905 because WaitForMultipleObjects will return the smallest index
906 if both objects are signaled. */
907 wait_objects[num++] = object;
908 DWORD cancel_n;
909 if (cancel_action == cw_no_cancel || !pthread::is_good_object (&thread) ||
910 thread->cancelstate == PTHREAD_CANCEL_DISABLE)
911 cancel_n = (DWORD) -1;
912 else
913 {
914 cancel_n = WAIT_OBJECT_0 + num++;
915 wait_objects[cancel_n] = thread->cancel_event;
916 }
917
918 DWORD sig_n;
919 if (sig_wait == cw_sig_nosig || &_my_tls != _main_tls)
920 sig_n = (DWORD) -1;
921 else
922 {
923 sig_n = WAIT_OBJECT_0 + num++;
924 wait_objects[sig_n] = signal_arrived;
925 }
926
927 while (1)
928 {
929 res = WaitForMultipleObjects (num, wait_objects, FALSE, timeout);
930 if (res == cancel_n)
931 {
932 if (cancel_action == cw_cancel_self)
933 pthread::static_cancel_self ();
934 res = WAIT_CANCELED;
935 }
936 else if (res != sig_n)
937 /* all set */;
938 else if (sig_wait == cw_sig_eintr)
939 res = WAIT_SIGNALED;
940 else
941 {
942 _my_tls.call_signal_handler ();
943 continue;
944 }
945 break;
946 }
947 return res;
948 }
949
950 int
951 pthread::setcancelstate (int state, int *oldstate)
952 {
953 int result = 0;
954
955 mutex.lock ();
956
957 if (state != PTHREAD_CANCEL_ENABLE && state != PTHREAD_CANCEL_DISABLE)
958 result = EINVAL;
959 else
960 {
961 if (oldstate)
962 *oldstate = cancelstate;
963 cancelstate = state;
964 }
965
966 mutex.unlock ();
967
968 return result;
969 }
970
971 int
972 pthread::setcanceltype (int type, int *oldtype)
973 {
974 int result = 0;
975
976 mutex.lock ();
977
978 if (type != PTHREAD_CANCEL_DEFERRED && type != PTHREAD_CANCEL_ASYNCHRONOUS)
979 result = EINVAL;
980 else
981 {
982 if (oldtype)
983 *oldtype = canceltype;
984 canceltype = type;
985 }
986
987 mutex.unlock ();
988
989 return result;
990 }
991
992 void
993 pthread::push_cleanup_handler (__pthread_cleanup_handler *handler)
994 {
995 if (this != self ())
996 // TODO: do it?
997 api_fatal ("Attempt to push a cleanup handler across threads");
998 handler->next = cleanup_stack;
999 cleanup_stack = handler;
1000 }
1001
1002 void
1003 pthread::pop_cleanup_handler (int const execute)
1004 {
1005 if (this != self ())
1006 // TODO: send a signal or something to the thread ?
1007 api_fatal ("Attempt to execute a cleanup handler across threads");
1008
1009 mutex.lock ();
1010
1011 if (cleanup_stack != NULL)
1012 {
1013 __pthread_cleanup_handler *handler = cleanup_stack;
1014
1015 if (execute)
1016 (*handler->function) (handler->arg);
1017 cleanup_stack = handler->next;
1018 }
1019
1020 mutex.unlock ();
1021 }
1022
1023 void
1024 pthread::pop_all_cleanup_handlers ()
1025 {
1026 while (cleanup_stack != NULL)
1027 pop_cleanup_handler (1);
1028 }
1029
1030 void
1031 pthread::cancel_self ()
1032 {
1033 exit (PTHREAD_CANCELED);
1034 }
1035
1036 DWORD
1037 pthread::get_thread_id ()
1038 {
1039 return thread_id;
1040 }
1041
1042 void
1043 pthread::_fixup_after_fork ()
1044 {
1045 /* set thread to not running if it is not the forking thread */
1046 if (this != pthread::self ())
1047 {
1048 magic = 0;
1049 valid = false;
1050 win32_obj_id = NULL;
1051 canceled = false;
1052 cancel_event = NULL;
1053 }
1054 }
1055
1056 void
1057 pthread::suspend_except_self ()
1058 {
1059 if (valid && this != pthread::self ())
1060 SuspendThread (win32_obj_id);
1061 }
1062
1063 void
1064 pthread::resume ()
1065 {
1066 if (valid)
1067 ResumeThread (win32_obj_id);
1068 }
1069
1070 /* instance members */
1071
1072 pthread_attr::pthread_attr ():verifyable_object (PTHREAD_ATTR_MAGIC),
1073 joinable (PTHREAD_CREATE_JOINABLE), contentionscope (PTHREAD_SCOPE_PROCESS),
1074 inheritsched (PTHREAD_INHERIT_SCHED), stacksize (0)
1075 {
1076 schedparam.sched_priority = 0;
1077 }
1078
1079 pthread_attr::~pthread_attr ()
1080 {
1081 }
1082
1083 pthread_condattr::pthread_condattr ():verifyable_object
1084 (PTHREAD_CONDATTR_MAGIC), shared (PTHREAD_PROCESS_PRIVATE)
1085 {
1086 }
1087
1088 pthread_condattr::~pthread_condattr ()
1089 {
1090 }
1091
1092 List<pthread_cond> pthread_cond::conds;
1093
1094 /* This is used for cond creation protection within a single process only */
1095 fast_mutex NO_COPY pthread_cond::cond_initialization_lock;
1096
1097 /* We can only be called once.
1098 TODO: (no rush) use a non copied memory section to
1099 hold an initialization flag. */
1100 void
1101 pthread_cond::init_mutex ()
1102 {
1103 if (!cond_initialization_lock.init ())
1104 api_fatal ("Could not create win32 Mutex for pthread cond static initializer support.");
1105 }
1106
1107 pthread_cond::pthread_cond (pthread_condattr *attr) :
1108 verifyable_object (PTHREAD_COND_MAGIC),
1109 shared (0), waiting (0), pending (0), sem_wait (NULL),
1110 mtx_cond(NULL), next (NULL)
1111 {
1112 pthread_mutex *verifyable_mutex_obj;
1113
1114 if (attr)
1115 if (attr->shared != PTHREAD_PROCESS_PRIVATE)
1116 {
1117 magic = 0;
1118 return;
1119 }
1120
1121 verifyable_mutex_obj = &mtx_in;
1122 if (!pthread_mutex::is_good_object (&verifyable_mutex_obj))
1123 {
1124 thread_printf ("Internal cond mutex is not valid. this %p", this);
1125 magic = 0;
1126 return;
1127 }
1128 /*
1129 * Change the mutex type to NORMAL.
1130 * This mutex MUST be of type normal
1131 */
1132 mtx_in.set_type (PTHREAD_MUTEX_NORMAL);
1133
1134 verifyable_mutex_obj = &mtx_out;
1135 if (!pthread_mutex::is_good_object (&verifyable_mutex_obj))
1136 {
1137 thread_printf ("Internal cond mutex is not valid. this %p", this);
1138 magic = 0;
1139 return;
1140 }
1141 /* Change the mutex type to NORMAL to speed up mutex operations */
1142 mtx_out.set_type (PTHREAD_MUTEX_NORMAL);
1143
1144 sem_wait = ::CreateSemaphore (&sec_none_nih, 0, LONG_MAX, NULL);
1145 if (!sem_wait)
1146 {
1147 pthread_printf ("CreateSemaphore failed. %E");
1148 magic = 0;
1149 return;
1150 }
1151
1152 conds.insert (this);
1153 }
1154
1155 pthread_cond::~pthread_cond ()
1156 {
1157 if (sem_wait)
1158 CloseHandle (sem_wait);
1159
1160 conds.remove (this);
1161 }
1162
1163 void
1164 pthread_cond::unblock (const bool all)
1165 {
1166 unsigned long releaseable;
1167
1168 /*
1169 * Block outgoing threads (and avoid simultaneous unblocks)
1170 */
1171 mtx_out.lock ();
1172
1173 releaseable = waiting - pending;
1174 if (releaseable)
1175 {
1176 unsigned long released;
1177
1178 if (!pending)
1179 {
1180 /*
1181 * Block incoming threads until all waiting threads are released.
1182 */
1183 mtx_in.lock ();
1184
1185 /*
1186 * Calculate releaseable again because threads can enter until
1187 * the semaphore has been taken, but they cannot leave, so pending
1188 * is unchanged and releaseable can only grow
1189 */
1190 releaseable = waiting - pending;
1191 }
1192
1193 released = all ? releaseable : 1;
1194 pending += released;
1195 /*
1196 * Signal threads
1197 */
1198 ::ReleaseSemaphore (sem_wait, released, NULL);
1199 }
1200
1201 /*
1202 * And let the threads release.
1203 */
1204 mtx_out.unlock ();
1205 }
1206
1207 int
1208 pthread_cond::wait (pthread_mutex_t mutex, DWORD dwMilliseconds)
1209 {
1210 DWORD rv;
1211
1212 mtx_in.lock ();
1213 if (InterlockedIncrement ((long *)&waiting) == 1)
1214 mtx_cond = mutex;
1215 else if (mtx_cond != mutex)
1216 {
1217 InterlockedDecrement ((long *)&waiting);
1218 mtx_in.unlock ();
1219 return EINVAL;
1220 }
1221 mtx_in.unlock ();
1222
1223 /*
1224 * Release the mutex and wait on semaphore
1225 */
1226 ++mutex->condwaits;
1227 mutex->unlock ();
1228
1229 rv = cancelable_wait (sem_wait, dwMilliseconds, cw_no_cancel_self, cw_sig_eintr);
1230
1231 mtx_out.lock ();
1232
1233 if (rv != WAIT_OBJECT_0)
1234 {
1235 /*
1236 * It might happen that a signal is sent while the thread got canceled
1237 * or timed out. Try to take one.
1238 * If the thread gets one, then a signal/broadcast is in progress.
1239 */
1240 if (WaitForSingleObject (sem_wait, 0) == WAIT_OBJECT_0)
1241 /*
1242 * The thread got canceled or timed out while a signalling was in progress.
1243 * Set the wait result back to signaled.
1244 */
1245 rv = WAIT_OBJECT_0;
1246 }
1247
1248 InterlockedDecrement ((long *)&waiting);
1249
1250 if (rv == WAIT_OBJECT_0 && --pending == 0)
1251 /*
1252 * All signaled threads are released,
1253 * new threads can enter Wait
1254 */
1255 mtx_in.unlock ();
1256
1257 mtx_out.unlock ();
1258
1259 mutex->lock ();
1260 --mutex->condwaits;
1261
1262 if (rv == WAIT_CANCELED)
1263 pthread::static_cancel_self ();
1264 else if (rv == WAIT_SIGNALED)
1265 /* SUSv3 states: If a signal is delivered to a thread waiting for a
1266 condition variable, upon return from the signal handler the thread
1267 resumes waiting for the condition variable as if it was not
1268 interrupted, or it shall return zero due to spurious wakeup.
1269 We opt for the latter choice here. */
1270 return 0;
1271 else if (rv == WAIT_TIMEOUT)
1272 return ETIMEDOUT;
1273
1274 return 0;
1275 }
1276
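/* Editor's note: wait() above deliberately returns zero when a wakeup was
   caused by a signal handler (treated as a spurious wakeup), so callers must
   re-check their predicate in a loop.  A standalone usage sketch follows;
   the variable and function names are illustrative. */

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ready;                       /* the predicate */

static void
wait_until_ready (void)
{
  pthread_mutex_lock (&lock);
  while (!ready)                        /* tolerate spurious wakeups */
    pthread_cond_wait (&cond, &lock);
  pthread_mutex_unlock (&lock);
}

static void
set_ready (void)
{
  pthread_mutex_lock (&lock);
  ready = 1;
  pthread_cond_signal (&cond);
  pthread_mutex_unlock (&lock);
}
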
1277 void
1278 pthread_cond::_fixup_after_fork ()
1279 {
1280 waiting = pending = 0;
1281 mtx_cond = NULL;
1282
1283 /* Unlock the mutexes in case they were left locked */
1284 mtx_in.unlock ();
1285 mtx_out.unlock ();
1286
1287 sem_wait = ::CreateSemaphore (&sec_none_nih, 0, LONG_MAX, NULL);
1288 if (!sem_wait)
1289 api_fatal ("pthread_cond::_fixup_after_fork () failed to recreate win32 semaphore");
1290 }
1291
1292 pthread_rwlockattr::pthread_rwlockattr ():verifyable_object
1293 (PTHREAD_RWLOCKATTR_MAGIC), shared (PTHREAD_PROCESS_PRIVATE)
1294 {
1295 }
1296
1297 pthread_rwlockattr::~pthread_rwlockattr ()
1298 {
1299 }
1300
1301 List<pthread_rwlock> pthread_rwlock::rwlocks;
1302
1303 /* This is used for rwlock creation protection within a single process only */
1304 fast_mutex NO_COPY pthread_rwlock::rwlock_initialization_lock;
1305
1306 /* We can only be called once.
1307 TODO: (no rush) use a non copied memory section to
1308 hold an initialization flag. */
1309 void
1310 pthread_rwlock::init_mutex ()
1311 {
1312 if (!rwlock_initialization_lock.init ())
1313 api_fatal ("Could not create win32 Mutex for pthread rwlock static initializer support.");
1314 }
1315
1316 pthread_rwlock::pthread_rwlock (pthread_rwlockattr *attr) :
1317 verifyable_object (PTHREAD_RWLOCK_MAGIC),
1318 shared (0), waiting_readers (0), waiting_writers (0), writer (NULL),
1319 readers (NULL), readers_mx (), mtx (NULL), cond_readers (NULL), cond_writers (NULL),
1320 next (NULL)
1321 {
1322 pthread_mutex *verifyable_mutex_obj = &mtx;
1323 pthread_cond *verifyable_cond_obj;
1324
1325 if (!readers_mx.init ())
1326 {
1327 thread_printf ("Internal rwlock synchronisation mutex is not valid. this %p", this);
1328 magic = 0;
1329 return;
1330 }
1331
1332 if (attr)
1333 if (attr->shared != PTHREAD_PROCESS_PRIVATE)
1334 {
1335 magic = 0;
1336 return;
1337 }
1338
1339 if (!pthread_mutex::is_good_object (&verifyable_mutex_obj))
1340 {
1341 thread_printf ("Internal rwlock mutex is not valid. this %p", this);
1342 magic = 0;
1343 return;
1344 }
1345 /* Change the mutex type to NORMAL to speed up mutex operations */
1346 mtx.set_type (PTHREAD_MUTEX_NORMAL);
1347
1348 verifyable_cond_obj = &cond_readers;
1349 if (!pthread_cond::is_good_object (&verifyable_cond_obj))
1350 {
1351 thread_printf ("Internal rwlock readers cond is not valid. this %p", this);
1352 magic = 0;
1353 return;
1354 }
1355
1356 verifyable_cond_obj = &cond_writers;
1357 if (!pthread_cond::is_good_object (&verifyable_cond_obj))
1358 {
1359 thread_printf ("Internal rwlock writers cond is not valid. this %p", this);
1360 magic = 0;
1361 return;
1362 }
1363
1364
1365 rwlocks.insert (this);
1366 }
1367
1368 pthread_rwlock::~pthread_rwlock ()
1369 {
1370 rwlocks.remove (this);
1371 }
1372
1373 int
1374 pthread_rwlock::rdlock ()
1375 {
1376 int result = 0;
1377 struct RWLOCK_READER *reader;
1378 pthread_t self = pthread::self ();
1379
1380 mtx.lock ();
1381
1382 reader = lookup_reader (self);
1383 if (reader)
1384 {
1385 if (reader->n < ULONG_MAX)
1386 ++reader->n;
1387 else
1388 errno = EAGAIN;
1389 goto DONE;
1390 }
1391
1392 reader = new struct RWLOCK_READER;
1393 if (!reader)
1394 {
1395 result = EAGAIN;
1396 goto DONE;
1397 }
1398
1399 while (writer || waiting_writers)
1400 {
1401 pthread_cleanup_push (pthread_rwlock::rdlock_cleanup, this);
1402
1403 ++waiting_readers;
1404 cond_readers.wait (&mtx);
1405 --waiting_readers;
1406
1407 pthread_cleanup_pop (0);
1408 }
1409
1410 reader->thread = self;
1411 reader->n = 1;
1412 add_reader (reader);
1413
1414 DONE:
1415 mtx.unlock ();
1416
1417 return result;
1418 }
1419
1420 int
1421 pthread_rwlock::tryrdlock ()
1422 {
1423 int result = 0;
1424 pthread_t self = pthread::self ();
1425
1426 mtx.lock ();
1427
1428 if (writer || waiting_writers || lookup_reader (self))
1429 result = EBUSY;
1430 else
1431 {
1432 struct RWLOCK_READER *reader;
1433
1434 reader = lookup_reader (self);
1435 if (reader && reader->n < ULONG_MAX)
1436 ++reader->n;
1437 else if ((reader = new struct RWLOCK_READER))
1438 {
1439 reader->thread = self;
1440 reader->n = 1;
1441 add_reader (reader);
1442 }
1443 else
1444 result = EAGAIN;
1445 }
1446
1447 mtx.unlock ();
1448
1449 return result;
1450 }
1451
1452 int
1453 pthread_rwlock::wrlock ()
1454 {
1455 int result = 0;
1456 pthread_t self = pthread::self ();
1457
1458 mtx.lock ();
1459
1460 if (writer == self || lookup_reader (self))
1461 {
1462 result = EDEADLK;
1463 goto DONE;
1464 }
1465
1466 while (writer || readers)
1467 {
1468 pthread_cleanup_push (pthread_rwlock::wrlock_cleanup, this);
1469
1470 ++waiting_writers;
1471 cond_writers.wait (&mtx);
1472 --waiting_writers;
1473
1474 pthread_cleanup_pop (0);
1475 }
1476
1477 writer = self;
1478
1479 DONE:
1480 mtx.unlock ();
1481
1482 return result;
1483 }
1484
1485 int
1486 pthread_rwlock::trywrlock ()
1487 {
1488 int result = 0;
1489 pthread_t self = pthread::self ();
1490
1491 mtx.lock ();
1492
1493 if (writer || readers)
1494 result = EBUSY;
1495 else
1496 writer = self;
1497
1498 mtx.unlock ();
1499
1500 return result;
1501 }
1502
1503 int
1504 pthread_rwlock::unlock ()
1505 {
1506 int result = 0;
1507 pthread_t self = pthread::self ();
1508
1509 mtx.lock ();
1510
1511 if (writer)
1512 {
1513 if (writer != self)
1514 {
1515 result = EPERM;
1516 goto DONE;
1517 }
1518
1519 writer = NULL;
1520 }
1521 else
1522 {
1523 struct RWLOCK_READER *reader = lookup_reader (self);
1524
1525 if (!reader)
1526 {
1527 result = EPERM;
1528 goto DONE;
1529 }
1530 if (--reader->n > 0)
1531 goto DONE;
1532
1533 remove_reader (reader);
1534 delete reader;
1535 }
1536
1537 release ();
1538
1539 DONE:
1540 mtx.unlock ();
1541
1542 return result;
1543 }
1544
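/* Editor's note: a standalone usage sketch (not part of this file) of the
   reader/writer lock implemented above.  Note from wrlock() above that a
   thread already holding the read lock gets EDEADLK when it asks for the
   write lock.  Names are illustrative. */

#include <pthread.h>

static pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;
static int shared_value;

static int
read_value (void)
{
  int v;
  pthread_rwlock_rdlock (&rw);          /* many readers may hold this */
  v = shared_value;
  pthread_rwlock_unlock (&rw);
  return v;
}

static void
write_value (int v)
{
  pthread_rwlock_wrlock (&rw);          /* exclusive access */
  shared_value = v;
  pthread_rwlock_unlock (&rw);
}
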
1545 void
1546 pthread_rwlock::add_reader (struct RWLOCK_READER *rd)
1547 {
1548 List_insert (readers, rd);
1549 }
1550
1551 void
1552 pthread_rwlock::remove_reader (struct RWLOCK_READER *rd)
1553 {
1554 List_remove (readers_mx, readers, rd);
1555 }
1556
1557 struct pthread_rwlock::RWLOCK_READER *
1558 pthread_rwlock::lookup_reader (pthread_t thread)
1559 {
1560 readers_mx.lock ();
1561
1562 struct RWLOCK_READER *cur = readers;
1563
1564 while (cur && cur->thread != thread)
1565 cur = cur->next;
1566
1567 readers_mx.unlock ();
1568
1569 return cur;
1570 }
1571
1572 void
1573 pthread_rwlock::rdlock_cleanup (void *arg)
1574 {
1575 pthread_rwlock *rwlock = (pthread_rwlock *) arg;
1576
1577 --(rwlock->waiting_readers);
1578 rwlock->release ();
1579 rwlock->mtx.unlock ();
1580 }
1581
1582 void
1583 pthread_rwlock::wrlock_cleanup (void *arg)
1584 {
1585 pthread_rwlock *rwlock = (pthread_rwlock *) arg;
1586
1587 --(rwlock->waiting_writers);
1588 rwlock->release ();
1589 rwlock->mtx.unlock ();
1590 }
1591
1592 void
1593 pthread_rwlock::_fixup_after_fork ()
1594 {
1595 pthread_t self = pthread::self ();
1596 struct RWLOCK_READER **temp = &readers;
1597
1598 waiting_readers = 0;
1599 waiting_writers = 0;
1600
1601 if (!readers_mx.init ())
1602 api_fatal ("pthread_rwlock::_fixup_after_fork () failed to recreate mutex");
1603
1604 /* Unlock the mutex in case it was left locked */
1605 mtx.unlock ();
1606 /*
1607 * Remove all readers except self
1608 */
1609 while (*temp)
1610 {
1611 if ((*temp)->thread == self)
1612 temp = &((*temp)->next);
1613 else
1614 {
1615 struct RWLOCK_READER *cur = *temp;
1616 *temp = (*temp)->next;
1617 delete cur;
1618 }
1619 }
1620 }
1621
1622 /* pthread_key */
1623 /* static members */
1624 /* This stores pthread_key information across fork() boundaries */
1625 List<pthread_key> pthread_key::keys;
1626
1627 /* non-static members */
1628
1629 pthread_key::pthread_key (void (*aDestructor) (void *)):verifyable_object (PTHREAD_KEY_MAGIC), destructor (aDestructor)
1630 {
1631 tls_index = TlsAlloc ();
1632 if (tls_index == TLS_OUT_OF_INDEXES)
1633 magic = 0;
1634 else
1635 keys.insert (this);
1636 }
1637
1638 pthread_key::~pthread_key ()
1639 {
1640 /* We may need to make the list code lock the list during operations
1641 */
1642 if (magic != 0)
1643 {
1644 keys.remove (this);
1645 TlsFree (tls_index);
1646 }
1647 }
1648
1649 void
1650 pthread_key::_fixup_before_fork ()
1651 {
1652 fork_buf = get ();
1653 }
1654
1655 void
1656 pthread_key::_fixup_after_fork ()
1657 {
1658 tls_index = TlsAlloc ();
1659 if (tls_index == TLS_OUT_OF_INDEXES)
1660 api_fatal ("pthread_key::recreate_key_from_buffer () failed to reallocate Tls storage");
1661 set (fork_buf);
1662 }
1663
1664 void
1665 pthread_key::run_destructor ()
1666 {
1667 if (destructor)
1668 {
1669 void *oldValue = get ();
1670 if (oldValue)
1671 {
1672 set (NULL);
1673 destructor (oldValue);
1674 }
1675 }
1676 }
1677
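/* Editor's note: a standalone sketch (not part of this file) of the
   thread-specific data API whose internals appear above.  run_destructor()
   clears the value before invoking the destructor, which matches the usual
   POSIX usage pattern below.  Names are illustrative and error handling is
   omitted for brevity. */

#include <pthread.h>
#include <stdlib.h>

static pthread_key_t buf_key;

static void
free_buffer (void *p)                   /* runs at thread exit if a value is set */
{
  free (p);
}

static void
create_key (void)
{
  pthread_key_create (&buf_key, free_buffer);
}

static char *
get_thread_buffer (void)
{
  char *buf = (char *) pthread_getspecific (buf_key);
  if (!buf)
    {
      buf = (char *) calloc (1, 256);
      pthread_setspecific (buf_key, buf);
    }
  return buf;
}
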
1678 /* pshared mutexes */
1679
1680 /* static members */
1681
1682 List<pthread_mutex> pthread_mutex::mutexes;
1683
1684 /* This is used for mutex creation protection within a single process only */
1685 fast_mutex NO_COPY pthread_mutex::mutex_initialization_lock;
1686
1687 void
1688 pthread_mutex::init_mutex ()
1689 {
1690 if (!mutex_initialization_lock.init ())
1691 api_fatal ("Could not create win32 Mutex for pthread mutex static initializer support.");
1692 }
1693
1694 pthread_mutex::pthread_mutex (pthread_mutexattr *attr) :
1695 verifyable_object (0), /* set magic to zero initially */
1696 lock_counter (0),
1697 win32_obj_id (NULL), owner (_new_mutex),
1698 #ifdef DEBUGGING
1699 tid (0),
1700 #endif
1701 recursion_counter (0), condwaits (0),
1702 type (PTHREAD_MUTEX_ERRORCHECK),
1703 pshared (PTHREAD_PROCESS_PRIVATE)
1704 {
1705 win32_obj_id = ::CreateEvent (&sec_none_nih, false, false, NULL);
1706 if (!win32_obj_id)
1707 return;
1708 /*attr checked in the C call */
1709 if (!attr)
1710 /* handled in the caller */;
1711 else if (attr->pshared != PTHREAD_PROCESS_SHARED)
1712 type = attr->mutextype;
1713 else
1714 return; /* Not implemented */
1715
1716 magic = PTHREAD_MUTEX_MAGIC;
1717 mutexes.insert (this);
1718 }
1719
1720 pthread_mutex::~pthread_mutex ()
1721 {
1722 if (win32_obj_id)
1723 {
1724 CloseHandle (win32_obj_id);
1725 win32_obj_id = NULL;
1726 }
1727
1728 mutexes.remove (this);
1729 owner = _destroyed_mutex;
1730 magic = 0;
1731 }
1732
1733 int
1734 pthread_mutex::lock ()
1735 {
1736 pthread_t self = ::pthread_self ();
1737 int result = 0;
1738
1739 if (InterlockedIncrement ((long *) &lock_counter) == 1)
1740 set_owner (self);
1741 else if (type == PTHREAD_MUTEX_NORMAL /* potentially causes deadlock */
1742 || !pthread::equal (owner, self))
1743 {
1744 cancelable_wait (win32_obj_id, INFINITE, cw_no_cancel, cw_sig_resume);
1745 set_owner (self);
1746 }
1747 else
1748 {
1749 InterlockedDecrement ((long *) &lock_counter);
1750 if (type == PTHREAD_MUTEX_RECURSIVE)
1751 result = lock_recursive ();
1752 else
1753 result = EDEADLK;
1754 }
1755
1756 pthread_printf ("mutex %p, self %p, owner %p, lock_counter %d, recursion_counter %d",
1757 this, self, owner, lock_counter, recursion_counter);
1758 return result;
1759 }
1760
1761 int
1762 pthread_mutex::unlock ()
1763 {
1764 int res = 0;
1765 pthread_t self = ::pthread_self ();
1766 if (type == PTHREAD_MUTEX_NORMAL)
1767 /* no error checking */;
1768 else if (no_owner ())
1769 res = type == PTHREAD_MUTEX_ERRORCHECK ? EINVAL : 0;
1770 else if (!pthread::equal (owner, self))
1771 res = EPERM;
1772 if (!res && recursion_counter > 0 && --recursion_counter == 0)
1773 /* Don't try to unlock anything if recursion_counter == 0.
1774 This means the mutex was never locked or that we've forked. */
1775 {
1776 owner = (pthread_t) _unlocked_mutex;
1777 #ifdef DEBUGGING
1778 tid = 0;
1779 #endif
1780 if (InterlockedDecrement ((long *) &lock_counter))
1781 ::SetEvent (win32_obj_id); // Another thread is waiting
1782 res = 0;
1783 }
1784
1785 pthread_printf ("mutex %p, owner %p, self %p, lock_counter %d, recursion_counter %d, type %d, res %d",
1786 this, owner, self, lock_counter, recursion_counter, type, res);
1787 return res;
1788 }
1789
1790 int
1791 pthread_mutex::trylock ()
1792 {
1793 pthread_t self = ::pthread_self ();
1794 int result = 0;
1795
1796 if (InterlockedCompareExchange ((long *) &lock_counter, 1, 0) == 0)
1797 set_owner (self);
1798 else if (type == PTHREAD_MUTEX_RECURSIVE && pthread::equal (owner, self))
1799 result = lock_recursive ();
1800 else
1801 result = EBUSY;
1802
1803 return result;
1804 }
1805
1806 int
1807 pthread_mutex::destroy ()
1808 {
1809 if (condwaits || trylock ())
1810 // Do not destroy a condwaited or locked mutex
1811 return EBUSY;
1812 else if (recursion_counter > 1)
1813 {
1814 // Do not destroy a recursively locked mutex
1815 recursion_counter--;
1816 return EBUSY;
1817 }
1818
1819 delete this;
1820 return 0;
1821 }
1822
1823 void
1824 pthread_mutex::_fixup_after_fork ()
1825 {
1826 pthread_printf ("mutex %p", this);
1827 if (pshared != PTHREAD_PROCESS_PRIVATE)
1828 api_fatal ("pthread_mutex::_fixup_after_fork () doesn't understand PROCESS_SHARED mutex's");
1829
1830 /* All waiting threads are gone after a fork */
1831 recursion_counter = 0;
1832 lock_counter = 0;
1833 condwaits = 0;
1834 #ifdef DEBUGGING
1835 tid = 0xffffffff; /* Don't know the tid after a fork */
1836 #endif
1837 win32_obj_id = ::CreateEvent (&sec_none_nih, false, false, NULL);
1838 if (!win32_obj_id)
1839 api_fatal ("pthread_mutex::_fixup_after_fork () failed to recreate win32 event for mutex");
1840 }
1841
1842 pthread_mutexattr::pthread_mutexattr ():verifyable_object (PTHREAD_MUTEXATTR_MAGIC),
1843 pshared (PTHREAD_PROCESS_PRIVATE), mutextype (PTHREAD_MUTEX_ERRORCHECK)
1844 {
1845 }
1846
1847 pthread_mutexattr::~pthread_mutexattr ()
1848 {
1849 }
1850
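/* Editor's note: the attribute object above carries the mutex type that the
   pthread_mutex constructor copies in.  A standalone sketch of selecting a
   recursive mutex through that path; only standard POSIX calls are used and
   the helper name is illustrative. */

#include <pthread.h>

static int
make_recursive_mutex (pthread_mutex_t *m)
{
  pthread_mutexattr_t attr;
  int ret;

  pthread_mutexattr_init (&attr);
  /* The type set here ends up in pthread_mutex::type via the constructor.  */
  pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
  ret = pthread_mutex_init (m, &attr);
  pthread_mutexattr_destroy (&attr);
  return ret;
}
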
1851 /* pshared spinlocks
1852
1853 The infrastructure is provided by the underlying pthread_mutex class.
1854 The rest is a simplification implementing spin locking. */
1855
1856 pthread_spinlock::pthread_spinlock (int pshared) :
1857 pthread_mutex (NULL)
1858 {
1859 magic = PTHREAD_SPINLOCK_MAGIC;
1860 set_type (PTHREAD_MUTEX_NORMAL);
1861 set_shared (pshared);
1862 }
1863
1864 int
1865 pthread_spinlock::lock ()
1866 {
1867 pthread_t self = ::pthread_self ();
1868 int result = -1;
1869
1870 do
1871 {
1872 if (InterlockedExchange ((long *) &lock_counter, 1) == 0)
1873 {
1874 set_owner (self);
1875 result = 0;
1876 }
1877 else if (pthread::equal (owner, self))
1878 result = EDEADLK;
1879 else /* Minimal timeout to minimize CPU usage while still spinning. */
1880 cancelable_wait (win32_obj_id, 1L, cw_no_cancel, cw_sig_resume);
1881 }
1882 while (result == -1);
1883 pthread_printf ("spinlock %p, self %p, owner %p", this, self, owner);
1884 return result;
1885 }
1886
1887 int
1888 pthread_spinlock::unlock ()
1889 {
1890 pthread_t self = ::pthread_self ();
1891 int result = 0;
1892
1893 if (!pthread::equal (owner, self))
1894 result = EPERM;
1895 else
1896 {
1897 owner = (pthread_t) _unlocked_mutex;
1898 #ifdef DEBUGGING
1899 tid = 0;
1900 #endif
1901 InterlockedExchange ((long *) &lock_counter, 0);
1902 ::SetEvent (win32_obj_id);
1903 result = 0;
1904 }
1905 pthread_printf ("spinlock %p, owner %p, self %p, res %d",
1906 this, owner, self, result);
1907 return result;
1908 }
1909
1910 DWORD WINAPI
1911 pthread::thread_init_wrapper (void *arg)
1912 {
1913 pthread *thread = (pthread *) arg;
1914 set_tls_self_pointer (thread);
1915
1916 thread->mutex.lock ();
1917
1918 // if thread is detached force cleanup on exit
1919 if (thread->attr.joinable == PTHREAD_CREATE_DETACHED && thread->joiner == NULL)
1920 thread->joiner = thread;
1921 _my_tls.sigmask = thread->parent_tls->sigmask;
1922 thread->mutex.unlock ();
1923
1924 thread_printf ("started thread %p %p %p %p %p %p", arg, &_my_tls.local_clib,
1925 _impure_ptr, thread, thread->function, thread->arg);
1926
1927 // call the user's thread
1928 void *ret = thread->function (thread->arg);
1929
1930 thread->exit (ret);
1931
1932 return 0; // just for show. Never returns.
1933 }
1934
1935 unsigned long
1936 pthread::getsequence_np ()
1937 {
1938 return get_thread_id ();
1939 }
1940
1941 int
1942 pthread::create (pthread_t *thread, const pthread_attr_t *attr,
1943 void *(*start_routine) (void *), void *arg)
1944 {
1945 if (attr && !pthread_attr::is_good_object (attr))
1946 return EINVAL;
1947
1948 *thread = new pthread ();
1949 if (!(*thread)->create (start_routine, attr ? *attr : NULL, arg))
1950 {
1951 delete (*thread);
1952 *thread = NULL;
1953 return EAGAIN;
1954 }
1955
1956 return 0;
1957 }
1958
1959 int
1960 pthread::once (pthread_once_t *once_control, void (*init_routine) (void))
1961 {
1962 // already done ?
1963 if (once_control->state)
1964 return 0;
1965
1966 pthread_mutex_lock (&once_control->mutex);
1967 /* Here we must set a cancellation handler to unlock the mutex if needed */
1968 /* but a cancellation handler is not the right thing. We need this in the thread
1969 *cleanup routine. Assumption: a thread can only be in one pthread_once routine
1970 *at a time. Store a mutex_t * in the pthread structure. If that's non-NULL, unlock
1971 *on pthread_exit ();
1972 */
1973 if (!once_control->state)
1974 {
1975 init_routine ();
1976 once_control->state = 1;
1977 }
1978 /* Here we must remove our cancellation handler */
1979 pthread_mutex_unlock (&once_control->mutex);
1980 return 0;
1981 }
1982
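/* Editor's note: a standalone usage sketch (not part of this file) of once()
   above.  The early check of once_control->state in the implementation keeps
   calls after the first one cheap.  Names are illustrative. */

#include <pthread.h>

static pthread_once_t init_once = PTHREAD_ONCE_INIT;
static int initialized;

static void
do_init (void)                          /* runs exactly once */
{
  initialized = 1;
}

static void
ensure_initialized (void)
{
  pthread_once (&init_once, do_init);
}
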
1983 int
1984 pthread::cancel (pthread_t thread)
1985 {
1986 if (!is_good_object (&thread))
1987 return ESRCH;
1988
1989 return thread->cancel ();
1990 }
1991
1992 void
1993 pthread::atforkprepare ()
1994 {
1995 callback *cb = MT_INTERFACE->pthread_prepare;
1996 while (cb)
1997 {
1998 cb->cb ();
1999 cb = cb->next;
2000 }
2001
2002 __fp_lock_all ();
2003
2004 MT_INTERFACE->fixup_before_fork ();
2005 }
2006
2007 void
2008 pthread::atforkparent ()
2009 {
2010 __fp_unlock_all ();
2011
2012 callback *cb = MT_INTERFACE->pthread_parent;
2013 while (cb)
2014 {
2015 cb->cb ();
2016 cb = cb->next;
2017 }
2018 }
2019
2020 void
2021 pthread::atforkchild ()
2022 {
2023 MT_INTERFACE->fixup_after_fork ();
2024
2025 __fp_unlock_all ();
2026
2027 callback *cb = MT_INTERFACE->pthread_child;
2028 while (cb)
2029 {
2030 cb->cb ();
2031 cb = cb->next;
2032 }
2033 }
2034
2035 /* Register a set of functions to run before and after fork.
2036 prepare calls are called in LIFO order.
2037 parent and child calls are called in FIFO order. */
2038 int
2039 pthread::atfork (void (*prepare)(void), void (*parent)(void), void (*child)(void))
2040 {
2041 callback *prepcb = NULL, *parentcb = NULL, *childcb = NULL;
2042 if (prepare)
2043 {
2044 prepcb = new callback;
2045 if (!prepcb)
2046 return ENOMEM;
2047 }
2048 if (parent)
2049 {
2050 parentcb = new callback;
2051 if (!parentcb)
2052 {
2053 if (prepcb)
2054 delete prepcb;
2055 return ENOMEM;
2056 }
2057 }
2058 if (child)
2059 {
2060 childcb = new callback;
2061 if (!childcb)
2062 {
2063 if (prepcb)
2064 delete prepcb;
2065 if (parentcb)
2066 delete parentcb;
2067 return ENOMEM;
2068 }
2069 }
2070
2071 if (prepcb)
2072 {
2073 prepcb->cb = prepare;
2074 List_insert (MT_INTERFACE->pthread_prepare, prepcb);
2075 }
2076 if (parentcb)
2077 {
2078 parentcb->cb = parent;
2079 callback **t = &MT_INTERFACE->pthread_parent;
2080 while (*t)
2081 t = &(*t)->next;
2082 /* t = pointer to last next in the list */
2083 List_insert (*t, parentcb);
2084 }
2085 if (childcb)
2086 {
2087 childcb->cb = child;
2088 callback **t = &MT_INTERFACE->pthread_child;
2089 while (*t)
2090 t = &(*t)->next;
2091 /* t = pointer to last next in the list */
2092 List_insert (*t, childcb);
2093 }
2094 return 0;
2095 }
2096
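/* Editor's note: a standalone sketch (not part of this file) of registering
   fork handlers through atfork() above.  Because prepare handlers run in LIFO
   order and parent/child handlers in FIFO order, a lock taken in a prepare
   handler is released again in both the parent and the child.  Names are
   illustrative. */

#include <pthread.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void fork_prepare (void) { pthread_mutex_lock (&state_lock); }
static void fork_parent (void)  { pthread_mutex_unlock (&state_lock); }
static void fork_child (void)   { pthread_mutex_unlock (&state_lock); }

static void
install_fork_handlers (void)
{
  pthread_atfork (fork_prepare, fork_parent, fork_child);
}
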
2097 extern "C" int
2098 pthread_attr_init (pthread_attr_t *attr)
2099 {
2100 if (pthread_attr::is_good_object (attr))
2101 return EBUSY;
2102
2103 *attr = new pthread_attr;
2104 if (!pthread_attr::is_good_object (attr))
2105 {
2106 delete (*attr);
2107 *attr = NULL;
2108 return ENOMEM;
2109 }
2110 return 0;
2111 }
2112
2113 extern "C" int
2114 pthread_attr_getinheritsched (const pthread_attr_t *attr,
2115 int *inheritsched)
2116 {
2117 if (!pthread_attr::is_good_object (attr))
2118 return EINVAL;
2119 *inheritsched = (*attr)->inheritsched;
2120 return 0;
2121 }
2122
2123 extern "C" int
2124 pthread_attr_getschedparam (const pthread_attr_t *attr,
2125 struct sched_param *param)
2126 {
2127 if (!pthread_attr::is_good_object (attr))
2128 return EINVAL;
2129 *param = (*attr)->schedparam;
2130 return 0;
2131 }
2132
2133 /* From a pure code point of view, this should call a helper in sched.cc,
2134 to allow for someone adding scheduler policy changes to win32 in the future.
2135 However that's extremely unlikely, so short and sweet will do us */
2136 extern "C" int
2137 pthread_attr_getschedpolicy (const pthread_attr_t *attr, int *policy)
2138 {
2139 if (!pthread_attr::is_good_object (attr))
2140 return EINVAL;
2141 *policy = SCHED_FIFO;
2142 return 0;
2143 }
2144
2145
2146 extern "C" int
2147 pthread_attr_getscope (const pthread_attr_t *attr, int *contentionscope)
2148 {
2149 if (!pthread_attr::is_good_object (attr))
2150 return EINVAL;
2151 *contentionscope = (*attr)->contentionscope;
2152 return 0;
2153 }
2154
2155 extern "C" int
2156 pthread_attr_setdetachstate (pthread_attr_t *attr, int detachstate)
2157 {
2158 if (!pthread_attr::is_good_object (attr))
2159 return EINVAL;
2160 if (detachstate < 0 || detachstate > 1)
2161 return EINVAL;
2162 (*attr)->joinable = detachstate;
2163 return 0;
2164 }
2165
2166 extern "C" int
2167 pthread_attr_getdetachstate (const pthread_attr_t *attr, int *detachstate)
2168 {
2169 if (!pthread_attr::is_good_object (attr))
2170 return EINVAL;
2171 *detachstate = (*attr)->joinable;
2172 return 0;
2173 }
2174
2175 extern "C" int
2176 pthread_attr_setinheritsched (pthread_attr_t *attr, int inheritsched)
2177 {
2178 if (!pthread_attr::is_good_object (attr))
2179 return EINVAL;
2180 if (inheritsched != PTHREAD_INHERIT_SCHED
2181 && inheritsched != PTHREAD_EXPLICIT_SCHED)
2182 return ENOTSUP;
2183 (*attr)->inheritsched = inheritsched;
2184 return 0;
2185 }
2186
2187 extern "C" int
2188 pthread_attr_setschedparam (pthread_attr_t *attr,
2189 const struct sched_param *param)
2190 {
2191 if (!pthread_attr::is_good_object (attr))
2192 return EINVAL;
2193 if (!valid_sched_parameters (param))
2194 return ENOTSUP;
2195 (*attr)->schedparam = *param;
2196 return 0;
2197 }
2198
2199 /* See pthread_attr_getschedpolicy above for some notes */
2200 extern "C" int
2201 pthread_attr_setschedpolicy (pthread_attr_t *attr, int policy)
2202 {
2203 if (!pthread_attr::is_good_object (attr))
2204 return EINVAL;
2205 if (policy != SCHED_FIFO)
2206 return ENOTSUP;
2207 return 0;
2208 }
2209
2210 extern "C" int
2211 pthread_attr_setscope (pthread_attr_t *attr, int contentionscope)
2212 {
2213 if (!pthread_attr::is_good_object (attr))
2214 return EINVAL;
2215 if (contentionscope != PTHREAD_SCOPE_SYSTEM
2216 && contentionscope != PTHREAD_SCOPE_PROCESS)
2217 return EINVAL;
2218 /* In future, we may be able to support system scope by escalating the thread
2219 priority to exceed the priority class. For now we only support PROCESS scope. */
2220 if (contentionscope != PTHREAD_SCOPE_PROCESS)
2221 return ENOTSUP;
2222 (*attr)->contentionscope = contentionscope;
2223 return 0;
2224 }
2225
2226 extern "C" int
2227 pthread_attr_setstacksize (pthread_attr_t *attr, size_t size)
2228 {
2229 if (!pthread_attr::is_good_object (attr))
2230 return EINVAL;
2231 (*attr)->stacksize = size;
2232 return 0;
2233 }
2234
2235 extern "C" int
2236 pthread_attr_getstacksize (const pthread_attr_t *attr, size_t *size)
2237 {
2238 if (!pthread_attr::is_good_object (attr))
2239 return EINVAL;
2240 *size = (*attr)->stacksize;
2241 return 0;
2242 }
2243
2244 extern "C" int
2245 pthread_attr_destroy (pthread_attr_t *attr)
2246 {
2247 if (!pthread_attr::is_good_object (attr))
2248 return EINVAL;
2249 delete (*attr);
2250 *attr = NULL;
2251 return 0;
2252 }
2253
2254 int
2255 pthread::join (pthread_t *thread, void **return_val)
2256 {
2257 pthread_t joiner = self ();
2258
2259 joiner->testcancel ();
2260
2261 // Initialize return val with NULL
2262 if (return_val)
2263 *return_val = NULL;
2264
2265 if (!is_good_object (&joiner))
2266 return EINVAL;
2267
2268 if (!is_good_object (thread))
2269 return ESRCH;
2270
2271 if (equal (*thread,joiner))
2272 return EDEADLK;
2273
2274 (*thread)->mutex.lock ();
2275
2276 if ((*thread)->attr.joinable == PTHREAD_CREATE_DETACHED)
2277 {
2278 (*thread)->mutex.unlock ();
2279 return EINVAL;
2280 }
2281 else
2282 {
2283 (*thread)->joiner = joiner;
2284 (*thread)->attr.joinable = PTHREAD_CREATE_DETACHED;
2285 (*thread)->mutex.unlock ();
2286
2287 switch (cancelable_wait ((*thread)->win32_obj_id, INFINITE, cw_no_cancel_self, cw_sig_resume))
2288 {
2289 case WAIT_OBJECT_0:
2290 if (return_val)
2291 *return_val = (*thread)->return_ptr;
2292 delete (*thread);
2293 break;
2294 case WAIT_CANCELED:
2295 // set joined thread back to joinable since we got canceled
2296 (*thread)->joiner = NULL;
2297 (*thread)->attr.joinable = PTHREAD_CREATE_JOINABLE;
2298 joiner->cancel_self ();
2299 // never reached
2300 break;
2301 default:
2302 // should never happen
2303 return EINVAL;
2304 }
2305 }
2306
2307 return 0;
2308 }
2309
2310 int
2311 pthread::detach (pthread_t *thread)
2312 {
2313 if (!is_good_object (thread))
2314 return ESRCH;
2315
2316 (*thread)->mutex.lock ();
2317 if ((*thread)->attr.joinable == PTHREAD_CREATE_DETACHED)
2318 {
2319 (*thread)->mutex.unlock ();
2320 return EINVAL;
2321 }
2322
2323 // check if thread is still alive
2324 if ((*thread)->valid && WaitForSingleObject ((*thread)->win32_obj_id, 0) == WAIT_TIMEOUT)
2325 {
2326 // force cleanup on exit
2327 (*thread)->joiner = *thread;
2328 (*thread)->attr.joinable = PTHREAD_CREATE_DETACHED;
2329 (*thread)->mutex.unlock ();
2330 }
2331 else
2332 {
2333 // thread has already terminated.
2334 (*thread)->mutex.unlock ();
2335 delete (*thread);
2336 }
2337
2338 return 0;
2339 }
2340
2341 int
2342 pthread::suspend (pthread_t *thread)
2343 {
2344 if (!is_good_object (thread))
2345 return ESRCH;
2346
2347 if ((*thread)->suspended == false)
2348 {
2349 (*thread)->suspended = true;
2350 SuspendThread ((*thread)->win32_obj_id);
2351 }
2352
2353 return 0;
2354 }
2355
2356
2357 int
2358 pthread::resume (pthread_t *thread)
2359 {
2360 if (!is_good_object (thread))
2361 return ESRCH;
2362
2363 if ((*thread)->suspended == true)
2364 ResumeThread ((*thread)->win32_obj_id);
2365 (*thread)->suspended = false;
2366
2367 return 0;
2368 }
2369
2370 /* provided for source level compatibility.
2371 See http://www.opengroup.org/onlinepubs/007908799/xsh/pthread_getconcurrency.html
2372 */
2373 extern "C" int
2374 pthread_getconcurrency ()
2375 {
2376 return MT_INTERFACE->concurrency;
2377 }
2378
2379 /* keep this in sync with sched.cc */
2380 extern "C" int
2381 pthread_getschedparam (pthread_t thread, int *policy,
2382 struct sched_param *param)
2383 {
2384 if (!pthread::is_good_object (&thread))
2385 return ESRCH;
2386 *policy = SCHED_FIFO;
2387 /* We don't return the current effective priority; we return the current
2388 requested priority. */
2389 *param = thread->attr.schedparam;
2390 return 0;
2391 }
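/* Illustrative sketch (not part of thread.cc): since only the requested
   priority is reported, a caller adjusting priorities typically reads it
   back and feeds a modified value into pthread_setschedparam (defined
   further below).  "bump_priority" is a made-up helper name. */
#if 0
static int
bump_priority (pthread_t t, int delta)
{
  struct sched_param sp;
  int policy;
  int rc = pthread_getschedparam (t, &policy, &sp);

  if (rc)
    return rc;                  /* ESRCH for an invalid thread */
  sp.sched_priority += delta;
  return pthread_setschedparam (t, policy, &sp);  /* policy is SCHED_FIFO */
}
#endif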
2392
2393 /* Thread Specific Data */
2394 extern "C" int
2395 pthread_key_create (pthread_key_t *key, void (*destructor) (void *))
2396 {
2397 *key = new pthread_key (destructor);
2398
2399 if (!pthread_key::is_good_object (key))
2400 {
2401 delete (*key);
2402 *key = NULL;
2403 return EAGAIN;
2404 }
2405 return 0;
2406 }
2407
2408 extern "C" int
2409 pthread_key_delete (pthread_key_t key)
2410 {
2411 if (!pthread_key::is_good_object (&key))
2412 return EINVAL;
2413
2414 delete (key);
2415 return 0;
2416 }
2417
2418 /* provided for source level compatibility. See
2419 http://www.opengroup.org/onlinepubs/007908799/xsh/pthread_getconcurrency.html
2420 */
2421 extern "C" int
2422 pthread_setconcurrency (int new_level)
2423 {
2424 if (new_level < 0)
2425 return EINVAL;
2426 MT_INTERFACE->concurrency = new_level;
2427 return 0;
2428 }
2429
2430 /* keep synchronised with sched.cc */
2431 extern "C" int
2432 pthread_setschedparam (pthread_t thread, int policy,
2433 const struct sched_param *param)
2434 {
2435 if (!pthread::is_good_object (&thread))
2436 return ESRCH;
2437 if (policy != SCHED_FIFO)
2438 return ENOTSUP;
2439 if (!param)
2440 return EINVAL;
2441 int rv =
2442 sched_set_thread_priority (thread->win32_obj_id, param->sched_priority);
2443 if (!rv)
2444 thread->attr.schedparam.sched_priority = param->sched_priority;
2445 return rv;
2446 }
2447
2448 extern "C" int
2449 pthread_setschedprio (pthread_t thread, int priority)
2450 {
2451 if (!pthread::is_good_object (&thread))
2452 return ESRCH;
2453 int rv =
2454 sched_set_thread_priority (thread->win32_obj_id, priority);
2455 if (!rv)
2456 thread->attr.schedparam.sched_priority = priority;
2457 return rv;
2458 }
2459
2460 extern "C" int
2461 pthread_setspecific (pthread_key_t key, const void *value)
2462 {
2463 if (!pthread_key::is_good_object (&key))
2464 return EINVAL;
2465 (key)->set (value);
2466 return 0;
2467 }
2468
2469 extern "C" void *
2470 pthread_getspecific (pthread_key_t key)
2471 {
2472 if (!pthread_key::is_good_object (&key))
2473 return NULL;
2474
2475 return (key)->get ();
2476
2477 }
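/* Illustrative sketch (not part of thread.cc) of the TSD calls above:
   one key shared by all threads, a per-thread buffer stored under it,
   and a destructor that releases the buffer at thread exit.  The names
   "buf_key", "free_buf" and "tsd_demo" are made up. */
#if 0
static pthread_key_t buf_key;

static void
free_buf (void *p)
{
  free (p);                     /* runs once per thread at exit */
}

static void
tsd_demo (void)
{
  pthread_key_create (&buf_key, free_buf);      /* normally done once */
  pthread_setspecific (buf_key, malloc (128));
  char *buf = (char *) pthread_getspecific (buf_key);
  /* ... use buf; free_buf releases it when this thread exits ... */
}
#endif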
2478
2479 extern "C" int
2480 pthread_cond_destroy (pthread_cond_t *cond)
2481 {
2482 if (pthread_cond::is_initializer (cond))
2483 return 0;
2484 if (!pthread_cond::is_good_object (cond))
2485 return EINVAL;
2486
2487 /* reads are atomic */
2488 if ((*cond)->waiting)
2489 return EBUSY;
2490
2491 delete (*cond);
2492 *cond = NULL;
2493
2494 return 0;
2495 }
2496
2497 int
2498 pthread_cond::init (pthread_cond_t *cond, const pthread_condattr_t *attr)
2499 {
2500 pthread_cond_t new_cond;
2501
2502 if (attr && !pthread_condattr::is_good_object (attr))
2503 return EINVAL;
2504
2505 cond_initialization_lock.lock ();
2506
2507 new_cond = new pthread_cond (attr ? (*attr) : NULL);
2508 if (!is_good_object (&new_cond))
2509 {
2510 delete new_cond;
2511 cond_initialization_lock.unlock ();
2512 return EAGAIN;
2513 }
2514
2515 myfault efault;
2516 if (efault.faulted ())
2517 {
2518 delete new_cond;
2519 cond_initialization_lock.unlock ();
2520 return EINVAL;
2521 }
2522
2523 *cond = new_cond;
2524 cond_initialization_lock.unlock ();
2525
2526 return 0;
2527 }
2528
2529 extern "C" int
2530 pthread_cond_broadcast (pthread_cond_t *cond)
2531 {
2532 if (pthread_cond::is_initializer (cond))
2533 return 0;
2534 if (!pthread_cond::is_good_object (cond))
2535 return EINVAL;
2536
2537 (*cond)->unblock (true);
2538
2539 return 0;
2540 }
2541
2542 extern "C" int
2543 pthread_cond_signal (pthread_cond_t *cond)
2544 {
2545 if (pthread_cond::is_initializer (cond))
2546 return 0;
2547 if (!pthread_cond::is_good_object (cond))
2548 return EINVAL;
2549
2550 (*cond)->unblock (false);
2551
2552 return 0;
2553 }
2554
2555 static int
2556 __pthread_cond_dowait (pthread_cond_t *cond, pthread_mutex_t *mutex,
2557 DWORD waitlength)
2558 {
2559 if (!pthread_mutex::is_good_object (mutex))
2560 return EINVAL;
2561 if (!(*mutex)->can_be_unlocked ())
2562 return EPERM;
2563
2564 if (pthread_cond::is_initializer (cond))
2565 pthread_cond::init (cond, NULL);
2566 if (!pthread_cond::is_good_object (cond))
2567 return EINVAL;
2568
2569 return (*cond)->wait (*mutex, waitlength);
2570 }
2571
2572 extern "C" int
2573 pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
2574 const struct timespec *abstime)
2575 {
2576 struct timeval tv;
2577 DWORD waitlength;
2578
2579 myfault efault;
2580 if (efault.faulted ())
2581 return EINVAL;
2582
2583 pthread_testcancel ();
2584
2585 /* According to SUSv3, the abstime value must be checked for validity. */
2586 if (abstime->tv_sec < 0
2587 || abstime->tv_nsec < 0
2588 || abstime->tv_nsec > 999999999)
2589 return EINVAL;
2590
2591 gettimeofday (&tv, NULL);
2592 /* Check for immediate timeout before converting to microseconds, since
2593 the resulting value can easily overflow long. This also allows us to
2594 evaluate microseconds directly in DWORD. */
2595 if (tv.tv_sec > abstime->tv_sec
2596 || (tv.tv_sec == abstime->tv_sec
2597 && tv.tv_usec > abstime->tv_nsec / 1000))
2598 return ETIMEDOUT;
2599
2600 waitlength = (abstime->tv_sec - tv.tv_sec) * 1000;
2601 waitlength += (abstime->tv_nsec / 1000 - tv.tv_usec) / 1000;
2602 return __pthread_cond_dowait (cond, mutex, waitlength);
2603 }
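/* Illustrative sketch (not part of thread.cc): building the absolute
   timeout the validity and overflow checks above expect.  The deadline
   is CLOCK_REALTIME based and tv_nsec must stay in [0, 999999999].
   "wait_with_timeout" and its millisecond parameter are made up. */
#if 0
static int
wait_with_timeout (pthread_cond_t *cv, pthread_mutex_t *mtx, long rel_ms)
{
  struct timeval now;
  struct timespec abstime;

  gettimeofday (&now, NULL);
  abstime.tv_sec = now.tv_sec + rel_ms / 1000;
  abstime.tv_nsec = now.tv_usec * 1000 + (rel_ms % 1000) * 1000000L;
  if (abstime.tv_nsec >= 1000000000L)
    {
      abstime.tv_sec++;
      abstime.tv_nsec -= 1000000000L;
    }
  /* Returns 0, ETIMEDOUT or EINVAL, exactly as checked above. */
  return pthread_cond_timedwait (cv, mtx, &abstime);
}
#endif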
2604
2605 extern "C" int
2606 pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)
2607 {
2608 pthread_testcancel ();
2609
2610 return __pthread_cond_dowait (cond, mutex, INFINITE);
2611 }
2612
2613 extern "C" int
2614 pthread_condattr_init (pthread_condattr_t *condattr)
2615 {
2616 if (pthread_condattr::is_good_object (condattr))
2617 return EBUSY;
2618
2619 *condattr = new pthread_condattr;
2620 if (!pthread_condattr::is_good_object (condattr))
2621 {
2622 delete (*condattr);
2623 *condattr = NULL;
2624 return ENOMEM;
2625 }
2626 return 0;
2627 }
2628
2629 extern "C" int
2630 pthread_condattr_getpshared (const pthread_condattr_t *attr, int *pshared)
2631 {
2632 if (!pthread_condattr::is_good_object (attr))
2633 return EINVAL;
2634 *pshared = (*attr)->shared;
2635 return 0;
2636 }
2637
2638 extern "C" int
2639 pthread_condattr_setpshared (pthread_condattr_t *attr, int pshared)
2640 {
2641 if (!pthread_condattr::is_good_object (attr))
2642 return EINVAL;
2643 if ((pshared < 0) || (pshared > 1))
2644 return EINVAL;
2645 /* shared cond vars not currently supported */
2646 if (pshared != PTHREAD_PROCESS_PRIVATE)
2647 return EINVAL;
2648 (*attr)->shared = pshared;
2649 return 0;
2650 }
2651
2652 extern "C" int
2653 pthread_condattr_destroy (pthread_condattr_t *condattr)
2654 {
2655 if (!pthread_condattr::is_good_object (condattr))
2656 return EINVAL;
2657 delete (*condattr);
2658 *condattr = NULL;
2659 return 0;
2660 }
2661
2662 extern "C" int
2663 pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
2664 {
2665 if (pthread_rwlock::is_initializer (rwlock))
2666 return 0;
2667 if (!pthread_rwlock::is_good_object (rwlock))
2668 return EINVAL;
2669
2670 if ((*rwlock)->writer || (*rwlock)->readers ||
2671 (*rwlock)->waiting_readers || (*rwlock)->waiting_writers)
2672 return EBUSY;
2673
2674 delete (*rwlock);
2675 *rwlock = NULL;
2676
2677 return 0;
2678 }
2679
2680 int
2681 pthread_rwlock::init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
2682 {
2683 pthread_rwlock_t new_rwlock;
2684
2685 if (attr && !pthread_rwlockattr::is_good_object (attr))
2686 return EINVAL;
2687
2688 rwlock_initialization_lock.lock ();
2689
2690 new_rwlock = new pthread_rwlock (attr ? (*attr) : NULL);
2691 if (!is_good_object (&new_rwlock))
2692 {
2693 delete new_rwlock;
2694 rwlock_initialization_lock.unlock ();
2695 return EAGAIN;
2696 }
2697
2698 myfault efault;
2699 if (efault.faulted ())
2700 {
2701 delete new_rwlock;
2702 rwlock_initialization_lock.unlock ();
2703 return EINVAL;
2704 }
2705
2706 *rwlock = new_rwlock;
2707 rwlock_initialization_lock.unlock ();
2708
2709 return 0;
2710 }
2711
2712 extern "C" int
2713 pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
2714 {
2715 pthread_testcancel ();
2716
2717 if (pthread_rwlock::is_initializer (rwlock))
2718 pthread_rwlock::init (rwlock, NULL);
2719 if (!pthread_rwlock::is_good_object (rwlock))
2720 return EINVAL;
2721
2722 return (*rwlock)->rdlock ();
2723 }
2724
2725 extern "C" int
2726 pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
2727 {
2728 if (pthread_rwlock::is_initializer (rwlock))
2729 pthread_rwlock::init (rwlock, NULL);
2730 if (!pthread_rwlock::is_good_object (rwlock))
2731 return EINVAL;
2732
2733 return (*rwlock)->tryrdlock ();
2734 }
2735
2736 extern "C" int
2737 pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
2738 {
2739 pthread_testcancel ();
2740
2741 if (pthread_rwlock::is_initializer (rwlock))
2742 pthread_rwlock::init (rwlock, NULL);
2743 if (!pthread_rwlock::is_good_object (rwlock))
2744 return EINVAL;
2745
2746 return (*rwlock)->wrlock ();
2747 }
2748
2749 extern "C" int
2750 pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
2751 {
2752 if (pthread_rwlock::is_initializer (rwlock))
2753 pthread_rwlock::init (rwlock, NULL);
2754 if (!pthread_rwlock::is_good_object (rwlock))
2755 return EINVAL;
2756
2757 return (*rwlock)->trywrlock ();
2758 }
2759
2760 extern "C" int
2761 pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
2762 {
2763 if (pthread_rwlock::is_initializer (rwlock))
2764 return 0;
2765 if (!pthread_rwlock::is_good_object (rwlock))
2766 return EINVAL;
2767
2768 return (*rwlock)->unlock ();
2769 }
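/* Illustrative sketch (not part of thread.cc): the usual reader/writer
   split over the wrappers above.  A statically initialized rwlock is
   converted to a real object on first use, as each wrapper shows.
   "table_lock", "reader" and "writer" are made-up names. */
#if 0
static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

static void
reader (void)
{
  pthread_rwlock_rdlock (&table_lock);
  /* ... read the shared table ... */
  pthread_rwlock_unlock (&table_lock);
}

static void
writer (void)
{
  pthread_rwlock_wrlock (&table_lock);
  /* ... modify the shared table ... */
  pthread_rwlock_unlock (&table_lock);
}
#endif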
2770
2771 extern "C" int
2772 pthread_rwlockattr_init (pthread_rwlockattr_t *rwlockattr)
2773 {
2774 if (pthread_rwlockattr::is_good_object (rwlockattr))
2775 return EBUSY;
2776
2777 *rwlockattr = new pthread_rwlockattr;
2778 if (!pthread_rwlockattr::is_good_object (rwlockattr))
2779 {
2780 delete (*rwlockattr);
2781 *rwlockattr = NULL;
2782 return ENOMEM;
2783 }
2784 return 0;
2785 }
2786
2787 extern "C" int
2788 pthread_rwlockattr_getpshared (const pthread_rwlockattr_t *attr, int *pshared)
2789 {
2790 if (!pthread_rwlockattr::is_good_object (attr))
2791 return EINVAL;
2792 *pshared = (*attr)->shared;
2793 return 0;
2794 }
2795
2796 extern "C" int
2797 pthread_rwlockattr_setpshared (pthread_rwlockattr_t *attr, int pshared)
2798 {
2799 if (!pthread_rwlockattr::is_good_object (attr))
2800 return EINVAL;
2801 if ((pshared < 0) || (pshared > 1))
2802 return EINVAL;
2803 /* shared rwlock vars not currently supported */
2804 if (pshared != PTHREAD_PROCESS_PRIVATE)
2805 return EINVAL;
2806 (*attr)->shared = pshared;
2807 return 0;
2808 }
2809
2810 extern "C" int
2811 pthread_rwlockattr_destroy (pthread_rwlockattr_t *rwlockattr)
2812 {
2813 if (!pthread_rwlockattr::is_good_object (rwlockattr))
2814 return EINVAL;
2815 delete (*rwlockattr);
2816 *rwlockattr = NULL;
2817 return 0;
2818 }
2819
2820 /* Thread signal */
2821 extern "C" int
2822 pthread_kill (pthread_t thread, int sig)
2823 {
2824 // lock myself, for the use of thread2signal
2825 // two different kills might clash: FIXME
2826
2827 if (!pthread::is_good_object (&thread))
2828 return EINVAL;
2829
2830 siginfo_t si = {0};
2831 si.si_signo = sig;
2832 si.si_code = SI_USER;
2833 si.si_pid = myself->pid;
2834 si.si_uid = myself->uid;
2835 int rval;
2836 if (!thread->valid)
2837 rval = ESRCH;
2838 else if (sig)
2839 {
2840 thread->cygtls->set_threadkill ();
2841 rval = sig_send (NULL, si, thread->cygtls);
2842 }
2843 else
2844 switch (WaitForSingleObject (thread->win32_obj_id, 0))
2845 {
2846 case WAIT_TIMEOUT:
2847 rval = 0;
2848 break;
2849 default:
2850 rval = ESRCH;
2851 break;
2852 }
2853
2854 // unlock myself
2855 return rval;
2856 }
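/* Illustrative sketch (not part of thread.cc): the sig == 0 branch above
   is the conventional "is this thread still around?" probe -- nothing is
   delivered, only the existence check runs.  "thread_alive" is made up. */
#if 0
static int
thread_alive (pthread_t t)
{
  return pthread_kill (t, 0) == 0;
}
#endif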
2857
2858 extern "C" int
2859 pthread_sigmask (int operation, const sigset_t *set, sigset_t *old_set)
2860 {
2861 return handle_sigprocmask (operation, set, old_set, _my_tls.sigmask);
2862 }
2863
2864 /* ID */
2865
2866 extern "C" int
2867 pthread_equal (pthread_t t1, pthread_t t2)
2868 {
2869 return pthread::equal (t1, t2);
2870 }
2871
2872 /* Mutexes */
2873
2874 int
2875 pthread_mutex::init (pthread_mutex_t *mutex,
2876 const pthread_mutexattr_t *attr,
2877 const pthread_mutex_t initializer)
2878 {
2879 if (attr && !pthread_mutexattr::is_good_object (attr))
2880 return EINVAL;
2881
2882 mutex_initialization_lock.lock ();
2883 if (initializer == NULL || pthread_mutex::is_initializer (mutex))
2884 {
2885 pthread_mutex_t new_mutex = new pthread_mutex (attr ? (*attr) : NULL);
2886 if (!is_good_object (&new_mutex))
2887 {
2888 delete new_mutex;
2889 mutex_initialization_lock.unlock ();
2890 return EAGAIN;
2891 }
2892
2893 if (!attr && initializer)
2894 {
2895 if (initializer == PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP)
2896 new_mutex->type = PTHREAD_MUTEX_RECURSIVE;
2897 else if (initializer == PTHREAD_NORMAL_MUTEX_INITIALIZER_NP)
2898 new_mutex->type = PTHREAD_MUTEX_NORMAL;
2899 else if (initializer == PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP)
2900 new_mutex->type = PTHREAD_MUTEX_ERRORCHECK;
2901 }
2902
2903 myfault efault;
2904 if (efault.faulted ())
2905 {
2906 delete new_mutex;
2907 mutex_initialization_lock.unlock ();
2908 return EINVAL;
2909 }
2910
2911 *mutex = new_mutex;
2912 }
2913 mutex_initialization_lock.unlock ();
2914 pthread_printf ("*mutex %p, attr %p, initializer %p", *mutex, attr, initializer);
2915
2916 return 0;
2917 }
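/* Illustrative sketch (not part of thread.cc): a mutex declared with one
   of the static initializers only becomes a real pthread_mutex object on
   first use, and the initializer value picks the type, as handled above.
   "rec_mtx" and "recursive_lock_demo" are made-up names. */
#if 0
static pthread_mutex_t rec_mtx = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

static void
recursive_lock_demo (void)
{
  pthread_mutex_lock (&rec_mtx);    /* init () runs here, type RECURSIVE */
  pthread_mutex_lock (&rec_mtx);    /* legal on a recursive mutex */
  pthread_mutex_unlock (&rec_mtx);
  pthread_mutex_unlock (&rec_mtx);
}
#endif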
2918
2919 extern "C" int
2920 pthread_mutex_getprioceiling (const pthread_mutex_t *mutex,
2921 int *prioceiling)
2922 {
2923 /* We don't define _POSIX_THREAD_PRIO_PROTECT because we don't currently support
2924 mutex priorities.
2925
2926 We could support mutex priorities in the future though (sketched below):
2927 Store a priority with each mutex.
2928 When the mutex is obtained, set the thread priority as appropriate.
2929 When the mutex is released, reset the thread priority. */
2930 return ENOSYS;
2931 }
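/* Illustrative sketch (not part of thread.cc) of the scheme described in
   the comment above.  "prioceiling", "saved_priority" and the two method
   names would all be new pthread_mutex members; nothing like this exists
   today. */
#if 0
int
pthread_mutex::lock_with_ceiling ()
{
  int rc = lock ();
  if (!rc)
    {
      saved_priority = GetThreadPriority (GetCurrentThread ());
      SetThreadPriority (GetCurrentThread (), prioceiling);
    }
  return rc;
}

int
pthread_mutex::unlock_with_ceiling ()
{
  SetThreadPriority (GetCurrentThread (), saved_priority);
  return unlock ();
}
#endif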
2932
2933 extern "C" int
2934 pthread_mutex_lock (pthread_mutex_t *mutex)
2935 {
2936 if (pthread_mutex::is_initializer (mutex))
2937 pthread_mutex::init (mutex, NULL, *mutex);
2938 if (!pthread_mutex::is_good_object (mutex))
2939 return EINVAL;
2940 return (*mutex)->lock ();
2941 }
2942
2943 extern "C" int
2944 pthread_mutex_trylock (pthread_mutex_t *mutex)
2945 {
2946 if (pthread_mutex::is_initializer (mutex))
2947 pthread_mutex::init (mutex, NULL, *mutex);
2948 if (!pthread_mutex::is_good_object (mutex))
2949 return EINVAL;
2950 return (*mutex)->trylock ();
2951 }
2952
2953 extern "C" int
2954 pthread_mutex_unlock (pthread_mutex_t *mutex)
2955 {
2956 if (pthread_mutex::is_initializer (mutex))
2957 return EPERM;
2958 if (!pthread_mutex::is_good_object (mutex))
2959 return EINVAL;
2960 return (*mutex)->unlock ();
2961 }
2962
2963 extern "C" int
2964 pthread_mutex_destroy (pthread_mutex_t *mutex)
2965 {
2966 int rv;
2967
2968 if (pthread_mutex::is_initializer (mutex))
2969 return 0;
2970 if (!pthread_mutex::is_good_object (mutex))
2971 return EINVAL;
2972
2973 rv = (*mutex)->destroy ();
2974 if (rv)
2975 return rv;
2976
2977 *mutex = NULL;
2978 return 0;
2979 }
2980
2981 extern "C" int
2982 pthread_mutex_setprioceiling (pthread_mutex_t *mutex, int prioceiling,
2983 int *old_ceiling)
2984 {
2985 return ENOSYS;
2986 }
2987
2988 /* Spinlocks */
2989
2990 int
2991 pthread_spinlock::init (pthread_spinlock_t *spinlock, int pshared)
2992 {
2993 pthread_spinlock_t new_spinlock = new pthread_spinlock (pshared);
2994 if (!is_good_object (&new_spinlock))
2995 {
2996 delete new_spinlock;
2997 return EAGAIN;
2998 }
2999
3000 myfault efault;
3001 if (efault.faulted ())
3002 {
3003 delete new_spinlock;
3004 return EINVAL;
3005 }
3006
3007 *spinlock = new_spinlock;
3008 pthread_printf ("*spinlock %p, pshared %d", *spinlock, pshared);
3009
3010 return 0;
3011 }
3012
3013 extern "C" int
3014 pthread_spin_lock (pthread_spinlock_t *spinlock)
3015 {
3016 if (!pthread_spinlock::is_good_object (spinlock))
3017 return EINVAL;
3018 return (*spinlock)->lock ();
3019 }
3020
3021 extern "C" int
3022 pthread_spin_trylock (pthread_spinlock_t *spinlock)
3023 {
3024 if (!pthread_spinlock::is_good_object (spinlock))
3025 return EINVAL;
3026 return (*spinlock)->trylock ();
3027 }
3028
3029 extern "C" int
3030 pthread_spin_unlock (pthread_spinlock_t *spinlock)
3031 {
3032 if (!pthread_spinlock::is_good_object (spinlock))
3033 return EINVAL;
3034 return (*spinlock)->unlock ();
3035 }
3036
3037 extern "C" int
3038 pthread_spin_destroy (pthread_spinlock_t *spinlock)
3039 {
3040 if (!pthread_spinlock::is_good_object (spinlock))
3041 return EINVAL;
3042 return (*spinlock)->destroy ();
3043 }
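/* Illustrative sketch (not part of thread.cc): a minimal round trip
   through the spinlock wrappers above.  pthread_spin_init is the standard
   POSIX initializer; "spin_demo" is a made-up name. */
#if 0
static void
spin_demo (void)
{
  pthread_spinlock_t sl;

  pthread_spin_init (&sl, PTHREAD_PROCESS_PRIVATE);
  pthread_spin_lock (&sl);
  /* ... very short critical section ... */
  pthread_spin_unlock (&sl);
  pthread_spin_destroy (&sl);
}
#endif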
3044
3045 /* Win32 doesn't support mutex priorities - see pthread_mutex_getprioceiling
3046 for more detail */
3047 extern "C" int
3048 pthread_mutexattr_getprotocol (const pthread_mutexattr_t *attr,
3049 int *protocol)
3050 {
3051 if (!pthread_mutexattr::is_good_object (attr))
3052 return EINVAL;
3053 return ENOSYS;
3054 }
3055
3056 extern "C" int
3057 pthread_mutexattr_getpshared (const pthread_mutexattr_t *attr,
3058 int *pshared)
3059 {
3060 if (!pthread_mutexattr::is_good_object (attr))
3061 return EINVAL;
3062 *pshared = (*attr)->pshared;
3063 return 0;
3064 }
3065
3066 extern "C" int
3067 pthread_mutexattr_gettype (const pthread_mutexattr_t *attr, int *type)
3068 {
3069 if (!pthread_mutexattr::is_good_object (attr))
3070 return EINVAL;
3071 *type = (*attr)->mutextype;
3072 return 0;
3073 }
3074
3075 /* FIXME: write and test process shared mutex's. */
3076 extern "C" int
3077 pthread_mutexattr_init (pthread_mutexattr_t *attr)
3078 {
3079 if (pthread_mutexattr::is_good_object (attr))
3080 return EBUSY;
3081
3082 *attr = new pthread_mutexattr ();
3083 if (!pthread_mutexattr::is_good_object (attr))
3084 {
3085 delete (*attr);
3086 *attr = NULL;
3087 return ENOMEM;
3088 }
3089 return 0;
3090 }
3091
3092 extern "C" int
3093 pthread_mutexattr_destroy (pthread_mutexattr_t *attr)
3094 {
3095 if (!pthread_mutexattr::is_good_object (attr))
3096 return EINVAL;
3097 delete (*attr);
3098 *attr = NULL;
3099 return 0;
3100 }
3101
3102
3103 /* Win32 doesn't support mutex priorities */
3104 extern "C" int
3105 pthread_mutexattr_setprotocol (pthread_mutexattr_t *attr, int protocol)
3106 {
3107 if (!pthread_mutexattr::is_good_object (attr))
3108 return EINVAL;
3109 return ENOSYS;
3110 }
3111
3112 /* Win32 doesn't support mutex priorities */
3113 extern "C" int
3114 pthread_mutexattr_setprioceiling (pthread_mutexattr_t *attr,
3115 int prioceiling)
3116 {
3117 if (!pthread_mutexattr::is_good_object (attr))
3118 return EINVAL;
3119 return ENOSYS;
3120 }
3121
3122 extern "C" int
3123 pthread_mutexattr_getprioceiling (const pthread_mutexattr_t *attr,
3124 int *prioceiling)
3125 {
3126 if (!pthread_mutexattr::is_good_object (attr))
3127 return EINVAL;
3128 return ENOSYS;
3129 }
3130
3131 extern "C" int
3132 pthread_mutexattr_setpshared (pthread_mutexattr_t *attr, int pshared)
3133 {
3134 if (!pthread_mutexattr::is_good_object (attr))
3135 return EINVAL;
3136 /* We don't use pshared for anything as yet. We need to test PROCESS_SHARED
3137 functionality.
3138 */
3139 if (pshared != PTHREAD_PROCESS_PRIVATE)
3140 return EINVAL;
3141 (*attr)->pshared = pshared;
3142 return 0;
3143 }
3144
3145 /* see pthread_mutexattr_gettype */
3146 extern "C" int
3147 pthread_mutexattr_settype (pthread_mutexattr_t *attr, int type)
3148 {
3149 if (!pthread_mutexattr::is_good_object (attr))
3150 return EINVAL;
3151
3152 switch (type)
3153 {
3154 case PTHREAD_MUTEX_ERRORCHECK:
3155 case PTHREAD_MUTEX_RECURSIVE:
3156 case PTHREAD_MUTEX_NORMAL:
3157 (*attr)->mutextype = type;
3158 break;
3159 default:
3160 return EINVAL;
3161 }
3162
3163 return 0;
3164 }
3165
3166 /* Semaphores */
3167
3168 List<semaphore> semaphore::semaphores;
3169
3170 semaphore::semaphore (int pshared, unsigned int value)
3171 : verifyable_object (SEM_MAGIC),
3172 shared (pshared),
3173 currentvalue (value),
3174 fd (-1),
3175 hash (0ULL),
3176 sem (NULL)
3177 {
3178 SECURITY_ATTRIBUTES sa = (pshared != PTHREAD_PROCESS_PRIVATE)
3179 ? sec_all : sec_none_nih;
3180 this->win32_obj_id = ::CreateSemaphore (&sa, value, LONG_MAX, NULL);
3181 if (!this->win32_obj_id)
3182 magic = 0;
3183
3184 semaphores.insert (this);
3185 }
3186
3187 semaphore::semaphore (unsigned long long shash, LUID sluid, int sfd,
3188 sem_t *ssem, int oflag, mode_t mode, unsigned int value)
3189 : verifyable_object (SEM_MAGIC),
3190 shared (PTHREAD_PROCESS_SHARED),
3191 currentvalue (value), /* Unused for named semaphores. */
3192 fd (sfd),
3193 hash (shash),
3194 luid (sluid),
3195 sem (ssem)
3196 {
3197 char name[MAX_PATH];
3198
3199 __small_sprintf (name, "semaphore/%016X%08x%08x",
3200 hash, luid.HighPart, luid.LowPart);
3201 this->win32_obj_id = ::CreateSemaphore (&sec_all, value, LONG_MAX, name);
3202 if (!this->win32_obj_id)
3203 magic = 0;
3204 if (GetLastError () == ERROR_ALREADY_EXISTS && (oflag & O_EXCL))
3205 {
3206 __seterrno ();
3207 CloseHandle (this->win32_obj_id);
3208 magic = 0;
3209 }
3210
3211 semaphores.insert (this);
3212 }
3213
3214 semaphore::~semaphore ()
3215 {
3216 if (win32_obj_id)
3217 CloseHandle (win32_obj_id);
3218
3219 semaphores.remove (this);
3220 }
3221
3222 void
3223 semaphore::_post ()
3224 {
3225 if (ReleaseSemaphore (win32_obj_id, 1, &currentvalue))
3226 currentvalue++;
3227 }
3228
3229 int
3230 semaphore::_getvalue (int *sval)
3231 {
3232 long val;
3233
3234 switch (WaitForSingleObject (win32_obj_id, 0))
3235 {
3236 case WAIT_OBJECT_0:
3237 ReleaseSemaphore (win32_obj_id, 1, &val);
3238 *sval = val + 1;
3239 break;
3240 case WAIT_TIMEOUT:
3241 *sval = 0;
3242 break;
3243 default:
3244 set_errno (EAGAIN);
3245 return -1;
3246 }
3247 return 0;
3248 }
3249
3250 int
3251 semaphore::_trywait ()
3252 {
3253 /* FIXME: signals should be able to interrupt semaphores...
3254 We probably need WaitForMultipleObjects here. */
3255 if (WaitForSingleObject (win32_obj_id, 0) == WAIT_TIMEOUT)
3256 {
3257 set_errno (EAGAIN);
3258 return -1;
3259 }
3260 currentvalue--;
3261 return 0;
3262 }
3263
3264 int
3265 semaphore::_timedwait (const struct timespec *abstime)
3266 {
3267 struct timeval tv;
3268 long waitlength;
3269
3270 myfault efault;
3271 if (efault.faulted ())
3272 {
3273 /* According to SUSv3, abstime need not be checked for validity,
3274 if the semaphore can be locked immediately. */
3275 if (!_trywait ())
3276 return 0;
3277 set_errno (EINVAL);
3278 return -1;
3279 }
3280
3281 gettimeofday (&tv, NULL);
3282 waitlength = abstime->tv_sec * 1000 + abstime->tv_nsec / (1000 * 1000);
3283 waitlength -= tv.tv_sec * 1000 + tv.tv_usec / 1000;
3284 if (waitlength < 0)
3285 waitlength = 0;
3286 switch (cancelable_wait (win32_obj_id, waitlength, cw_cancel_self, cw_sig_eintr))
3287 {
3288 case WAIT_OBJECT_0:
3289 currentvalue--;
3290 break;
3291 case WAIT_SIGNALED:
3292 set_errno (EINTR);
3293 return -1;
3294 case WAIT_TIMEOUT:
3295 set_errno (ETIMEDOUT);
3296 return -1;
3297 default:
3298 pthread_printf ("cancelable_wait failed. %E");
3299 __seterrno ();
3300 return -1;
3301 }
3302 return 0;
3303 }
3304
3305 int
3306 semaphore::_wait ()
3307 {
3308 switch (cancelable_wait (win32_obj_id, INFINITE, cw_cancel_self, cw_sig_eintr))
3309 {
3310 case WAIT_OBJECT_0:
3311 currentvalue--;
3312 break;
3313 case WAIT_SIGNALED:
3314 set_errno (EINTR);
3315 return -1;
3316 default:
3317 pthread_printf ("cancelable_wait failed. %E");
3318 break;
3319 }
3320 return 0;
3321 }
3322
3323 void
3324 semaphore::_fixup_after_fork ()
3325 {
3326 if (shared == PTHREAD_PROCESS_PRIVATE)
3327 {
3328 pthread_printf ("sem %x", this);
3329 /* FIXME: duplicate code here and in the constructor. */
3330 this->win32_obj_id = ::CreateSemaphore (&sec_none_nih, currentvalue,
3331 LONG_MAX, NULL);
3332 if (!win32_obj_id)
3333 api_fatal ("failed to create new win32 semaphore, %E");
3334 }
3335 }
3336
3337 void
3338 semaphore::_terminate ()
3339 {
3340 int _sem_close (sem_t *, bool);
3341
3342 if (sem)
3343 _sem_close (sem, false);
3344 }
3345
3346 /* static members */
3347
3348 int
3349 semaphore::init (sem_t *sem, int pshared, unsigned int value)
3350 {
3351 /*
3352 We can't tell the difference between reinitialising an
3353 existing semaphore and initialising a semaphore whose
3354 contents happen to be a valid pointer.
3355 */
3356 if (is_good_object (sem))
3357 {
3358 paranoid_printf ("potential attempt to reinitialise a semaphore");
3359 }
3360
3361 if (value > SEM_VALUE_MAX)
3362 {
3363 set_errno(EINVAL);
3364 return -1;
3365 }
3366
3367 *sem = new semaphore (pshared, value);
3368
3369 if (!is_good_object (sem))
3370 {
3371 delete (*sem);
3372 *sem = NULL;
3373 set_errno(EAGAIN);
3374 return -1;
3375 }
3376 return 0;
3377 }
3378
3379 int
3380 semaphore::destroy (sem_t *sem)
3381 {
3382 if (!is_good_object (sem))
3383 {
3384 set_errno(EINVAL);
3385 return -1;
3386 }
3387
3388 /* It's invalid to destroy a semaphore not opened with sem_init. */
3389 if ((*sem)->fd != -1)
3390 {
3391 set_errno(EINVAL);
3392 return -1;
3393 }
3394
3395 /* FIXME - new feature - test for busy against threads... */
3396
3397 delete (*sem);
3398 *sem = NULL;
3399 return 0;
3400 }
3401
3402 int
3403 semaphore::close (sem_t *sem)
3404 {
3405 if (!is_good_object (sem))
3406 {
3407 set_errno(EINVAL);
3408 return -1;
3409 }
3410
3411 /* It's invalid to close a semaphore not opened with sem_open. */
3412 if ((*sem)->fd == -1)
3413 {
3414 set_errno(EINVAL);
3415 return -1;
3416 }
3417
3418 delete (*sem);
3419 delete sem;
3420 return 0;
3421 }
3422
3423 sem_t *
3424 semaphore::open (unsigned long long hash, LUID luid, int fd, int oflag,
3425 mode_t mode, unsigned int value, bool &wasopen)
3426 {
3427 if (value > SEM_VALUE_MAX)
3428 {
3429 set_errno (EINVAL);
3430 return NULL;
3431 }
3432
3433 /* sem_open is supposed to return the same pointer, if the same named
3434 semaphore is opened multiple times in the same process, as long as
3435 the semaphore hasn't been closed or unlinked in the meantime. */
3436 semaphores.mx.lock ();
3437 for (semaphore *sema = semaphores.head; sema; sema = sema->next)
3438 if (sema->fd >= 0 && sema->hash == hash
3439 && sema->luid.HighPart == luid.HighPart
3440 && sema->luid.LowPart == luid.LowPart)
3441 {
3442 wasopen = true;
3443 semaphores.mx.unlock ();
3444 return sema->sem;
3445 }
3446 semaphores.mx.unlock ();
3447
3448 wasopen = false;
3449 sem_t *sem = new sem_t;
3450 if (!sem)
3451 {
3452 set_errno (ENOMEM);
3453 return NULL;
3454 }
3455
3456 *sem = new semaphore (hash, luid, fd, sem, oflag, mode, value);
3457
3458 if (!is_good_object (sem))
3459 {
3460 delete *sem;
3461 delete sem;
3462 return NULL;
3463 }
3464 return sem;
3465 }
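/* Illustrative sketch (not part of thread.cc): the "same pointer" rule
   enforced above, seen from the caller.  "/mysem" is an arbitrary
   example name. */
#if 0
static void
same_pointer_demo (void)
{
  sem_t *a = sem_open ("/mysem", O_CREAT, 0600, 1);
  sem_t *b = sem_open ("/mysem", 0);

  /* a == b, as long as the semaphore was neither closed nor unlinked
     between the two calls. */
  sem_close (a);
  sem_unlink ("/mysem");
}
#endif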
3466
3467 int
3468 semaphore::wait (sem_t *sem)
3469 {
3470 pthread_testcancel ();
3471
3472 if (!is_good_object (sem))
3473 {
3474 set_errno (EINVAL);
3475 return -1;
3476 }
3477
3478 return (*sem)->_wait ();
3479 }
3480
3481 int
3482 semaphore::trywait (sem_t *sem)
3483 {
3484 if (!is_good_object (sem))
3485 {
3486 set_errno (EINVAL);
3487 return -1;
3488 }
3489
3490 return (*sem)->_trywait ();
3491 }
3492
3493 int
3494 semaphore::timedwait (sem_t *sem, const struct timespec *abstime)
3495 {
3496 if (!is_good_object (sem))
3497 {
3498 set_errno (EINVAL);
3499 return -1;
3500 }
3501
3502 return (*sem)->_timedwait (abstime);
3503 }
3504
3505 int
3506 semaphore::post (sem_t *sem)
3507 {
3508 if (!is_good_object (sem))
3509 {
3510 set_errno (EINVAL);
3511 return -1;
3512 }
3513
3514 (*sem)->_post ();
3515 return 0;
3516 }
3517
3518 int
3519 semaphore::getvalue (sem_t *sem, int *sval)
3520 {
3521 myfault efault;
3522 if (efault.faulted () || !is_good_object (sem))
3523 {
3524 set_errno (EINVAL);
3525 return -1;
3526 }
3527
3528 return (*sem)->_getvalue (sval);
3529 }
3530
3531 int
3532 semaphore::getinternal (sem_t *sem, int *sfd, unsigned long long *shash,
3533 LUID *sluid, unsigned int *sval)
3534 {
3535 myfault efault;
3536 if (efault.faulted () || !is_good_object (sem))
3537 {
3538 set_errno (EINVAL);
3539 return -1;
3540 }
3541 if ((*sfd = (*sem)->fd) < 0)
3542 {
3543 set_errno (EINVAL);
3544 return -1;
3545 }
3546 *shash = (*sem)->hash;
3547 *sluid = (*sem)->luid;
3548 /* POSIX defines the value in calls to sem_init/sem_open as unsigned, but
3549 the sem_getvalue gets a pointer to int to return the value. Go figure! */
3550 return (*sem)->_getvalue ((int *)sval);
3551 }
3552
3553 /* pthread_null */
3554 pthread *
3555 pthread_null::get_null_pthread ()
3556 {
3557 /* because of weird entry points */
3558 _instance.magic = 0;
3559 return &_instance;
3560 }
3561
3562 pthread_null::pthread_null ()
3563 {
3564 attr.joinable = PTHREAD_CREATE_DETACHED;
3565 /* Mark ourselves as invalid */
3566 magic = 0;
3567 }
3568
3569 pthread_null::~pthread_null ()
3570 {
3571 }
3572
3573 bool
3574 pthread_null::create (void *(*)(void *), pthread_attr *, void *)
3575 {
3576 return true;
3577 }
3578
3579 void
3580 pthread_null::exit (void *value_ptr)
3581 {
3582 _my_tls.remove (INFINITE);
3583 ExitThread (0);
3584 }
3585
3586 int
3587 pthread_null::cancel ()
3588 {
3589 return 0;
3590 }
3591
3592 void
3593 pthread_null::testcancel ()
3594 {
3595 }
3596
3597 int
3598 pthread_null::setcancelstate (int state, int *oldstate)
3599 {
3600 return EINVAL;
3601 }
3602
3603 int
3604 pthread_null::setcanceltype (int type, int *oldtype)
3605 {
3606 return EINVAL;
3607 }
3608
3609 void
3610 pthread_null::push_cleanup_handler (__pthread_cleanup_handler *handler)
3611 {
3612 }
3613
3614 void
3615 pthread_null::pop_cleanup_handler (int const execute)
3616 {
3617 }
3618
3619 unsigned long
3620 pthread_null::getsequence_np ()
3621 {
3622 return 0;
3623 }
3624
3625 pthread_null pthread_null::_instance;