/*
 * CPPMyth
 * Library to interoperate with MythTV server
 * winpthreads.c
 */
1 /*
2  * Posix Threads library for Microsoft Windows
3  *
4  * Use at own risk, there is no implied warranty to this code.
5  * It uses undocumented features of Microsoft Windows that can change
6  * at any time in the future.
7  *
8  * (C) 2010 Lockless Inc.
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without modification,
12  * are permitted provided that the following conditions are met:
13  *
14  *
15  * * Redistributions of source code must retain the above copyright notice,
16  * this list of conditions and the following disclaimer.
17  * * Redistributions in binary form must reproduce the above copyright notice,
18  * this list of conditions and the following disclaimer in the documentation
19  * and/or other materials provided with the distribution.
20  * * Neither the name of Lockless Inc. nor the names of its contributors may be
21  * used to endorse or promote products derived from this software without
22  * specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AN
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
27  * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
28  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
29  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
30  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
32  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
33  * OF THE POSSIBILITY OF SUCH DAMAGE.
34  */
35 
36 /*
37  * Version 1.0.1 Released 2 Feb 2012
38  * Fixes pthread_barrier_destroy() to wait for threads to exit the barrier.
39  */
40 
41 #include "winpthreads.h"
42 #include <sys/timeb.h>
43 #include <setjmp.h>
44 #include <intrin.h>
45 #include <process.h>
46 
typedef struct _pthread_cleanup _pthread_cleanup;
/* Node in a thread's stack of cancellation cleanup handlers
 * (linked through pthread_self()->clean by pthread_cleanup_push below).
 * The struct tag line was missing, leaving the typedef incomplete. */
struct _pthread_cleanup
{
 void (*func)(void *);   /* Handler run on cancellation or non-zero pop */
 void *arg;              /* Argument passed to func */
 _pthread_cleanup *next; /* Next (older) handler on the stack */
};
54 
/* Per-thread bookkeeping record; a pthread_t is a pointer to one of these */
struct _pthread_v
{
 void *ret_arg;           /* Start argument, later overwritten with the return value */
 void *(* func)(void *);  /* Thread start routine */
 _pthread_cleanup *clean; /* Stack of cancellation cleanup handlers */
 HANDLE h;                /* Win32 handle; (HANDLE)-1 while starting, 0 once detached */
 int cancelled;           /* Set by pthread_cancel() */
 unsigned p_state;        /* PTHREAD_* attribute/state flag bits */
 unsigned keymax;         /* Number of slots in keyval */
 void **keyval;           /* Thread-specific values indexed by pthread_key_t */

 jmp_buf jb;              /* pthread_exit() longjmps here to unwind the thread */
};
68 
/* Count of pending cancellation requests; nonzero makes pthread_testcancel()
 * look up the calling thread's cancelled flag */
volatile long _pthread_cancelling;

/* Advisory value stored by pthread_set_concurrency() */
int _pthread_concur;

/* Will default to zero as needed */
pthread_once_t _pthread_tls_once;
DWORD _pthread_tls;                  /* TLS index holding the per-thread _pthread_v */

/* Note initializer is zero, so this works */
pthread_rwlock_t _pthread_key_lock;  /* Guards the destructor table below */
unsigned _pthread_key_max;           /* Allocated size of _pthread_key_dest */
unsigned _pthread_key_sch;           /* Hint: where to start searching for a free key */
void (**_pthread_key_dest)(void *);  /* Per-key: 0 = free, 1 = no destructor, else fn */
82 
83 
/* Push a cancellation cleanup handler. The record lives on the caller's
 * stack; the macro opens a brace that the matching pthread_cleanup_pop
 * closes, so push/pop must be paired lexically in the same block. */
#define pthread_cleanup_push(F, A)\
{\
 const _pthread_cleanup _pthread_cup = {(F), (A), pthread_self()->clean};\
 _ReadWriteBarrier();\
 pthread_self()->clean = (_pthread_cleanup *) &_pthread_cup;\
 _ReadWriteBarrier()

/* Pop the handler pushed above; run it iff E is non-zero.
 * Note that if async cancelling is used, then there is a race here */
#define pthread_cleanup_pop(E)\
 (pthread_self()->clean = _pthread_cup.next, (E ? _pthread_cup.func(_pthread_cup.arg) : 0));}
94 
95 static void _pthread_once_cleanup(pthread_once_t *o)
96 {
97  *o = 0;
98 }
99 
/* Run func exactly once per once-control, with cancellation support.
 * Control states: 0 = not run, 2 = in progress, 1 = done. If func is
 * cancelled, the pushed cleanup resets the control to 0 so a later
 * caller can retry. Always returns 0. */
int pthread_once(pthread_once_t *o, void (*func)(void))
{
 long state = *o;

 _ReadWriteBarrier();

 while (state != 1)
 {
  if (!state)
  {
   /* Won the race: 0 -> 2 transition makes us the initialiser */
   if (!_InterlockedCompareExchange(o, 2, 0))
   {
    /* Success */
    pthread_cleanup_push((void(*)(void*))_pthread_once_cleanup, o);
    func();
    pthread_cleanup_pop(0);

    /* Mark as done */
    *o = 1;

    return 0;
   }
  }

  /* Another thread is initialising; spin until it finishes */
  YieldProcessor();

  _ReadWriteBarrier();

  state = *o;
 }

 /* Done */
 return 0;
}
134 
/* Like pthread_once() but without cancellation-cleanup support, so it is
 * safe to use before the TLS machinery (and hence pthread_self) exists. */
static int _pthread_once_raw(pthread_once_t *o, void (*func)(void))
{
 long state = *o;

 _ReadWriteBarrier();

 while (state != 1)
 {
  if (!state)
  {
   /* Won the race: 0 -> 2 transition makes us the initialiser */
   if (!_InterlockedCompareExchange(o, 2, 0))
   {
    /* Success */
    func();

    /* Mark as done */
    *o = 1;

    return 0;
   }
  }

  /* Another thread is initialising; spin until it finishes */
  YieldProcessor();

  _ReadWriteBarrier();

  state = *o;
 }

 /* Done */
 return 0;
}
167 
/* Lock m. Backed by a CRITICAL_SECTION, so locking is recursive by
 * Windows semantics regardless of the mutexattr type. Always returns 0. */
int pthread_mutex_lock(pthread_mutex_t *m)
{
 EnterCriticalSection(m);
 return 0;
}
173 
/* Unlock m. No ownership checking is performed. Always returns 0. */
int pthread_mutex_unlock(pthread_mutex_t *m)
{
 LeaveCriticalSection(m);
 return 0;
}
179 
/* Non-blocking lock attempt: 0 on success, EBUSY if held by another thread */
int pthread_mutex_trylock(pthread_mutex_t *m)
{
 return TryEnterCriticalSection(m) ? 0 : EBUSY;
}
184 
/* Initialise m as a CRITICAL_SECTION. The attribute argument is ignored
 * (type/pshared/protocol settings have no effect). Always returns 0. */
int pthread_mutex_init(pthread_mutex_t *m, pthread_mutexattr_t *a)
{
 (void) a;
 InitializeCriticalSection(m);

 return 0;
}
192 
/* Destroy m. Undefined if the mutex is still locked. Always returns 0. */
int pthread_mutex_destroy(pthread_mutex_t *m)
{
 DeleteCriticalSection(m);
 return 0;
}
198 
199 int pthread_equal(pthread_t t1, pthread_t t2)
200 {
201  return t1 == t2;
202 }
203 
/* Initialise l as a slim reader/writer lock; the attribute is ignored.
 * Always returns 0. */
int pthread_rwlock_init(pthread_rwlock_t *l, pthread_rwlockattr_t *a)
{
 (void) a;
 InitializeSRWLock(l);

 return 0;
}
211 
/* SRW locks need no teardown; this only touches *l to keep the parameter
 * "used". Always returns 0. */
int pthread_rwlock_destroy(pthread_rwlock_t *l)
{
 (void) *l;
 return 0;
}
217 
/* Acquire l for shared (read) access; acts as a cancellation point
 * before blocking. Always returns 0. */
int pthread_rwlock_rdlock(pthread_rwlock_t *l)
{
 pthread_testcancel();
 AcquireSRWLockShared(l);

 return 0;
}
225 
/* Acquire l for exclusive (write) access; acts as a cancellation point
 * before blocking. Always returns 0. */
int pthread_rwlock_wrlock(pthread_rwlock_t *l)
{
 pthread_testcancel();
 AcquireSRWLockExclusive(l);

 return 0;
}
233 
/* Release l whichever way it was taken. Relies on the undocumented SRWLOCK
 * representation: a raw word value of 1 appears to mean "held exclusively"
 * (pthread_rwlock_trywrlock below stores exactly 1), anything else is
 * treated as a shared hold. NOTE(review): this can break if the OS changes
 * the internal SRWLOCK encoding. */
int pthread_rwlock_unlock(pthread_rwlock_t *l)
{
 void *state = *(void **)l;

 if (state == (void *)1)
 {
  /* Known to be an exclusive lock */
  ReleaseSRWLockExclusive(l);
 }
 else
 {
  /* A shared unlock will work */
  ReleaseSRWLockShared(l);
 }

 return 0;
}
251 
/* Try to take a shared lock without blocking by CASing the raw SRWLOCK
 * word directly. Depends on the undocumented layout: 1 == exclusive
 * owner, bits 0xE appear to flag waiting threads, and each reader adds
 * 16 to the word. Returns 0 on success, EBUSY otherwise (including on
 * CAS contention). */
int pthread_rwlock_tryrdlock(pthread_rwlock_t *l)
{
 /* Get the current state of the lock */
 void *state = *(void **)l;

 if (!state)
 {
  /* Unlocked to locked */
  if (!_InterlockedCompareExchangePointer((void *volatile *)l, (void *)0x11, NULL)) return 0;
  return EBUSY;
 }

 /* A single writer exists */
 if (state == (void *)1) return EBUSY;

 /* Multiple writers exist? */
 if ((uintptr_t)state & 14) return EBUSY;

 /* One more reader: bump the reader count embedded in the word */
 if (_InterlockedCompareExchangePointer((void *volatile *)l, (void *)((uintptr_t)state + 16), state) == state) return 0;

 return EBUSY;
}
274 
/* Try to take the exclusive lock without blocking: CAS the raw SRWLOCK
 * word from 0 (free) to 1 (the value pthread_rwlock_unlock interprets as
 * "exclusively held"). Returns 0 on success, EBUSY otherwise. */
int pthread_rwlock_trywrlock(pthread_rwlock_t *l)
{
 /* Try to grab lock if it has no users */
 if (!_InterlockedCompareExchangePointer((void *volatile *)l, (void *)1, NULL)) return 0;

 return EBUSY;
}
282 
/* One-time allocation of the process-wide TLS slot that stores each
 * thread's _pthread_v pointer (run via _pthread_once_raw). */
void pthread_tls_init(void)
{
 _pthread_tls = TlsAlloc();

 /* Cannot continue if out of indexes */
 if (_pthread_tls == TLS_OUT_OF_INDEXES) abort();
}
290 
/* Run TLS key destructors for an exiting thread. Iterates up to
 * PTHREAD_DESTRUCTOR_ITERATIONS times because a destructor may itself
 * store fresh non-NULL values; stops early once a full pass finds
 * nothing to destroy. Values > 1 in _pthread_key_dest are real
 * destructor pointers (1 means "key in use, no destructor"). */
static void _pthread_cleanup_dest(pthread_t t)
{
 unsigned i, j;

 for (j = 0; j < PTHREAD_DESTRUCTOR_ITERATIONS; j++)
 {
  int flag = 0;

  for (i = 0; i < t->keymax; i++)
  {
   void *val = t->keyval[i];

   if (val)
   {
    /* Hold the key table lock only while inspecting/calling the slot */
    pthread_rwlock_rdlock(&_pthread_key_lock);
    if ((uintptr_t) _pthread_key_dest[i] > 1)
    {
     /* Call destructor */
     t->keyval[i] = NULL;
     _pthread_key_dest[i](val);
     flag = 1;
    }
    pthread_rwlock_unlock(&_pthread_key_lock);
   }
  }

  /* Nothing to do? */
  if (!flag) return;
 }
}
321 
/* Return the calling thread's record, lazily creating one for threads not
 * started through pthread_create (e.g. the process's main thread). For
 * such threads the setjmp below is the landing point for pthread_exit:
 * when it longjmps back, the thread frees its record if detached and
 * terminates via _endthreadex. */
pthread_t pthread_self(void)
{
 pthread_t t;

 _pthread_once_raw(&_pthread_tls_once, pthread_tls_init);

 t = (struct _pthread_v*)TlsGetValue(_pthread_tls);
 /* Main thread? */
 if (!t)
 {
  t = (struct _pthread_v*)malloc(sizeof(struct _pthread_v));

  /* If cannot initialize main thread, then the only thing we can do is abort */
  if (!t) abort();

  t->ret_arg = NULL;
  t->func = NULL;
  t->clean = NULL;
  t->cancelled = 0;
  t->p_state = PTHREAD_DEFAULT_ATTR;
  t->keymax = 0;
  t->keyval = NULL;
  /* NOTE(review): GetCurrentThread() is a pseudo-handle, not a real one */
  t->h = GetCurrentThread();

  /* Save for later */
  TlsSetValue(_pthread_tls, t);

  if (setjmp(t->jb))
  {
   /* Make sure we free ourselves if we are detached */
   if (!t->h) free(t);

   /* Time to die */
   _endthreadex(0);
  }
 }

 return t;
}
361 
/* Current wall-clock time in milliseconds since the Unix epoch,
 * via the CRT's _ftime64 (second + millisecond resolution). */
static unsigned long long _pthread_time_in_ms(void)
{
 struct __timeb64 tb;

 _ftime64(&tb);

 return tb.time * 1000 + tb.millitm;
}
370 
/* Convert an absolute timespec to milliseconds since the epoch.
 * Widen tv_sec before multiplying: on targets with a 32-bit time_t the
 * old "ts->tv_sec * 1000" overflowed in 32-bit arithmetic before the
 * assignment widened it. */
static unsigned long long _pthread_time_in_ms_from_timespec(const struct timespec *ts)
{
 unsigned long long t = (unsigned long long) ts->tv_sec * 1000;
 t += ts->tv_nsec / 1000000;

 return t;
}
378 
/* Milliseconds remaining until the absolute deadline ts, clamped at zero
 * so an already-expired deadline never wraps around to a huge wait. */
static unsigned long long _pthread_rel_time_in_ms(const struct timespec *ts)
{
 unsigned long long deadline = _pthread_time_in_ms_from_timespec(ts);
 unsigned long long now = _pthread_time_in_ms();

 return (deadline > now) ? (deadline - now) : 0;
}
388 
/* Take a shared lock, giving up once the absolute deadline ts passes.
 * NOTE(review): this spins on tryrdlock with no sleep/yield, so a long
 * wait burns a full CPU core. */
int pthread_rwlock_timedrdlock(pthread_rwlock_t *l, const struct timespec *ts)
{
 unsigned long long ct = _pthread_time_in_ms();
 unsigned long long t = _pthread_time_in_ms_from_timespec(ts);

 pthread_testcancel();

 /* Use a busy-loop */
 while (1)
 {
  /* Try to grab lock */
  if (!pthread_rwlock_tryrdlock(l)) return 0;

  /* Get current time */
  ct = _pthread_time_in_ms();

  /* Have we waited long enough? */
  if (ct > t) return ETIMEDOUT;
 }
}
409 
/* Take the exclusive lock, giving up once the absolute deadline ts passes.
 * NOTE(review): busy-waits like the rdlock variant — no sleep/yield. */
int pthread_rwlock_timedwrlock(pthread_rwlock_t *l, const struct timespec *ts)
{
 unsigned long long ct = _pthread_time_in_ms();
 unsigned long long t = _pthread_time_in_ms_from_timespec(ts);

 pthread_testcancel();

 /* Use a busy-loop */
 while (1)
 {
  /* Try to grab lock */
  if (!pthread_rwlock_trywrlock(l)) return 0;

  /* Get current time */
  ct = _pthread_time_in_ms();

  /* Have we waited long enough? */
  if (ct > t) return ETIMEDOUT;
 }
}
430 
/* Report the advisory concurrency level last set (default 0). */
int pthread_get_concurrency(int *val)
{
 *val = _pthread_concur;
 return 0;
}
436 
/* Store the advisory concurrency level; it has no scheduling effect here. */
int pthread_set_concurrency(int val)
{
 _pthread_concur = val;
 return 0;
}
442 
/* Terminate the calling thread with result res: run TLS destructors, then
 * longjmp to the setjmp point installed in pthread_create_wrapper (or in
 * pthread_self for threads not created by this library). Never returns,
 * despite the int return type kept for header compatibility. */
int pthread_exit(void *res)
{
 pthread_t t = pthread_self();

 t->ret_arg = res;

 _pthread_cleanup_dest(t);

 longjmp(t->jb, 1);
}
453 
454 
/* Deliver a pending cancellation to the current thread: drop the global
 * pending count, run the cleanup-handler stack newest-first, then exit
 * with PTHREAD_CANCELED. Never returns. */
static void _pthread_invoke_cancel(void)
{
 _pthread_cleanup *pcup;

 _InterlockedDecrement(&_pthread_cancelling);

 /* Call cancel queue */
 for (pcup = pthread_self()->clean; pcup; pcup = pcup->next)
 {
  pcup->func(pcup->arg);
 }

 pthread_exit(PTHREAD_CANCELED);
}
469 
/* Deferred cancellation point: act on a pending cancel only when some
 * thread is cancelling (cheap global check first) and this thread is both
 * targeted and has cancellation enabled. */
void pthread_testcancel(void)
{
 if (_pthread_cancelling)
 {
  pthread_t t = pthread_self();

  if (t->cancelled && (t->p_state & PTHREAD_CANCEL_ENABLE))
  {
   _pthread_invoke_cancel();
  }
 }
}
482 
483 
/* Request cancellation of thread t. In asynchronous mode (desktop builds
 * only) the target is suspended and its instruction pointer rewritten to
 * _pthread_invoke_cancel — inherently unsafe, as the original authors
 * warn. Otherwise only the deferred flag is set, honoured at the next
 * cancellation point. NOTE(review): returning ESRCH for an
 * already-cancelled thread is this library's convention, not POSIX. */
int pthread_cancel(pthread_t t)
{
#if !defined(WINAPI_FAMILY) || (WINAPI_FAMILY != WINAPI_FAMILY_APP)
 if (t->p_state & PTHREAD_CANCEL_ASYNCHRONOUS)
 {
  /* Dangerous asynchronous cancelling */
  CONTEXT ctxt;

  /* Already done? */
  if (t->cancelled) return ESRCH;

  ctxt.ContextFlags = CONTEXT_CONTROL;

  SuspendThread(t->h);
  GetThreadContext(t->h, &ctxt);
#ifdef _M_X64
  ctxt.Rip = (uintptr_t) _pthread_invoke_cancel;
#else
  ctxt.Eip = (uintptr_t) _pthread_invoke_cancel;
#endif
  SetThreadContext(t->h, &ctxt);

  /* Also try deferred Cancelling */
  t->cancelled = 1;

  /* Notify everyone to look */
  _InterlockedIncrement(&_pthread_cancelling);

  ResumeThread(t->h);
 }
 else
#endif
 {
  /* Safe deferred Cancelling */
  t->cancelled = 1;

  /* Notify everyone to look */
  _InterlockedIncrement(&_pthread_cancelling);
 }

 return 0;
}
526 
527 static unsigned _pthread_get_state(pthread_attr_t *attr, unsigned flag)
528 {
529  return attr->p_state & flag;
530 }
531 
/* Replace the attribute bits selected by flag with val.
 * Rejects val if it contains any bit outside the flag mask. */
static int _pthread_set_state(pthread_attr_t *attr, unsigned flag, unsigned val)
{
 if (~flag & val) return EINVAL;
 attr->p_state &= ~flag;
 attr->p_state |= val;

 return 0;
}
540 
/* Reset attr to defaults: default flag bits, no explicit stack. */
int pthread_attr_init(pthread_attr_t *attr)
{
 attr->p_state = PTHREAD_DEFAULT_ATTR;
 attr->stack = NULL;
 attr->s_size = 0;
 return 0;
}
548 
549 int pthread_attr_destroy(pthread_attr_t *attr)
550 {
551  /* No need to do anything */
552  return 0;
553 }
554 
555 
/* Set the detach-state bit; flag must be 0 or PTHREAD_CREATE_DETACHED. */
int pthread_attr_setdetachstate(pthread_attr_t *a, int flag)
{
 return _pthread_set_state(a, PTHREAD_CREATE_DETACHED, flag);
}
560 
/* Read back the detach-state bit into *flag. */
int pthread_attr_getdetachstate(pthread_attr_t *a, int *flag)
{
 *flag = _pthread_get_state(a, PTHREAD_CREATE_DETACHED);
 return 0;
}
566 
/* Set the inherit-scheduler bit (stored but not acted upon). */
int pthread_attr_setinheritsched(pthread_attr_t *a, int flag)
{
 return _pthread_set_state(a, PTHREAD_INHERIT_SCHED, flag);
}
571 
/* Read back the inherit-scheduler bit into *flag. */
int pthread_attr_getinheritsched(pthread_attr_t *a, int *flag)
{
 *flag = _pthread_get_state(a, PTHREAD_INHERIT_SCHED);
 return 0;
}
577 
/* Set the contention-scope bit (stored but not acted upon). */
int pthread_attr_setscope(pthread_attr_t *a, int flag)
{
 return _pthread_set_state(a, PTHREAD_SCOPE_SYSTEM, flag);
}
582 
/* Read back the contention-scope bit into *flag. */
int pthread_attr_getscope(pthread_attr_t *a, int *flag)
{
 *flag = _pthread_get_state(a, PTHREAD_SCOPE_SYSTEM);
 return 0;
}
588 
/* Return the stored stack address (never consumed by pthread_create here). */
int pthread_attr_getstackaddr(pthread_attr_t *attr, void **stack)
{
 *stack = attr->stack;
 return 0;
}
594 
/* Record a stack address. NOTE(review): pthread_create ignores it —
 * _beginthreadex cannot take a caller-supplied stack. */
int pthread_attr_setstackaddr(pthread_attr_t *attr, void *stack)
{
 attr->stack = stack;
 return 0;
}
600 
/* Return the requested stack size (0 = system default). */
int pthread_attr_getstacksize(pthread_attr_t *attr, size_t *size)
{
 *size = attr->s_size;
 return 0;
}
606 
/* Record the stack size passed to _beginthreadex by pthread_create. */
int pthread_attr_setstacksize(pthread_attr_t *attr, size_t size)
{
 attr->s_size = size;
 return 0;
}
612 
/* Unsupported attribute operations: evaluate to ENOTSUP at the call site */
#define pthread_attr_getguardsize(A, S) ENOTSUP
#define pthread_attr_setguardsize(A, S) ENOTSUP
/* Historical misspelling kept so existing callers still compile */
#define pthread_attr_setgaurdsize(A, S) ENOTSUP
#define pthread_attr_getschedparam(A, S) ENOTSUP
#define pthread_attr_setschedparam(A, S) ENOTSUP
#define pthread_attr_getschedpolicy(A, S) ENOTSUP
#define pthread_attr_setschedpolicy(A, S) ENOTSUP
619 
620 
/* Enable/disable cancellation for the calling thread. state must contain
 * no bits outside PTHREAD_CANCEL_ENABLE; the previous state is optionally
 * returned through oldstate. */
int pthread_setcancelstate(int state, int *oldstate)
{
 pthread_t t = pthread_self();

 if ((state & PTHREAD_CANCEL_ENABLE) != state) return EINVAL;
 if (oldstate) *oldstate = t->p_state & PTHREAD_CANCEL_ENABLE;
 t->p_state &= ~PTHREAD_CANCEL_ENABLE;
 t->p_state |= state;

 return 0;
}
632 
/* Select deferred vs asynchronous cancellation for the calling thread.
 * type must contain no bits outside PTHREAD_CANCEL_ASYNCHRONOUS; the
 * previous type is optionally returned through oldtype. */
int pthread_setcanceltype(int type, int *oldtype)
{
 pthread_t t = pthread_self();

 if ((type & PTHREAD_CANCEL_ASYNCHRONOUS) != type) return EINVAL;
 if (oldtype) *oldtype = t->p_state & PTHREAD_CANCEL_ASYNCHRONOUS;
 t->p_state &= ~PTHREAD_CANCEL_ASYNCHRONOUS;
 t->p_state |= type;

 return 0;
}
644 
/* _beginthreadex entry point: publish the thread record in TLS, run the
 * user function, then run TLS destructors. The setjmp is the landing
 * point for pthread_exit(); on longjmp return, ret_arg already holds the
 * exit value so the normal path is skipped. */
unsigned __stdcall pthread_create_wrapper(void *args)
{
 struct _pthread_v *tv = (struct _pthread_v*)args;

 _pthread_once_raw(&_pthread_tls_once, pthread_tls_init);

 TlsSetValue(_pthread_tls, tv);

 if (!setjmp(tv->jb))
 {
  /* Call function and save return value */
  tv->ret_arg = tv->func(tv->ret_arg);

  /* Clean up destructors */
  _pthread_cleanup_dest(tv);
 }

 /* If we exit too early, then we can race with create:
  * h stays (HANDLE)-1 until pthread_create stores the real handle */
 while (tv->h == (HANDLE) -1)
 {
  YieldProcessor();
  _ReadWriteBarrier();
 }

 /* Make sure we free ourselves if we are detached */
 if (!tv->h) free(tv);

 return 0;
}
674 
675 int pthread_create(pthread_t *th, pthread_attr_t *attr, void *(* func)(void *), void *arg)
676 {
677  struct _pthread_v *tv = (struct _pthread_v*)malloc(sizeof(struct _pthread_v));
678  unsigned ssize = 0;
679 
680  if (!tv) return 1;
681 
682  *th = tv;
683 
684  /* Save data in pthread_t */
685  tv->ret_arg = arg;
686  tv->func = func;
687  tv->clean = NULL;
688  tv->cancelled = 0;
689  tv->p_state = PTHREAD_DEFAULT_ATTR;
690  tv->keymax = 0;
691  tv->keyval = NULL;
692  tv->h = (HANDLE) -1;
693 
694  if (attr)
695  {
696  tv->p_state = attr->p_state;
697  ssize = (unsigned) attr->s_size;
698  }
699 
700  /* Make sure tv->h has value of -1 */
701  _ReadWriteBarrier();
702 
703  tv->h = (HANDLE) _beginthreadex(NULL, ssize, pthread_create_wrapper, tv, 0, NULL);
704 
705  /* Failed */
706  if (!tv->h) return 1;
707 
708  if (tv->p_state & PTHREAD_CREATE_DETACHED)
709  {
710  CloseHandle(tv->h);
711  _ReadWriteBarrier();
712  tv->h = 0;
713  }
714 
715  return 0;
716 }
717 
/* Wait for thread t to finish, then reap it: close the handle, hand the
 * return value to the caller and free the record. Undefined if t was
 * detached (its handle is 0/closed) or is joined twice. */
int pthread_join(pthread_t t, void **res)
{
 struct _pthread_v *tv = t;

 pthread_testcancel();

 WaitForSingleObject(tv->h, INFINITE);
 CloseHandle(tv->h);

 /* Obtain return value */
 if (res) *res = tv->ret_arg;

 free(tv);

 return 0;
}
734 
/* Detach thread t: close its handle and set h to 0 so the thread frees
 * its own record when it exits (see pthread_create_wrapper). */
int pthread_detach(pthread_t t)
{
 struct _pthread_v *tv = t;

 /*
  * This can't race with thread exit because
  * our call would be undefined if called on a dead thread.
  */

 CloseHandle(tv->h);
 _ReadWriteBarrier();
 tv->h = 0;

 return 0;
}
750 
/* Reset the attribute word. Bit layout used by the accessors below:
 * bits 0-1 type, bit 2 pshared, bits 3-4 protocol, higher bits encode
 * the priority ceiling scaled by PTHREAD_PRIO_MULT. */
int pthread_mutexattr_init(pthread_mutexattr_t *a)
{
 *a = 0;
 return 0;
}
756 
/* Nothing to release; kept for API completeness. */
int pthread_mutexattr_destroy(pthread_mutexattr_t *a)
{
 (void) a;
 return 0;
}
762 
/* Extract the mutex type from bits 0-1 of the attribute word. */
int pthread_mutexattr_gettype(pthread_mutexattr_t *a, int *type)
{
 *type = *a & 3;

 return 0;
}
769 
/* Store the mutex type in bits 0-1; only values 0..3 are accepted.
 * (The type is recorded but pthread_mutex_init ignores it.) */
int pthread_mutexattr_settype(pthread_mutexattr_t *a, int type)
{
 if ((unsigned) type > 3) return EINVAL;
 *a &= ~3;
 *a |= type;

 return 0;
}
778 
/* Extract the process-shared flag from bit 2 of the attribute word. */
int pthread_mutexattr_getpshared(pthread_mutexattr_t *a, int *type)
{
 *type = *a & 4;

 return 0;
}
785 
/* Store the process-shared flag in bit 2; type may only contain bit 4's
 * value (0 or 4). Recorded but not honoured by pthread_mutex_init. */
int pthread_mutexattr_setpshared(pthread_mutexattr_t * a, int type)
{
 if ((type & 4) != type) return EINVAL;

 *a &= ~4;
 *a |= type;

 return 0;
}
795 
/* Extract the protocol bits (bits 3-4) of the attribute word. */
int pthread_mutexattr_getprotocol(pthread_mutexattr_t *a, int *type)
{
 *type = *a & (8 + 16);

 return 0;
}
802 
803 int pthread_mutexattr_setprotocol(pthread_mutexattr_t *a, int type)
804 {
805  if ((type & (8 + 16)) != 8 + 16) return EINVAL;
806 
807  *a &= ~(8 + 16);
808  *a |= type;
809 
810  return 0;
811 }
812 
/* Recover the priority ceiling stored scaled by PTHREAD_PRIO_MULT. */
int pthread_mutexattr_getprioceiling(pthread_mutexattr_t *a, int * prio)
{
 *prio = *a / PTHREAD_PRIO_MULT;
 return 0;
}
818 
/* Store the priority ceiling in the high part of the attribute word,
 * keeping the flag bits below PTHREAD_PRIO_MULT intact. */
int pthread_mutexattr_setprioceiling(pthread_mutexattr_t *a, int prio)
{
 *a &= (PTHREAD_PRIO_MULT - 1);
 *a += prio * PTHREAD_PRIO_MULT;

 return 0;
}
826 
/* Lock m, failing with ETIMEDOUT once the absolute deadline ts passes.
 * Instead of pure busy-waiting, this waits on the CRITICAL_SECTION's
 * internal semaphore; the local struct mirrors the undocumented
 * CRITICAL_SECTION layout to reach that field. NOTE(review): depends on
 * Windows internals that may change between OS releases. */
int pthread_mutex_timedlock(pthread_mutex_t *m, struct timespec *ts)
{
 unsigned long long t, ct;

 /* Shadow of the undocumented CRITICAL_SECTION layout */
 struct _pthread_crit_t
 {
  void *debug;
  LONG count;
  LONG r_count;
  HANDLE owner;
  HANDLE sem;
  ULONG_PTR spin;
 };

 /* Try to lock it without waiting */
 if (!pthread_mutex_trylock(m)) return 0;

 ct = _pthread_time_in_ms();
 t = _pthread_time_in_ms_from_timespec(ts);

 while (1)
 {
  /* Have we waited long enough? */
  if (ct > t) return ETIMEDOUT;

  /* Wait on semaphore within critical section */
  WaitForSingleObject(((struct _pthread_crit_t *)m)->sem, (DWORD)(t - ct));

  /* Try to grab lock */
  if (!pthread_mutex_trylock(m)) return 0;

  /* Get current time */
  ct = _pthread_time_in_ms();
 }
}
862 
/* Added to a barrier's 'total' while a completed round drains: values at
 * or above the flag mean threads are still leaving the barrier */
#define _PTHREAD_BARRIER_FLAG (1<<30)
864 
/* Destroy barrier b, first waiting until any in-progress round has fully
 * drained (total > FLAG means threads are still inside). Undefined if new
 * waiters arrive concurrently. */
int pthread_barrier_destroy(pthread_barrier_t *b)
{
 EnterCriticalSection(&b->m);

 while (b->total > _PTHREAD_BARRIER_FLAG)
 {
  /* Wait until everyone exits the barrier */
  SleepConditionVariableCS(&b->cv, &b->m, INFINITE);
 }

 LeaveCriticalSection(&b->m);

 DeleteCriticalSection(&b->m);

 return 0;
}
881 
/* Initialise barrier b to release threads in groups of 'count'.
 * The attribute is ignored; count is not validated. Always returns 0. */
int pthread_barrier_init(pthread_barrier_t *b, void *attr, int count)
{
 /* Ignore attr */
 (void) attr;

 b->count = count;
 b->total = 0;

 InitializeCriticalSection(&b->m);
 InitializeConditionVariable(&b->cv);

 return 0;
}
895 
/* Wait at barrier b. 'total' does double duty: below FLAG it counts
 * arrivals; once 'count' threads have arrived, FLAG-1 is added and the
 * value counts down as threads drain out, letting the next round (and
 * destroy) know when the barrier is empty. Returns 1 for the last
 * arriving ("serial") thread and 0 for the others. */
int pthread_barrier_wait(pthread_barrier_t *b)
{
 EnterCriticalSection(&b->m);

 while (b->total > _PTHREAD_BARRIER_FLAG)
 {
  /* Wait until everyone exits the barrier */
  SleepConditionVariableCS(&b->cv, &b->m, INFINITE);
 }

 /* Are we the first to enter? */
 if (b->total == _PTHREAD_BARRIER_FLAG) b->total = 0;

 b->total++;

 if (b->total == b->count)
 {
  /* Last arrival: switch to drain mode and release everyone */
  b->total += _PTHREAD_BARRIER_FLAG - 1;
  WakeAllConditionVariable(&b->cv);

  LeaveCriticalSection(&b->m);

  return 1;
 }
 else
 {
  while (b->total < _PTHREAD_BARRIER_FLAG)
  {
   /* Wait until enough threads enter the barrier */
   SleepConditionVariableCS(&b->cv, &b->m, INFINITE);
  }

  b->total--;

  /* Get entering threads to wake up */
  if (b->total == _PTHREAD_BARRIER_FLAG) WakeAllConditionVariable(&b->cv);

  LeaveCriticalSection(&b->m);

  return 0;
 }
}
938 
/* Barrier attributes are a bare pointer-sized slot; start it out empty. */
int pthread_barrierattr_init(void **attr)
{
 attr[0] = NULL;
 return 0;
}
944 
/* Nothing was allocated by init, so there is nothing to free here;
 * the parameter is touched only to avoid an unused warning. */
int pthread_barrierattr_destroy(void **attr)
{
 (void) attr;
 return 0;
}
952 
/* Stash the process-shared flag in the pointer-sized attribute slot.
 * Widen through uintptr_t first: the old direct (void *)s cast draws an
 * int-to-pointer-size warning on 64-bit builds. The flag is recorded but
 * never consulted by the barrier implementation. */
int pthread_barrierattr_setpshared(void **attr, int s)
{
 *attr = (void *)(uintptr_t) s;
 return 0;
}
958 
/* Recover the integer flag previously stashed in the pointer-sized slot. */
int pthread_barrierattr_getpshared(void **attr, int *s)
{
 size_t raw = (size_t) attr[0];
 *s = (int) raw;

 return 0;
}
965 
966 int pthread_key_create(pthread_key_t *key, void (* dest)(void *))
967 {
968  unsigned i;
969  unsigned nmax;
970  void (**d)(void *);
971 
972  if (!key) return EINVAL;
973 
974  pthread_rwlock_wrlock(&_pthread_key_lock);
975 
976  for (i = _pthread_key_sch; i < _pthread_key_max; i++)
977  {
978  if (!_pthread_key_dest[i])
979  {
980  *key = i;
981  if (dest)
982  {
983  _pthread_key_dest[i] = dest;
984  }
985  else
986  {
987  _pthread_key_dest[i] = (void(*)(void *))1;
988  }
989  pthread_rwlock_unlock(&_pthread_key_lock);
990 
991  return 0;
992  }
993  }
994 
995  for (i = 0; i < _pthread_key_sch; i++)
996  {
997  if (!_pthread_key_dest[i])
998  {
999  *key = i;
1000  if (dest)
1001  {
1002  _pthread_key_dest[i] = dest;
1003  }
1004  else
1005  {
1006  _pthread_key_dest[i] = (void(*)(void *))1;
1007  }
1008  pthread_rwlock_unlock(&_pthread_key_lock);
1009 
1010  return 0;
1011  }
1012  }
1013 
1014  if (!_pthread_key_max) _pthread_key_max = 1;
1015  if (_pthread_key_max == PTHREAD_KEYS_MAX)
1016  {
1017  pthread_rwlock_unlock(&_pthread_key_lock);
1018 
1019  return ENOMEM;
1020  }
1021 
1022  nmax = _pthread_key_max * 2;
1023  if (nmax > PTHREAD_KEYS_MAX) nmax = PTHREAD_KEYS_MAX;
1024 
1025  /* No spare room anywhere */
1026  d = (void (**)(void*))realloc(_pthread_key_dest, nmax * sizeof(*d));
1027  if (!d)
1028  {
1029  pthread_rwlock_unlock(&_pthread_key_lock);
1030 
1031  return ENOMEM;
1032  }
1033 
1034  /* Clear new region */
1035  memset((void *) &d[_pthread_key_max], 0, (nmax-_pthread_key_max)*sizeof(void *));
1036 
1037  /* Use new region */
1038  _pthread_key_dest = d;
1039  _pthread_key_sch = _pthread_key_max + 1;
1040  *key = _pthread_key_max;
1041  _pthread_key_max = nmax;
1042 
1043  if (dest)
1044  {
1045  _pthread_key_dest[*key] = dest;
1046  }
1047  else
1048  {
1049  _pthread_key_dest[*key] = (void(*)(void *))1;
1050  }
1051 
1052  pthread_rwlock_unlock(&_pthread_key_lock);
1053 
1054  return 0;
1055 }
1056 
1057 int pthread_key_delete(pthread_key_t key)
1058 {
1059  if (key > _pthread_key_max) return EINVAL;
1060  if (!_pthread_key_dest) return EINVAL;
1061 
1062  pthread_rwlock_wrlock(&_pthread_key_lock);
1063  _pthread_key_dest[key] = NULL;
1064 
1065  /* Start next search from our location */
1066  if (_pthread_key_sch > key) _pthread_key_sch = key;
1067 
1068  pthread_rwlock_unlock(&_pthread_key_lock);
1069 
1070  return 0;
1071 }
1072 
/* Return the calling thread's value for 'key', or NULL if never set
 * (the per-thread array only grows on pthread_setspecific). */
void *pthread_getspecific(pthread_key_t key)
{
 pthread_t t = pthread_self();

 if (key >= t->keymax) return NULL;

 return t->keyval[key];

}
1082 
1083 int pthread_setspecific(pthread_key_t key, const void *value)
1084 {
1085  pthread_t t = pthread_self();
1086 
1087  if (key > t->keymax)
1088  {
1089  int keymax = (key + 1) * 2;
1090  void **kv = (void**)realloc(t->keyval, keymax * sizeof(void *));
1091 
1092  if (!kv) return ENOMEM;
1093 
1094  /* Clear new region */
1095  memset(&kv[t->keymax], 0, (keymax - t->keymax)*sizeof(void*));
1096 
1097  t->keyval = kv;
1098  t->keymax = keymax;
1099  }
1100 
1101  t->keyval[key] = (void *) value;
1102 
1103  return 0;
1104 }
1105 
1106 
/* Initialise the spinlock to unlocked (0); pshared is ignored. */
int pthread_spin_init(pthread_spinlock_t *l, int pshared)
{
 (void) pshared;

 *l = 0;
 return 0;
}
1114 
/* Spinlocks hold no resources; nothing to release. */
int pthread_spin_destroy(pthread_spinlock_t *l)
{
 (void) l;
 return 0;
}
1120 
/* No-fair spinlock due to lack of knowledge of thread number.
 * Test-and-test-and-set: the atomic exchange claims the lock (stores the
 * non-zero value EBUSY); while contended, spin on plain reads so the bus
 * isn't hammered with locked operations. */
int pthread_spin_lock(pthread_spinlock_t *l)
{
 while (_InterlockedExchange(l, EBUSY))
 {
  /* Don't lock the bus whilst waiting */
  while (*l)
  {
   YieldProcessor();

   /* Compiler barrier. Prevent caching of *l */
   _ReadWriteBarrier();
  }
 }

 return 0;
}
1138 
/* Single exchange attempt: returns 0 if the lock was free (now taken),
 * or EBUSY (the stored lock value) if it was already held. */
int pthread_spin_trylock(pthread_spinlock_t *l)
{
 return _InterlockedExchange(l, EBUSY);
}
1143 
/* Release the spinlock with a plain store (x86 stores have release
 * semantics; the compiler barrier stops reordering). */
int pthread_spin_unlock(pthread_spinlock_t *l)
{
 /* Compiler barrier. The store below acts with release symmantics */
 _ReadWriteBarrier();

 *l = 0;

 return 0;
}
1153 
/* Initialise c as a Win32 condition variable; the attribute is ignored. */
int pthread_cond_init(pthread_cond_t *c, pthread_condattr_t *a)
{
 (void) a;

 InitializeConditionVariable(c);
 return 0;
}
1161 
/* Wake one thread waiting on c (no-op if none are waiting). */
int pthread_cond_signal(pthread_cond_t *c)
{
 WakeConditionVariable(c);
 return 0;
}
1167 
/* Wake every thread currently waiting on c. */
int pthread_cond_broadcast(pthread_cond_t *c)
{
 WakeAllConditionVariable(c);
 return 0;
}
1173 
/* Atomically release m and wait on c, reacquiring m before returning.
 * Cancellation is only tested before the wait, not while blocked. */
int pthread_cond_wait(pthread_cond_t *c, pthread_mutex_t *m)
{
 pthread_testcancel();
 SleepConditionVariableCS(c, m, INFINITE);
 return 0;
}
1180 
/* Win32 condition variables need no teardown. */
int pthread_cond_destroy(pthread_cond_t *c)
{
 (void) c;
 return 0;
}
1186 
/* Wait on c with mutex m until signalled or the absolute deadline t has
 * passed. The deadline is converted to a relative millisecond wait; a
 * wakeup that arrives after the deadline is still reported as timeout. */
int pthread_cond_timedwait(pthread_cond_t *c, pthread_mutex_t *m, struct timespec *t)
{
 unsigned long long tm = _pthread_rel_time_in_ms(t);

 pthread_testcancel();

 if (!SleepConditionVariableCS(c, m, (DWORD)tm)) return ETIMEDOUT;

 /* We can have a spurious wakeup after the timeout */
 if (!_pthread_rel_time_in_ms(t)) return ETIMEDOUT;

 return 0;
}
1200 
/* Nothing to release; kept for API completeness. */
int pthread_condattr_destroy(pthread_condattr_t *a)
{
 (void) a;
 return 0;
}
1206 
/* Condattr is a bare integer holding only the pshared flag; reset it. */
int pthread_condattr_init(pthread_condattr_t *a)
{
 *a = 0;
 return 0;
}
1212 
/* Return the stored pshared flag (recorded but never acted upon). */
int pthread_condattr_getpshared(pthread_condattr_t *a, int *s)
{
 *s = *a;
 return 0;
}
1218 
/* Record the pshared flag; condition variables here are process-private
 * regardless. No validation is performed. */
int pthread_condattr_setpshared(pthread_condattr_t *a, int s)
{
 *a = s;
 return 0;
}
1224 
/* Nothing to release; kept for API completeness. */
int pthread_rwlockattr_destroy(pthread_rwlockattr_t *a)
{
 (void) a;
 return 0;
}
1230 
/* Rwlockattr is a bare integer holding only the pshared flag; reset it. */
int pthread_rwlockattr_init(pthread_rwlockattr_t *a)
{
 *a = 0;
 return 0;
}
1236 
/* Return the stored pshared flag (recorded but never acted upon). */
int pthread_rwlockattr_getpshared(pthread_rwlockattr_t *a, int *s)
{
 *s = *a;
 return 0;
}
1242 
/* Record the pshared flag; SRW locks here are process-private regardless.
 * No validation is performed. */
int pthread_rwlockattr_setpshared(pthread_rwlockattr_t *a, int s)
{
 *a = s;
 return 0;
}