pthread_cond_timedwait.c

#include "pthread_impl.h"

/*
 * struct waiter
 *
 * Waiter objects have automatic storage on the waiting thread, and
 * are used in building a linked list representing waiters currently
 * waiting on the condition variable or a group of waiters woken
 * together by a broadcast or signal; in the case of signal, this is a
 * degenerate list of one member.
 *
 * Waiter lists attached to the condition variable itself are
 * protected by the lock on the cv. Detached waiter lists are never
 * modified again, but can only be traversed in reverse order, and are
 * protected by the "barrier" locks in each node, which are unlocked
 * in turn to control wake order.
 *
 * Since process-shared cond var semantics do not necessarily allow
 * one thread to see another's automatic storage (they may be in
 * different processes), the waiter list is not used for the
 * process-shared case, but the structure is still used to store data
 * needed by the cancellation cleanup handler.
 */
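
/* Per-waiter fields: state holds one of the WAITING/SIGNALED/LEAVING
 * values defined below; barrier is the per-node lock released in turn
 * to control wake order; notify, when set by a signaling thread,
 * points at that thread's reference counter so a leaving waiter can
 * report that it has finished unlinking itself. */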
struct waiter {
	struct waiter *prev, *next;
	volatile int state, barrier;
	volatile int *notify;
};

/* Self-synchronized-destruction-safe lock functions */
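
/* Lock word protocol: 0 = unlocked, 1 = locked with no waiters,
 * 2 = locked with possible waiters. unlock() issues a futex wake only
 * when it observes the contended value 2. */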
static inline void lock(volatile int *l)
{
	if (a_cas(l, 0, 1)) {
		a_cas(l, 1, 2);
		do __wait(l, 0, 2, 1);
		while (a_cas(l, 0, 2));
	}
}

static inline void unlock(volatile int *l)
{
	if (a_swap(l, 0)==2)
		__wake(l, 1, 1);
}
static inline void unlock_requeue(volatile int *l, volatile int *r, int w)
{
	a_store(l, 0);
	if (w) __wake(l, 1, 1);
	else __syscall(SYS_futex, l, FUTEX_REQUEUE|FUTEX_PRIVATE, 0, 1, r) != -ENOSYS
		|| __syscall(SYS_futex, l, FUTEX_REQUEUE, 0, 1, r);
}

enum {
	WAITING,
	SIGNALED,
	LEAVING,
};
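
/* Waiter state machine: a waiter starts in WAITING; a signaler moves
 * it to SIGNALED, while a timeout, cancellation, or error makes the
 * waiter itself move to LEAVING. Whichever side wins the CAS takes
 * responsibility for unlinking: a LEAVING waiter removes its own node,
 * while SIGNALED nodes are detached as a group by the signaler. */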

int __pthread_cond_timedwait(pthread_cond_t *restrict c, pthread_mutex_t *restrict m, const struct timespec *restrict ts)
{
	struct waiter node = { 0 };
	int e, seq, clock = c->_c_clock, cs, shared=0, oldstate, tmp;
	volatile int *fut;
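
	/* For mutex types that track ownership, the lock word carries the
	 * owner's tid; fail with EPERM if the calling thread does not hold
	 * the mutex. */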
	if ((m->_m_type&15) && (m->_m_lock&INT_MAX) != __pthread_self()->tid)
		return EPERM;

	if (ts && ts->tv_nsec >= 1000000000UL)
		return EINVAL;

	__pthread_testcancel();
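
	/* Pick the futex word to sleep on: for a process-shared cv it is
	 * the cv's sequence number, which signal/broadcast increment; for
	 * a process-private cv it is this node's barrier, pre-locked to
	 * the contended value 2 so the waiter sleeps until a signaler (or
	 * the waiter ahead of it in a woken group) releases it. */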
	if (c->_c_shared) {
		shared = 1;
		fut = &c->_c_seq;
		seq = c->_c_seq;
		a_inc(&c->_c_waiters);
	} else {
		lock(&c->_c_lock);

		seq = node.barrier = 2;
		fut = &node.barrier;
		node.state = WAITING;
		node.next = c->_c_head;
		c->_c_head = &node;
		if (!c->_c_tail) c->_c_tail = &node;
		else node.next->prev = &node;

		unlock(&c->_c_lock);
	}

	__pthread_mutex_unlock(m);
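
	/* Wait with cancellation "masked": a cancellation request does not
	 * act immediately but makes __timedwait_cp return ECANCELED, so the
	 * mutex can be reacquired before deciding whether to act on it. If
	 * the caller had cancellation disabled, leave it disabled. */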
	__pthread_setcancelstate(PTHREAD_CANCEL_MASKED, &cs);
	if (cs == PTHREAD_CANCEL_DISABLE) __pthread_setcancelstate(cs, 0);
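
	/* Sleep until the futex word changes from its initial value or the
	 * wait fails with a real error (e.g. ETIMEDOUT or ECANCELED); a
	 * bare EINTR is retried, and once the word has changed it is
	 * treated as an ordinary wake. */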
	do e = __timedwait_cp(fut, seq, clock, ts, !shared);
	while (*fut==seq && (!e || e==EINTR));
	if (e == EINTR) e = 0;

	if (shared) {
		/* Suppress cancellation if a signal was potentially
		 * consumed; this is a legitimate form of spurious
		 * wake even if not. */
		if (e == ECANCELED && c->_c_seq != seq) e = 0;
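		/* The last waiter to leave wakes pthread_cond_destroy, which
		 * for a process-shared cv marks the waiter count's sign bit
		 * and then waits for the count to drain. */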
		if (a_fetch_add(&c->_c_waiters, -1) == -0x7fffffff)
			__wake(&c->_c_waiters, 1, 0);
		oldstate = WAITING;
		goto relock;
	}
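
	/* Race with any signaler: if the CAS from WAITING to LEAVING
	 * succeeds, no signal was consumed and this waiter unlinks itself
	 * below; if it fails, a signaler already marked this node SIGNALED
	 * and the wake consumes that signal. */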
	oldstate = a_cas(&node.state, WAITING, LEAVING);

	if (oldstate == WAITING) {
		/* Access to cv object is valid because this waiter was not
		 * yet signaled and a new signal/broadcast cannot return
		 * after seeing a LEAVING waiter without getting notified
		 * via the futex notify below. */

		lock(&c->_c_lock);

		if (c->_c_head == &node) c->_c_head = node.next;
		else if (node.prev) node.prev->next = node.next;

		if (c->_c_tail == &node) c->_c_tail = node.prev;
		else if (node.next) node.next->prev = node.prev;

		unlock(&c->_c_lock);

		if (node.notify) {
			if (a_fetch_add(node.notify, -1)==1)
				__wake(node.notify, 1, 1);
		}
	} else {
		/* Lock barrier first to control wake order. */
		lock(&node.barrier);
	}

relock:
	/* Errors locking the mutex override any existing error or
	 * cancellation, since the caller must see them to know the
	 * state of the mutex. */
	if ((tmp = pthread_mutex_lock(m))) e = tmp;

	if (oldstate == WAITING) goto done;
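
	/* A signal was consumed: members of a detached group are released
	 * oldest-first, each one (after reacquiring the mutex) unlocking
	 * the barrier of the waiter behind it, either waking it or
	 * requeueing it directly onto the mutex futex so it runs only once
	 * the mutex is free. The tail waiter bumps the mutex waiter count
	 * so that unlock issues futex wakes for requeued waiters, and the
	 * last waiter in the chain drops it again. Type bits 8 and 128
	 * correspond to priority-inheritance and process-shared mutexes,
	 * for which a plain wake is used instead of a requeue. */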
	if (!node.next && !(m->_m_type & 8))
		a_inc(&m->_m_waiters);

	/* Unlock the barrier that's holding back the next waiter, and
	 * either wake it or requeue it to the mutex. */
	if (node.prev) {
		int val = m->_m_lock;
		if (val>0) a_cas(&m->_m_lock, val, val|0x80000000);
		unlock_requeue(&node.prev->barrier, &m->_m_lock, m->_m_type & (8|128));
	} else if (!(m->_m_type & 8)) {
		a_dec(&m->_m_waiters);
	}

	/* Since a signal was consumed, cancellation is not permitted. */
	if (e == ECANCELED) e = 0;

done:
	__pthread_setcancelstate(cs, 0);

	if (e == ECANCELED) {
		__pthread_testcancel();
		__pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, 0);
	}

	return e;
}
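
/* __private_cond_signal is the back end of pthread_cond_signal and
 * pthread_cond_broadcast for process-private condition variables: it
 * marks up to n waiters SIGNALED, detaches that group from the cv's
 * list, waits for any leaving waiters it raced with to finish
 * unlinking, and then releases only the oldest signaled waiter; the
 * rest are released one by one by their predecessors (see above). */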
int __private_cond_signal(pthread_cond_t *c, int n)
{
	struct waiter *p, *first=0;
	volatile int ref = 0;
	int cur;

	lock(&c->_c_lock);
	for (p=c->_c_tail; n && p; p=p->prev) {
		if (a_cas(&p->state, WAITING, SIGNALED) != WAITING) {
			ref++;
			p->notify = &ref;
		} else {
			n--;
			if (!first) first=p;
		}
	}
	/* Split the list, leaving any remainder on the cv. */
	if (p) {
		if (p->next) p->next->prev = 0;
		p->next = 0;
	} else {
		c->_c_head = 0;
	}
	c->_c_tail = p;
	unlock(&c->_c_lock);

	/* Wait for any waiters in the LEAVING state to remove
	 * themselves from the list before returning or allowing
	 * signaled threads to proceed. */
	while ((cur = ref)) __wait(&ref, 0, cur, 1);

	/* Allow first signaled waiter, if any, to proceed. */
	if (first) unlock(&first->barrier);

	return 0;
}

weak_alias(__pthread_cond_timedwait, pthread_cond_timedwait);