pthread_barrier_wait.c

#include "pthread_impl.h"

void __vm_lock_impl(int);
void __vm_unlock_impl(void);

static int pshared_barrier_wait(pthread_barrier_t *b)
{
	int limit = (b->_b_limit & INT_MAX) + 1;
	int ret = 0;
	int v, w;

	if (limit==1) return PTHREAD_BARRIER_SERIAL_THREAD;

	while ((v=a_cas(&b->_b_lock, 0, limit)))
		__wait(&b->_b_lock, &b->_b_waiters, v, 0);

	/* Wait for <limit> threads to get to the barrier */
	if (++b->_b_count == limit) {
		a_store(&b->_b_count, 0);
		ret = PTHREAD_BARRIER_SERIAL_THREAD;
		if (b->_b_waiters2) __wake(&b->_b_count, -1, 0);
	} else {
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 0);
		while ((v=b->_b_count)>0)
			__wait(&b->_b_count, &b->_b_waiters2, v, 0);
	}

	__vm_lock_impl(+1);

	/* Ensure all threads have a vm lock before proceeding */
	if (a_fetch_add(&b->_b_count, -1)==1-limit) {
		a_store(&b->_b_count, 0);
		if (b->_b_waiters2) __wake(&b->_b_count, -1, 0);
	} else {
		while ((v=b->_b_count))
			__wait(&b->_b_count, &b->_b_waiters2, v, 0);
	}

	/* Perform a recursive unlock suitable for self-sync'd destruction */
	do {
		v = b->_b_lock;
		w = b->_b_waiters;
	} while (a_cas(&b->_b_lock, v, v==INT_MIN+1 ? 0 : v-1) != v);

	/* Wake a thread waiting to reuse or destroy the barrier */
	if (v==INT_MIN+1 || (v==1 && w))
		__wake(&b->_b_lock, 1, 0);

	__vm_unlock_impl();

	return ret;
}
struct instance
{
	int count;
	int last;
	int waiters;
	int finished;
};

int pthread_barrier_wait(pthread_barrier_t *b)
{
	int limit = b->_b_limit;
	struct instance *inst;

	/* Trivial case: count was set at 1 */
	if (!limit) return PTHREAD_BARRIER_SERIAL_THREAD;

	/* Process-shared barriers require a separate, inefficient wait */
	if (limit < 0) return pshared_barrier_wait(b);

	/* Otherwise we need a lock on the barrier object */
	while (a_swap(&b->_b_lock, 1))
		__wait(&b->_b_lock, &b->_b_waiters, 1, 1);
	inst = b->_b_inst;

	/* First thread to enter the barrier becomes the "instance owner" */
	if (!inst) {
		/* The instance lives on the owner's stack; the owner does not
		 * return until the last exiting thread bumps `finished`, so
		 * this storage stays valid for the whole barrier cycle. */
		struct instance new_inst = { 0 };
		int spins = 10000;
		b->_b_inst = inst = &new_inst;
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		while (spins-- && !inst->finished)
			a_spin();
		a_inc(&inst->finished);
		while (inst->finished == 1)
			__syscall(SYS_futex, &inst->finished, FUTEX_WAIT, 1, 0);
		return PTHREAD_BARRIER_SERIAL_THREAD;
	}

	/* Last thread to enter the barrier wakes all non-instance-owners */
	if (++inst->count == limit) {
		b->_b_inst = 0;
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		a_store(&inst->last, 1);
		if (inst->waiters)
			__wake(&inst->last, -1, 1);
	} else {
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		__wait(&inst->last, &inst->waiters, 0, 1);
	}

	/* Last thread to exit the barrier wakes the instance owner */
	if (a_fetch_add(&inst->count,-1)==1 && a_fetch_add(&inst->finished,1))
		__wake(&inst->finished, 1, 1);

	return 0;
}
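
For context, a minimal caller-side sketch (not part of the musl source above) of how this interface is typically used: N threads call pthread_barrier_wait() on the same barrier, exactly one waiter per cycle receives PTHREAD_BARRIER_SERIAL_THREAD, and the rest receive 0. The thread count and the printf are illustrative assumptions only.

#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4  /* arbitrary choice for this example */

static pthread_barrier_t barrier;

static void *worker(void *arg)
{
	/* ... per-thread setup work would go here ... */
	int r = pthread_barrier_wait(&barrier);
	if (r == PTHREAD_BARRIER_SERIAL_THREAD) {
		/* Exactly one waiter per barrier cycle gets this value. */
		printf("thread %ld was the serial thread\n", (long)arg);
	}
	/* Any other nonzero return would be an error number. */
	return 0;
}

int main(void)
{
	pthread_t t[NTHREADS];
	pthread_barrier_init(&barrier, 0, NTHREADS);
	for (long i = 0; i < NTHREADS; i++)
		pthread_create(&t[i], 0, worker, (void *)i);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(t[i], 0);
	pthread_barrier_destroy(&barrier);
	return 0;
}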