/* pthread_barrier_wait.c */
  1. #include "pthread_impl.h"
/* vmlock[0] is a signed count acting as a two-direction lock: all
 * current holders took it with the same sign of inc, and holders of
 * the opposite sign must wait.  vmlock[1] is the waiter count used
 * by __wait/__wake. */
static int vmlock[2];

/* Acquire the vm lock in the direction given by the sign of inc.
 * Blocks while the lock is held in the opposite direction, then
 * atomically adds inc to the count via compare-and-swap. */
void __vm_lock(int inc)
{
	for (;;) {
		int v = vmlock[0];
		/* inc*v < 0 means the count's sign is opposite to our
		 * direction: the lock is held against us, so sleep. */
		if (inc*v < 0) __wait(vmlock, vmlock+1, v, 1);
		else if (a_cas(vmlock, v, v+inc)==v) break;
	}
}
/* Release one unit of the vm lock, in whichever direction it is
 * currently held (positive count -> subtract, negative -> add). */
void __vm_unlock(void)
{
	int inc = vmlock[0]>0 ? -1 : 1;
	/* a_fetch_add returns the old value; old == -inc means this
	 * release brought the count to zero, so if anyone is waiting
	 * for the lock to free up, wake them all. */
	if (a_fetch_add(vmlock, inc)==-inc && vmlock[1])
		__wake(vmlock, -1, 1);
}
/* Barrier wait for process-shared barriers.  All synchronization
 * state lives in the barrier object itself so that it works across
 * processes; this costs extra futex traffic compared to the
 * process-local path in pthread_barrier_wait.  Returns
 * PTHREAD_BARRIER_SERIAL_THREAD for exactly one waiter, 0 for the
 * rest. */
static int pshared_barrier_wait(pthread_barrier_t *b)
{
	/* _b_limit stores the thread count minus one in its low bits
	 * (its sign bit marks the barrier as process-shared; see the
	 * limit<0 dispatch in pthread_barrier_wait). */
	int limit = (b->_b_limit & INT_MAX) + 1;
	int ret = 0;
	int v, w;

	if (limit==1) return PTHREAD_BARRIER_SERIAL_THREAD;

	/* Take the barrier lock; on acquisition _b_lock holds the
	 * value <limit>, which later threads count down below. */
	while ((v=a_cas(&b->_b_lock, 0, limit)))
		__wait(&b->_b_lock, &b->_b_waiters, v, 0);

	/* Wait for <limit> threads to get to the barrier */
	/* (++_b_count is non-atomic but safe: the lock above
	 * serializes arrivals.) */
	if (++b->_b_count == limit) {
		a_store(&b->_b_count, 0);
		ret = PTHREAD_BARRIER_SERIAL_THREAD;
		if (b->_b_waiters2) __wake(&b->_b_count, -1, 0);
	} else {
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 0);
		while ((v=b->_b_count)>0)
			__wait(&b->_b_count, &b->_b_waiters2, v, 0);
	}

	__vm_lock(+1);

	/* Ensure all threads have a vm lock before proceeding */
	/* _b_count is counted down from 0 to -(limit-1); the thread
	 * that observes old value 1-limit is the last one through. */
	if (a_fetch_add(&b->_b_count, -1)==1-limit) {
		a_store(&b->_b_count, 0);
		if (b->_b_waiters2) __wake(&b->_b_count, -1, 0);
	} else {
		while ((v=b->_b_count))
			__wait(&b->_b_count, &b->_b_waiters2, v, 0);
	}

	/* Perform a recursive unlock suitable for self-sync'd destruction */
	/* Each exiting thread decrements _b_lock; the special value
	 * INT_MIN+1 (set by pthread_barrier_destroy, presumably —
	 * not visible in this file) resets straight to 0. */
	do {
		v = b->_b_lock;
		w = b->_b_waiters;
	} while (a_cas(&b->_b_lock, v, v==INT_MIN+1 ? 0 : v-1) != v);

	/* Wake a thread waiting to reuse or destroy the barrier */
	if (v==INT_MIN+1 || (v==1 && w))
		__wake(&b->_b_lock, 1, 0);

	__vm_unlock();

	return ret;
}
/* Per-round state for process-local barriers.  It is allocated on
 * the stack of the first thread to arrive (the "instance owner" in
 * pthread_barrier_wait), so the owner must not return until every
 * other thread in the round has finished touching it. */
struct instance
{
	int count;    /* number of threads currently in this round */
	int last;     /* set to 1 by the last arriver; futex address */
	int waiters;  /* waiter count for the futex on last */
	int finished; /* owner/last-exiter teardown handshake */
};
/* Wait on a barrier.  Returns PTHREAD_BARRIER_SERIAL_THREAD for
 * exactly one of the threads released by the barrier and 0 for all
 * of the others. */
int pthread_barrier_wait(pthread_barrier_t *b)
{
	int limit = b->_b_limit;
	struct instance *inst;

	/* Trivial case: count was set at 1 */
	if (!limit) return PTHREAD_BARRIER_SERIAL_THREAD;

	/* Process-shared barriers require a separate, inefficient wait */
	if (limit < 0) return pshared_barrier_wait(b);

	/* Otherwise we need a lock on the barrier object */
	while (a_swap(&b->_b_lock, 1))
		__wait(&b->_b_lock, &b->_b_waiters, 1, 1);

	inst = b->_b_inst;

	/* First thread to enter the barrier becomes the "instance owner" */
	if (!inst) {
		struct instance new_inst = { 0 };
		int spins = 10000;
		/* The instance lives on this thread's stack; the
		 * finished handshake below keeps this frame alive
		 * until the last exiting thread is done with it. */
		b->_b_inst = inst = &new_inst;
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		/* Spin briefly before sleeping in the futex. */
		while (spins-- && !inst->finished)
			a_spin();
		/* finished: 0 -> 1 signals the owner is ready to
		 * sleep; the last exiter bumps it past 1 and wakes. */
		a_inc(&inst->finished);
		while (inst->finished == 1)
			__syscall(SYS_futex, &inst->finished, FUTEX_WAIT,1,0);
		return PTHREAD_BARRIER_SERIAL_THREAD;
	}

	/* Last thread to enter the barrier wakes all non-instance-owners */
	if (++inst->count == limit) {
		/* Detach the instance so the next round starts fresh,
		 * then release the object lock before waking. */
		b->_b_inst = 0;
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		a_store(&inst->last, 1);
		if (inst->waiters)
			__wake(&inst->last, -1, 1);
	} else {
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		__wait(&inst->last, &inst->waiters, 0, 1);
	}

	/* Last thread to exit the barrier wakes the instance owner */
	/* (fetch_add on finished returns nonzero only once the owner
	 * has announced it is waiting, so the wake is never lost). */
	if (a_fetch_add(&inst->count,-1)==1 && a_fetch_add(&inst->finished,1))
		__wake(&inst->finished, 1, 1);

	return 0;
}