/* pthread_create.c */

#include "pthread_impl.h"
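
/* Unwind to the next registered cleanup context, or finish exiting the
 * thread: with no handlers left, run TSD destructors, block all signals,
 * then either exit the process (last thread) or, for a detached thread,
 * unmap its own stack before exiting the kernel task. */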
void __pthread_unwind_next(struct __ptcb *cb)
{
	int i, j, not_finished;
	pthread_t self;

	if (cb->__next) longjmp((void *)cb->__next->__jb, 1);

	self = pthread_self();
	if (self->cancel) self->result = PTHREAD_CANCELED;

	LOCK(&self->exitlock);

	not_finished = self->tsd_used;
	for (j=0; not_finished && j<PTHREAD_DESTRUCTOR_ITERATIONS; j++) {
		not_finished = 0;
		for (i=0; i<PTHREAD_KEYS_MAX; i++) {
			if (self->tsd[i] && libc.tsd_keys[i]) {
				void *tmp = self->tsd[i];
				self->tsd[i] = 0;
				libc.tsd_keys[i](tmp);
				not_finished = 1;
			}
		}
	}

	syscall4(__NR_rt_sigprocmask, SIG_BLOCK, (long)(uint64_t[1]){-1}, 0, 8);

	if (!a_fetch_add(&libc.threads_minus_1, -1))
		exit(0);

	if (self->detached && self->map_base)
		__unmapself(self->map_base, self->map_size);

	__syscall_exit(0);
}
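
/* Begin cancellation/exit unwinding from the top of the calling thread's
 * cleanup-handler stack; with no handlers registered this exits the thread. */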
static void docancel(struct pthread *self)
{
	struct __ptcb cb = { .__next = self->cancelbuf };
	__pthread_unwind_next(&cb);
}
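
/* Handler for SIGCANCEL: mark the thread as cancelled, and act on it
 * immediately only if cancellation is enabled and either asynchronous
 * or the thread is currently at a cancellation point. */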
static void cancel_handler(int sig, siginfo_t *si, void *ctx)
{
	struct pthread *self = __pthread_self();
	self->cancel = 1;
	if (self->canceldisable || (!self->cancelasync && !self->cancelpoint))
		return;
	docancel(self);
}
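
/* Installed as libc.cancelpt below; callers flag entry (nonzero x) and
 * exit (zero x) of a cancellation point, and any already-pending
 * cancellation request is acted on here. */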
static void cancelpt(int x)
{
	struct pthread *self = __pthread_self();
	if (self->canceldisable) return;
	self->cancelpoint = x;
	if (self->cancel) docancel(self);
}

/* "rsyscall" is a mechanism by which a thread can synchronously force all
 * other threads to perform an arbitrary syscall. It is necessary to work
 * around the non-conformant implementation of setuid() et al. on Linux,
 * which affects only the calling thread and not the whole process. This
 * implementation performs some tricks with signal delivery to work around
 * the fact that it does not keep any list of threads in userspace. */
static struct {
	volatile int lock, hold, blocks, cnt;
	unsigned long arg[6];
	int nr;
	int err;
} rs;
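
/* Handler run in every other thread during an rsyscall: perform the
 * requested syscall, record the first error seen, report in via rs.cnt,
 * then wait on rs.hold until the initiator releases everyone. Once all
 * threads have responded, any lingering signals are no-ops. */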
static void rsyscall_handler(int sig, siginfo_t *si, void *ctx)
{
	if (rs.cnt == libc.threads_minus_1) return;
	if (syscall6(rs.nr, rs.arg[0], rs.arg[1], rs.arg[2],
		rs.arg[3], rs.arg[4], rs.arg[5]) < 0 && !rs.err) rs.err=errno;
	a_inc(&rs.cnt);
	__wake(&rs.cnt, 1, 1);
	while(rs.hold)
		__wait(&rs.hold, 0, 1, 1);
	a_dec(&rs.cnt);
	if (!rs.cnt) __wake(&rs.cnt, 1, 1);
}
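
/* Initiating side of the rsyscall mechanism: serialize under rs.lock,
 * publish the syscall number and arguments, signal the process with
 * SIGSYSCALL until every other thread has checked in, then release the
 * handlers, wait for them to drain, and finally perform the syscall in
 * the calling thread itself. */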
static int rsyscall(int nr, long a, long b, long c, long d, long e, long f)
{
	int i, ret;
	sigset_t set = { 0 };
	struct pthread *self = __pthread_self();

	sigaddset(&set, SIGSYSCALL);
	LOCK(&rs.lock);
	while ((i=rs.blocks))
		__wait(&rs.blocks, 0, i, 1);
	__libc_sigprocmask(SIG_BLOCK, &set, 0);

	rs.nr = nr;
	rs.arg[0] = a; rs.arg[1] = b;
	rs.arg[2] = c; rs.arg[3] = d;
	rs.arg[4] = e; rs.arg[5] = f;
	rs.hold = 1;
	rs.err = 0;
	rs.cnt = 0;

	/* Dispatch signals until all threads respond */
	for (i=libc.threads_minus_1; i; i--)
		sigqueue(self->pid, SIGSYSCALL, (union sigval){0});
	while ((i=rs.cnt) < libc.threads_minus_1) {
		sigqueue(self->pid, SIGSYSCALL, (union sigval){0});
		__wait(&rs.cnt, 0, i, 1);
	}

	/* Handle any lingering signals with no-op */
	__libc_sigprocmask(SIG_UNBLOCK, &set, 0);

	/* Resume other threads' signal handlers and wait for them */
	rs.hold = 0;
	__wake(&rs.hold, -1, 0);
	while((i=rs.cnt)) __wait(&rs.cnt, 0, i, 1);

	if (rs.err) errno = rs.err, ret = -1;
	else ret = syscall6(nr, a, b, c, d, e, f);

	UNLOCK(&rs.lock);
	return ret;
}
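
/* A hypothetical caller elsewhere in libc (the real call sites are not
 * in this file) would route a process-wide syscall such as setuid()
 * through the libc.rsyscall hook installed below, roughly:
 *
 *	if (libc.rsyscall) return libc.rsyscall(SYS_setuid, uid, 0, 0, 0, 0, 0);
 *
 * falling back to a plain syscall when no threads have been created. */

/* One-time setup on first pthread_create: install the SIGCANCEL and
 * SIGSYSCALL handlers and hook the threaded code paths into libc. */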
static void init_threads(void)
{
	struct sigaction sa = { .sa_flags = SA_SIGINFO | SA_RESTART };
	libc.lock = __lock;
	libc.cancelpt = cancelpt;
	libc.rsyscall = rsyscall;
	sa.sa_sigaction = cancel_handler;
	__libc_sigaction(SIGCANCEL, &sa, 0);
	sigaddset(&sa.sa_mask, SIGSYSCALL);
	sigaddset(&sa.sa_mask, SIGCANCEL);
	sa.sa_sigaction = rsyscall_handler;
	__libc_sigaction(SIGSYSCALL, &sa, 0);
	sigprocmask(SIG_UNBLOCK, &sa.sa_mask, 0);
}
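
/* Entry point for the new kernel thread: run the start function and
 * route its return value through pthread_exit. */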
static int start(void *p)
{
	struct pthread *self = p;
	pthread_exit(self->start(self->start_arg));
	return 0;
}

int __uniclone(void *, int (*)(), void *);

#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static const size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);
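
/* Create a new thread. A single mmap provides, from bottom to top: the
 * guard page(s), the stack growing down toward them, the struct pthread
 * itself, and the TSD array at the very top of the mapping. */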
int pthread_create(pthread_t *res, const pthread_attr_t *attr, void *(*entry)(void *), void *arg)
{
	static int init;
	int ret;
	size_t size, guard;
	struct pthread *self = pthread_self(), *new;
	unsigned char *map, *stack, *tsd;
	static const pthread_attr_t default_attr;

	if (!self) return errno = ENOSYS;
	if (!init && ++init) init_threads();

	if (!attr) attr = &default_attr;
	guard = ROUND(attr->_a_guardsize + DEFAULT_GUARD_SIZE);
	size = guard + ROUND(attr->_a_stacksize + DEFAULT_STACK_SIZE);
	size += __pthread_tsd_size;
	map = mmap(0, size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANON, -1, 0);
	if (map == MAP_FAILED) return EAGAIN;
	mprotect(map, guard, PROT_NONE);

	tsd = map + size - __pthread_tsd_size;
	new = (void *)(tsd - sizeof *new - PAGE_SIZE%sizeof *new);
	new->map_base = map;
	new->map_size = size;
	new->pid = self->pid;
	new->errno_ptr = &new->errno_val;
	new->start = entry;
	new->start_arg = arg;
	new->self = new;
	new->tsd = (void *)tsd;
	new->detached = attr->_a_detach;
	new->attr = *attr;
	memcpy(new->tlsdesc, self->tlsdesc, sizeof new->tlsdesc);
	new->tlsdesc[1] = (uintptr_t)new;
	stack = (void *)((uintptr_t)new-1 & ~(uintptr_t)15);

	/* We must synchronize new thread creation with rsyscall
	 * delivery. This looks to be the least expensive way: */
	a_inc(&rs.blocks);
	while (rs.lock) __wait(&rs.lock, 0, 1, 1);
	a_inc(&libc.threads_minus_1);
	ret = __uniclone(stack, start, new);
	a_dec(&rs.blocks);
	if (rs.lock) __wake(&rs.blocks, 1, 1);

	if (ret < 0) {
		a_dec(&libc.threads_minus_1);
		munmap(map, size);
		return EAGAIN;
	}
	*res = new;
	return 0;
}
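
/* Exit the calling thread, unwinding through any registered cleanup
 * handlers via the cancellation machinery above. */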
void pthread_exit(void *result)
{
	struct pthread *self = pthread_self();
	self->result = result;
	docancel(self);
}