pthread_create.c
#include "pthread_impl.h"
#include "stdio_impl.h"
#include "libc.h"
#include <sys/mman.h>
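
/* Default no-op stubs. Strong definitions elsewhere in libc replace
 * them when the corresponding functionality is linked: for example,
 * pthread_key_create.c provides the real __pthread_tsd_run_dtors, and
 * the dynamic linker (when present) provides __acquire_ptc/__release_ptc
 * so that dlopen cannot change the TLS layout mid-create. */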
static void dummy_0()
{
}
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);
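
/* Thread exit: run any remaining cancellation cleanup handlers and
 * TSD destructors, then take the thread down without returning. */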
_Noreturn void pthread_exit(void *result)
{
	pthread_t self = pthread_self();
	sigset_t set;

	self->result = result;

	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}

	__pthread_tsd_run_dtors();
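
	/* exitlock synchronizes exit against pthread_detach: a detach
	 * call that loses the race to this lock falls back to joining
	 * the exiting thread instead of detaching it. */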
	__lock(self->exitlock);

	/* Mark this thread dead before decrementing count */
	__lock(self->killlock);
	self->dead = 1;

	/* Block all signals before decrementing the live thread count.
	 * This is important to ensure that dynamically allocated TLS
	 * is not under-allocated/over-committed, and possibly for other
	 * reasons as well. */
	__block_all_sigs(&set);

	/* Wait to unlock the kill lock, which governs functions like
	 * pthread_kill which target a thread id, until signals have
	 * been blocked. This precludes observation of the thread id
	 * as a live thread (with application code running in it) after
	 * the thread was reported dead by ESRCH being returned. */
	__unlock(self->killlock);

	/* It's impossible to determine whether this is "the last thread"
	 * until performing the atomic decrement, since multiple threads
	 * could exit at the same time. For the last thread, revert the
	 * decrement and unblock signals to give the atexit handlers and
	 * stdio cleanup code a consistent state. */
	if (a_fetch_add(&libc.threads_minus_1, -1)==0) {
		libc.threads_minus_1 = 0;
		__restore_sigs(&set);
		exit(0);
	}

	if (self->detached && self->map_base) {
		/* Detached threads must avoid the kernel clear_child_tid
		 * feature, since the virtual address will have been
		 * unmapped and possibly already reused by a new mapping
		 * at the time the kernel would perform the write. In
		 * the case of threads that started out detached, the
		 * initial clone flags are correct, but if the thread was
		 * detached later (== 2), we need to clear it here. */
		if (self->detached == 2) __syscall(SYS_set_tid_address, 0);

		/* The following call unmaps the thread's stack mapping
		 * and then exits without touching the stack. */
		__unmapself(self->map_base, self->map_size);
	}
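
	/* SYS_exit (unlike SYS_exit_group) terminates only the calling
	 * thread; the loop guards against the syscall ever returning. */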
	for (;;) __syscall(SYS_exit, 0);
}
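
/* Maintain the singly-linked list of cancellation cleanup handlers
 * that pthread_exit walks above; these are invoked (indirectly) by the
 * pthread_cleanup_push/pthread_cleanup_pop macros. */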
void __do_cleanup_push(struct __ptcb *cb)
{
	struct pthread *self = pthread_self();
	cb->__next = self->cancelbuf;
	self->cancelbuf = cb;
}

void __do_cleanup_pop(struct __ptcb *cb)
{
	__pthread_self()->cancelbuf = cb->__next;
}
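
/* Entry point handed to __clone. If the creator requested explicit
 * scheduling, wait on the startlock until the creator has applied the
 * policy (a stored value of 2 means that failed and the thread must
 * exit), then run the application's start function. */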
static int start(void *p)
{
	pthread_t self = p;
	if (self->startlock[0]) {
		__wait(self->startlock, 0, 1, 1);
		if (self->startlock[0]) {
			self->detached = 2;
			pthread_exit(0);
		}
		__restore_sigs(self->sigmask);
	}
	if (self->unblock_cancel)
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK,
			SIGPT_SET, 0, _NSIG/8);
	pthread_exit(self->start(self->start_arg));
	return 0;
}

#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static const size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);

static FILE *const dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);
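
/* While the process is single-threaded, stdio FILE locks are set to -1
 * so locking is skipped entirely; once a second thread is about to
 * exist they must be reset to 0 to enable normal locking. */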
static void init_file_lock(FILE *f)
{
	if (f && f->lock<0) f->lock = 0;
}

void *__copy_tls(unsigned char *);
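
/* Layout of a mapping created below, from low to high addresses:
 *
 *   map .. map+guard (PROT_NONE) .. stack (grows down) .. TLS .. TSD .. map+size
 *
 * The TSD block occupies the very top, the TLS image sits directly
 * below it, and the thread descriptor returned by __copy_tls lives
 * within the TLS region. */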
int pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
{
	int ret;
	size_t size, guard;
	struct pthread *self = pthread_self(), *new;
	unsigned char *map = 0, *stack = 0, *tsd = 0, *stack_limit;
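	/* 0x7d8f00 == CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_SIGHAND
	 * |CLONE_PARENT|CLONE_THREAD|CLONE_SYSVSEM|CLONE_SETTLS
	 * |CLONE_PARENT_SETTID|CLONE_CHILD_CLEARTID|CLONE_DETACHED;
	 * CLONE_CHILD_CLEARTID is dropped below for detached threads. */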
	unsigned flags = 0x7d8f00;
	int do_sched = 0;
	pthread_attr_t attr = {0};

	if (!self) return ENOSYS;

	if (!libc.threaded) {
		for (FILE *f=libc.ofl_head; f; f=f->next)
			init_file_lock(f);
		init_file_lock(__stdin_used);
		init_file_lock(__stdout_used);
		init_file_lock(__stderr_used);
		libc.threaded = 1;
	}

	if (attrp) attr = *attrp;

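	/* Hold the "ptc" lock so that dlopen cannot change libc.tls_size
	 * while the new thread's TLS/TSD block is being sized and set up. */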
	__acquire_ptc();

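	/* With an application-provided stack, carve the TLS/TSD block out
	 * of the top of that stack when it costs little; otherwise leave
	 * tsd unset so a separate, guardless mapping is created below. */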
	if (attr._a_stackaddr) {
		size_t need = libc.tls_size + __pthread_tsd_size;
		size = attr._a_stacksize + DEFAULT_STACK_SIZE;
		stack = (void *)(attr._a_stackaddr & -16);
		stack_limit = (void *)(attr._a_stackaddr - size);
		/* Use application-provided stack for TLS only when
		 * it does not take more than ~12% or 2k of the
		 * application's stack space. */
		if (need < size/8 && need < 2048) {
			tsd = stack - __pthread_tsd_size;
			stack = tsd - libc.tls_size;
		} else {
			size = ROUND(need);
			guard = 0;
		}
	} else {
		guard = ROUND(DEFAULT_GUARD_SIZE + attr._a_guardsize);
		size = guard + ROUND(DEFAULT_STACK_SIZE + attr._a_stacksize
			+ libc.tls_size + __pthread_tsd_size);
	}
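
	/* No TSD carved out yet: create the mapping. When a guard is
	 * requested, map everything PROT_NONE first and then enable access
	 * above the guard, so the guard pages are never accessible. */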
	if (!tsd) {
		if (guard) {
			map = mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
			if (mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)) {
				munmap(map, size);
				goto fail;
			}
		} else {
			map = mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
		}
		tsd = map + size - __pthread_tsd_size;
		if (!stack) {
			stack = tsd - libc.tls_size;
			stack_limit = map + guard;
		}
	}
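
	/* __copy_tls installs the initial TLS image just below the TSD
	 * block and returns the new thread descriptor within it. */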
	new = __copy_tls(tsd - libc.tls_size);
	new->map_base = map;
	new->map_size = size;
	new->stack = stack;
	new->stack_size = stack - stack_limit;
	new->pid = self->pid;
	new->errno_ptr = &new->errno_val;
	new->start = entry;
	new->start_arg = arg;
	new->self = new;
	new->tsd = (void *)tsd;
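
	/* For threads created detached, drop CLONE_CHILD_CLEARTID
	 * (0x200000) so the kernel never writes to the tid address, which
	 * may already be unmapped when a detached thread exits. */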
	if (attr._a_detach) {
		new->detached = 1;
		flags -= 0x200000;
	}
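	/* An explicit scheduling attribute is applied by the creator after
	 * clone; hold the new thread on its startlock, with application
	 * signals blocked, until that has happened (see start above). */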
	if (attr._a_sched) {
		do_sched = new->startlock[0] = 1;
		__block_app_sigs(new->sigmask);
	}
	new->unblock_cancel = self->cancel;
	new->canary = self->canary;
	a_inc(&libc.threads_minus_1);
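
	/* Both tid arguments point at new->tid: CLONE_PARENT_SETTID makes
	 * the kernel store the new tid there for the parent, and
	 * CLONE_CHILD_CLEARTID makes it clear and futex-wake the same word
	 * on thread exit, which is what pthread_join waits on. */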
	ret = __clone(start, stack, flags, new, &new->tid, TP_ADJ(new), &new->tid);

	__release_ptc();

	if (do_sched) {
		__restore_sigs(new->sigmask);
	}

	if (ret < 0) {
		a_dec(&libc.threads_minus_1);
		if (map) munmap(map, size);
		return EAGAIN;
	}

	if (do_sched) {
		ret = __syscall(SYS_sched_setscheduler, new->tid,
			attr._a_policy, &attr._a_prio);
		a_store(new->startlock, ret<0 ? 2 : 0);
		__wake(new->startlock, 1, 1);
		if (ret < 0) return -ret;
	}

	*res = new;
	return 0;
fail:
	__release_ptc();
	return EAGAIN;
}