pthread_create.c

#define _GNU_SOURCE
#include "pthread_impl.h"
#include "stdio_impl.h"
#include "libc.h"
#include <sys/mman.h>
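
/* No-op stubs, overridden by strong definitions elsewhere in libc
 * (e.g. pthread_key_create.c supplies the real
 * __pthread_tsd_run_dtors) so that functionality the program never
 * uses does not get linked in. */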
static void dummy_0()
{
}
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);
_Noreturn void pthread_exit(void *result)
{
	pthread_t self = pthread_self();
	sigset_t set;

	self->result = result;

	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}

	__pthread_tsd_run_dtors();

	__lock(self->exitlock);

	/* Mark this thread dead before decrementing count */
	__lock(self->killlock);
	self->dead = 1;

	/* Block all signals before decrementing the live thread count.
	 * This is important to ensure that dynamically allocated TLS
	 * is not under-allocated/over-committed, and possibly for other
	 * reasons as well. */
	__block_all_sigs(&set);

	/* Wait to unlock the kill lock, which governs functions like
	 * pthread_kill which target a thread id, until signals have
	 * been blocked. This precludes observation of the thread id
	 * as a live thread (with application code running in it) after
	 * the thread was reported dead by ESRCH being returned. */
	__unlock(self->killlock);

	/* It's impossible to determine whether this is "the last thread"
	 * until performing the atomic decrement, since multiple threads
	 * could exit at the same time. For the last thread, revert the
	 * decrement and unblock signals to give the atexit handlers and
	 * stdio cleanup code a consistent state. */
	if (a_fetch_add(&libc.threads_minus_1, -1)==0) {
		libc.threads_minus_1 = 0;
		__restore_sigs(&set);
		exit(0);
	}

	if (self->detached && self->map_base) {
		/* Detached threads must avoid the kernel clear_child_tid
		 * feature, since the virtual address will have been
		 * unmapped and possibly already reused by a new mapping
		 * at the time the kernel would perform the write. In
		 * the case of threads that started out detached, the
		 * initial clone flags are correct, but if the thread was
		 * detached later (== 2), we need to clear it here. */
		if (self->detached == 2) __syscall(SYS_set_tid_address, 0);

		/* The following call unmaps the thread's stack mapping
		 * and then exits without touching the stack. */
		__unmapself(self->map_base, self->map_size);
	}

	for (;;) __syscall(SYS_exit, 0);
}
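
/* These two functions back the pthread_cleanup_push and
 * pthread_cleanup_pop macros; pthread_exit runs any handlers still
 * on the list (the loop at the top of pthread_exit above). */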
void __do_cleanup_push(struct __ptcb *cb)
{
	struct pthread *self = pthread_self();
	cb->__next = self->cancelbuf;
	self->cancelbuf = cb;
}

void __do_cleanup_pop(struct __ptcb *cb)
{
	__pthread_self()->cancelbuf = cb->__next;
}
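
/* Entry point for all new threads, invoked via __clone below. It
 * completes any deferred setup (scheduling via startlock, the saved
 * signal mask) before calling the application's start function, and
 * turns that function's return into pthread_exit. */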
static int start(void *p)
{
	pthread_t self = p;
	if (self->startlock[0]) {
		__wait(self->startlock, 0, 1, 1);
		if (self->startlock[0]) {
			self->detached = 2;
			pthread_exit(0);
		}
		__restore_sigs(self->sigmask);
	}
	if (self->unblock_cancel)
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK,
			SIGPT_SET, 0, _NSIG/8);
	pthread_exit(self->start(self->start_arg));
	return 0;
}
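
/* Round up to a whole number of pages. */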
#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)
/* pthread_key_create.c overrides this */
static const size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);
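
/* Weak null stubs for the stdio stream objects; the real objects
 * are linked only if the application actually uses stdio. */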
static FILE *const dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);

static void init_file_lock(FILE *f)
{
	if (f && f->lock<0) f->lock = 0;
}

void *__copy_tls(unsigned char *);
int pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
{
	int ret;
	size_t size, guard;
	struct pthread *self = pthread_self(), *new;
	unsigned char *map = 0, *stack = 0, *tsd = 0, *stack_limit;
	unsigned flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND
		| CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS
		| CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID | CLONE_DETACHED;
	int do_sched = 0;
	pthread_attr_t attr = {0};

	if (!self) return ENOSYS;
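	/* First thread creation: enable locking on all stdio streams
	 * that were opened while the process was still single-threaded
	 * (a negative lock value marks locking as disabled). */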
	if (!libc.threaded) {
		for (FILE *f=libc.ofl_head; f; f=f->next)
			init_file_lock(f);
		init_file_lock(__stdin_used);
		init_file_lock(__stdout_used);
		init_file_lock(__stderr_used);
		libc.threaded = 1;
	}
	if (attrp) attr = *attrp;
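
	/* Hold the "ptc" lock across thread setup so the TLS layout
	 * (libc.tls_size) cannot change underneath us; in dynamic-linked
	 * programs the dynamic linker provides real definitions of
	 * __acquire_ptc/__release_ptc, while the weak stubs above make
	 * this a no-op otherwise. */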
	__acquire_ptc();
	if (attr._a_stackaddr) {
		size_t need = libc.tls_size + __pthread_tsd_size;
		size = attr._a_stacksize + DEFAULT_STACK_SIZE;
		stack = (void *)(attr._a_stackaddr & -16);
		stack_limit = (void *)(attr._a_stackaddr - size);
		/* Use application-provided stack for TLS only when
		 * it does not take more than ~12% or 2k of the
		 * application's stack space. */
		if (need < size/8 && need < 2048) {
			tsd = stack - __pthread_tsd_size;
			stack = tsd - libc.tls_size;
		} else {
			size = ROUND(need);
			guard = 0;
		}
	} else {
		guard = ROUND(DEFAULT_GUARD_SIZE + attr._a_guardsize);
		size = guard + ROUND(DEFAULT_STACK_SIZE + attr._a_stacksize
			+ libc.tls_size + __pthread_tsd_size);
	}
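
	/* Layout of a libc-allocated thread mapping, low to high:
	 *   [ guard | stack (grows down) | TLS | TSD ]
	 * The TSD sits at the very top of the mapping, and the stack
	 * pointer starts at the base of the TLS area. */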
	if (!tsd) {
		if (guard) {
			map = mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
			if (mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)) {
				munmap(map, size);
				goto fail;
			}
		} else {
			map = mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
		}
		tsd = map + size - __pthread_tsd_size;
		if (!stack) {
			stack = tsd - libc.tls_size;
			stack_limit = map + guard;
		}
	}
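
	/* __copy_tls installs the initial TLS image at the given address
	 * and returns the new thread descriptor embedded within it. */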
	new = __copy_tls(tsd - libc.tls_size);
	new->map_base = map;
	new->map_size = size;
	new->stack = stack;
	new->stack_size = stack - stack_limit;
	new->pid = self->pid;
	new->errno_ptr = &new->errno_val;
	new->start = entry;
	new->start_arg = arg;
	new->self = new;
	new->tsd = (void *)tsd;
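
	/* Threads created detached must not use the kernel's child-tid
	 * clearing: by the time the kernel would write, the thread may
	 * already have unmapped its own stack (see the comment in
	 * pthread_exit). */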
	if (attr._a_detach) {
		new->detached = 1;
		flags -= CLONE_CHILD_CLEARTID;
	}
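
	/* Explicit scheduling requested: the new thread will wait on
	 * startlock until the policy has been applied below, with
	 * application signals blocked so no handler can run in it
	 * before its setup is complete. */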
	if (attr._a_sched) {
		do_sched = new->startlock[0] = 1;
		__block_app_sigs(new->sigmask);
	}
	new->unblock_cancel = self->cancel;
	new->canary = self->canary;

	a_inc(&libc.threads_minus_1);
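
	/* __clone arguments: entry point, top of the new stack, clone
	 * flags, argument passed to start(), then &new->tid (written by
	 * CLONE_PARENT_SETTID), the new thread pointer (CLONE_SETTLS),
	 * and &new->tid again (cleared on exit via
	 * CLONE_CHILD_CLEARTID). */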
	ret = __clone(start, stack, flags, new, &new->tid, TP_ADJ(new), &new->tid);

	__release_ptc();

	if (do_sched) {
		__restore_sigs(new->sigmask);
	}

	if (ret < 0) {
		a_dec(&libc.threads_minus_1);
		if (map) munmap(map, size);
		return EAGAIN;
	}

	if (do_sched) {
		ret = __syscall(SYS_sched_setscheduler, new->tid,
			attr._a_policy, &attr._a_prio);
		a_store(new->startlock, ret<0 ? 2 : 0);
		__wake(new->startlock, 1, 1);
		if (ret < 0) return -ret;
	}

	*res = new;
	return 0;
fail:
	__release_ptc();
	return EAGAIN;
}