#define _GNU_SOURCE
#include "pthread_impl.h"
#include "stdio_impl.h"
#include "libc.h"
#include <sys/mman.h>
#include <string.h>

void *__mmap(void *, size_t, int, int, int, off_t);
int __munmap(void *, size_t);
int __mprotect(void *, size_t, int);
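
/* Unless strong definitions are pulled in by other parts of libc, the symbols
 * aliased below resolve to the no-op dummy_0, so these hooks cost nothing
 * when the features that need them are unused. */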
static void dummy_0()
{
}
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);
weak_alias(dummy_0, __do_private_robust_list);
weak_alias(dummy_0, __do_orphaned_stdio_locks);
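
/* Termination path for a single thread: run cleanup handlers and TSD
 * destructors, update the global thread accounting, then either exit the
 * whole process (last thread) or terminate just this thread, unmapping its
 * own stack first if it is detached. */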
_Noreturn void __pthread_exit(void *result)
{
	pthread_t self = __pthread_self();
	sigset_t set;

	self->result = result;

	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}

	__pthread_tsd_run_dtors();
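
	/* Hold exitlock for the rest of the exit path to exclude operations
	 * (such as pthread_detach) that must not race with thread teardown. */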
	__lock(self->exitlock);

	/* Mark this thread dead before decrementing count */
	__lock(self->killlock);
	self->dead = 1;

	/* Block all signals before decrementing the live thread count.
	 * This is important to ensure that dynamically allocated TLS
	 * is not under-allocated/over-committed, and possibly for other
	 * reasons as well. */
	__block_all_sigs(&set);

	/* Wait to unlock the kill lock, which governs functions like
	 * pthread_kill which target a thread id, until signals have
	 * been blocked. This precludes observation of the thread id
	 * as a live thread (with application code running in it) after
	 * the thread was reported dead by ESRCH being returned. */
	__unlock(self->killlock);

	/* It's impossible to determine whether this is "the last thread"
	 * until performing the atomic decrement, since multiple threads
	 * could exit at the same time. For the last thread, revert the
	 * decrement and unblock signals to give the atexit handlers and
	 * stdio cleanup code a consistent state. */
	if (a_fetch_add(&libc.threads_minus_1, -1)==0) {
		libc.threads_minus_1 = 0;
		__restore_sigs(&set);
		exit(0);
	}

	if (self->locale != &libc.global_locale) {
		a_dec(&libc.uselocale_cnt);
		if (self->locale->ctype_utf8)
			a_dec(&libc.bytelocale_cnt_minus_1);
	}

	__do_private_robust_list();
	__do_orphaned_stdio_locks();

	if (self->detached && self->map_base) {
		/* Detached threads must avoid the kernel clear_child_tid
		 * feature, since the virtual address will have been
		 * unmapped and possibly already reused by a new mapping
		 * at the time the kernel would perform the write. In
		 * the case of threads that started out detached, the
		 * initial clone flags are correct, but if the thread was
		 * detached later (== 2), we need to clear it here. */
		if (self->detached == 2) __syscall(SYS_set_tid_address, 0);

		/* The following call unmaps the thread's stack mapping
		 * and then exits without touching the stack. */
		__unmapself(self->map_base, self->map_size);
	}

	for (;;) __syscall(SYS_exit, 0);
}
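
/* Helpers behind the pthread_cleanup_push/pthread_cleanup_pop macros; they
 * maintain the per-thread cancelbuf list that __pthread_exit consumes above. */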
void __do_cleanup_push(struct __ptcb *cb)
{
	if (!libc.has_thread_pointer) return;
	struct pthread *self = __pthread_self();
	cb->__next = self->cancelbuf;
	self->cancelbuf = cb;
}

void __do_cleanup_pop(struct __ptcb *cb)
{
	if (!libc.has_thread_pointer) return;
	__pthread_self()->cancelbuf = cb->__next;
}
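
/* Entry point for the new thread, passed to __clone below: finish any
 * deferred startup work, then hand control to the application's start
 * routine and route its return value into pthread_exit. */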
static int start(void *p)
{
	pthread_t self = p;
	if (self->startlock[0]) {
		__wait(self->startlock, 0, 1, 1);
		if (self->startlock[0]) {
			self->detached = 2;
			pthread_exit(0);
		}
		__restore_sigs(self->sigmask);
	}
	if (self->unblock_cancel)
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK,
			SIGPT_SET, 0, _NSIG/8);
	pthread_exit(self->start(self->start_arg));
	return 0;
}
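
/* Round a size up to a whole number of pages. */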
#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static volatile size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);
static void *dummy_tsd[1] = { 0 };
weak_alias(dummy_tsd, __pthread_tsd_main);

static FILE *volatile dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);
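
/* Streams created while the process was single-threaded carry a negative
 * lock value meaning locking is skipped; reset it so locking takes effect
 * once a second thread can exist. */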
static void init_file_lock(FILE *f)
{
	if (f && f->lock<0) f->lock = 0;
}
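
/* __copy_tls (defined elsewhere in libc) installs the TLS initialization
 * image at the given address and returns the thread descriptor embedded
 * in that block. */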
void *__copy_tls(unsigned char *);

int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
{
	int ret;
	size_t size, guard;
	struct pthread *self, *new;
	unsigned char *map = 0, *stack = 0, *tsd = 0, *stack_limit;
	unsigned flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND
		| CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS
		| CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID | CLONE_DETACHED;
	int do_sched = 0;
	pthread_attr_t attr = {0};

	if (!libc.can_do_threads) return ENOSYS;
	self = __pthread_self();
	if (!libc.threaded) {
		for (FILE *f=libc.ofl_head; f; f=f->next)
			init_file_lock(f);
		init_file_lock(__stdin_used);
		init_file_lock(__stdout_used);
		init_file_lock(__stderr_used);
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, _NSIG/8);
		self->tsd = (void **)__pthread_tsd_main;
		libc.threaded = 1;
	}
	if (attrp) attr = *attrp;

	__acquire_ptc();
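
	/* Size and place the new thread's stack, TLS, and TSD. A stack supplied
	 * via the attributes is used directly; TLS/TSD are carved out of it only
	 * if they fit comfortably, otherwise they get a separate mapping. */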
	if (attr._a_stackaddr) {
		size_t need = libc.tls_size + __pthread_tsd_size;
		size = attr._a_stacksize + DEFAULT_STACK_SIZE;
		stack = (void *)(attr._a_stackaddr & -16);
		stack_limit = (void *)(attr._a_stackaddr - size);
		/* Use application-provided stack for TLS only when
		 * it does not take more than ~12% or 2k of the
		 * application's stack space. */
		if (need < size/8 && need < 2048) {
			tsd = stack - __pthread_tsd_size;
			stack = tsd - libc.tls_size;
			memset(stack, 0, need);
		} else {
			size = ROUND(need);
			guard = 0;
		}
	} else {
		guard = ROUND(DEFAULT_GUARD_SIZE + attr._a_guardsize);
		size = guard + ROUND(DEFAULT_STACK_SIZE + attr._a_stacksize
			+ libc.tls_size + __pthread_tsd_size);
	}
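
	/* If TLS/TSD were not placed on a caller-provided stack, create a fresh
	 * mapping. When a guard is wanted, map everything PROT_NONE first and
	 * enable access only above the guard, leaving the guard pages
	 * inaccessible. */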
	if (!tsd) {
		if (guard) {
			map = __mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
			if (__mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)) {
				__munmap(map, size);
				goto fail;
			}
		} else {
			map = __mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
		}
		tsd = map + size - __pthread_tsd_size;
		if (!stack) {
			stack = tsd - libc.tls_size;
			stack_limit = map + guard;
		}
	}
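
	/* Fill in the new thread's descriptor, which lives inside the block
	 * returned by __copy_tls, before it can start running. */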
	new = __copy_tls(tsd - libc.tls_size);
	new->map_base = map;
	new->map_size = size;
	new->stack = stack;
	new->stack_size = stack - stack_limit;
	new->start = entry;
	new->start_arg = arg;
	new->self = new;
	new->tsd = (void *)tsd;
	new->locale = &libc.global_locale;
	if (attr._a_detach) {
		new->detached = 1;
		flags -= CLONE_CHILD_CLEARTID;
	}
	if (attr._a_sched) {
		do_sched = new->startlock[0] = 1;
		__block_app_sigs(new->sigmask);
	}
	new->unblock_cancel = self->cancel;
	new->canary = self->canary;
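
	/* The live-thread count is bumped before __clone so the child is counted
	 * even if it runs first; it is reverted below if creation fails. The
	 * trailing __clone arguments are the parent-tid address, the new TLS
	 * pointer, and the child-tid address used by CLONE_CHILD_CLEARTID. */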
	a_inc(&libc.threads_minus_1);
	ret = __clone(start, stack, flags, new, &new->tid, TP_ADJ(new), &new->tid);

	__release_ptc();

	if (do_sched) {
		__restore_sigs(new->sigmask);
	}

	if (ret < 0) {
		a_dec(&libc.threads_minus_1);
		if (map) __munmap(map, size);
		return EAGAIN;
	}

	if (do_sched) {
		ret = __syscall(SYS_sched_setscheduler, new->tid,
			attr._a_policy, &attr._a_prio);
		a_store(new->startlock, ret<0 ? 2 : 0);
		__wake(new->startlock, 1, 1);
		if (ret < 0) return -ret;
	}

	*res = new;
	return 0;
fail:
	__release_ptc();
	return EAGAIN;
}
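
/* Expose the standard POSIX names as weak aliases for the internal symbols. */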
weak_alias(__pthread_exit, pthread_exit);
weak_alias(__pthread_create, pthread_create);