#define _GNU_SOURCE
#include "pthread_impl.h"
#include "stdio_impl.h"
#include "libc.h"
#include "lock.h"
#include <sys/mman.h>
#include <string.h>
#include <stddef.h>

static void dummy_0()
{
}
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);
weak_alias(dummy_0, __do_orphaned_stdio_locks);
weak_alias(dummy_0, __dl_thread_cleanup);
weak_alias(dummy_0, __membarrier_init);
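/* Each of the above names is a no-op unless another translation unit
 * defining the strong symbol (e.g. pthread_key_create.c for
 * __pthread_tsd_run_dtors) is linked into the program. */
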
static int tl_lock_count;
static int tl_lock_waiters;

void __tl_lock(void)
{
	int tid = __pthread_self()->tid;
	int val = __thread_list_lock;
	if (val == tid) {
		tl_lock_count++;
		return;
	}
	while ((val = a_cas(&__thread_list_lock, 0, tid)))
		__wait(&__thread_list_lock, &tl_lock_waiters, val, 0);
}

void __tl_unlock(void)
{
	if (tl_lock_count) {
		tl_lock_count--;
		return;
	}
	a_store(&__thread_list_lock, 0);
	if (tl_lock_waiters) __wake(&__thread_list_lock, 1, 0);
}

void __tl_sync(pthread_t td)
{
	a_barrier();
	int val = __thread_list_lock;
	if (!val) return;
	__wait(&__thread_list_lock, &tl_lock_waiters, val, 0);
	if (tl_lock_waiters) __wake(&__thread_list_lock, 1, 0);
}

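/* The thread list lock doubles as the exit futex: the kernel clears
 * and wakes it (via CLONE_CHILD_CLEARTID) when a thread exits, so
 * __tl_sync lets callers such as pthread_join wait until the kernel
 * is completely done with a thread's context. */
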
_Noreturn void __pthread_exit(void *result)
{
	pthread_t self = __pthread_self();
	sigset_t set;

	self->canceldisable = 1;
	self->cancelasync = 0;
	self->result = result;
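
	/* Run queued cleanup handlers in LIFO order. These are the
	 * records pushed by pthread_cleanup_push (via
	 * __do_cleanup_push below). */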
	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}

	__pthread_tsd_run_dtors();

	__block_app_sigs(&set);

	/* This atomic potentially competes with a concurrent pthread_detach
	 * call; the loser is responsible for freeing thread resources. */
	int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);

	if (state==DT_DETACHED && self->map_base) {
		/* Since __unmapself bypasses the normal munmap code path,
		 * explicitly wait for vmlock holders first. This must be
		 * done before any locks are taken, to avoid lock ordering
		 * issues that could lead to deadlock. */
		__vm_wait();
	}

	/* Access to target the exiting thread with syscalls that use
	 * its kernel tid is controlled by killlock. For detached threads,
	 * any use past this point would have undefined behavior, but for
	 * joinable threads it's a valid usage that must be handled.
	 * Signals must be blocked since pthread_kill must be AS-safe. */
	LOCK(self->killlock);

	/* The thread list lock must be AS-safe, and thus depends on
	 * application signals being blocked above. */
	__tl_lock();

	/* If this is the only thread in the list, don't proceed with
	 * termination of the thread, but restore the previous lock and
	 * signal state to prepare for exit to call atexit handlers. */
	if (self->next == self) {
		__tl_unlock();
		UNLOCK(self->killlock);
		self->detach_state = state;
		__restore_sigs(&set);
		exit(0);
	}

	/* At this point we are committed to thread termination. */

	/* After the kernel thread exits, its tid may be reused. Clear it
	 * to prevent inadvertent use and inform functions that would use
	 * it that it's no longer available. At this point the killlock
	 * may be released, since functions that use it will consistently
	 * see the thread as having exited. Release it now so that no
	 * remaining locks (except thread list) are held if we end up
	 * resetting need_locks below. */
	self->tid = 0;
	UNLOCK(self->killlock);

	/* Process robust list in userspace to handle non-pshared mutexes
	 * and the detached thread case where the robust list head will
	 * be invalid when the kernel would process it. */
	__vm_lock();
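	/* For each mutex still held, a_swap below stores 0x40000000,
	 * the kernel's FUTEX_OWNER_DIED flag, so the next acquirer
	 * observes EOWNERDEAD. The pending pointer mirrors the kernel's
	 * robust list protocol, keeping the operation recoverable if
	 * the thread dies mid-update. */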
	volatile void *volatile *rp;
	while ((rp=self->robust_list.head) && rp != &self->robust_list.head) {
		pthread_mutex_t *m = (void *)((char *)rp
			- offsetof(pthread_mutex_t, _m_next));
		int waiters = m->_m_waiters;
		int priv = (m->_m_type & 128) ^ 128;
		self->robust_list.pending = rp;
		self->robust_list.head = *rp;
		int cont = a_swap(&m->_m_lock, 0x40000000);
		self->robust_list.pending = 0;
		if (cont < 0 || waiters)
			__wake(&m->_m_lock, 1, priv);
	}
	__vm_unlock();

	__do_orphaned_stdio_locks();
	__dl_thread_cleanup();

	/* Last, unlink thread from the list. This change will not be visible
	 * until the lock is released, which only happens after SYS_exit
	 * has been called, via the exit futex address pointing at the lock.
	 * This needs to happen after any possible calls to LOCK() that might
	 * skip locking if process appears single-threaded. */
	if (!--libc.threads_minus_1) libc.need_locks = -1;
	self->next->prev = self->prev;
	self->prev->next = self->next;
	self->prev = self->next = self;

	if (state==DT_DETACHED && self->map_base) {
		/* Detached threads must block even implementation-internal
		 * signals, since they will not have a stack in their last
		 * moments of existence. */
		__block_all_sigs(&set);

		/* Robust list will no longer be valid, and was already
		 * processed above, so unregister it with the kernel. */
		if (self->robust_list.off)
			__syscall(SYS_set_robust_list, 0, 3*sizeof(long));

		/* The following call unmaps the thread's stack mapping
		 * and then exits without touching the stack. */
		__unmapself(self->map_base, self->map_size);
	}

	/* Wake any joiner. */
	a_store(&self->detach_state, DT_EXITED);
	__wake(&self->detach_state, 1, 1);

	for (;;) __syscall(SYS_exit, 0);
}

void __do_cleanup_push(struct __ptcb *cb)
{
	struct pthread *self = __pthread_self();
	cb->__next = self->cancelbuf;
	self->cancelbuf = cb;
}

void __do_cleanup_pop(struct __ptcb *cb)
{
	__pthread_self()->cancelbuf = cb->__next;
}

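/* A rough sketch of how the public macros map onto the two helpers
 * above (the exact macro bodies live in pthread.h and may differ):
 *
 *	#define pthread_cleanup_push(f, x) \
 *		do { struct __ptcb __cb; _pthread_cleanup_push(&__cb, f, x);
 *	#define pthread_cleanup_pop(r) \
 *		_pthread_cleanup_pop(&__cb, (r)); } while(0)
 *
 * where _pthread_cleanup_push/_pop record the handler via
 * __do_cleanup_push/__do_cleanup_pop and run it when r is nonzero. */
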
struct start_args {
	void *(*start_func)(void *);
	void *start_arg;
	volatile int control;
	unsigned long sig_mask[_NSIG/8/sizeof(long)];
};

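/* The control word coordinates the creator and the new thread when
 * explicit scheduling was requested. The values are inferred from the
 * code below: 0 means run immediately, 1 means the creator still has
 * scheduling to apply, 2 means the new thread is blocked waiting on
 * the word, and 3 means setup failed and the new thread must exit
 * without running the start function. */
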
static int start(void *p)
{
	struct start_args *args = p;
	int state = args->control;
	if (state) {
		/* Wait for the creator to apply scheduling, if any. */
		if (a_cas(&args->control, 1, 2)==1)
			__wait(&args->control, 0, 2, 1);
		if (args->control) {
			/* Creator reported failure; exit without ever
			 * running the application's start function. */
			__syscall(SYS_set_tid_address, &args->control);
			for (;;) __syscall(SYS_exit, 0);
		}
	}
	__syscall(SYS_rt_sigprocmask, SIG_SETMASK, &args->sig_mask, 0, _NSIG/8);
	__pthread_exit(args->start_func(args->start_arg));
	return 0;
}

static int start_c11(void *p)
{
	struct start_args *args = p;
	int (*start)(void*) = (int(*)(void*)) args->start_func;
	__pthread_exit((void *)(uintptr_t)start(args->start_arg));
	return 0;
}

#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)
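/* ROUND rounds up to a whole number of pages; with PAGE_SIZE 4096,
 * ROUND(1) == 4096, ROUND(4096) == 4096, and ROUND(4097) == 8192.
 * The &-PAGE_SIZE works because PAGE_SIZE is a power of two, so its
 * negation is a mask clearing the low-order bits. */
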
/* pthread_key_create.c overrides this */
static volatile size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);
static void *dummy_tsd[1] = { 0 };
weak_alias(dummy_tsd, __pthread_tsd_main);

static FILE *volatile dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);

static void init_file_lock(FILE *f)
{
	if (f && f->lock<0) f->lock = 0;
}

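/* A negative lock value marks a FILE created while the process was
 * still single-threaded, for which locking is skipped entirely;
 * resetting it to 0 enables normal locking before the first thread
 * is created. (This reading follows stdio_impl.h conventions.) */
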
int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
{
	int ret, c11 = (attrp == __ATTRP_C11_THREAD);
	size_t size, guard;
	struct pthread *self, *new;
	unsigned char *map = 0, *stack = 0, *tsd = 0, *stack_limit;
	unsigned flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND
		| CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS
		| CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID | CLONE_DETACHED;
	pthread_attr_t attr = { 0 };
	sigset_t set;

	if (!libc.can_do_threads) return ENOSYS;
	self = __pthread_self();
	if (!libc.threaded) {
		for (FILE *f=*__ofl_lock(); f; f=f->next)
			init_file_lock(f);
		__ofl_unlock();
		init_file_lock(__stdin_used);
		init_file_lock(__stdout_used);
		init_file_lock(__stderr_used);
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, _NSIG/8);
		self->tsd = (void **)__pthread_tsd_main;
		__membarrier_init();
		libc.threaded = 1;
	}
	if (attrp && !c11) attr = *attrp;

	__acquire_ptc();
	if (!attr._a_stacksize) {
		attr._a_stacksize = __default_stacksize;
		attr._a_guardsize = __default_guardsize;
	}

	if (attr._a_stackaddr) {
		size_t need = libc.tls_size + __pthread_tsd_size;
		size = attr._a_stacksize;
		stack = (void *)(attr._a_stackaddr & -16);
		stack_limit = (void *)(attr._a_stackaddr - size);
		/* Use application-provided stack for TLS only when
		 * it does not take more than ~12% or 2k of the
		 * application's stack space. */
		if (need < size/8 && need < 2048) {
			tsd = stack - __pthread_tsd_size;
			stack = tsd - libc.tls_size;
			memset(stack, 0, need);
		} else {
			size = ROUND(need);
		}
		guard = 0;
	} else {
		guard = ROUND(attr._a_guardsize);
		size = guard + ROUND(attr._a_stacksize
			+ libc.tls_size + __pthread_tsd_size);
	}
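
	/* Worked example, assuming PAGE_SIZE 4096: a request for a 64k
	 * stack with a 4k guard and 1k of combined TLS/TSD yields
	 * size = 4096 + ROUND(65536 + 1024) = 4096 + 69632 = 73728
	 * bytes, one mapping holding guard, stack, TLS, and TSD. */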

	if (!tsd) {
		if (guard) {
			map = __mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
			if (__mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)
			    && errno != ENOSYS) {
				__munmap(map, size);
				goto fail;
			}
		} else {
			map = __mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
		}
		tsd = map + size - __pthread_tsd_size;
		if (!stack) {
			stack = tsd - libc.tls_size;
			stack_limit = map + guard;
		}
	}

	new = __copy_tls(tsd - libc.tls_size);
	new->map_base = map;
	new->map_size = size;
	new->stack = stack;
	new->stack_size = stack - stack_limit;
	new->guard_size = guard;
	new->self = new;
	new->tsd = (void *)tsd;
	new->locale = &libc.global_locale;
	if (attr._a_detach) {
		new->detach_state = DT_DETACHED;
	} else {
		new->detach_state = DT_JOINABLE;
	}
	new->robust_list.head = &new->robust_list.head;
	new->canary = self->canary;
	new->sysinfo = self->sysinfo;

	/* Setup argument structure for the new thread on its stack.
	 * It's safe to access from the caller only until the thread
	 * list is unlocked. */
	stack -= (uintptr_t)stack % sizeof(uintptr_t);
	stack -= sizeof(struct start_args);
	struct start_args *args = (void *)stack;
	args->start_func = entry;
	args->start_arg = arg;
	args->control = attr._a_sched ? 1 : 0;

	/* Application signals (but not the synccall signal) must be
	 * blocked before the thread list lock can be taken, to ensure
	 * that the lock is AS-safe. */
	__block_app_sigs(&set);

	/* Ensure SIGCANCEL is unblocked in new thread. This requires
	 * working with a copy of the set so we can restore the
	 * original mask in the calling thread. */
	memcpy(&args->sig_mask, &set, sizeof args->sig_mask);
	args->sig_mask[(SIGCANCEL-1)/8/sizeof(long)] &=
		~(1UL<<((SIGCANCEL-1)%(8*sizeof(long))));
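	/* For example, on a 64-bit target (8*sizeof(long) == 64) with
	 * SIGCANCEL defined as 33, the mask word index is 0 and the
	 * cleared bit is 1UL<<32. */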

	__tl_lock();
	if (!libc.threads_minus_1++) libc.need_locks = 1;
	ret = __clone((c11 ? start_c11 : start), stack, flags, args, &new->tid, TP_ADJ(new), &__thread_list_lock);

	/* All clone failures translate to EAGAIN. If explicit scheduling
	 * was requested, attempt it before unlocking the thread list so
	 * that the failed thread is never exposed and so that we can
	 * clean up all transient resource usage before returning. */
	if (ret < 0) {
		ret = -EAGAIN;
	} else if (attr._a_sched) {
		ret = __syscall(SYS_sched_setscheduler,
			new->tid, attr._a_policy, &attr._a_prio);
		if (a_swap(&args->control, ret ? 3 : 0)==2)
			__wake(&args->control, 1, 1);
		if (ret)
			__wait(&args->control, 0, 3, 0);
	}

	if (ret >= 0) {
		new->next = self->next;
		new->prev = self;
		new->next->prev = new;
		new->prev->next = new;
	} else {
		if (!--libc.threads_minus_1) libc.need_locks = 0;
	}
	__tl_unlock();
	__restore_sigs(&set);
	__release_ptc();

	if (ret < 0) {
		if (map) __munmap(map, size);
		return -ret;
	}

	*res = new;
	return 0;
fail:
	__release_ptc();
	return EAGAIN;
}

weak_alias(__pthread_exit, pthread_exit);
weak_alias(__pthread_create, pthread_create);