#define _GNU_SOURCE
#include "pthread_impl.h"
#include "stdio_impl.h"
#include "libc.h"
#include "lock.h"
#include <sys/mman.h>
#include <string.h>
#include <stddef.h>

static void dummy_0()
{
}
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);
weak_alias(dummy_0, __do_orphaned_stdio_locks);
weak_alias(dummy_0, __dl_thread_cleanup);
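
/* __thread_list_lock is a three-state futex lock: 0 = unlocked,
 * 1 = locked with no waiters, 2 = locked with possible waiters.
 * Waiters escalate 1 to 2 before sleeping, so the unlock path only
 * has to issue a futex wake when it observed state 2. */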
void __tl_lock(void)
{
	if (!a_cas(&__thread_list_lock, 0, 1)) return;
	do {
		a_cas(&__thread_list_lock, 1, 2);
		__futexwait(&__thread_list_lock, 2, 0);
	} while (a_cas(&__thread_list_lock, 0, 2));
}

void __tl_unlock(void)
{
	if (a_swap(&__thread_list_lock, 0)==2)
		__wake(&__thread_list_lock, 1, 0);
}
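
/* Callers such as pthread_join use __tl_sync after observing a thread's
 * exit, to wait for the kernel's release of the thread list lock via
 * the exit futex; that release happens only once the thread is fully
 * dead and its unlinking from the list is globally visible. */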
void __tl_sync(pthread_t td)
{
	a_barrier();
	if (!__thread_list_lock) return;
	a_cas(&__thread_list_lock, 1, 2);
	__wait(&__thread_list_lock, 0, 2, 0);
	__wake(&__thread_list_lock, 1, 0);
}

_Noreturn void __pthread_exit(void *result)
{
	pthread_t self = __pthread_self();
	sigset_t set;

	self->canceldisable = 1;
	self->cancelasync = 0;
	self->result = result;
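
	/* Run cancellation cleanup handlers in LIFO order: the list head
	 * is the most recently pushed frame (see __do_cleanup_push), so
	 * popping from the head gives the reverse-of-registration order
	 * POSIX requires. */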
	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}

	__pthread_tsd_run_dtors();

	/* Access to target the exiting thread with syscalls that use
	 * its kernel tid is controlled by killlock. For detached threads,
	 * any use past this point would have undefined behavior, but for
	 * joinable threads it's a valid usage that must be handled. */
	LOCK(self->killlock);

	/* The thread list lock must be AS-safe, and thus requires
	 * application signals to be blocked before it can be taken. */
	__block_app_sigs(&set);
	__tl_lock();

	/* If this is the only thread in the list, don't proceed with
	 * termination of the thread, but restore the previous lock and
	 * signal state to prepare for exit to call atexit handlers. */
	if (self->next == self) {
		__tl_unlock();
		__restore_sigs(&set);
		UNLOCK(self->killlock);
		exit(0);
	}

	/* At this point we are committed to thread termination. Unlink
	 * the thread from the list. This change will not be visible
	 * until the lock is released, which only happens after SYS_exit
	 * has been called, via the exit futex address pointing at the lock. */
	libc.threads_minus_1--;
	self->next->prev = self->prev;
	self->prev->next = self->next;
	self->prev = self->next = self;
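
	/* Note: the release happens in the kernel. __clone passes
	 * &__thread_list_lock as the CLONE_CHILD_CLEARTID address, so the
	 * kernel zeroes the lock and wakes one waiter when this thread
	 * dies, atomically publishing the unlink above. */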

	/* Process robust list in userspace to handle non-pshared mutexes
	 * and the detached thread case where the robust list head will
	 * be invalid when the kernel would process it. */
	__vm_lock();
	volatile void *volatile *rp;
	while ((rp=self->robust_list.head) && rp != &self->robust_list.head) {
		pthread_mutex_t *m = (void *)((char *)rp
			- offsetof(pthread_mutex_t, _m_next));
		int waiters = m->_m_waiters;
		int priv = (m->_m_type & 128) ^ 128;
		self->robust_list.pending = rp;
		self->robust_list.head = *rp;
		int cont = a_swap(&m->_m_lock, 0x40000000);
		self->robust_list.pending = 0;
		if (cont < 0 || waiters)
			__wake(&m->_m_lock, 1, priv);
	}
	__vm_unlock();
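
	/* Note: 0x40000000 is the owner-dead flag in _m_lock; the next
	 * acquirer of a robust mutex that sees it gets EOWNERDEAD. Bit 128
	 * of _m_type marks a process-shared mutex, so priv is nonzero
	 * (private-futex wake) exactly for non-pshared mutexes, and a
	 * negative old lock value means waiters may exist. */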

	__do_orphaned_stdio_locks();
	__dl_thread_cleanup();

	/* This atomic potentially competes with a concurrent pthread_detach
	 * call; the loser is responsible for freeing thread resources. */
	int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);
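
	/* If pthread_detach won the race, state reads DT_DETACHED and this
	 * thread unmaps itself below; if this CAS won, a concurrent
	 * pthread_detach sees DT_EXITING and falls back to joining, so the
	 * detaching/joining side performs the cleanup instead. */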

	if (state==DT_DETACHED && self->map_base) {
		/* Detached threads must block even implementation-internal
		 * signals, since they will not have a stack in their last
		 * moments of existence. */
		__block_all_sigs(&set);

		/* Robust list will no longer be valid, and was already
		 * processed above, so unregister it with the kernel. */
		if (self->robust_list.off)
			__syscall(SYS_set_robust_list, 0, 3*sizeof(long));

		/* Since __unmapself bypasses the normal munmap code path,
		 * explicitly wait for vmlock holders first. */
		__vm_wait();

		/* The following call unmaps the thread's stack mapping
		 * and then exits without touching the stack. */
		__unmapself(self->map_base, self->map_size);
	}
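
	/* __unmapself is arch-specific: it issues SYS_munmap on the
	 * thread's own mapping and then SYS_exit without ever returning,
	 * since the C stack is gone once the munmap completes. */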

	/* Wake any joiner. */
	__wake(&self->detach_state, 1, 1);

	/* After the kernel thread exits, its tid may be reused. Clear it
	 * to prevent inadvertent use and inform functions that would use
	 * it that it's no longer available. */
	self->tid = 0;
	UNLOCK(self->killlock);

	for (;;) __syscall(SYS_exit, 0);
}

void __do_cleanup_push(struct __ptcb *cb)
{
	struct pthread *self = __pthread_self();
	cb->__next = self->cancelbuf;
	self->cancelbuf = cb;
}

void __do_cleanup_pop(struct __ptcb *cb)
{
	__pthread_self()->cancelbuf = cb->__next;
}
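
/* These are the backends for the pthread_cleanup_push/pop macros,
 * which expand roughly (see pthread.h) to:
 *
 *	do { struct __ptcb __cb; _pthread_cleanup_push(&__cb, f, x);
 *	...protected code...
 *	_pthread_cleanup_pop(&__cb, run); } while(0)
 *
 * so each cleanup frame lives in the using function's own stack frame. */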

struct start_args {
	void *(*start_func)(void *);
	void *start_arg;
	pthread_attr_t *attr;
	volatile int *perr;
	unsigned long sig_mask[_NSIG/8/sizeof(long)];
};

static int start(void *p)
{
	struct start_args *args = p;
	if (args->attr) {
		pthread_t self = __pthread_self();
		int ret = -__syscall(SYS_sched_setscheduler, self->tid,
			args->attr->_a_policy, &args->attr->_a_prio);
		if (a_swap(args->perr, ret)==-2)
			__wake(args->perr, 1, 1);
		if (ret) {
			self->detach_state = DT_DETACHED;
			__pthread_exit(0);
		}
	}
	__syscall(SYS_rt_sigprocmask, SIG_SETMASK, &args->sig_mask, 0, _NSIG/8);
	__pthread_exit(args->start_func(args->start_arg));
	return 0;
}
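
/* If applying the requested scheduling fails, the new thread must not
 * run the start function: pthread_create reports the error, so the
 * thread marks itself detached (nothing can ever join it) and exits. */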

static int start_c11(void *p)
{
	struct start_args *args = p;
	int (*start)(void*) = (int(*)(void*)) args->start_func;
	__pthread_exit((void *)(uintptr_t)start(args->start_arg));
	return 0;
}

#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)
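
/* Rounds up to a whole number of pages: -PAGE_SIZE is the all-ones
 * mask with the low page-offset bits clear, so e.g. with PAGE_SIZE
 * 4096, ROUND(1) == 4096 and ROUND(8192) == 8192. */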

/* pthread_key_create.c overrides this */
static volatile size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);
static void *dummy_tsd[1] = { 0 };
weak_alias(dummy_tsd, __pthread_tsd_main);

static FILE *volatile dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);

static void init_file_lock(FILE *f)
{
	if (f && f->lock<0) f->lock = 0;
}

int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
{
	int ret, c11 = (attrp == __ATTRP_C11_THREAD);
	size_t size, guard;
	struct pthread *self, *new;
	unsigned char *map = 0, *stack = 0, *tsd = 0, *stack_limit;
	unsigned flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND
		| CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS
		| CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID | CLONE_DETACHED;
	pthread_attr_t attr = { 0 };
	sigset_t set;
	volatile int err = -1;

	if (!libc.can_do_threads) return ENOSYS;
	self = __pthread_self();
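
	/* First call that makes the process threaded: until now stdio ran
	 * lockless (FILE locks hold a negative sentinel), so arm the locks
	 * and unblock the implementation-internal signals. */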
	if (!libc.threaded) {
		for (FILE *f=*__ofl_lock(); f; f=f->next)
			init_file_lock(f);
		__ofl_unlock();
		init_file_lock(__stdin_used);
		init_file_lock(__stdout_used);
		init_file_lock(__stderr_used);
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, _NSIG/8);
		self->tsd = (void **)__pthread_tsd_main;
		libc.threaded = 1;
	}
	if (attrp && !c11) attr = *attrp;

	__acquire_ptc();
	if (!attrp || c11) {
		attr._a_stacksize = __default_stacksize;
		attr._a_guardsize = __default_guardsize;
	}

	if (attr._a_stackaddr) {
		size_t need = libc.tls_size + __pthread_tsd_size;
		size = attr._a_stacksize;
		stack = (void *)(attr._a_stackaddr & -16);
		stack_limit = (void *)(attr._a_stackaddr - size);
		/* Use application-provided stack for TLS only when
		 * it does not take more than ~12% or 2k of the
		 * application's stack space. */
		if (need < size/8 && need < 2048) {
			tsd = stack - __pthread_tsd_size;
			stack = tsd - libc.tls_size;
			memset(stack, 0, need);
		} else {
			size = ROUND(need);
		}
		guard = 0;
	} else {
		guard = ROUND(attr._a_guardsize);
		size = guard + ROUND(attr._a_stacksize
			+ libc.tls_size + __pthread_tsd_size);
	}
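
	/* Resulting layout of a libc-allocated mapping, low to high:
	 * [ guard | thread stack (grows down) | TLS | tsd ], with tsd
	 * ending at map+size. E.g. a 128k stack request with a 4k guard,
	 * 4k of TLS and 4k of tsd maps 140k and puts stack_limit at
	 * map+4k. */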

	if (!tsd) {
		if (guard) {
			map = __mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
			if (__mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)
			    && errno != ENOSYS) {
				__munmap(map, size);
				goto fail;
			}
		} else {
			map = __mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
		}
		tsd = map + size - __pthread_tsd_size;
		if (!stack) {
			stack = tsd - libc.tls_size;
			stack_limit = map + guard;
		}
	}
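
	/* Mapping PROT_NONE first and then enabling access only above the
	 * guard keeps the guard pages inaccessible; ENOSYS from mprotect
	 * is tolerated for targets without memory protection. */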

	new = __copy_tls(tsd - libc.tls_size);
	new->map_base = map;
	new->map_size = size;
	new->stack = stack;
	new->stack_size = stack - stack_limit;
	new->guard_size = guard;
	new->self = new;
	new->tsd = (void *)tsd;
	new->locale = &libc.global_locale;
	if (attr._a_detach) {
		new->detach_state = DT_DETACHED;
	} else {
		new->detach_state = DT_JOINABLE;
	}
	new->robust_list.head = &new->robust_list.head;
	new->CANARY = self->CANARY;

	/* Setup argument structure for the new thread on its stack.
	 * It's safe to access from the caller only until the thread
	 * list is unlocked. */
	stack -= (uintptr_t)stack % sizeof(uintptr_t);
	stack -= sizeof(struct start_args);
	struct start_args *args = (void *)stack;
	args->start_func = entry;
	args->start_arg = arg;
	if (attr._a_sched) {
		args->attr = &attr;
		args->perr = &err;
	} else {
		args->attr = 0;
	}

	/* Application signals (but not the synccall signal) must be
	 * blocked before the thread list lock can be taken, to ensure
	 * that the lock is AS-safe. */
	__block_app_sigs(&set);

	/* Ensure SIGCANCEL is unblocked in new thread. This requires
	 * working with a copy of the set so we can restore the
	 * original mask in the calling thread. */
	memcpy(&args->sig_mask, &set, sizeof args->sig_mask);
	args->sig_mask[(SIGCANCEL-1)/8/sizeof(long)] &=
		~(1UL<<((SIGCANCEL-1)%(8*sizeof(long))));
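
	/* Index/shift math: with 64-bit longs, SIGCANCEL (33 in musl)
	 * falls in word (33-1)/64 == 0 at bit (33-1)%64 == 32, so the
	 * lines above clear exactly that bit, leaving SIGCANCEL
	 * deliverable in the new thread. */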

	__tl_lock();
	libc.threads_minus_1++;
	ret = __clone((c11 ? start_c11 : start), stack, flags, args, &new->tid, TP_ADJ(new), &__thread_list_lock);

	/* If clone succeeded, new thread must be linked on the thread
	 * list before unlocking it, even if scheduling may still fail. */
	if (ret >= 0) {
		new->next = self->next;
		new->prev = self;
		new->next->prev = new;
		new->prev->next = new;
	}
	__tl_unlock();
	__restore_sigs(&set);
	__release_ptc();

	if (ret < 0) {
		libc.threads_minus_1--;
		if (map) __munmap(map, size);
		return EAGAIN;
	}
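
	/* Handshake with start() when explicit scheduling was requested:
	 * err holds -1 until the new thread publishes the result of
	 * SYS_sched_setscheduler; a creator arriving first swaps in -2 to
	 * ask for a futex wake. */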
	if (attr._a_sched) {
		if (a_cas(&err, -1, -2)==-1)
			__wait(&err, 0, -2, 1);
		ret = err;
		if (ret) return ret;
	}

	*res = new;
	return 0;
fail:
	__release_ptc();
	return EAGAIN;
}

weak_alias(__pthread_exit, pthread_exit);
weak_alias(__pthread_create, pthread_create);