2 #include "pthread_impl.h"
3 #include "stdio_impl.h"
/* Weak no-op defaults: each symbol aliases dummy_0 (defined on lines elided
 * from this excerpt; presumably an empty function). The translation unit
 * that actually implements the facility (TSD destructors, stdio orphan-lock
 * cleanup, dynamic-linker thread cleanup, etc.) provides a strong definition
 * that overrides the alias, so this file never links against missing code. */
13 weak_alias(dummy_0, __acquire_ptc);
14 weak_alias(dummy_0, __release_ptc);
15 weak_alias(dummy_0, __pthread_tsd_run_dtors);
16 weak_alias(dummy_0, __do_orphaned_stdio_locks);
17 weak_alias(dummy_0, __dl_thread_cleanup);
/* Body fragment of the thread-list lock acquire (the function header and
 * the opening of the do-loop fall on elided lines). Standard three-state
 * futex lock: 0 = unlocked, 1 = locked uncontended, 2 = locked with
 * (possible) waiters. */
/* Fast path: atomically take a free lock (0 -> 1) and return. */
21 if (!a_cas(&__thread_list_lock, 0, 1)) return;
/* Contended path: mark the lock contended (1 -> 2) so the holder knows to
 * wake us, sleep while it reads 2, then retry taking it as contended
 * (0 -> 2) until the CAS succeeds. */
23 a_cas(&__thread_list_lock, 1, 2);
24 __futexwait(&__thread_list_lock, 2, 0);
25 } while (a_cas(&__thread_list_lock, 0, 2));
/* Release the thread-list lock. The atomic swap to 0 returns the previous
 * state; a previous value of 2 means at least one waiter advertised itself,
 * so wake exactly one. (Brace lines are elided from this excerpt.) */
28 void __tl_unlock(void)
30 if (a_swap(&__thread_list_lock, 0)==2)
31 __wake(&__thread_list_lock, 1, 0);
/* Wait until the thread-list lock is released, then propagate the wake to
 * any other waiters. Used to synchronize with an exiting thread: the lock
 * word doubles as the exiting thread's CLONE_CHILD_CLEARTID futex address,
 * so the kernel wakes it on thread exit. NOTE(review): td is unused in the
 * visible lines; elided lines may use it -- confirm against full source. */
34 void __tl_sync(pthread_t td)
/* Lock already free: nothing to wait for. */
37 if (!__thread_list_lock) return;
/* Advertise contention so the unlocker issues a wake, wait for the word to
 * reach 0, then chain the wake to the next waiter. */
38 a_cas(&__thread_list_lock, 1, 2);
39 __wait(&__thread_list_lock, 0, 2, 0);
40 __wake(&__thread_list_lock, 1, 0);
/* Terminate the calling thread: run pending cancellation cleanup handlers
 * and TSD destructors, unlink the thread from the global thread list,
 * process its robust mutex list in userspace, wake any joiner, and finally
 * invoke SYS_exit. Never returns. NOTE(review): many lines (locking calls,
 * braces, some statements) are elided from this excerpt; comments below
 * annotate only the visible code. */
43 _Noreturn void __pthread_exit(void *result)
45 pthread_t self = __pthread_self();
/* Permanently disable cancellation so cleanup cannot itself be cancelled,
 * and record the exit value for a potential joiner. */
48 self->canceldisable = 1;
49 self->cancelasync = 0;
50 self->result = result;
/* Pop cleanup handlers LIFO; the actual handler invocation f(x) is on an
 * elided line. */
52 while (self->cancelbuf) {
53 void (*f)(void *) = self->cancelbuf->__f;
54 void *x = self->cancelbuf->__x;
55 self->cancelbuf = self->cancelbuf->__next;
/* Run thread-specific-data destructors (weak no-op unless the TSD
 * implementation is linked). */
59 __pthread_tsd_run_dtors();
61 /* Access to target the exiting thread with syscalls that use
62 * its kernel tid is controlled by killlock. For detached threads,
63 * any use past this point would have undefined behavior, but for
64 * joinable threads it's a valid usage that must be handled. */
67 /* The thread list lock must be AS-safe, and thus requires
68 * application signals to be blocked before it can be taken. */
69 __block_app_sigs(&set);
72 /* If this is the only thread in the list, don't proceed with
73 * termination of the thread, but restore the previous lock and
74 * signal state to prepare for exit to call atexit handlers. */
75 if (self->next == self) {
78 UNLOCK(self->killlock);
82 /* At this point we are committed to thread termination. Unlink
83 * the thread from the list. This change will not be visible
84 * until the lock is released, which only happens after SYS_exit
85 * has been called, via the exit futex address pointing at the lock. */
86 libc.threads_minus_1--;
87 self->next->prev = self->prev;
88 self->prev->next = self->next;
/* Make the node self-linked so any stray traversal from this thread's
 * pointers terminates immediately. */
89 self->prev = self->next = self;
91 /* Process robust list in userspace to handle non-pshared mutexes
92 * and the detached thread case where the robust list head will
93 * be invalid when the kernel would process it. */
95 volatile void *volatile *rp;
/* Walk the circular robust list until it points back at its own head. */
96 while ((rp=self->robust_list.head) && rp != &self->robust_list.head) {
/* Recover the mutex object from its embedded _m_next link. */
97 pthread_mutex_t *m = (void *)((char *)rp
98 - offsetof(pthread_mutex_t, _m_next));
99 int waiters = m->_m_waiters;
/* Bit 128 of _m_type distinguishes process-shared from private; priv is
 * nonzero (128) for non-pshared mutexes. */
100 int priv = (m->_m_type & 128) ^ 128;
/* Publish the in-progress entry (pending) before unlinking, mirroring the
 * kernel's robust-list protocol, so a crash mid-operation is recoverable. */
101 self->robust_list.pending = rp;
102 self->robust_list.head = *rp;
/* Mark the lock word owner-died (0x40000000 -- presumably matching the
 * futex FUTEX_OWNER_DIED convention; confirm against the mutex impl). */
103 int cont = a_swap(&m->_m_lock, 0x40000000);
104 self->robust_list.pending = 0;
105 if (cont < 0 || waiters)
106 __wake(&m->_m_lock, 1, priv);
110 __do_orphaned_stdio_locks();
111 __dl_thread_cleanup();
113 /* This atomic potentially competes with a concurrent pthread_detach
114 * call; the loser is responsible for freeing thread resources. */
115 int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);
117 if (state==DT_DETACHED && self->map_base) {
118 /* Detached threads must block even implementation-internal
119 * signals, since they will not have a stack in their last
120 * moments of existence. */
121 __block_all_sigs(&set);
123 /* Robust list will no longer be valid, and was already
124 * processed above, so unregister it with the kernel. */
125 if (self->robust_list.off)
126 __syscall(SYS_set_robust_list, 0, 3*sizeof(long));
128 /* Since __unmapself bypasses the normal munmap code path,
129 * explicitly wait for vmlock holders first. */
132 /* The following call unmaps the thread's stack mapping
133 * and then exits without touching the stack. */
134 __unmapself(self->map_base, self->map_size);
137 /* Wake any joiner. */
138 __wake(&self->detach_state, 1, 1);
140 /* After the kernel thread exits, its tid may be reused. Clear it
141 * to prevent inadvertent use and inform functions that would use
142 * it that it's no longer available. */
144 UNLOCK(self->killlock);
/* Loop defensively: SYS_exit must not return, but the thread must never
 * execute past this point even if it does. */
146 for (;;) __syscall(SYS_exit, 0);
/* Push a cancellation cleanup record onto the calling thread's cancelbuf
 * stack (singly linked, LIFO). Popped and run by __pthread_exit above.
 * (Brace lines are elided from this excerpt.) */
149 void __do_cleanup_push(struct __ptcb *cb)
151 struct pthread *self = __pthread_self();
152 cb->__next = self->cancelbuf;
153 self->cancelbuf = cb;
/* Pop the topmost cleanup record; cb must be the current stack top (the
 * pairing with __do_cleanup_push is the caller's responsibility). */
156 void __do_cleanup_pop(struct __ptcb *cb)
158 __pthread_self()->cancelbuf = cb->__next;
/* Fragment of struct start_args (declaration header and other members are
 * elided): the argument block placed on the new thread's stack by
 * __pthread_create and consumed by start()/start_c11(). */
162 void *(*start_func)(void *);
/* Scheduling attributes to apply in the child; NULL-ness presumably gates
 * the setscheduler call in start() -- the condition is on an elided line. */
164 pthread_attr_t *attr;
/* Signal mask to restore in the child before running start_func. */
166 unsigned long sig_mask[_NSIG/8/sizeof(long)];
/* Entry point passed to __clone for threads created via pthread_create.
 * Applies explicit scheduling, reports the result to the parent, restores
 * the signal mask, then runs the user start function and exits with its
 * return value. NOTE(review): the conditionals guarding the setscheduler
 * and detach paths are on elided lines; comments cover visible code only. */
169 static int start(void *p)
171 struct start_args *args = p;
173 pthread_t self = __pthread_self();
/* Apply the requested scheduling policy/priority to this thread's tid;
 * ret is a positive errno value on failure. */
174 int ret = -__syscall(SYS_sched_setscheduler, self->tid,
175 args->attr->_a_policy, &args->attr->_a_prio);
/* Publish the result to the parent via the perr futex word; -2 means the
 * parent is already sleeping on it and needs a wake. */
176 if (a_swap(args->perr, ret)==-2)
177 __wake(args->perr, 1, 1);
/* Presumably only reached on setscheduler failure (guard elided): detach
 * so the failed thread's resources are reclaimed without a join. */
179 self->detach_state = DT_DETACHED;
/* Restore the signal mask prepared by the parent (SIGCANCEL unblocked). */
183 __syscall(SYS_rt_sigprocmask, SIG_SETMASK, &args->sig_mask, 0, _NSIG/8);
184 __pthread_exit(args->start_func(args->start_arg));
/* Entry point for C11 thrd_create threads: the stored start_func actually
 * has int(*)(void*) type, so cast it back and widen the int result through
 * uintptr_t into the void* exit value. (Brace lines elided.) */
188 static int start_c11(void *p)
190 struct start_args *args = p;
191 int (*start)(void*) = (int(*)(void*)) args->start_func;
192 __pthread_exit((void *)(uintptr_t)start(args->start_arg));
/* Round x up to a whole number of pages (PAGE_SIZE must be a power of 2). */
196 #define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)
198 /* pthread_key_create.c overrides this */
/* Defaults when no TSD implementation is linked: zero TSD size and a
 * one-slot dummy TSD block for the main thread. */
199 static volatile size_t dummy = 0;
200 weak_alias(dummy, __pthread_tsd_size);
201 static void *dummy_tsd[1] = { 0 };
202 weak_alias(dummy_tsd, __pthread_tsd_main);
/* Futex word: when nonzero, __pthread_create waits before creating new
 * threads (used by code elsewhere to temporarily gate thread creation). */
204 volatile int __block_new_threads = 0;
/* Weak stdio stream handles: NULL unless stdio is linked, letting the
 * first-thread lock initialization below skip unused streams. */
206 static FILE *volatile dummy_file = 0;
207 weak_alias(dummy_file, __stdin_used);
208 weak_alias(dummy_file, __stdout_used);
209 weak_alias(dummy_file, __stderr_used);
/* Enable locking on a stdio stream: a negative lock value means "locking
 * not needed" (single-threaded); reset it to 0 (unlocked, lockable) now
 * that the process is becoming multi-threaded. NULL streams are skipped. */
211 static void init_file_lock(FILE *f)
213 if (f && f->lock<0) f->lock = 0;
/* Create a new thread running entry(arg). Sets up the stack/TLS/TSD
 * mapping (or carves them from an application-provided stack), places a
 * start_args block on the new stack, clones the child, and links it into
 * the global thread list before releasing the list lock. Returns 0 on
 * success or an errno value (ENOSYS, EAGAIN, ...). NOTE(review): many
 * lines (locking, labels, some branches) are elided from this excerpt;
 * comments annotate only the visible code. */
216 int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
/* C11 callers pass the sentinel __ATTRP_C11_THREAD instead of real attrs. */
218 int ret, c11 = (attrp == __ATTRP_C11_THREAD);
220 struct pthread *self, *new;
221 unsigned char *map = 0, *stack = 0, *tsd = 0, *stack_limit;
/* Child shares VM, fs info, fds, signal handlers, and thread group with
 * the parent; CHILD_CLEARTID gives the kernel-side exit notification used
 * by join/the thread-list lock. */
222 unsigned flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND
223 | CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS
224 | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID | CLONE_DETACHED;
225 pthread_attr_t attr = { 0 };
/* Futex word used to synchronize with the child's setscheduler report:
 * -1 = no result yet, -2 = parent waiting. */
227 volatile int err = -1;
229 if (!libc.can_do_threads) return ENOSYS;
230 self = __pthread_self();
/* First thread creation: switch stdio to locked mode and unblock the
 * implementation's pthread signals before the process goes threaded. */
231 if (!libc.threaded) {
232 for (FILE *f=*__ofl_lock(); f; f=f->next)
235 init_file_lock(__stdin_used);
236 init_file_lock(__stdout_used);
237 init_file_lock(__stderr_used);
238 __syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, _NSIG/8);
239 self->tsd = (void **)__pthread_tsd_main;
242 if (attrp && !c11) attr = *attrp;
/* Fill in defaults (presumably when the caller supplied no stack size;
 * the guarding condition is on an elided line). */
246 attr._a_stacksize = __default_stacksize;
247 attr._a_guardsize = __default_guardsize;
/* Honor a temporary global gate on new-thread creation. */
250 if (__block_new_threads) __wait(&__block_new_threads, 0, 1, 1);
/* Application-provided stack: align it and, if the TLS+TSD footprint is
 * small relative to the stack, carve TLS/TSD out of its top. */
252 if (attr._a_stackaddr) {
253 size_t need = libc.tls_size + __pthread_tsd_size;
254 size = attr._a_stacksize;
255 stack = (void *)(attr._a_stackaddr & -16);
256 stack_limit = (void *)(attr._a_stackaddr - size);
257 /* Use application-provided stack for TLS only when
258 * it does not take more than ~12% or 2k of the
259 * application's stack space. */
260 if (need < size/8 && need < 2048) {
261 tsd = stack - __pthread_tsd_size;
262 stack = tsd - libc.tls_size;
263 memset(stack, 0, need);
/* Library-allocated stack: whole-page guard below a page-rounded region
 * holding stack + TLS + TSD. */
269 guard = ROUND(attr._a_guardsize);
270 size = guard + ROUND(attr._a_stacksize
271 + libc.tls_size + __pthread_tsd_size);
/* With a guard: map everything PROT_NONE, then open up the part above the
 * guard; tolerate ENOSYS from kernels lacking mprotect. */
276 map = __mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
277 if (map == MAP_FAILED) goto fail;
278 if (__mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)
279 && errno != ENOSYS) {
/* No guard requested: map the whole region read/write directly. */
284 map = __mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
285 if (map == MAP_FAILED) goto fail;
/* Layout (ascending): [guard][stack ...][TLS][TSD] at the top of the map. */
287 tsd = map + size - __pthread_tsd_size;
289 stack = tsd - libc.tls_size;
290 stack_limit = map + guard;
/* Initialize the new thread's TLS image; __copy_tls returns the pthread
 * descriptor embedded in/next to the TLS block. */
294 new = __copy_tls(tsd - libc.tls_size);
296 new->map_size = size;
298 new->stack_size = stack - stack_limit;
299 new->guard_size = guard;
301 new->tsd = (void *)tsd;
302 new->locale = &libc.global_locale;
303 if (attr._a_detach) {
304 new->detach_state = DT_DETACHED;
306 new->detach_state = DT_JOINABLE;
/* Empty robust list is self-pointing, per the kernel robust-list ABI. */
308 new->robust_list.head = &new->robust_list.head;
/* Propagate the stack-protector canary to the child descriptor. */
309 new->CANARY = self->CANARY;
311 /* Setup argument structure for the new thread on its stack.
312 * It's safe to access from the caller only until the thread
313 * list is unlocked. */
314 stack -= (uintptr_t)stack % sizeof(uintptr_t);
315 stack -= sizeof(struct start_args);
316 struct start_args *args = (void *)stack;
317 args->start_func = entry;
318 args->start_arg = arg;
327 /* Application signals (but not the synccall signal) must be
328 * blocked before the thread list lock can be taken, to ensure
329 * that the lock is AS-safe. */
330 __block_app_sigs(&set);
332 /* Ensure SIGCANCEL is unblocked in new thread. This requires
333 * working with a copy of the set so we can restore the
334 * original mask in the calling thread. */
335 memcpy(&args->sig_mask, &set, sizeof args->sig_mask);
336 args->sig_mask[(SIGCANCEL-1)/8/sizeof(long)] &=
337 ~(1UL<<((SIGCANCEL-1)%(8*sizeof(long))));
340 libc.threads_minus_1++;
/* Clone the child; its stack pointer starts just below args, the kernel
 * writes its tid via PARENT_SETTID, and clears/wakes the thread-list lock
 * word on exit via CHILD_CLEARTID. */
341 ret = __clone((c11 ? start_c11 : start), stack, flags, args, &new->tid, TP_ADJ(new), &__thread_list_lock);
343 /* If clone succeeded, new thread must be linked on the thread
344 * list before unlocking it, even if scheduling may still fail. */
346 new->next = self->next;
348 new->next->prev = new;
349 new->prev->next = new;
/* Restore the caller's signal mask saved by __block_app_sigs above. */
352 __restore_sigs(&set);
/* Failure path (label elided): undo the thread count and release the
 * mapping if one was created. */
356 libc.threads_minus_1--;
357 if (map) __munmap(map, size);
/* Wait for the child's setscheduler result if it has not been posted yet
 * (-1 -> -2 tells the child to wake us). */
362 if (a_cas(&err, -1, -2)==-1)
363 __wait(&err, 0, -2, 1);
/* Expose the internal implementations under the public POSIX names as weak
 * aliases, allowing interposition while keeping internal callers bound to
 * the __-prefixed symbols. */
375 weak_alias(__pthread_exit, pthread_exit);
376 weak_alias(__pthread_create, pthread_create);