2 #include "pthread_impl.h"
3 #include "stdio_impl.h"
void *__mmap(void *, size_t, int, int, int, off_t);
int __munmap(void *, size_t);
int __mprotect(void *, size_t, int);

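/* No-op stubs: each weak alias below is overridden by a strong
 * definition elsewhere in libc when the corresponding subsystem
 * (TSD destructors, stdio, the dynamic linker, ...) is linked in. */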
static void dummy_0()
{
}
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);
weak_alias(dummy_0, __do_orphaned_stdio_locks);
weak_alias(dummy_0, __dl_thread_cleanup);

_Noreturn void __pthread_exit(void *result)
{
	pthread_t self = __pthread_self();
	sigset_t set;

	self->canceldisable = 1;
	self->cancelasync = 0;
	self->result = result;

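	/* Run any cancellation cleanup handlers still queued, newest
	 * first. Each handler is unlinked before it is called, so a
	 * handler that itself exits cannot run twice. */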
	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}

	__pthread_tsd_run_dtors();

	__lock(self->exitlock);

	/* Mark this thread dead before decrementing count */
	__lock(self->killlock);
	self->dead = 1;

	/* Block all signals before decrementing the live thread count.
	 * This is important to ensure that dynamically allocated TLS
	 * is not under-allocated/over-committed, and possibly for other
	 * reasons as well. */
	__block_all_sigs(&set);

	/* Wait to unlock the kill lock, which governs functions like
	 * pthread_kill which target a thread id, until signals have
	 * been blocked. This precludes observation of the thread id
	 * as a live thread (with application code running in it) after
	 * the thread was reported dead by ESRCH being returned. */
	__unlock(self->killlock);

	/* It's impossible to determine whether this is "the last thread"
	 * until performing the atomic decrement, since multiple threads
	 * could exit at the same time. For the last thread, revert the
	 * decrement and unblock signals to give the atexit handlers and
	 * stdio cleanup code a consistent state. */
	if (a_fetch_add(&libc.threads_minus_1, -1)==0) {
		libc.threads_minus_1 = 0;
		__restore_sigs(&set);
		exit(0);
	}

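	/* Drop the locale reference counts this thread holds if it
	 * switched away from the global locale via uselocale. */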
	if (self->locale != &libc.global_locale) {
		a_dec(&libc.uselocale_cnt);
		if (self->locale->ctype_utf8)
			a_dec(&libc.bytelocale_cnt_minus_1);
	}

	/* Process robust list in userspace to handle non-pshared mutexes
	 * and the detached thread case where the robust list head will
	 * be invalid when the kernel would process it. */
	__vm_lock();
	volatile void *volatile *rp;
	while ((rp=self->robust_list.head) && rp != &self->robust_list.head) {
		pthread_mutex_t *m = (void *)((char *)rp
			- offsetof(pthread_mutex_t, _m_next));
		int waiters = m->_m_waiters;
		int priv = (m->_m_type & 128) ^ 128;
		self->robust_list.pending = rp;
		self->robust_list.head = *rp;
		/* Mark the lock word with the owner-died bit (0x40000000)
		 * so the next acquirer sees EOWNERDEAD. */
		int cont = a_swap(&m->_m_lock, self->tid|0x40000000);
		self->robust_list.pending = 0;
		if (cont < 0 || waiters)
			__wake(&m->_m_lock, 1, priv);
	}
	__vm_unlock();

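	/* No application code will run in this thread again; release any
	 * stdio locks it still holds and any dynamic-linker state that
	 * belongs to it. */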
	__do_orphaned_stdio_locks();
	__dl_thread_cleanup();

	if (self->detached && self->map_base) {
		/* Detached threads must avoid the kernel clear_child_tid
		 * feature, since the virtual address will have been
		 * unmapped and possibly already reused by a new mapping
		 * at the time the kernel would perform the write. In
		 * the case of threads that started out detached, the
		 * initial clone flags are correct, but if the thread was
		 * detached later (== 2), we need to clear it here. */
		if (self->detached == 2) __syscall(SYS_set_tid_address, 0);

		/* Robust list will no longer be valid, and was already
		 * processed above, so unregister it with the kernel. */
		if (self->robust_list.off)
			__syscall(SYS_set_robust_list, 0, 3*sizeof(long));

		/* Since __unmapself bypasses the normal munmap code path,
		 * explicitly wait for vmlock holders first. */
		__vm_wait();

		/* The following call unmaps the thread's stack mapping
		 * and then exits without touching the stack. */
		__unmapself(self->map_base, self->map_size);
	}

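	/* For joinable threads the stack stays mapped; exit just this
	 * thread with SYS_exit (not SYS_exit_group, which would kill the
	 * whole process). The loop guards against the syscall returning. */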
	for (;;) __syscall(SYS_exit, 0);
}

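/* These two functions back the pthread_cleanup_push/pop macros: they
 * maintain the singly linked list of cleanup contexts that
 * __pthread_exit above consumes. */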
void __do_cleanup_push(struct __ptcb *cb)
{
	struct pthread *self = __pthread_self();
	cb->__next = self->cancelbuf;
	self->cancelbuf = cb;
}

void __do_cleanup_pop(struct __ptcb *cb)
{
	__pthread_self()->cancelbuf = cb->__next;
}

static int start(void *p)
{
	pthread_t self = p;
	/* Startlock: the creator holds the new thread here until
	 * scheduling setup is done; a nonzero value after the wait
	 * means setup failed and the thread must exit unstarted. */
	if (self->startlock[0]) {
		__wait(self->startlock, 0, 1, 1);
		if (self->startlock[0]) {
			self->detached = 2;
			pthread_exit(0);
		}
		__restore_sigs(self->sigmask);
	}
	if (self->unblock_cancel)
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK,
			SIGPT_SET, 0, _NSIG/8);
	__pthread_exit(self->start(self->start_arg));
	return 0;
}

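/* C11 threads (thrd_create) use a start function returning int; this
 * shim adapts it to the void * convention expected by __pthread_exit. */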
static int start_c11(void *p)
{
	pthread_t self = p;
	int (*start)(void*) = (int(*)(void*)) self->start;
	__pthread_exit((void *)(uintptr_t)start(self->start_arg));
	return 0;
}

#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static volatile size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);
static void *dummy_tsd[1] = { 0 };
weak_alias(dummy_tsd, __pthread_tsd_main);

volatile int __block_new_threads = 0;

static FILE *volatile dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);

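/* While the process is single-threaded, stdio locking is disabled and
 * each FILE's lock field holds a negative value; reset it to 0 so the
 * lock is honored once a second thread can exist. */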
static void init_file_lock(FILE *f)
{
	if (f && f->lock<0) f->lock = 0;
}

void *__copy_tls(unsigned char *);

int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
{
	int ret, c11 = (attrp == __ATTRP_C11_THREAD);
	size_t size, guard;
	struct pthread *self, *new;
	unsigned char *map = 0, *stack = 0, *tsd = 0, *stack_limit;
	unsigned flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND
		| CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS
		| CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID | CLONE_DETACHED;
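	/* The clone flags make the new task a proper thread: it shares
	 * memory, fds, fs state, and signal handlers with its creator;
	 * CLONE_SETTLS installs the new thread pointer, and the
	 * SETTID/CLEARTID pair keeps new->tid usable for pthread_kill
	 * and for futex-based joining. */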
	int do_sched = 0;
	pthread_attr_t attr = {0};

	if (!libc.can_do_threads) return ENOSYS;
	self = __pthread_self();
	if (!libc.threaded) {
		for (FILE *f=libc.ofl_head; f; f=f->next)
			init_file_lock(f);
		init_file_lock(__stdin_used);
		init_file_lock(__stdout_used);
		init_file_lock(__stderr_used);
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, _NSIG/8);
		self->tsd = (void **)__pthread_tsd_main;
		libc.threaded = 1;
	}
	if (attrp && !c11) attr = *attrp;

	__acquire_ptc();
	if (__block_new_threads) __wait(&__block_new_threads, 0, 1, 1);

	if (attr._a_stackaddr) {
		size_t need = libc.tls_size + __pthread_tsd_size;
		size = attr._a_stacksize + DEFAULT_STACK_SIZE;
		stack = (void *)(attr._a_stackaddr & -16);
		stack_limit = (void *)(attr._a_stackaddr - size);
		/* Use application-provided stack for TLS only when
		 * it does not take more than ~12% or 2k of the
		 * application's stack space. */
		if (need < size/8 && need < 2048) {
			tsd = stack - __pthread_tsd_size;
			stack = tsd - libc.tls_size;
			memset(stack, 0, need);
		} else {
			size = ROUND(need);
			guard = 0;
		}
	} else {
		guard = ROUND(DEFAULT_GUARD_SIZE + attr._a_guardsize);
		size = guard + ROUND(DEFAULT_STACK_SIZE + attr._a_stacksize
			+ libc.tls_size + __pthread_tsd_size);
	}

	if (!tsd) {
		if (guard) {
			map = __mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
			if (__mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)) {
				__munmap(map, size);
				goto fail;
			}
		} else {
			map = __mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
		}
		tsd = map + size - __pthread_tsd_size;
		if (!stack) {
			stack = tsd - libc.tls_size;
			stack_limit = map + guard;
		}
	}

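	/* Layout of a libc-allocated mapping, low to high addresses:
	 * [ guard | stack (grows down) | TLS | TSD ]. */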
	new = __copy_tls(tsd - libc.tls_size);
	new->map_base = map;
	new->map_size = size;
	new->stack = stack;
	new->stack_size = stack - stack_limit;
	new->start = entry;
	new->start_arg = arg;
	new->self = new;
	new->tsd = (void *)tsd;
	new->locale = &libc.global_locale;
	if (attr._a_detach) {
		new->detached = 1;
		flags -= CLONE_CHILD_CLEARTID;
	}
	if (attr._a_sched) {
		do_sched = new->startlock[0] = 1;
		__block_app_sigs(new->sigmask);
	}
	new->robust_list.head = &new->robust_list.head;
	new->unblock_cancel = self->cancel;
	new->CANARY = self->CANARY;

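	/* Count the new thread as live before creating it, so the child
	 * starts with the process already in multi-threaded state; the
	 * count is reverted below if clone fails. */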
	a_inc(&libc.threads_minus_1);
	ret = __clone((c11 ? start_c11 : start), stack, flags, new, &new->tid, TP_ADJ(new), &new->tid);

	__release_ptc();

	if (do_sched) {
		__restore_sigs(new->sigmask);
	}

	if (ret < 0) {
		a_dec(&libc.threads_minus_1);
		if (map) __munmap(map, size);
		return EAGAIN;
	}

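	/* If scheduling attributes were requested, apply them now, then
	 * release the waiting child: startlock value 0 lets it proceed,
	 * 2 tells it setup failed and it must exit (see start()). */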
	if (do_sched) {
		ret = __syscall(SYS_sched_setscheduler, new->tid,
			attr._a_policy, &attr._a_prio);
		a_store(new->startlock, ret<0 ? 2 : 0);
		__wake(new->startlock, 1, 1);
		if (ret < 0) return -ret;
	}

	*res = new;
	return 0;
fail:
	__release_ptc();
	return EAGAIN;
}

weak_alias(__pthread_exit, pthread_exit);
weak_alias(__pthread_create, pthread_create);