#include "pthread_impl.h"
#include "stdio_impl.h"
#include "libc.h"
#include <sys/mman.h>
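
/* No-op placeholders: the strong definitions (e.g. __pthread_tsd_run_dtors
 * in pthread_key_create.c) take over when the corresponding code is linked. */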
static void dummy_0()
{
}
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);

_Noreturn void pthread_exit(void *result)
{
	pthread_t self = pthread_self();
	sigset_t set;

	self->result = result;
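
	/* Pop and run any cancellation cleanup handlers installed via
	 * pthread_cleanup_push that are still pending. */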
	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}

	__pthread_tsd_run_dtors();
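
	/* Holding the exit lock excludes a concurrent pthread_detach: a
	 * thread that has begun exiting can no longer be detached. */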
	__lock(self->exitlock);

	/* Mark this thread dead before decrementing count */
	__lock(self->killlock);
	self->dead = 1;

	/* Block all signals before decrementing the live thread count.
	 * This is important to ensure that dynamically allocated TLS
	 * is not under-allocated/over-committed, and possibly for other
	 * reasons as well. */
	__block_all_sigs(&set);

	/* Wait to unlock the kill lock, which governs functions like
	 * pthread_kill which target a thread id, until signals have
	 * been blocked. This precludes observation of the thread id
	 * as a live thread (with application code running in it) after
	 * the thread was reported dead by ESRCH being returned. */
	__unlock(self->killlock);

	/* It's impossible to determine whether this is "the last thread"
	 * until performing the atomic decrement, since multiple threads
	 * could exit at the same time. For the last thread, revert the
	 * decrement and unblock signals to give the atexit handlers and
	 * stdio cleanup code a consistent state. */
	if (a_fetch_add(&libc.threads_minus_1, -1)==0) {
		libc.threads_minus_1 = 0;
		__restore_sigs(&set);
		exit(0);
	}

	if (self->detached && self->map_base) {
		/* Detached threads must avoid the kernel clear_child_tid
		 * feature, since the virtual address will have been
		 * unmapped and possibly already reused by a new mapping
		 * at the time the kernel would perform the write. In
		 * the case of threads that started out detached, the
		 * initial clone flags are correct, but if the thread was
		 * detached later (== 2), we need to clear it here. */
		if (self->detached == 2) __syscall(SYS_set_tid_address, 0);

		/* The following call unmaps the thread's stack mapping
		 * and then exits without touching the stack. */
		__unmapself(self->map_base, self->map_size);
	}
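
	/* Only joinable threads reach this point; exit the kernel task
	 * directly. The loop lets the compiler see this never returns. */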
	for (;;) __syscall(SYS_exit, 0);
}
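
/* Push/pop of cancellation cleanup contexts; these back the
 * pthread_cleanup_push and pthread_cleanup_pop macros. */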
void __do_cleanup_push(struct __ptcb *cb)
{
	struct pthread *self = pthread_self();
	cb->__next = self->cancelbuf;
	self->cancelbuf = cb;
}

void __do_cleanup_pop(struct __ptcb *cb)
{
	__pthread_self()->cancelbuf = cb->__next;
}
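
/* Entry point the new thread runs on its own stack after __clone.
 * When the creator requested explicit scheduling, startlock holds the
 * child until scheduling has been applied; a stored value of 2 signals
 * failure, making the child mark itself detached and exit. */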
static int start(void *p)
{
	pthread_t self = p;
	if (self->startlock[0]) {
		__wait(self->startlock, 0, 1, 1);
		if (self->startlock[0]) {
			self->detached = 2;
			pthread_exit(0);
		}
		__restore_sigs(self->sigmask);
	}
	if (self->unblock_cancel)
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK,
			SIGPT_SET, 0, _NSIG/8);
	pthread_exit(self->start(self->start_arg));
	return 0;
}
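
/* Round x up to a whole page; relies on PAGE_SIZE being a power of two. */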
#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static const size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);

static FILE *const dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);

static void init_file_lock(FILE *f)
{
	if (f && f->lock<0) f->lock = 0;
}

void *__copy_tls(unsigned char *);

int pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
{
	int ret;
	size_t size, guard;
	struct pthread *self = pthread_self(), *new;
	unsigned char *map = 0, *stack = 0, *tsd = 0, *stack_limit;
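	/* Clone flags, written out: CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_SIGHAND
	 * |CLONE_PARENT|CLONE_THREAD|CLONE_SYSVSEM|CLONE_SETTLS
	 * |CLONE_PARENT_SETTID|CLONE_CHILD_CLEARTID|CLONE_DETACHED */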
	unsigned flags = 0x7d8f00;
	int do_sched = 0;
	pthread_attr_t attr = {0};

	if (!self) return ENOSYS;
	if (!libc.threaded) {
		for (FILE *f=libc.ofl_head; f; f=f->next)
			init_file_lock(f);
		init_file_lock(__stdin_used);
		init_file_lock(__stdout_used);
		init_file_lock(__stderr_used);
		libc.threaded = 1;
	}
	if (attrp) attr = *attrp;
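
	/* __acquire_ptc/__release_ptc are no-ops in static linking; the
	 * strong versions, where present, synchronize with the dynamic
	 * linker so libc.tls_size stays stable during thread setup. */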
	__acquire_ptc();

	if (attr._a_stackaddr) {
		size_t need = libc.tls_size + __pthread_tsd_size;
		size = attr._a_stacksize + DEFAULT_STACK_SIZE;
		stack = (void *)(attr._a_stackaddr & -16);
		stack_limit = (void *)(attr._a_stackaddr - size);
		/* Use application-provided stack for TLS only when
		 * it does not take more than ~12% or 2k of the
		 * application's stack space. */
		if (need < size/8 && need < 2048) {
			tsd = stack - __pthread_tsd_size;
			stack = tsd - libc.tls_size;
		} else {
			size = ROUND(need);
			guard = 0;
		}
	} else {
		guard = ROUND(DEFAULT_GUARD_SIZE + attr._a_guardsize);
		size = guard + ROUND(DEFAULT_STACK_SIZE + attr._a_stacksize
			+ libc.tls_size + __pthread_tsd_size);
	}
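
	/* Fresh mapping layout, low to high: [guard][stack grows down][TLS]
	 * [TSD], with tsd at the very top of the mapping. */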
	if (!tsd) {
		if (guard) {
			map = mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
			if (mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)) {
				munmap(map, size);
				goto fail;
			}
		} else {
			map = mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
		}
		tsd = map + size - __pthread_tsd_size;
		if (!stack) {
			stack = tsd - libc.tls_size;
			stack_limit = map + guard;
		}
	}
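
	/* __copy_tls writes the initial TLS image into the block and returns
	 * the location of the new thread descriptor within it. */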
	new = __copy_tls(tsd - libc.tls_size);
	new->map_base = map;
	new->map_size = size;
	new->stack = stack;
	new->stack_size = stack - stack_limit;
	new->pid = self->pid;
	new->errno_ptr = &new->errno_val;
	new->start = entry;
	new->start_arg = arg;
	new->self = new;
	new->tsd = (void *)tsd;
	if (attr._a_detach) {
		new->detached = 1;
		flags -= 0x200000;
	}
	if (attr._a_sched) {
		do_sched = new->startlock[0] = 1;
		__block_app_sigs(new->sigmask);
	}
	new->unblock_cancel = self->cancel;
	new->canary = self->canary;
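
	/* The thread count is bumped before clone so a failure can simply
	 * undo it. __clone starts the child in start() on the new stack;
	 * &new->tid is passed twice, for CLONE_PARENT_SETTID (parent-side
	 * write) and CLONE_CHILD_CLEARTID (cleared and futex-woken on exit). */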
	a_inc(&libc.threads_minus_1);
	ret = __clone(start, stack, flags, new, &new->tid, TP_ADJ(new), &new->tid);

	__release_ptc();

	if (do_sched) {
		__restore_sigs(new->sigmask);
	}

	if (ret < 0) {
		a_dec(&libc.threads_minus_1);
		if (map) munmap(map, size);
		return EAGAIN;
	}
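
	/* When explicit scheduling was requested, apply it from the creating
	 * thread and then release the child; storing 2 tells it to abort. */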
	if (do_sched) {
		ret = __syscall(SYS_sched_setscheduler, new->tid,
			attr._a_policy, &attr._a_prio);
		a_store(new->startlock, ret<0 ? 2 : 0);
		__wake(new->startlock, 1, 1);
		if (ret < 0) return -ret;