1 #include "pthread_impl.h"
3 static void dummy_1(pthread_t self)
6 weak_alias(dummy_1, __pthread_tsd_run_dtors);
/* If pthread_impl.h mapped __pthread_unwind_next to a versioned macro,
 * define the external symbol under that versioned name instead.
 * (Fix: the #ifdef was left unbalanced — restore the #endif.) */
#ifdef __pthread_unwind_next
#undef __pthread_unwind_next
#define __pthread_unwind_next __pthread_unwind_next_3
#endif
13 void __pthread_unwind_next(struct __ptcb *cb)
17 if (cb->__next) longjmp((void *)cb->__next->__jb, 1);
19 self = pthread_self();
21 LOCK(&self->exitlock);
23 __pthread_tsd_run_dtors(self);
25 /* Mark this thread dead before decrementing count */
28 if (!a_fetch_add(&libc.threads_minus_1, -1))
31 if (self->detached && self->map_base) {
32 syscall(__NR_rt_sigprocmask, SIG_BLOCK, (long)(uint64_t[1]){-1},0,8);
33 __unmapself(self->map_base, self->map_size);
39 static void docancel(struct pthread *self)
41 struct __ptcb cb = { .__next = self->cancelbuf };
42 self->canceldisable = 1;
43 self->cancelasync = 0;
44 __pthread_unwind_next(&cb);
47 static void cancel_handler(int sig, siginfo_t *si, void *ctx)
49 struct pthread *self = __pthread_self();
51 if (si->si_code == SI_TIMER && libc.sigtimer)
52 libc.sigtimer(sig, si, ctx);
55 if (self->canceldisable) return;
56 if (self->cancelasync || (self->cancelpoint==1 && PC_AT_SYS(ctx)))
60 static void cancelpt(int x)
62 struct pthread *self = __pthread_self();
63 if (self->canceldisable) return;
64 if ((self->cancelpoint+=x)==1 && x>=0 && self->cancel)
/* "rsyscall" is a mechanism by which a thread can synchronously force all
 * other threads to perform an arbitrary syscall. It is necessary to work
 * around the non-conformant implementation of setuid() et al on Linux,
 * which affect only the calling thread and not the whole process. This
 * implementation performs some tricks with signal delivery to work around
 * the fact that it does not keep any list of threads in userspace. */
static struct {
	/* lock: serializes initiators; hold: parks responders until release;
	 * blocks: count of threads mid-creation that must be waited out;
	 * cnt: number of responders currently captured. */
	volatile int lock, hold, blocks, cnt;
	unsigned long arg[6];	/* syscall arguments broadcast to all threads */
	int nr;			/* syscall number to broadcast */
	int err;		/* first errno seen by any responder, 0 if none */
} rs;
82 static void rsyscall_handler(int sig, siginfo_t *si, void *ctx)
84 struct pthread *self = __pthread_self();
86 if (si->si_code > 0 || si->si_pid != self->pid ||
87 rs.cnt == libc.threads_minus_1) return;
89 /* Threads which have already decremented themselves from the
90 * thread count must not increment rs.cnt or otherwise act. */
92 __wait(&rs.hold, 0, 1, 1);
96 if (syscall(rs.nr, rs.arg[0], rs.arg[1], rs.arg[2],
97 rs.arg[3], rs.arg[4], rs.arg[5]) < 0 && !rs.err) rs.err=errno;
100 __wake(&rs.cnt, 1, 1);
102 __wait(&rs.hold, 0, 1, 1);
104 if (!rs.cnt) __wake(&rs.cnt, 1, 1);
107 static int rsyscall(int nr, long a, long b, long c, long d, long e, long f)
110 sigset_t set = { 0 };
111 struct pthread *self = __pthread_self();
112 sigaddset(&set, SIGSYSCALL);
115 while ((i=rs.blocks))
116 __wait(&rs.blocks, 0, i, 1);
118 __libc_sigprocmask(SIG_BLOCK, &set, 0);
121 rs.arg[0] = a; rs.arg[1] = b;
122 rs.arg[2] = c; rs.arg[3] = d;
123 rs.arg[4] = d; rs.arg[5] = f;
128 /* Dispatch signals until all threads respond */
129 for (i=libc.threads_minus_1; i; i--)
130 sigqueue(self->pid, SIGSYSCALL, (union sigval){0});
131 while ((i=rs.cnt) < libc.threads_minus_1) {
132 sigqueue(self->pid, SIGSYSCALL, (union sigval){0});
133 __wait(&rs.cnt, 0, i, 1);
136 /* Handle any lingering signals with no-op */
137 __libc_sigprocmask(SIG_UNBLOCK, &set, 0);
139 /* Resume other threads' signal handlers and wait for them */
141 __wake(&rs.hold, -1, 0);
142 while((i=rs.cnt)) __wait(&rs.cnt, 0, i, 1);
144 if (rs.err) errno = rs.err, ret = -1;
145 else ret = syscall(nr, a, b, c, d, e, f);
151 static void init_threads()
153 struct sigaction sa = { .sa_flags = SA_SIGINFO | SA_RESTART };
155 libc.lockfile = __lockfile;
156 libc.cancelpt = cancelpt;
157 libc.rsyscall = rsyscall;
158 sa.sa_sigaction = cancel_handler;
159 __libc_sigaction(SIGCANCEL, &sa, 0);
160 sigaddset(&sa.sa_mask, SIGSYSCALL);
161 sigaddset(&sa.sa_mask, SIGCANCEL);
162 sa.sa_sigaction = rsyscall_handler;
163 __libc_sigaction(SIGSYSCALL, &sa, 0);
164 sigprocmask(SIG_UNBLOCK, &sa.sa_mask, 0);
167 static int start(void *p)
169 struct pthread *self = p;
170 if (self->unblock_cancel) {
173 sigaddset(&set, SIGCANCEL);
174 __libc_sigprocmask(SIG_UNBLOCK, &set, 0);
176 pthread_exit(self->start(self->start_arg));
/* Arch-specific clone wrapper: starts fn on the given stack with the
 * given thread descriptor; returns <0 on failure. */
int __uniclone(void *, int (*)(), void *);

/* Round up to a whole number of pages. */
#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static const size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);
188 int pthread_create(pthread_t *res, const pthread_attr_t *attr, void *(*entry)(void *), void *arg)
193 struct pthread *self = pthread_self(), *new;
194 unsigned char *map, *stack, *tsd;
195 static const pthread_attr_t default_attr;
197 if (!self) return errno = ENOSYS;
198 if (!init && ++init) init_threads();
200 if (!attr) attr = &default_attr;
201 guard = ROUND(attr->_a_guardsize + DEFAULT_GUARD_SIZE);
202 size = guard + ROUND(attr->_a_stacksize + DEFAULT_STACK_SIZE);
203 size += __pthread_tsd_size;
204 map = mmap(0, size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANON, -1, 0);
205 if (!map) return EAGAIN;
206 if (guard) mprotect(map, guard, PROT_NONE);
208 tsd = map + size - __pthread_tsd_size;
209 new = (void *)(tsd - sizeof *new - PAGE_SIZE%sizeof *new);
211 new->map_size = size;
212 new->pid = self->pid;
213 new->errno_ptr = &new->errno_val;
215 new->start_arg = arg;
217 new->tsd = (void *)tsd;
218 new->detached = attr->_a_detach;
220 new->unblock_cancel = self->cancel;
221 new->result = PTHREAD_CANCELED;
222 memcpy(new->tlsdesc, self->tlsdesc, sizeof new->tlsdesc);
223 new->tlsdesc[1] = (uintptr_t)new;
224 stack = (void *)((uintptr_t)new-1 & ~(uintptr_t)15);
226 /* We must synchronize new thread creation with rsyscall
227 * delivery. This looks to be the least expensive way: */
229 while (rs.lock) __wait(&rs.lock, 0, 1, 1);
231 a_inc(&libc.threads_minus_1);
232 ret = __uniclone(stack, start, new);
235 if (rs.lock) __wake(&rs.blocks, 1, 1);
238 a_dec(&libc.threads_minus_1);
246 void pthread_exit(void *result)
248 struct pthread *self = pthread_self();
249 self->result = result;