X-Git-Url: http://nsz.repo.hu/git/?p=musl;a=blobdiff_plain;f=src%2Fthread%2Fpthread_create.c;h=856015ff01762dcc65ae12b37b06462e19987009;hp=c73c52114074c66d3c04bf44b3cd18a1d2d78341;hb=acb04806628990ad2430e04261dd20f23babde5e;hpb=19eb13b9a4cf2f787f60b6e2a6d26a6cd7d3ffd2

diff --git a/src/thread/pthread_create.c b/src/thread/pthread_create.c
index c73c5211..856015ff 100644
--- a/src/thread/pthread_create.c
+++ b/src/thread/pthread_create.c
@@ -1,155 +1,53 @@
 #include "pthread_impl.h"
 
-void __pthread_unwind_next(struct __ptcb *cb)
-{
-	int i, j, not_finished;
-	pthread_t self;
-
-	if (cb->__next) longjmp((void *)cb->__next->__jb, 1);
-
-	self = pthread_self();
-	if (self->cancel) self->result = PTHREAD_CANCELLED;
-
-	LOCK(&self->exitlock);
-
-	not_finished = self->tsd_used;
-	for (j=0; not_finished && j<PTHREAD_DESTRUCTOR_ITERATIONS; j++) {
-		not_finished = 0;
-		for (i=0; i<PTHREAD_KEYS_MAX; i++) {
-			if (self->tsd[i] && libc.tsd_keys[i]) {
-				void *tmp = self->tsd[i];
-				self->tsd[i] = 0;
-				libc.tsd_keys[i](tmp);
-				not_finished = 1;
-			}
-		}
-	}
-
-	syscall4(__NR_sigprocmask, SIG_BLOCK, (long)(uint64_t[1]){-1}, 0, 8);
-
-	if (!a_fetch_add(&libc.threads_minus_1, -1))
-		exit(0);
-
-	if (self->detached && self->map_base)
-		__unmapself(self->map_base, self->map_size);
-
-	__syscall_exit(0);
-}
-
-static void docancel(struct pthread *self)
+static void dummy_0()
 {
-	struct __ptcb cb = { .__next = self->cancelbuf };
-	__pthread_unwind_next(&cb);
 }
+weak_alias(dummy_0, __synccall_lock);
+weak_alias(dummy_0, __synccall_unlock);
+weak_alias(dummy_0, __pthread_tsd_run_dtors);
 
-static void cancel_handler(int sig, siginfo_t *si, void *ctx)
-{
-	struct pthread *self = __pthread_self();
-	self->cancel = 1;
-	if (self->canceldisable || (!self->cancelasync && !self->cancelpoint))
-		return;
-	docancel(self);
-}
+#ifdef __pthread_unwind_next
+#undef __pthread_unwind_next
+#define __pthread_unwind_next __pthread_unwind_next_3
+#endif
 
-static void cancelpt(int x)
-{
-	struct pthread *self = __pthread_self();
-	if (self->canceldisable) return;
-	self->cancelpoint = x;
-	if (self->cancel) docancel(self);
-}
-
-/* "rsyscall" is a mechanism by which a thread can synchronously force all
- * other threads to perform an arbitrary syscall. It is necessary to work
- * around the non-conformant implementation of setuid() et al on Linux,
- * which affect only the calling thread and not the whole process. This
- * implementation performs some tricks with signal delivery to work around
- * the fact that it does not keep any list of threads in userspace. */
-
-static struct {
-	volatile int lock, hold, blocks, cnt;
-	unsigned long arg[6];
-	int nr;
-	int err;
-} rs;
-
-static void rsyscall_handler(int sig, siginfo_t *si, void *ctx)
+void __pthread_unwind_next(struct __ptcb *cb)
 {
-	if (rs.cnt == libc.threads_minus_1) return;
-
-	if (syscall6(rs.nr, rs.arg[0], rs.arg[1], rs.arg[2],
-		rs.arg[3], rs.arg[4], rs.arg[5]) < 0 && !rs.err) rs.err=errno;
-
-	a_inc(&rs.cnt);
-	__wake(&rs.cnt, 1, 1);
-	while(rs.hold)
-		__wait(&rs.hold, 0, 1, 1);
-	a_dec(&rs.cnt);
-	if (!rs.cnt) __wake(&rs.cnt, 1, 1);
-}
+	pthread_t self = pthread_self();
+	int n;
 
-static int rsyscall(int nr, long a, long b, long c, long d, long e, long f)
-{
-	int i, ret;
-	sigset_t set = { 0 };
-	struct pthread *self = __pthread_self();
-	sigaddset(&set, SIGSYSCALL);
-
-	LOCK(&rs.lock);
-	while ((i=rs.blocks))
-		__wait(&rs.blocks, 0, i, 1);
-
-	__libc_sigprocmask(SIG_BLOCK, &set, 0);
-
-	rs.nr = nr;
-	rs.arg[0] = a; rs.arg[1] = b;
-	rs.arg[2] = c; rs.arg[3] = d;
-	rs.arg[4] = d; rs.arg[5] = f;
-	rs.hold = 1;
-	rs.err = 0;
-	rs.cnt = 0;
-
-	/* Dispatch signals until all threads respond */
-	for (i=libc.threads_minus_1; i; i--)
-		sigqueue(self->pid, SIGSYSCALL, (union sigval){0});
-	while ((i=rs.cnt) < libc.threads_minus_1) {
-		sigqueue(self->pid, SIGSYSCALL, (union sigval){0});
-		__wait(&rs.cnt, 0, i, 1);
+	if (cb->__next) {
+		self->cancelbuf = cb->__next->__next;
+		longjmp((void *)cb->__next->__jb, 1);
 	}
 
-	/* Handle any lingering signals with no-op */
-	__libc_sigprocmask(SIG_UNBLOCK, &set, 0);
+	__pthread_tsd_run_dtors();
 
-	/* Resume other threads' signal handlers and wait for them */
-	rs.hold = 0;
-	__wake(&rs.hold, -1, 0);
-	while((i=rs.cnt)) __wait(&rs.cnt, 0, i, 1);
+	__lock(&self->exitlock);
 
-	if (rs.err) errno = rs.err, ret = -1;
-	else ret = syscall6(nr, a, b, c, d, e, f);
+	/* Mark this thread dead before decrementing count */
+	__lock(&self->killlock);
+	self->dead = 1;
+	a_store(&self->killlock, 0);
 
-	UNLOCK(&rs.lock);
-	return ret;
-}
+	do n = libc.threads_minus_1;
+	while (n && a_cas(&libc.threads_minus_1, n, n-1)!=n);
+	if (!n) exit(0);
 
-static void init_threads()
-{
-	struct sigaction sa = { .sa_flags = SA_SIGINFO | SA_RESTART };
-	libc.lock = __lock;
-	libc.cancelpt = cancelpt;
-	libc.rsyscall = rsyscall;
-	sa.sa_sigaction = cancel_handler;
-	__libc_sigaction(SIGCANCEL, &sa, 0);
-	sigaddset(&sa.sa_mask, SIGSYSCALL);
-	sigaddset(&sa.sa_mask, SIGCANCEL);
-	sa.sa_sigaction = rsyscall_handler;
-	__libc_sigaction(SIGSYSCALL, &sa, 0);
-	sigprocmask(SIG_UNBLOCK, &sa.sa_mask, 0);
+	if (self->detached && self->map_base) {
+		__syscall(SYS_rt_sigprocmask, SIG_BLOCK, (uint64_t[]){-1},0,8);
+		__unmapself(self->map_base, self->map_size);
+	}
+
+	__syscall(SYS_exit, 0);
 }
 
 static int start(void *p)
 {
 	struct pthread *self = p;
+	if (self->unblock_cancel)
+		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, 8);
 	pthread_exit(self->start(self->start_arg));
 	return 0;
 }
@@ -164,23 +62,26 @@ weak_alias(dummy, __pthread_tsd_size);
 
 int pthread_create(pthread_t *res, const pthread_attr_t *attr, void *(*entry)(void *), void *arg)
 {
-	static int init;
 	int ret;
-	size_t size, guard;
+	size_t size = DEFAULT_STACK_SIZE + DEFAULT_GUARD_SIZE;
+	size_t guard = DEFAULT_GUARD_SIZE;
 	struct pthread *self = pthread_self(), *new;
 	unsigned char *map, *stack, *tsd;
-	static const pthread_attr_t default_attr;
 
-	if (!self) return errno = ENOSYS;
-	if (!init && ++init) init_threads();
+	if (!self) return ENOSYS;
+	if (!libc.threaded) {
+		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, 8);
+		libc.threaded = 1;
+	}
 
-	if (!attr) attr = &default_attr;
-	guard = ROUND(attr->_a_guardsize + DEFAULT_GUARD_SIZE);
-	size = guard + ROUND(attr->_a_stacksize + DEFAULT_STACK_SIZE);
+	if (attr) {
+		guard = ROUND(attr->_a_guardsize + DEFAULT_GUARD_SIZE);
+		size = guard + ROUND(attr->_a_stacksize + DEFAULT_STACK_SIZE);
+	}
 	size += __pthread_tsd_size;
 	map = mmap(0, size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANON, -1, 0);
 	if (!map) return EAGAIN;
-	mprotect(map, guard, PROT_NONE);
+	if (guard) mprotect(map, guard, PROT_NONE);
 
 	tsd = map + size - __pthread_tsd_size;
 	new = (void *)(tsd - sizeof *new - PAGE_SIZE%sizeof *new);
@@ -192,22 +93,18 @@ int pthread_create(pthread_t *res, const pthread_attr_t *attr, void *(*entry)(vo
 	new->start_arg = arg;
 	new->self = new;
 	new->tsd = (void *)tsd;
-	new->detached = attr->_a_detach;
-	new->attr = *attr;
+	if (attr) new->detached = attr->_a_detach;
+	new->unblock_cancel = self->cancel;
 	memcpy(new->tlsdesc, self->tlsdesc, sizeof new->tlsdesc);
 	new->tlsdesc[1] = (uintptr_t)new;
 
 	stack = (void *)((uintptr_t)new-1 & ~(uintptr_t)15);
 
-	/* We must synchronize new thread creation with rsyscall
-	 * delivery. This looks to be the least expensive way: */
-	a_inc(&rs.blocks);
-	while (rs.lock) __wait(&rs.lock, 0, 1, 1);
+	__synccall_lock();
 
 	a_inc(&libc.threads_minus_1);
 	ret = __uniclone(stack, start, new);
-	a_dec(&rs.blocks);
-	if (rs.lock) __wake(&rs.blocks, 1, 1);
+	__synccall_unlock();
 
 	if (ret < 0) {
 		a_dec(&libc.threads_minus_1);
@@ -221,6 +118,7 @@ int pthread_create(pthread_t *res, const pthread_attr_t *attr, void *(*entry)(vo
 void pthread_exit(void *result)
 {
 	struct pthread *self = pthread_self();
+	struct __ptcb cb = { .__next = self->cancelbuf };
 	self->result = result;
-	docancel(self);
+	__pthread_unwind_next(&cb);
 }
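
The comment removed above describes the problem this machinery exists to solve: on Linux, setuid() and friends affect only the calling thread, so a conforming libc must make every thread in the process perform the same syscall. The following is a rough standalone sketch of that broadcast idea using only public POSIX interfaces and C11 atomics; it is not musl code, and the names broadcast_syscall, bcast_handler and BCAST_SIG are invented for the illustration. Each handler runs the requested syscall, checks in, and then parks (with the signal still blocked for that thread) so further queued signals are delivered to threads that have not yet run it; the initiating thread keeps queueing until every other thread has checked in, much as the removed rsyscall() loop did with SIGSYSCALL, except that the sketch busy-waits where musl used __wait/__wake futex operations.

/* Illustrative sketch only, not musl source: force every other thread in
 * the process to perform one syscall by flooding a realtime signal until
 * all of them have checked in.  Roughly the idea behind the removed
 * rsyscall() machinery (and the __synccall that replaced it). */
#define _GNU_SOURCE
#include <pthread.h>
#include <signal.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define BCAST_SIG SIGRTMIN

static atomic_int acks;              /* threads that have run the syscall */
static volatile sig_atomic_t hold;   /* keeps finished handlers parked */
static int target;                   /* number of other threads */
static long bcast_nr, bcast_arg;

static void bcast_handler(int sig)
{
	(void)sig;
	/* A leftover queued signal after everyone checked in: ignore it. */
	if (atomic_load(&acks) >= target) return;

	syscall(bcast_nr, bcast_arg);
	atomic_fetch_add(&acks, 1);

	/* Park with BCAST_SIG blocked (the default during the handler) so
	 * the next queued signal goes to a thread that has not run yet. */
	while (hold) ;
}

/* Run syscall nr(arg) in all nthreads other threads, then locally. */
static long broadcast_syscall(long nr, long arg, int nthreads)
{
	sigset_t set;
	sigemptyset(&set);
	sigaddset(&set, BCAST_SIG);
	pthread_sigmask(SIG_BLOCK, &set, 0); /* don't deliver it to ourselves */

	bcast_nr = nr; bcast_arg = arg;
	target = nthreads;
	hold = 1;
	atomic_store(&acks, 0);

	/* Keep queueing until every other thread has checked in. */
	while (atomic_load(&acks) < nthreads)
		sigqueue(getpid(), BCAST_SIG, (union sigval){0});
	hold = 0;                            /* release the parked handlers */

	pthread_sigmask(SIG_UNBLOCK, &set, 0);
	return syscall(nr, arg);
}

static void *worker(void *arg)
{
	(void)arg;
	for (;;) pause();                    /* just sit and take signals */
}

int main(void)
{
	pthread_t t[2];
	struct sigaction sa = { .sa_handler = bcast_handler, .sa_flags = SA_RESTART };
	sigaction(BCAST_SIG, &sa, 0);

	for (int i = 0; i < 2; i++) pthread_create(&t[i], 0, worker, 0);
	sleep(1);                            /* crude: let the workers start */

	/* Harmless demo; a real user would broadcast e.g. SYS_setuid. */
	broadcast_syscall(SYS_getpid, 0, 2);
	printf("acks = %d\n", atomic_load(&acks));
	return 0;
}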
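Separately, the new exit path replaces the old unconditional a_fetch_add(&libc.threads_minus_1, -1) with a compare-and-swap loop that never decrements past zero, so the last exiting thread can detect that it is last and call exit(0) rather than the thread-exit syscall. A minimal equivalent of that pattern in portable C11 atomics (again only an illustration, not the musl atomic primitives) is:

#include <stdatomic.h>

static atomic_int threads_minus_1;   /* number of threads besides this one */

/* Returns 1 if the calling thread is the only one left (count was 0),
 * otherwise atomically decrements the count and returns 0. */
static int exiting_thread_is_last(void)
{
	int n = atomic_load(&threads_minus_1);
	while (n && !atomic_compare_exchange_weak(&threads_minus_1, &n, n - 1))
		;   /* n is reloaded by the failed CAS; retry */
	return n == 0;
}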