#include "pthread_impl.h"
-#define pthread_self __pthread_self
+/* Pop to the next cancellation cleanup handler, or, if none remain,
+ * perform the full thread-exit sequence: run TSD destructors, mark the
+ * thread dead, drop it from the thread count, unmap its own stack if
+ * detached, and make the exit syscall. Never returns. */
+void __pthread_unwind_next(struct __ptcb *cb)
+{
+ int i, j, not_finished;
+ pthread_t self;
+
+ /* If another cleanup handler is pending, transfer control to it. */
+ if (cb->__next) longjmp((void *)cb->__next->__jb, 1);
+
+ self = pthread_self();
+ /* A cancelled thread reports PTHREAD_CANCELLED as its result. */
+ if (self->cancel) self->result = PTHREAD_CANCELLED;
+
+ /* NOTE(review): exitlock is taken and never released here —
+  * presumably serializes exit against join/detach; confirm against
+  * the code that unlocks it. */
+ LOCK(&self->exitlock);
+
+ /* Run thread-specific-data destructors. A destructor may store new
+  * values, so repeat until no non-null slots remain, bounded by
+  * PTHREAD_DESTRUCTOR_ITERATIONS rounds as POSIX allows. */
+ not_finished = self->tsd_used;
+ for (j=0; not_finished && j<PTHREAD_DESTRUCTOR_ITERATIONS; j++) {
+ not_finished = 0;
+ for (i=0; i<PTHREAD_KEYS_MAX; i++) {
+ if (self->tsd[i] && libc.tsd_keys[i]) {
+ void *tmp = self->tsd[i];
+ /* Clear the slot before calling the destructor so a
+  * re-entrant lookup sees it empty. */
+ self->tsd[i] = 0;
+ libc.tsd_keys[i](tmp);
+ not_finished = 1;
+ }
+ }
+ }
+
+ /* Mark this thread dead before decrementing count */
+ self->dead = 1;
+
+ /* a_fetch_add returns the old value; 0 means this was the last
+  * thread, so exit the whole process. */
+ if (!a_fetch_add(&libc.threads_minus_1, -1))
+ exit(0);
+
+ /* A detached thread must free its own stack: block all signals
+  * first, then unmap the stack from within via __unmapself, which
+  * exits without touching the (now gone) stack. */
+ if (self->detached && self->map_base) {
+ syscall4(__NR_rt_sigprocmask, SIG_BLOCK, (long)(uint64_t[1]){-1},0,8);
+ __unmapself(self->map_base, self->map_size);
+ }
+
+ __syscall_exit(0);
+}
static void docancel(struct pthread *self)
{
static void cancel_handler(int sig, siginfo_t *si, void *ctx)
{
-	struct pthread *self = pthread_self();
+	struct pthread *self = __pthread_self();
+	/* Ignore signals not raised by this process itself (si_code > 0
+	 * means kernel-generated; si_pid mismatch means another process)
+	 * so outsiders cannot forge cancellation. */
+	if (si->si_code > 0 || si->si_pid != self->pid) return;
	self->cancel = 1;
	/* With cancellation disabled, or in deferred mode outside a
	 * cancellation point, just record the request and return. */
	if (self->canceldisable || (!self->cancelasync && !self->cancelpoint))
		return;
	docancel(self);
}
+/* Cancellation-point hook (installed as libc.cancelpt): records via x
+ * whether the thread is at a cancellation point, and acts on any
+ * pending cancellation request unless cancellation is disabled. */
+static void cancelpt(int x)
+{
+	struct pthread *self = __pthread_self();
+	if (self->canceldisable) return;
+	self->cancelpoint = x;
+	if (self->cancel) docancel(self);
+}
+
/* "rsyscall" is a mechanism by which a thread can synchronously force all
* other threads to perform an arbitrary syscall. It is necessary to work
* around the non-conformant implementation of setuid() et al on Linux,
static void rsyscall_handler(int sig, siginfo_t *si, void *ctx)
{
- if (rs.cnt == libc.threads_minus_1) return;
+ struct pthread *self = __pthread_self();
+
+ if (si->si_code > 0 || si->si_pid != self->pid ||
+ rs.cnt == libc.threads_minus_1) return;
+
+ /* Threads which have already decremented themselves from the
+ * thread count must not increment rs.cnt or otherwise act. */
+ if (self->dead) {
+ __wait(&rs.hold, 0, 1, 1);
+ return;
+ }
if (syscall6(rs.nr, rs.arg[0], rs.arg[1], rs.arg[2],
rs.arg[3], rs.arg[4], rs.arg[5]) < 0 && !rs.err) rs.err=errno;
{
int i, ret;
sigset_t set = { 0 };
- struct pthread *self = pthread_self();
+ struct pthread *self = __pthread_self();
sigaddset(&set, SIGSYSCALL);
LOCK(&rs.lock);
return ret;
}
-static void cancelpt(int x)
-{
- struct pthread *self = pthread_self();
- if (self->canceldisable) return;
- self->cancelpoint = x;
- if (self->cancel) docancel(self);
-}
-
static void init_threads()
{
struct sigaction sa = { .sa_flags = SA_SIGINFO | SA_RESTART };
libc.lock = __lock;
+ libc.lockfile = __lockfile;
libc.cancelpt = cancelpt;
libc.rsyscall = rsyscall;
sa.sa_sigaction = cancel_handler;
return 0;
}
-#undef pthread_self
-
-#define CLONE_MAGIC 0x7d0f00
-int __clone(int (*)(void *), void *, int, void *, pid_t *, void *, pid_t *);
+int __uniclone(void *, int (*)(), void *);
#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)
if (!init && ++init) init_threads();
if (!attr) attr = &default_attr;
- guard = ROUND(attr->__guardsize + DEFAULT_GUARD_SIZE);
- size = guard + ROUND(attr->__stacksize + DEFAULT_STACK_SIZE);
+ guard = ROUND(attr->_a_guardsize + DEFAULT_GUARD_SIZE);
+ size = guard + ROUND(attr->_a_stacksize + DEFAULT_STACK_SIZE);
size += __pthread_tsd_size;
map = mmap(0, size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANON, -1, 0);
if (!map) return EAGAIN;
- mprotect(map, guard, PROT_NONE);
+ if (guard) mprotect(map, guard, PROT_NONE);
tsd = map + size - __pthread_tsd_size;
new = (void *)(tsd - sizeof *new - PAGE_SIZE%sizeof *new);
new->start_arg = arg;
new->self = new;
new->tsd = (void *)tsd;
- new->detached = attr->__detach;
+ new->detached = attr->_a_detach;
new->attr = *attr;
memcpy(new->tlsdesc, self->tlsdesc, sizeof new->tlsdesc);
new->tlsdesc[1] = (uintptr_t)new;
while (rs.lock) __wait(&rs.lock, 0, 1, 1);
a_inc(&libc.threads_minus_1);
- ret = __clone(start, stack, CLONE_MAGIC, new,
- &new->tid, &new->tlsdesc, &new->tid);
+ ret = __uniclone(stack, start, new);
a_dec(&rs.blocks);
if (rs.lock) __wake(&rs.blocks, 1, 1);
if (ret < 0) {
a_dec(&libc.threads_minus_1);
munmap(map, size);
- return -ret;
+ return EAGAIN;
}
*res = new;
return 0;
}
+
+/* Terminate the calling thread, making result available to any joiner.
+ * Delegates to docancel, which unwinds cleanup handlers and ultimately
+ * reaches __pthread_unwind_next for the actual exit. Never returns. */
+void pthread_exit(void *result)
+{
+	struct pthread *self = pthread_self();
+	self->result = result;
+	docancel(self);
+}