#include "pthread_impl.h"
+#ifdef __pthread_unwind_next
+#undef __pthread_unwind_next
+#define __pthread_unwind_next __pthread_unwind_next_3
+#endif
+
/* Thread-exit unwind path: invoked after cleanup handlers to tear the
 * thread down and exit.
 * NOTE(review): this fragment is corrupted by leftover unified-diff
 * +/- markers and missing hunk context -- the stray closing braces
 * below do not balance, and the uses of `self`, `i`, `j` are cut off.
 * Re-apply the original patch cleanly before attempting to build. */
void __pthread_unwind_next(struct __ptcb *cb)
{
int i, j, not_finished;
self = pthread_self();
/* record cancellation as this thread's result if a cancel is pending */
if (self->cancel) self->result = PTHREAD_CANCELLED;
- if (!a_fetch_add(&libc.threads_minus_1, -1))
- exit(0);
-
/* serialize against join/detach on this thread -- TODO confirm: the
 * matching unlock is not visible in this fragment */
LOCK(&self->exitlock);
not_finished = self->tsd_used;
}
}
- if (self->detached && self->map_base)
+ /* Mark this thread dead before decrementing count */
+ self->dead = 1;
+
/* last thread out exits the whole process */
+ if (!a_fetch_add(&libc.threads_minus_1, -1))
+ exit(0);
+
+ if (self->detached && self->map_base) {
/* block all signals via raw rt_sigprocmask (8-byte kernel sigset)
 * before unmapping our own stack: no handler may run after that */
+ syscall(__NR_rt_sigprocmask, SIG_BLOCK, (long)(uint64_t[1]){-1},0,8);
__unmapself(self->map_base, self->map_size);
+ }
/* exit only this thread (SYS_exit), not the whole process */
- __syscall_exit(0);
+ syscall(SYS_exit, 0);
}
/* Act on a received cancellation request: build the cleanup-context list
 * head from self->cancelbuf and unwind through it via
 * __pthread_unwind_next.  NOTE(review): `+` diff markers left in place. */
static void docancel(struct pthread *self)
{
struct __ptcb cb = { .__next = self->cancelbuf };
/* disable further cancellation (and async mode) while unwinding, so
 * cancel_handler returns early if SIGCANCEL arrives again */
+ self->canceldisable = 1;
+ self->cancelasync = 0;
__pthread_unwind_next(&cb);
}
/* Cancellation-signal handler: decides whether a pending cancellation may
 * act in the interrupted context.  NOTE(review): fragment still carries
 * unified-diff +/- markers; not valid C as written. */
static void cancel_handler(int sig, siginfo_t *si, void *ctx)
{
struct pthread *self = __pthread_self();
- self->cancel = 1;
- if (self->canceldisable || (!self->cancelasync && !self->cancelpoint))
+ if (!self->cancel) {
/* no cancellation pending: this delivery was repurposed; forward
 * SI_TIMER deliveries to the installed timer callback, if any */
+ if (si->si_code == SI_TIMER && libc.sigtimer)
+ libc.sigtimer(sig, si, ctx);
return;
- docancel(self);
+ }
+ if (self->canceldisable) return;
/* act only in async-cancel mode, or when exactly one cancellation
 * point is active and the interrupted PC is at a syscall --
 * PC_AT_SYS(ctx) is presumably arch-specific; not visible here */
+ if (self->cancelasync || (self->cancelpoint==1 && PC_AT_SYS(ctx)))
+ docancel(self);
}
/* Cancellation-point bracketing hook (installed as libc.cancelpt).
 * Presumably called with x=+1 on entry to a blocking operation and x=-1
 * on exit -- only the counter arithmetic is visible in this chunk.  Acts
 * on a pending cancel when the nesting count first reaches 1. */
static void cancelpt(int x)
{
struct pthread *self = __pthread_self();
if (self->canceldisable) return;
- self->cancelpoint = x;
- if (self->cancel) docancel(self);
/* x>=0 guard: never cancel while *leaving* a cancellation point */
+ if ((self->cancelpoint+=x)==1 && x>=0 && self->cancel)
+ docancel(self);
}
/* NOTE(review): the comment on the next line is truncated mid-sentence
 * (no closing star-slash), and everything after a_inc() below belongs to
 * a different function (the initiator side: it uses nr, a..f, ret and
 * rs.lock, none of which are declared here).  This fragment lost its
 * diff hunk boundaries -- do not build as-is. */
/* "rsyscall" is a mechanism by which a thread can synchronously force all
static void rsyscall_handler(int sig, siginfo_t *si, void *ctx)
{
- if (rs.cnt == libc.threads_minus_1) return;
+ struct pthread *self = __pthread_self();
- if (syscall6(rs.nr, rs.arg[0], rs.arg[1], rs.arg[2],
/* ignore deliveries not raised by our own process (positive si_code
 * means kernel-generated) and spurious ones after all threads acted */
+ if (si->si_code > 0 || si->si_pid != self->pid ||
+ rs.cnt == libc.threads_minus_1) return;
+
+ /* Threads which have already decremented themselves from the
+ * thread count must not increment rs.cnt or otherwise act. */
+ if (self->dead) {
+ __wait(&rs.hold, 0, 1, 1);
+ return;
+ }
+
/* perform the broadcast syscall locally; first failure sets rs.err */
+ if (syscall(rs.nr, rs.arg[0], rs.arg[1], rs.arg[2],
rs.arg[3], rs.arg[4], rs.arg[5]) < 0 && !rs.err) rs.err=errno;
a_inc(&rs.cnt);
/* ---- lines below here are from the initiating thread's function ---- */
while((i=rs.cnt)) __wait(&rs.cnt, 0, i, 1);
if (rs.err) errno = rs.err, ret = -1;
- else ret = syscall6(nr, a, b, c, d, e, f);
+ else ret = syscall(nr, a, b, c, d, e, f);
UNLOCK(&rs.lock);
return ret;
/* NOTE(review): fragment of a thread-support initialization function --
 * its signature is outside this chunk.  It installs libc-internal hooks
 * (lock, lockfile, cancellation point, rsyscall) and prepares the
 * sigaction for the cancellation handler; the sigaction() call itself is
 * not visible here. */
{
struct sigaction sa = { .sa_flags = SA_SIGINFO | SA_RESTART };
libc.lock = __lock;
+ libc.lockfile = __lockfile;
libc.cancelpt = cancelpt;
libc.rsyscall = rsyscall;
sa.sa_sigaction = cancel_handler;
/* Entry trampoline for a newly created thread: runs the user start
 * function and hands its return value to pthread_exit.  The trailing
 * `return 0` is unreachable in practice but satisfies the clone entry
 * signature. */
static int start(void *p)
{
struct pthread *self = p;
+ if (self->unblock_cancel) {
/* unblock_cancel is set by the creating thread (presumably because
 * SIGCANCEL was blocked during creation -- confirm against the
 * creation path); unblock it now that we can handle it */
+ sigset_t set;
+ sigemptyset(&set);
+ sigaddset(&set, SIGCANCEL);
+ __libc_sigprocmask(SIG_UNBLOCK, &set, 0);
+ }
pthread_exit(self->start(self->start_arg));
return 0;
}
/* NOTE(review): interior of the thread-creation function (signature and
 * tail are outside this chunk).  Lays out one mapping holding the guard
 * region, stack, pthread struct, and TSD block. */
size += __pthread_tsd_size;
map = mmap(0, size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANON, -1, 0);
/* NOTE(review): raw mmap reports failure as MAP_FAILED ((void *)-1),
 * not NULL -- this check looks wrong unless a wrapper normalizes it;
 * confirm against the project's mmap definition */
if (!map) return EAGAIN;
- mprotect(map, guard, PROT_NONE);
/* skip the mprotect syscall entirely when no guard area is requested */
+ if (guard) mprotect(map, guard, PROT_NONE);
/* TSD sits at the top of the mapping; the pthread struct is placed
 * just below it, padded to alignment; the stack grows down from there */
tsd = map + size - __pthread_tsd_size;
new = (void *)(tsd - sizeof *new - PAGE_SIZE%sizeof *new);
new->tsd = (void *)tsd;
new->detached = attr->_a_detach;
new->attr = *attr;
/* propagate the creator's pending-cancel flag so the child unblocks
 * SIGCANCEL on startup (consumed in start()) */
+ new->unblock_cancel = self->cancel;
memcpy(new->tlsdesc, self->tlsdesc, sizeof new->tlsdesc);
new->tlsdesc[1] = (uintptr_t)new;
/* 16-byte-align the initial stack pointer below the pthread struct */
stack = (void *)((uintptr_t)new-1 & ~(uintptr_t)15);