this commit addresses two issues:
1. a race condition, whereby a cancellation request occurring after a
syscall returned from kernelspace but before the subsequent
CANCELPT_END would cause cancellable resource-allocating syscalls
(like open) to leak resources.
2. signal handlers invoked while the thread was blocked at a
cancellation point behaved as if asynchronous cancellation mode were in
effect, resulting in potentially dangerous state corruption if a
cancellation request occurred.
the glibc/nptl implementation of threads shares both of these issues.
with this commit, both are fixed. however, cancellation points
encountered in a signal handler will not be acted upon if the signal
was received while the thread was already at a cancellation point.
they will of course be acted upon after the signal handler returns, so
in real-world usage where signal handlers quickly return, it should
not be a problem. it's possible to solve this problem too by having
sigaction() wrap all signal handlers with a function that uses a
pthread_cleanup handler to catch cancellation, patch up the saved
context, and return into the cancellable function that will catch and
act upon the cancellation. however that would be a lot of complexity
for minimal if any benefit...
14 files changed:
__asm__ ("movl %%gs:0,%0" : "=r" (self) );
return self;
}
__asm__ ("movl %%gs:0,%0" : "=r" (self) );
return self;
}
+
+#define PC_AT_SYS(c) \
+ (*(uint16_t *)(((ucontext_t *)(c))->uc_mcontext.__gregs[14])==0x80cd)
__asm__ ("movq %%fs:0,%0" : "=r" (self) );
return self;
}
__asm__ ("movq %%fs:0,%0" : "=r" (self) );
return self;
}
+
+#define PC_AT_SYS(c) \
+ (*(uint16_t *)(((ucontext_t *)(c))->uc_mcontext.__gregs[16])==0x050f)
#define UNLOCK(x) (*(x)=0)
#define CANCELPT(x) (libc.cancelpt ? libc.cancelpt((x)),0 : (void)(x),0)
#define CANCELPT_BEGIN CANCELPT(1)
#define UNLOCK(x) (*(x)=0)
#define CANCELPT(x) (libc.cancelpt ? libc.cancelpt((x)),0 : (void)(x),0)
#define CANCELPT_BEGIN CANCELPT(1)
-#define CANCELPT_END CANCELPT(0)
+#define CANCELPT_TRY CANCELPT(0)
+#define CANCELPT_END CANCELPT(-1)
extern char **__environ;
#define environ __environ
extern char **__environ;
#define environ __environ
#include <sys/wait.h>
#include "syscall.h"
#include <sys/wait.h>
#include "syscall.h"
int waitid(idtype_t type, id_t id, siginfo_t *info, int options)
{
int waitid(idtype_t type, id_t id, siginfo_t *info, int options)
{
- return syscall(SYS_waitid, type, id, info, options, 0);
+ int r;
+ CANCELPT_BEGIN;
+ r = syscall(SYS_waitid, type, id, info, options, 0);
+ if (r<0) CANCELPT_TRY;
+ CANCELPT_END;
+ return r;
#include <sys/wait.h>
#include "syscall.h"
#include <sys/wait.h>
#include "syscall.h"
pid_t waitpid(pid_t pid, int *status, int options)
{
pid_t waitpid(pid_t pid, int *status, int options)
{
- return syscall(SYS_wait4, pid, status, options, 0);
+ int r;
+ CANCELPT_BEGIN;
+ r = syscall(SYS_wait4, pid, status, options, 0);
+ if (r<0) CANCELPT_TRY;
+ CANCELPT_END;
+ return r;
if (ts) ts_tmp = *ts;
CANCELPT_BEGIN;
r = syscall(SYS_pselect6, n, rfds, wfds, efds, ts ? &ts_tmp : 0, data);
if (ts) ts_tmp = *ts;
CANCELPT_BEGIN;
r = syscall(SYS_pselect6, n, rfds, wfds, efds, ts ? &ts_tmp : 0, data);
CANCELPT_END;
return r;
}
CANCELPT_END;
return r;
}
int r;
CANCELPT_BEGIN;
r = syscall(SYS_select, n, rfds, wfds, efds, tv);
int r;
CANCELPT_BEGIN;
r = syscall(SYS_select, n, rfds, wfds, efds, tv);
CANCELPT_END;
return r;
}
CANCELPT_END;
return r;
}
int ret;
CANCELPT_BEGIN;
ret = syscall(__NR_rt_sigsuspend, mask, 8);
int ret;
CANCELPT_BEGIN;
ret = syscall(__NR_rt_sigsuspend, mask, 8);
+ if (ret<0) CANCELPT_TRY;
CANCELPT_END;
return ret;
}
CANCELPT_END;
return ret;
}
CANCELPT_BEGIN;
do {
ret = syscall(__NR_rt_sigtimedwait, mask, si, timeout, 8);
CANCELPT_BEGIN;
do {
ret = syscall(__NR_rt_sigtimedwait, mask, si, timeout, 8);
+ if (ret<0) CANCELPT_TRY;
} while (ret<0 && errno==EINTR);
CANCELPT_END;
return ret;
} while (ret<0 && errno==EINTR);
CANCELPT_END;
return ret;
int ret;
CANCELPT_BEGIN;
ret = ioctl(fd, TCSBRK, 1);
int ret;
CANCELPT_BEGIN;
ret = ioctl(fd, TCSBRK, 1);
CANCELPT_END;
return ret;
}
CANCELPT_END;
return ret;
}
int pthread_cond_timedwait(pthread_cond_t *c, pthread_mutex_t *m, const struct timespec *ts)
{
int r, e=0;
int pthread_cond_timedwait(pthread_cond_t *c, pthread_mutex_t *m, const struct timespec *ts)
{
int r, e=0;
+ CANCELPT_BEGIN;
+ CANCELPT_END;
pthread_cleanup_push(relock, m);
c->_c_block = 1;
if ((r=pthread_mutex_unlock(m))) return r;
pthread_cleanup_push(relock, m);
c->_c_block = 1;
if ((r=pthread_mutex_unlock(m))) return r;
e = __timedwait(&c->_c_block, 1, c->_c_clock, ts, 0);
e = __timedwait(&c->_c_block, 1, c->_c_clock, ts, 0);
pthread_cleanup_pop(0);
if ((r=pthread_mutex_lock(m))) return r;
pthread_cleanup_pop(0);
if ((r=pthread_mutex_lock(m))) return r;
+ CANCELPT_BEGIN;
+ CANCELPT_END;
static void docancel(struct pthread *self)
{
struct __ptcb cb = { .__next = self->cancelbuf };
static void docancel(struct pthread *self)
{
struct __ptcb cb = { .__next = self->cancelbuf };
+ sigset_t set;
+ self->canceldisable = 1;
+ self->cancelasync = 0;
+ sigemptyset(&set);
+ sigaddset(&set, SIGCANCEL);
+ __libc_sigprocmask(SIG_UNBLOCK, &set, 0);
__pthread_unwind_next(&cb);
}
__pthread_unwind_next(&cb);
}
struct pthread *self = __pthread_self();
if (si->si_code > 0 || si->si_pid != self->pid) return;
self->cancel = 1;
struct pthread *self = __pthread_self();
if (si->si_code > 0 || si->si_pid != self->pid) return;
self->cancel = 1;
- if (self->canceldisable || (!self->cancelasync && !self->cancelpoint))
- return;
- docancel(self);
+ if (self->canceldisable) return;
+ if (self->cancelasync || (self->cancelpoint==1 && PC_AT_SYS(ctx)))
+ docancel(self);
}
static void cancelpt(int x)
{
struct pthread *self = __pthread_self();
if (self->canceldisable) return;
}
static void cancelpt(int x)
{
struct pthread *self = __pthread_self();
if (self->canceldisable) return;
- self->cancelpoint = x;
- if (self->cancel) docancel(self);
+ if ((self->cancelpoint+=x)==1 && x>=0 && self->cancel)
+ docancel(self);
}
/* "rsyscall" is a mechanism by which a thread can synchronously force all
}
/* "rsyscall" is a mechanism by which a thread can synchronously force all
if (a_fetch_add(sem->__val, -1) > 0) return 0;
val = a_fetch_add(sem->__val, 1)+1;
if (val==1) __wake(sem->__val, 1, 0);
if (a_fetch_add(sem->__val, -1) > 0) return 0;
val = a_fetch_add(sem->__val, 1)+1;
if (val==1) __wake(sem->__val, 1, 0);
if (at && at->tv_nsec >= 1000000000UL) {
errno = EINVAL;
return -1;
}
if (at && at->tv_nsec >= 1000000000UL) {
errno = EINVAL;
return -1;
}
if (val <= 0 && __timedwait(sem->__val, val, CLOCK_REALTIME, at, 0) == ETIMEDOUT) {
errno = ETIMEDOUT;
if (val <= 0 && __timedwait(sem->__val, val, CLOCK_REALTIME, at, 0) == ETIMEDOUT) {
errno = ETIMEDOUT;
+ CANCELPT_TRY;
+ CANCELPT_END;
int ret;
CANCELPT_BEGIN;
ret = syscall(SYS_nanosleep, req, rem);
int ret;
CANCELPT_BEGIN;
ret = syscall(SYS_nanosleep, req, rem);
CANCELPT_END;
return ret;
}
CANCELPT_END;
return ret;
}