void *__mmap(void *, size_t, int, int, int, off_t);
int __munmap(void *, size_t);
int __mprotect(void *, size_t, int);
-void __vm_lock_impl(int);
-void __vm_unlock_impl(void);
/* NOTE(review): everything below is a unified-diff fragment that lost its
 * hunk headers — the leading '-'/'+' markers are patch residue, the
 * indentation has been flattened, and several statements are elided
 * (the mutex-walk body is cut off mid-expression at the pthread_mutex_t
 * line).  This is not compilable as-is; recover the original patch from
 * version control before making real edits here. */
static void dummy_0()
{
/* Process robust list in userspace to handle non-pshared mutexes
 * and the detached thread case where the robust list head will
 * be invalid when the kernel would process it. */
- __vm_lock_impl(+1);
+ __vm_lock();
volatile void *volatile *rp;
/* Walk the robust list until it cycles back to its own head (the head
 * pointing at itself is the empty-list sentinel).
 * NOTE(review): the loop body is truncated — the pointer arithmetic
 * recovering the mutex from its list node, and the code that computes
 * 'cont', 'waiters' and 'priv', are missing from this fragment. */
while ((rp=self->robust_list.head) && rp != &self->robust_list.head) {
pthread_mutex_t *m = (void *)((char *)rp
if (cont < 0 || waiters)
__wake(&m->_m_lock, 1, priv);
}
- __vm_unlock_impl();
+ __vm_unlock();
__do_orphaned_stdio_locks();
/* Disarm kernel-side robust-list processing, since the list was just
 * handled in userspace above.  NOTE(review): presumably 'off' nonzero
 * means a robust list was registered — confirm against the full file. */
if (self->robust_list.off)
__syscall(SYS_set_robust_list, 0, 3*sizeof(long));
+ /* Since __unmapself bypasses the normal munmap code path,
+ * explicitly wait for vmlock holders first. */
+ __vm_wait();
+
/* The following call unmaps the thread's stack mapping
 * and then exits without touching the stack. */
__unmapself(self->map_base, self->map_size);
/* Push a cancellation-cleanup handler onto the calling thread's cleanup
 * stack.  The stack is LIFO, singly linked through cb->__next with the
 * newest entry at self->cancelbuf.  cb is caller-owned storage (normally
 * a local in the pthread_cleanup_push expansion) and must outlive the
 * matching __do_cleanup_pop. */
void __do_cleanup_push(struct __ptcb *cb)
{
	/* The diff-residue line "- if (!libc.has_thread_pointer) return;"
	 * has been applied (removed): the thread pointer is mandatory, so
	 * the early-out guard is dead code.  The closing brace lost in the
	 * fragment mangling is also restored. */
	struct pthread *self = __pthread_self();
	cb->__next = self->cancelbuf;
	self->cancelbuf = cb;
}
/* Pop the most recently pushed cleanup handler from the calling thread's
 * cleanup stack by unlinking cb (which must be the current top, i.e. the
 * argument of the matching __do_cleanup_push). */
void __do_cleanup_pop(struct __ptcb *cb)
{
	/* The diff-residue line "- if (!libc.has_thread_pointer) return;"
	 * has been applied (removed): leaving it in place with its '-'
	 * marker was a syntax error, and the guard itself is dead code once
	 * the thread pointer is unconditionally available. */
	__pthread_self()->cancelbuf = cb->__next;
}
/* NOTE(review): interior fragment of the thread-creation path — the
 * enclosing function's opening and closing are outside this view, and
 * the '+' on the robust_list line is unapplied-patch residue; do not
 * treat this span as a standalone definition. */
do_sched = new->startlock[0] = 1;
__block_app_sigs(new->sigmask);
}
/* Initialize the new thread's robust list to the empty sentinel: the
 * head pointing at its own address marks an empty list. */
+ new->robust_list.head = &new->robust_list.head;
/* Inherit the creator's cancellation state and stack canary.
 * NOTE(review): 'self' is presumably the creating thread — confirm in
 * the full function. */
new->unblock_cancel = self->cancel;
new->canary = self->canary;