X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=src%2Fthread%2Fpthread_create.c;h=8761381a4d861b6d9919cd0220d31ffccb17bc87;hb=04335d9260c076cf4d9264bd93dd3b06c237a639;hp=893773fa10f0228be9d64af7e1e884dcb048391c;hpb=12e1e324683a1d381b7f15dd36c99b37dd44d940;p=musl

diff --git a/src/thread/pthread_create.c b/src/thread/pthread_create.c
index 893773fa..8761381a 100644
--- a/src/thread/pthread_create.c
+++ b/src/thread/pthread_create.c
@@ -2,16 +2,11 @@
 #include "pthread_impl.h"
 #include "stdio_impl.h"
 #include "libc.h"
+#include "lock.h"
 #include <sys/mman.h>
 #include <string.h>
 #include <stddef.h>
 
-void *__mmap(void *, size_t, int, int, int, off_t);
-int __munmap(void *, size_t);
-int __mprotect(void *, size_t, int);
-void __vm_lock_impl(int);
-void __vm_unlock_impl(void);
-
 static void dummy_0()
 {
 }
@@ -19,6 +14,7 @@ weak_alias(dummy_0, __acquire_ptc);
 weak_alias(dummy_0, __release_ptc);
 weak_alias(dummy_0, __pthread_tsd_run_dtors);
 weak_alias(dummy_0, __do_orphaned_stdio_locks);
+weak_alias(dummy_0, __dl_thread_cleanup);
 
 _Noreturn void __pthread_exit(void *result)
 {
@@ -38,11 +34,11 @@ _Noreturn void __pthread_exit(void *result)
 
 	__pthread_tsd_run_dtors();
 
-	__lock(self->exitlock);
-
-	/* Mark this thread dead before decrementing count */
-	__lock(self->killlock);
-	self->dead = 1;
+	/* Access to target the exiting thread with syscalls that use
+	 * its kernel tid is controlled by killlock. For detached threads,
+	 * any use past this point would have undefined behavior, but for
+	 * joinable threads it's a valid usage that must be handled. */
+	LOCK(self->killlock);
 
 	/* Block all signals before decrementing the live thread count.
 	 * This is important to ensure that dynamically allocated TLS
@@ -50,34 +46,22 @@ _Noreturn void __pthread_exit(void *result)
 	 * reasons as well. */
 	__block_all_sigs(&set);
 
-	/* Wait to unlock the kill lock, which governs functions like
-	 * pthread_kill which target a thread id, until signals have
-	 * been blocked. This precludes observation of the thread id
-	 * as a live thread (with application code running in it) after
-	 * the thread was reported dead by ESRCH being returned. */
-	__unlock(self->killlock);
-
 	/* It's impossible to determine whether this is "the last thread"
 	 * until performing the atomic decrement, since multiple threads
 	 * could exit at the same time. For the last thread, revert the
-	 * decrement and unblock signals to give the atexit handlers and
-	 * stdio cleanup code a consistent state. */
+	 * decrement, restore the tid, and unblock signals to give the
+	 * atexit handlers and stdio cleanup code a consistent state. */
 	if (a_fetch_add(&libc.threads_minus_1, -1)==0) {
 		libc.threads_minus_1 = 0;
+		UNLOCK(self->killlock);
 		__restore_sigs(&set);
 		exit(0);
 	}
 
-	if (self->locale != &libc.global_locale) {
-		a_dec(&libc.uselocale_cnt);
-		if (self->locale->ctype_utf8)
-			a_dec(&libc.bytelocale_cnt_minus_1);
-	}
-
 	/* Process robust list in userspace to handle non-pshared mutexes
 	 * and the detached thread case where the robust list head will
 	 * be invalid when the kernel would process it. */
-	__vm_lock_impl(+1);
+	__vm_lock();
 	volatile void *volatile *rp;
 	while ((rp=self->robust_list.head) && rp != &self->robust_list.head) {
 		pthread_mutex_t *m = (void *)((char *)rp
@@ -86,41 +70,55 @@ _Noreturn void __pthread_exit(void *result)
 		int priv = (m->_m_type & 128) ^ 128;
 		self->robust_list.pending = rp;
 		self->robust_list.head = *rp;
-		int cont = a_swap(&m->_m_lock, self->tid|0x40000000);
+		int cont = a_swap(&m->_m_lock, 0x40000000);
 		self->robust_list.pending = 0;
 		if (cont < 0 || waiters)
 			__wake(&m->_m_lock, 1, priv);
 	}
-	__vm_unlock_impl();
+	__vm_unlock();
 
 	__do_orphaned_stdio_locks();
+	__dl_thread_cleanup();
 
-	if (self->detached && self->map_base) {
+	/* This atomic potentially competes with a concurrent pthread_detach
+	 * call; the loser is responsible for freeing thread resources. */
+	int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);
+
+	if (state>=DT_DETACHED && self->map_base) {
 		/* Detached threads must avoid the kernel clear_child_tid
 		 * feature, since the virtual address will have been
 		 * unmapped and possibly already reused by a new mapping
 		 * at the time the kernel would perform the write. In
 		 * the case of threads that started out detached, the
 		 * initial clone flags are correct, but if the thread was
-		 * detached later (== 2), we need to clear it here. */
-		if (self->detached == 2) __syscall(SYS_set_tid_address, 0);
+		 * detached later, we need to clear it here. */
+		if (state == DT_DYNAMIC) __syscall(SYS_set_tid_address, 0);
 
 		/* Robust list will no longer be valid, and was already
 		 * processed above, so unregister it with the kernel. */
		if (self->robust_list.off)
 			__syscall(SYS_set_robust_list, 0, 3*sizeof(long));
 
+		/* Since __unmapself bypasses the normal munmap code path,
+		 * explicitly wait for vmlock holders first. */
+		__vm_wait();
+
 		/* The following call unmaps the thread's stack mapping
 		 * and then exits without touching the stack. */
 		__unmapself(self->map_base, self->map_size);
 	}
 
+	/* After the kernel thread exits, its tid may be reused. Clear it
+	 * to prevent inadvertent use and inform functions that would use
+	 * it that it's no longer available. */
+	self->tid = 0;
+	UNLOCK(self->killlock);
+
 	for (;;) __syscall(SYS_exit, 0);
 }
 
 void __do_cleanup_push(struct __ptcb *cb)
 {
-	if (!libc.has_thread_pointer) return;
 	struct pthread *self = __pthread_self();
 	cb->__next = self->cancelbuf;
 	self->cancelbuf = cb;
@@ -128,33 +126,41 @@ void __do_cleanup_push(struct __ptcb *cb)
 
 void __do_cleanup_pop(struct __ptcb *cb)
 {
-	if (!libc.has_thread_pointer) return;
 	__pthread_self()->cancelbuf = cb->__next;
 }
 
+struct start_args {
+	void *(*start_func)(void *);
+	void *start_arg;
+	pthread_attr_t *attr;
+	volatile int *perr;
+	unsigned long sig_mask[_NSIG/8/sizeof(long)];
+};
+
 static int start(void *p)
 {
-	pthread_t self = p;
-	if (self->startlock[0]) {
-		__wait(self->startlock, 0, 1, 1);
-		if (self->startlock[0]) {
-			self->detached = 2;
-			pthread_exit(0);
+	struct start_args *args = p;
+	if (args->attr) {
+		pthread_t self = __pthread_self();
+		int ret = -__syscall(SYS_sched_setscheduler, self->tid,
+			args->attr->_a_policy, &args->attr->_a_prio);
+		if (a_swap(args->perr, ret)==-2)
+			__wake(args->perr, 1, 1);
+		if (ret) {
+			self->detach_state = DT_DYNAMIC;
+			__pthread_exit(0);
 		}
-		__restore_sigs(self->sigmask);
 	}
-	if (self->unblock_cancel)
-		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK,
-			SIGPT_SET, 0, _NSIG/8);
-	__pthread_exit(self->start(self->start_arg));
+	__syscall(SYS_rt_sigprocmask, SIG_SETMASK, &args->sig_mask, 0, _NSIG/8);
+	__pthread_exit(args->start_func(args->start_arg));
 	return 0;
 }
 
 static int start_c11(void *p)
 {
-	pthread_t self = p;
-	int (*start)(void*) = (int(*)(void*)) self->start;
-	__pthread_exit((void *)(uintptr_t)start(self->start_arg));
+	struct start_args *args = p;
+	int (*start)(void*) = (int(*)(void*)) args->start_func;
+	__pthread_exit((void *)(uintptr_t)start(args->start_arg));
 	return 0;
 }
 
@@ -178,8 +184,6 @@ static void init_file_lock(FILE *f)
 	if (f && f->lock<0) f->lock = 0;
 }
 
-void *__copy_tls(unsigned char *);
-
 int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
 {
 	int ret, c11 = (attrp == __ATTRP_C11_THREAD);
@@ -189,14 +193,16 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
 	unsigned flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND
 		| CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS
 		| CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID | CLONE_DETACHED;
-	int do_sched = 0;
-	pthread_attr_t attr = {0};
+	pthread_attr_t attr = { 0 };
+	sigset_t set;
+	volatile int err = -1;
 
 	if (!libc.can_do_threads) return ENOSYS;
 	self = __pthread_self();
 	if (!libc.threaded) {
-		for (FILE *f=libc.ofl_head; f; f=f->next)
+		for (FILE *f=*__ofl_lock(); f; f=f->next)
 			init_file_lock(f);
+		__ofl_unlock();
 		init_file_lock(__stdin_used);
 		init_file_lock(__stdout_used);
 		init_file_lock(__stderr_used);
@@ -207,11 +213,16 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
 	if (attrp && !c11) attr = *attrp;
 
 	__acquire_ptc();
+	if (!attrp || c11) {
+		attr._a_stacksize = __default_stacksize;
+		attr._a_guardsize = __default_guardsize;
+	}
+
 	if (__block_new_threads) __wait(&__block_new_threads, 0, 1, 1);
 
 	if (attr._a_stackaddr) {
 		size_t need = libc.tls_size + __pthread_tsd_size;
-		size = attr._a_stacksize + DEFAULT_STACK_SIZE;
+		size = attr._a_stacksize;
 		stack = (void *)(attr._a_stackaddr & -16);
 		stack_limit = (void *)(attr._a_stackaddr - size);
 		/* Use application-provided stack for TLS only when
@@ -223,11 +234,11 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
 			memset(stack, 0, need);
 		} else {
 			size = ROUND(need);
-			guard = 0;
 		}
+		guard = 0;
 	} else {
-		guard = ROUND(DEFAULT_GUARD_SIZE + attr._a_guardsize);
-		size = guard + ROUND(DEFAULT_STACK_SIZE + attr._a_stacksize
+		guard = ROUND(attr._a_guardsize);
+		size = guard + ROUND(attr._a_stacksize
 			+ libc.tls_size + __pthread_tsd_size);
 	}
 
@@ -235,7 +246,8 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
 	if (guard) {
 		map = __mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
 		if (map == MAP_FAILED) goto fail;
-		if (__mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)) {
+		if (__mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)
+		    && errno != ENOSYS) {
 			__munmap(map, size);
 			goto fail;
 		}
@@ -255,43 +267,59 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
 	new->map_size = size;
 	new->stack = stack;
 	new->stack_size = stack - stack_limit;
-	new->start = entry;
-	new->start_arg = arg;
+	new->guard_size = guard;
 	new->self = new;
 	new->tsd = (void *)tsd;
 	new->locale = &libc.global_locale;
 	if (attr._a_detach) {
-		new->detached = 1;
+		new->detach_state = DT_DETACHED;
 		flags -= CLONE_CHILD_CLEARTID;
+	} else {
+		new->detach_state = DT_JOINABLE;
 	}
+	new->robust_list.head = &new->robust_list.head;
+	new->CANARY = self->CANARY;
+
+	/* Setup argument structure for the new thread on its stack. */
+	stack -= (uintptr_t)stack % sizeof(uintptr_t);
+	stack -= sizeof(struct start_args);
+	struct start_args *args = (void *)stack;
+	args->start_func = entry;
+	args->start_arg = arg;
 	if (attr._a_sched) {
-		do_sched = new->startlock[0] = 1;
-		__block_app_sigs(new->sigmask);
+		args->attr = &attr;
+		args->perr = &err;
+	} else {
+		args->attr = 0;
+		args->perr = 0;
 	}
-	new->unblock_cancel = self->cancel;
-	new->canary = self->canary;
+
+	__block_app_sigs(&set);
+
+	/* Ensure SIGCANCEL is unblocked in new thread. This requires
+	 * working with a copy of the set so we can restore the
+	 * original mask in the calling thread. */
+	memcpy(&args->sig_mask, &set, sizeof args->sig_mask);
+	args->sig_mask[(SIGCANCEL-1)/8/sizeof(long)] &=
+		~(1UL<<((SIGCANCEL-1)%(8*sizeof(long))));
 
 	a_inc(&libc.threads_minus_1);
-	ret = __clone((c11 ? start_c11 : start), stack, flags, new, &new->tid, TP_ADJ(new), &new->tid);
+	ret = __clone((c11 ? start_c11 : start), stack, flags, args, &new->tid, TP_ADJ(new), &new->detach_state);
+	__restore_sigs(&set);
 
 	__release_ptc();
-	if (do_sched) {
-		__restore_sigs(new->sigmask);
-	}
-
 	if (ret < 0) {
 		a_dec(&libc.threads_minus_1);
 		if (map) __munmap(map, size);
 		return EAGAIN;
 	}
 
-	if (do_sched) {
-		ret = __syscall(SYS_sched_setscheduler, new->tid,
-			attr._a_policy, &attr._a_prio);
-		a_store(new->startlock, ret<0 ? 2 : 0);
-		__wake(new->startlock, 1, 1);
-		if (ret < 0) return -ret;
+	if (attr._a_sched) {
+		if (a_cas(&err, -1, -2)==-1)
+			__wait(&err, 0, -2, 1);
+		ret = err;
+		if (ret) return ret;
 	}
 
 	*res = new;
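
The core of this change is the replacement of the old exitlock/startlock/detached machinery with a single detach_state word: the exiting thread and a concurrent pthread_detach() each try to advance the state with one compare-and-swap, and whichever side loses the race knows the other party will never touch the thread again, so the loser takes over cleanup. Below is a minimal standalone sketch of that handoff pattern, using C11 atomics and illustrative names (struct tcb, the ST_* states, and free() standing in for unmapping) rather than musl's internal a_cas and DT_* constants; note that in musl itself a losing pthread_detach falls back to joining rather than freeing directly.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative states, loosely mirroring musl's DT_JOINABLE,
 * DT_EXITING and DT_DETACHED. */
enum { ST_JOINABLE, ST_EXITING, ST_DETACHED };

struct tcb {
	_Atomic int state;
	void *resources; /* stands in for the thread's stack mapping */
};

/* Path taken by the thread itself as it exits. */
static void exit_path(struct tcb *t)
{
	int expect = ST_JOINABLE;
	if (atomic_compare_exchange_strong(&t->state, &expect, ST_EXITING))
		return; /* CAS won: still joinable; joiner/detacher cleans up */
	/* CAS lost: a detach already happened; nobody will join, so the
	 * exiting side must release the resources itself. */
	free(t->resources);
	t->resources = NULL;
}

/* Path taken by another thread performing the detach operation. */
static void detach_path(struct tcb *t)
{
	int expect = ST_JOINABLE;
	if (atomic_compare_exchange_strong(&t->state, &expect, ST_DETACHED))
		return; /* CAS won: the thread will clean up when it exits */
	/* CAS lost: the thread is already exiting; the detacher inherits
	 * the cleanup duty (musl handles this case by joining). */
	free(t->resources);
	t->resources = NULL;
}

int main(void)
{
	/* Detach first, then exit: the exiting side frees. */
	struct tcb a = { ST_JOINABLE, malloc(64) };
	detach_path(&a);
	exit_path(&a);

	/* Exit first, then detach: the detaching side frees. */
	struct tcb b = { ST_JOINABLE, malloc(64) };
	exit_path(&b);
	detach_path(&b);

	printf("freed in both orders: %d %d\n", !a.resources, !b.resources);
	return 0;
}

Because both sides perform exactly one CAS on the same word, exactly one of them observes a failure in either ordering, so the resources are released exactly once without any lock. This is also why the diff moves the start arguments into a start_args block on the new thread's stack: once the thread may be responsible for unmapping itself, the parent can no longer rely on writing startup state into a pthread structure the child might free.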