X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=src%2Fthread%2Fpthread_create.c;h=6bdfb44f9cde29f656dcca6763be610f05204cd4;hb=8d81ba8c0bc6fe31136cb15c9c82ef4c24965040;hp=8761381a4d861b6d9919cd0220d31ffccb17bc87;hpb=04335d9260c076cf4d9264bd93dd3b06c237a639;p=musl
diff --git a/src/thread/pthread_create.c b/src/thread/pthread_create.c
index 8761381a..6bdfb44f 100644
--- a/src/thread/pthread_create.c
+++ b/src/thread/pthread_create.c
@@ -15,6 +15,41 @@ weak_alias(dummy_0, __release_ptc);
 weak_alias(dummy_0, __pthread_tsd_run_dtors);
 weak_alias(dummy_0, __do_orphaned_stdio_locks);
 weak_alias(dummy_0, __dl_thread_cleanup);
+weak_alias(dummy_0, __membarrier_init);
+
+static int tl_lock_count;
+static int tl_lock_waiters;
+
+void __tl_lock(void)
+{
+	int tid = __pthread_self()->tid;
+	int val = __thread_list_lock;
+	if (val == tid) {
+		tl_lock_count++;
+		return;
+	}
+	while ((val = a_cas(&__thread_list_lock, 0, tid)))
+		__wait(&__thread_list_lock, &tl_lock_waiters, val, 0);
+}
+
+void __tl_unlock(void)
+{
+	if (tl_lock_count) {
+		tl_lock_count--;
+		return;
+	}
+	a_store(&__thread_list_lock, 0);
+	if (tl_lock_waiters) __wake(&__thread_list_lock, 1, 0);
+}
+
+void __tl_sync(pthread_t td)
+{
+	a_barrier();
+	int val = __thread_list_lock;
+	if (!val) return;
+	__wait(&__thread_list_lock, &tl_lock_waiters, val, 0);
+	if (tl_lock_waiters) __wake(&__thread_list_lock, 1, 0);
+}
 
 _Noreturn void __pthread_exit(void *result)
 {
@@ -40,24 +75,23 @@ _Noreturn void __pthread_exit(void *result)
 	 * joinable threads it's a valid usage that must be handled. */
 	LOCK(self->killlock);
 
-	/* Block all signals before decrementing the live thread count.
-	 * This is important to ensure that dynamically allocated TLS
-	 * is not under-allocated/over-committed, and possibly for other
-	 * reasons as well. */
-	__block_all_sigs(&set);
-
-	/* It's impossible to determine whether this is "the last thread"
-	 * until performing the atomic decrement, since multiple threads
-	 * could exit at the same time. For the last thread, revert the
-	 * decrement, restore the tid, and unblock signals to give the
-	 * atexit handlers and stdio cleanup code a consistent state. */
-	if (a_fetch_add(&libc.threads_minus_1, -1)==0) {
-		libc.threads_minus_1 = 0;
-		UNLOCK(self->killlock);
+	/* The thread list lock must be AS-safe, and thus requires
+	 * application signals to be blocked before it can be taken. */
+	__block_app_sigs(&set);
+	__tl_lock();
+
+	/* If this is the only thread in the list, don't proceed with
+	 * termination of the thread, but restore the previous lock and
+	 * signal state to prepare for exit to call atexit handlers. */
+	if (self->next == self) {
+		__tl_unlock();
 		__restore_sigs(&set);
+		UNLOCK(self->killlock);
 		exit(0);
 	}
 
+	/* At this point we are committed to thread termination. */
+
 	/* Process robust list in userspace to handle non-pshared mutexes
 	 * and the detached thread case where the robust list head will
 	 * be invalid when the kernel would process it. */
@@ -80,19 +114,25 @@ _Noreturn void __pthread_exit(void *result)
 	__do_orphaned_stdio_locks();
 	__dl_thread_cleanup();
 
+	/* Last, unlink thread from the list. This change will not be visible
+	 * until the lock is released, which only happens after SYS_exit
+	 * has been called, via the exit futex address pointing at the lock.
+	 * This needs to happen after any possible calls to LOCK() that might
+	 * skip locking if process appears single-threaded. */
+	if (!--libc.threads_minus_1) libc.need_locks = -1;
+	self->next->prev = self->prev;
+	self->prev->next = self->next;
+	self->prev = self->next = self;
+
 	/* This atomic potentially competes with a concurrent pthread_detach
 	 * call; the loser is responsible for freeing thread resources. */
 	int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);
 
-	if (state>=DT_DETACHED && self->map_base) {
-		/* Detached threads must avoid the kernel clear_child_tid
-		 * feature, since the virtual address will have been
-		 * unmapped and possibly already reused by a new mapping
-		 * at the time the kernel would perform the write. In
-		 * the case of threads that started out detached, the
-		 * initial clone flags are correct, but if the thread was
-		 * detached later, we need to clear it here. */
-		if (state == DT_DYNAMIC) __syscall(SYS_set_tid_address, 0);
+	if (state==DT_DETACHED && self->map_base) {
+		/* Detached threads must block even implementation-internal
+		 * signals, since they will not have a stack in their last
+		 * moments of existence. */
+		__block_all_sigs(&set);
 
 		/* Robust list will no longer be valid, and was already
 		 * processed above, so unregister it with the kernel. */
@@ -108,6 +148,9 @@ _Noreturn void __pthread_exit(void *result)
 		__unmapself(self->map_base, self->map_size);
 	}
 
+	/* Wake any joiner. */
+	__wake(&self->detach_state, 1, 1);
+
 	/* After the kernel thread exits, its tid may be reused. Clear it
 	 * to prevent inadvertent use and inform functions that would use
 	 * it that it's no longer available. */
@@ -132,23 +175,20 @@ void __do_cleanup_pop(struct __ptcb *cb)
 struct start_args {
 	void *(*start_func)(void *);
 	void *start_arg;
-	pthread_attr_t *attr;
-	volatile int *perr;
+	volatile int control;
 	unsigned long sig_mask[_NSIG/8/sizeof(long)];
 };
 
 static int start(void *p)
 {
 	struct start_args *args = p;
-	if (args->attr) {
-		pthread_t self = __pthread_self();
-		int ret = -__syscall(SYS_sched_setscheduler, self->tid,
-			args->attr->_a_policy, &args->attr->_a_prio);
-		if (a_swap(args->perr, ret)==-2)
-			__wake(args->perr, 1, 1);
-		if (ret) {
-			self->detach_state = DT_DYNAMIC;
-			__pthread_exit(0);
+	int state = args->control;
+	if (state) {
+		if (a_cas(&args->control, 1, 2)==1)
+			__wait(&args->control, 0, 2, 1);
+		if (args->control) {
+			__syscall(SYS_set_tid_address, &args->control);
+			for (;;) __syscall(SYS_exit, 0);
 		}
 	}
 	__syscall(SYS_rt_sigprocmask, SIG_SETMASK, &args->sig_mask, 0, _NSIG/8);
@@ -172,8 +212,6 @@ weak_alias(dummy, __pthread_tsd_size);
 static void *dummy_tsd[1] = { 0 };
 weak_alias(dummy_tsd, __pthread_tsd_main);
 
-volatile int __block_new_threads = 0;
-
 static FILE *volatile dummy_file = 0;
 weak_alias(dummy_file, __stdin_used);
 weak_alias(dummy_file, __stdout_used);
@@ -195,7 +233,6 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
 		| CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID | CLONE_DETACHED;
 	pthread_attr_t attr = { 0 };
 	sigset_t set;
-	volatile int err = -1;
 
 	if (!libc.can_do_threads) return ENOSYS;
 	self = __pthread_self();
@@ -208,6 +245,7 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
 		init_file_lock(__stderr_used);
 		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, _NSIG/8);
 		self->tsd = (void **)__pthread_tsd_main;
+		__membarrier_init();
 		libc.threaded = 1;
 	}
 	if (attrp && !c11) attr = *attrp;
@@ -218,8 +256,6 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
 		attr._a_guardsize = __default_guardsize;
 	}
 
-	if (__block_new_threads) __wait(&__block_new_threads, 0, 1, 1);
-
 	if (attr._a_stackaddr) {
 		size_t need = libc.tls_size + __pthread_tsd_size;
 		size = attr._a_stacksize;
@@ -273,27 +309,26 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
 	new->locale = &libc.global_locale;
 	if (attr._a_detach) {
 		new->detach_state = DT_DETACHED;
-		flags -= CLONE_CHILD_CLEARTID;
 	} else {
 		new->detach_state = DT_JOINABLE;
 	}
 	new->robust_list.head = &new->robust_list.head;
 	new->CANARY = self->CANARY;
+	new->sysinfo = self->sysinfo;
 
-	/* Setup argument structure for the new thread on its stack. */
+	/* Setup argument structure for the new thread on its stack.
+	 * It's safe to access from the caller only until the thread
+	 * list is unlocked. */
 	stack -= (uintptr_t)stack % sizeof(uintptr_t);
 	stack -= sizeof(struct start_args);
 	struct start_args *args = (void *)stack;
 	args->start_func = entry;
 	args->start_arg = arg;
-	if (attr._a_sched) {
-		args->attr = &attr;
-		args->perr = &err;
-	} else {
-		args->attr = 0;
-		args->perr = 0;
-	}
+	args->control = attr._a_sched ? 1 : 0;
 
+	/* Application signals (but not the synccall signal) must be
+	 * blocked before the thread list lock can be taken, to ensure
+	 * that the lock is AS-safe. */
 	__block_app_sigs(&set);
 
 	/* Ensure SIGCANCEL is unblocked in new thread. This requires
@@ -303,23 +338,40 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
 	args->sig_mask[(SIGCANCEL-1)/8/sizeof(long)] &=
 		~(1UL<<((SIGCANCEL-1)%(8*sizeof(long))));
 
-	a_inc(&libc.threads_minus_1);
-	ret = __clone((c11 ? start_c11 : start), stack, flags, args, &new->tid, TP_ADJ(new), &new->detach_state);
+	__tl_lock();
+	if (!libc.threads_minus_1++) libc.need_locks = 1;
+	ret = __clone((c11 ? start_c11 : start), stack, flags, args, &new->tid, TP_ADJ(new), &__thread_list_lock);
 
+	/* All clone failures translate to EAGAIN. If explicit scheduling
+	 * was requested, attempt it before unlocking the thread list so
+	 * that the failed thread is never exposed and so that we can
+	 * clean up all transient resource usage before returning. */
+	if (ret < 0) {
+		ret = -EAGAIN;
+	} else if (attr._a_sched) {
+		ret = __syscall(SYS_sched_setscheduler,
+			new->tid, attr._a_policy, &attr._a_prio);
+		if (a_swap(&args->control, ret ? 3 : 0)==2)
+			__wake(&args->control, 1, 1);
+		if (ret)
+			__wait(&args->control, 0, 3, 0);
+	}
+
+	if (ret >= 0) {
+		new->next = self->next;
+		new->prev = self;
+		new->next->prev = new;
+		new->prev->next = new;
+	} else {
+		if (!--libc.threads_minus_1) libc.need_locks = 0;
+	}
+	__tl_unlock();
 	__restore_sigs(&set);
 	__release_ptc();
 
 	if (ret < 0) {
-		a_dec(&libc.threads_minus_1);
 		if (map) __munmap(map, size);
-		return EAGAIN;
-	}
-
-	if (attr._a_sched) {
-		if (a_cas(&err, -1, -2)==-1)
-			__wait(&err, 0, -2, 1);
-		ret = err;
-		if (ret) return ret;
+		return -ret;
 	}
 
 	*res = new;
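
The two new protocols introduced by this patch are easier to study outside
the musl tree. The sketches below are reading aids, not musl source: they
substitute raw Linux futex syscalls and GCC atomic builtins for musl's
internal a_cas/a_swap/a_store/__wait/__wake primitives, and every demo_*
name is invented for this illustration.

The __tl_lock/__tl_unlock pair is a reentrant lock keyed on the owner's
tid. Note that __clone is now passed &__thread_list_lock as its ctid
argument instead of &new->detach_state, so with CLONE_CHILD_CLEARTID the
kernel zeroes and futex-wakes the lock word only after the exiting thread
is truly gone; that is what lets __pthread_exit unlink itself from the
list before SYS_exit without the change becoming visible too early. A
user-space model of just the locking part, assuming Linux and GCC/Clang
atomics:

	#define _GNU_SOURCE
	#include <linux/futex.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int demo_lock;          /* 0 when free, else the owner's tid */
	static int demo_lock_count;    /* extra acquisitions by the owner */
	static int demo_lock_waiters;  /* threads sleeping on the lock word */

	static void demo_futex_wait(volatile int *addr, int val)
	{
		syscall(SYS_futex, addr, FUTEX_WAIT, val, 0, 0, 0);
	}

	static void demo_futex_wake(volatile int *addr)
	{
		syscall(SYS_futex, addr, FUTEX_WAKE, 1, 0, 0, 0);
	}

	void demo_tl_lock(void)
	{
		int tid = (int)syscall(SYS_gettid);
		if (__atomic_load_n(&demo_lock, __ATOMIC_RELAXED) == tid) {
			demo_lock_count++;   /* reentrant acquire by the owner */
			return;
		}
		for (;;) {
			int expect = 0;
			if (__atomic_compare_exchange_n(&demo_lock, &expect,
					tid, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
				return;
			/* Held by another thread: advertise the waiter, sleep
			 * until the word changes from the value we saw, retry. */
			__atomic_add_fetch(&demo_lock_waiters, 1, __ATOMIC_RELAXED);
			demo_futex_wait(&demo_lock, expect);
			__atomic_sub_fetch(&demo_lock_waiters, 1, __ATOMIC_RELAXED);
		}
	}

	void demo_tl_unlock(void)
	{
		if (demo_lock_count) {       /* still held recursively */
			demo_lock_count--;
			return;
		}
		__atomic_store_n(&demo_lock, 0, __ATOMIC_RELEASE);
		if (__atomic_load_n(&demo_lock_waiters, __ATOMIC_RELAXED))
			demo_futex_wake(&demo_lock);
	}

The start_args control word is a small state machine replacing the old
attr/perr handshake: 0 means run user code (no scheduling pending, or it
succeeded); 1 means the parent still has SYS_sched_setscheduler to apply;
2 means the child observed 1 and parked on the futex; 3 means scheduling
failed and the child must exit without ever running user code. On failure
the parent additionally waits for the word to leave state 3, which the
dying child arranges by pointing its ctid at the word via
SYS_set_tid_address, so the kernel clears and wakes it only once the
child, and hence args on its stack, is finished. A model of both sides,
under the same assumptions and reusing the demo futex helpers above:

	/* Child side, cf. start() in the patch: returns nonzero if the
	 * thread must bail out instead of calling the user start function. */
	int demo_child_gate(volatile int *control)
	{
		if (*control) {
			int expect = 1;
			/* 1 -> 2 announces that we park; if the verdict is
			 * already published, the CAS fails and we skip the sleep. */
			if (__atomic_compare_exchange_n(control, &expect, 2,
					0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE))
				while (*control == 2)
					demo_futex_wait(control, 2);
			if (*control) return -1;   /* state 3: abort startup */
		}
		return 0;
	}

	/* Parent side, cf. __pthread_create after a successful __clone;
	 * err is the SYS_sched_setscheduler result. */
	void demo_parent_publish(volatile int *control, int err)
	{
		/* Publish 3 (failure) or 0 (success); wake the child if it
		 * had already parked (previous value 2). */
		if (__atomic_exchange_n(control, err ? 3 : 0,
				__ATOMIC_ACQ_REL) == 2)
			demo_futex_wake(control);
	}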