X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=src%2Fthread%2Fpthread_create.c;h=10f1b7d8cd39605d42dfea19370c88b9183e6036;hb=4554f155dd23a65fcdfd39f1d5af8af55ba37694;hp=5f491092597eca1fe5554357cead7b1b73192be3;hpb=f5eee489f7662b08ad1bba4b1267e34eb9565bba;p=musl

diff --git a/src/thread/pthread_create.c b/src/thread/pthread_create.c
index 5f491092..10f1b7d8 100644
--- a/src/thread/pthread_create.c
+++ b/src/thread/pthread_create.c
@@ -72,12 +72,13 @@ _Noreturn void __pthread_exit(void *result)
 	/* Access to target the exiting thread with syscalls that use
 	 * its kernel tid is controlled by killlock. For detached threads,
 	 * any use past this point would have undefined behavior, but for
-	 * joinable threads it's a valid usage that must be handled. */
+	 * joinable threads it's a valid usage that must be handled.
+	 * Signals must be blocked since pthread_kill must be AS-safe. */
+	__block_app_sigs(&set);
 	LOCK(self->killlock);
 
-	/* The thread list lock must be AS-safe, and thus requires
-	 * application signals to be blocked before it can be taken. */
-	__block_app_sigs(&set);
+	/* The thread list lock must be AS-safe, and thus depends on
+	 * application signals being blocked above. */
 	__tl_lock();
 
 	/* If this is the only thread in the list, don't proceed with
@@ -85,19 +86,12 @@ _Noreturn void __pthread_exit(void *result)
 	 * signal state to prepare for exit to call atexit handlers. */
 	if (self->next == self) {
 		__tl_unlock();
-		__restore_sigs(&set);
 		UNLOCK(self->killlock);
+		__restore_sigs(&set);
 		exit(0);
 	}
 
-	/* At this point we are committed to thread termination. Unlink
-	 * the thread from the list. This change will not be visible
-	 * until the lock is released, which only happens after SYS_exit
-	 * has been called, via the exit futex address pointing at the lock. */
-	libc.threads_minus_1--;
-	self->next->prev = self->prev;
-	self->prev->next = self->next;
-	self->prev = self->next = self;
+	/* At this point we are committed to thread termination. */
 
 	/* Process robust list in userspace to handle non-pshared mutexes
 	 * and the detached thread case where the robust list head will
@@ -121,6 +115,16 @@ _Noreturn void __pthread_exit(void *result)
 	__do_orphaned_stdio_locks();
 	__dl_thread_cleanup();
 
+	/* Last, unlink thread from the list. This change will not be visible
+	 * until the lock is released, which only happens after SYS_exit
+	 * has been called, via the exit futex address pointing at the lock.
+	 * This needs to happen after any possible calls to LOCK() that might
+	 * skip locking if process appears single-threaded. */
+	if (!--libc.threads_minus_1) libc.need_locks = -1;
+	self->next->prev = self->prev;
+	self->prev->next = self->next;
+	self->prev = self->next = self;
+
 	/* This atomic potentially competes with a concurrent pthread_detach
 	 * call; the loser is responsible for freeing thread resources. */
 	int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);
@@ -336,7 +340,7 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
 		~(1UL<<((SIGCANCEL-1)%(8*sizeof(long))));
 
 	__tl_lock();
-	libc.threads_minus_1++;
+	if (!libc.threads_minus_1++) libc.need_locks = 1;
 	ret = __clone((c11 ? start_c11 : start), stack, flags, args, &new->tid, TP_ADJ(new), &__thread_list_lock);
 
 	/* All clone failures translate to EAGAIN. If explicit scheduling
@@ -360,7 +364,7 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
 		new->next->prev = new;
 		new->prev->next = new;
 	} else {
-		libc.threads_minus_1--;
+		if (!--libc.threads_minus_1) libc.need_locks = 0;
 	}
 	__tl_unlock();
 	__restore_sigs(&set);
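
Note on the need_locks protocol introduced by this diff: as the added comments describe it, the flag lets libc-internal LOCK()/UNLOCK() pairs skip their atomics while the process is single-threaded (0), forces real locking while multi-threaded (1), and, when the last other thread exits, is set to -1 so that one more real lock acquisition happens before skipping resumes; my reading is that this extra acquisition is what synchronizes the surviving thread with the exiting thread's final writes. The snippet below is a minimal, self-contained sketch of a lock consuming such a flag. It is not musl's actual __lock/__unlock code, and the names need_locks_flag, sketch_lock, and sketch_unlock are illustrative only.

#include <stdatomic.h>

/* Stand-in for libc.need_locks (illustrative, not musl's definition):
 *   0  - process appears single-threaded; locking may be skipped
 *   1  - multi-threaded; locking is required
 *  -1  - last other thread has exited; take the lock for real one more
 *        time, then fall back to skipping. */
static _Atomic int need_locks_flag;

static void sketch_lock(_Atomic int *l)
{
	int nl = atomic_load_explicit(&need_locks_flag, memory_order_relaxed);
	if (!nl) return; /* single-threaded: skip the atomic entirely */
	while (atomic_exchange_explicit(l, 1, memory_order_acquire))
		; /* spin; a real implementation would futex-wait instead */
	if (nl < 0) /* other thread is gone; resume lock-skipping from here */
		atomic_store_explicit(&need_locks_flag, 0, memory_order_relaxed);
}

static void sketch_unlock(_Atomic int *l)
{
	/* Harmless if the matching lock was skipped: *l was already 0.
	 * Relies on the invariant from the diff that the flag only becomes
	 * nonzero via pthread_create in the sole remaining thread. */
	atomic_store_explicit(l, 0, memory_order_release);
}

Under these assumptions, the ordering constraint stated in the diff also falls out naturally: the exiting thread must perform its last LOCK()-protected work (robust list, stdio locks, dynamic-linker cleanup) before decrementing threads_minus_1 and setting the flag to -1, since any LOCK() taken after the process "appears single-threaded" would otherwise be skipped while another thread could still observe the shared state.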