+ /* It's impossible to determine whether this is "the last thread"
+ * until performing the atomic decrement, since multiple threads
+ * could exit at the same time. For the last thread, revert the
+ * decrement, restore the tid, and unblock signals to give the
+ * atexit handlers and stdio cleanup code a consistent state. */
+ if (a_fetch_add(&libc.threads_minus_1, -1)==0) {
+ /* a_fetch_add returns the PRE-decrement value; 0 means this was
+ * the only remaining thread. Undo the decrement (it is now -1)
+ * and take the whole-process exit path instead. */
+ libc.threads_minus_1 = 0;
+ UNLOCK(self->killlock);
+ __restore_sigs(&set);
+ exit(0);
+ }
+
+ /* Process robust list in userspace to handle non-pshared mutexes
+ * and the detached thread case where the robust list head will
+ * be invalid when the kernel would process it. */
+ __vm_lock();
+ volatile void *volatile *rp;
+ /* Walk the intrusive circular list of robust mutexes held by this
+ * thread; reaching the address of the list head terminates it. */
+ while ((rp=self->robust_list.head) && rp != &self->robust_list.head) {
+ /* Recover the containing mutex object from its embedded
+ * _m_next link node. */
+ pthread_mutex_t *m = (void *)((char *)rp
+ - offsetof(pthread_mutex_t, _m_next));
+ int waiters = m->_m_waiters;
+ /* Bit 128 of _m_type marks a process-shared mutex; priv is
+ * nonzero (private-futex wake) exactly when that bit is clear. */
+ int priv = (m->_m_type & 128) ^ 128;
+ /* Record the entry as pending before unlinking so the state is
+ * recoverable if this thread dies mid-removal, then unlink. */
+ self->robust_list.pending = rp;
+ self->robust_list.head = *rp;
+ /* NOTE(review): 0x40000000 appears to be the owner-died marker
+ * consumed by the mutex lock path — confirm against the mutex
+ * implementation. A negative prior lock value (top bit set)
+ * indicates contention requiring a wake. */
+ int cont = a_swap(&m->_m_lock, 0x40000000);
+ self->robust_list.pending = 0;
+ if (cont < 0 || waiters)
+ __wake(&m->_m_lock, 1, priv);
+ }
+ __vm_unlock();
+
+ /* Release any stdio FILE locks this thread still holds, and let
+ * the dynamic linker free per-thread resources (e.g. deferred
+ * TLS/dlerror buffers). */
+ __do_orphaned_stdio_locks();
+ __dl_thread_cleanup();
+
+ /* This atomic potentially competes with a concurrent pthread_detach
+ * call; the loser is responsible for freeing thread resources. */
+ int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);