+ /* Access to target the exiting thread with syscalls that use
+ * its kernel tid is controlled by killlock. For detached threads,
+ * any use past this point would have undefined behavior, but for
+ * joinable threads it's a valid usage that must be handled. */
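+ /* (For example, pthread_kill holds the target's killlock around
+ * its thread-directed kill syscall, so the tid it reads cannot be
+ * reaped and reused for an unrelated thread mid-call.) */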
+ LOCK(self->killlock);
+
+ /* Block all signals before decrementing the live thread count.
+ * This is important to ensure that dynamically allocated TLS
+ * is not under-allocated/over-committed, and possibly for other
+ * reasons as well. */
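+ /* (One such reason: __lock elides its atomics entirely while
+ * libc.threads_minus_1 is zero, so a signal handler must not be
+ * able to run across an inconsistent count.) */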
+ __block_all_sigs(&set);
+
+ /* It's impossible to determine whether this is "the last thread"
+ * until performing the atomic decrement, since multiple threads
+ * could exit at the same time. For the last thread, revert the
+ * decrement, release the killlock, and unblock signals to give the
+ * atexit handlers and stdio cleanup code a consistent state. */
+ if (a_fetch_add(&libc.threads_minus_1, -1)==0) {
+ libc.threads_minus_1 = 0;
+ UNLOCK(self->killlock);
+ __restore_sigs(&set);
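+ /* POSIX specifies that the process exits with status 0 after
+ * the last thread terminates. */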
+ exit(0);
+ }
+
+ /* Process robust list in userspace to handle non-pshared mutexes
+ * and the detached thread case where the robust list head will
+ * be invalid when the kernel would process it. */
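+ /* Holding the vm lock forces munmap and mremap, which call
+ * __vm_wait, to block until the loop below is done writing to
+ * what may be process-shared mutex memory. */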
+ __vm_lock();
+ volatile void *volatile *rp;
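+ /* The robust list is circular: head points to the _m_next field
+ * of the most recently acquired robust mutex, and the final link
+ * points back at the head itself, which is the loop's exit test.
+ * The volatile qualifiers reflect that the links live in memory
+ * which may be shared and modified concurrently. */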
+ while ((rp=self->robust_list.head) && rp != &self->robust_list.head) {
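+ /* Recover the containing mutex from its embedded link field,
+ * a container_of computation over _m_next. */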
+ pthread_mutex_t *m = (void *)((char *)rp
+ - offsetof(pthread_mutex_t, _m_next));
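+ /* Snapshot the waiter count and futex privacy before unlinking:
+ * bit 128 of _m_type marks a process-shared mutex, so priv is
+ * nonzero exactly when a private-futex wake is safe. */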
+ int waiters = m->_m_waiters;
+ int priv = (m->_m_type & 128) ^ 128;
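+ /* Mirror the kernel's robust-list protocol: publish the entry in
+ * pending before unlinking it, so that if this thread dies between
+ * the unlink and the lock-word update, the kernel's own walk still
+ * finds the mutex via the list_op_pending slot. */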
+ self->robust_list.pending = rp;
+ self->robust_list.head = *rp;
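+ /* 0x40000000 is the owner-died marker; the next locker observes
+ * it and acquires the mutex with EOWNERDEAD. */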
+ int cont = a_swap(&m->_m_lock, 0x40000000);
+ self->robust_list.pending = 0;
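+ /* A negative previous value means the waiters flag (sign bit) was
+ * set in the lock word, so a wake is needed even if no waiter had
+ * registered in _m_waiters yet. */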
+ if (cont < 0 || waiters)
+ __wake(&m->_m_lock, 1, priv);
+ }
+ __vm_unlock();