diff --git a/src/thread/pthread_cond_timedwait.c b/src/thread/pthread_cond_timedwait.c
index c5cf66c..6b76145 100644
--- a/src/thread/pthread_cond_timedwait.c
+++ b/src/thread/pthread_cond_timedwait.c
 
 struct waiter {
        struct waiter *prev, *next;
-       int state, barrier, mutex_ret;
-       int *notify;
-       pthread_mutex_t *mutex;
-       pthread_cond_t *cond;
-       int shared;
+       volatile int state, barrier;
+       volatile int *notify;
 };
 
 /* Self-synchronized-destruction-safe lock functions */
@@ -52,7 +49,7 @@ static inline void unlock_requeue(volatile int *l, volatile int *r, int w)
 {
        a_store(l, 0);
        if (w) __wake(l, 1, 1);
-       else __syscall(SYS_futex, l, FUTEX_REQUEUE|128, 0, 1, r) != -EINVAL
+       else __syscall(SYS_futex, l, FUTEX_REQUEUE|FUTEX_PRIVATE, 0, 1, r) != -ENOSYS
                || __syscall(SYS_futex, l, FUTEX_REQUEUE, 0, 1, r);
 }
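
The hunk above makes the requeue path try the private-futex form of
FUTEX_REQUEUE first and fall back to the shared form only on -ENOSYS,
the error the kernel reports for an unsupported futex operation. The
old test against -EINVAL was unreliable, since EINVAL is also returned
for argument problems unrelated to missing private-futex support. Below
is a minimal standalone sketch of the same try-private-then-fall-back
pattern using the public syscall(2) interface, where failure is
reported via errno rather than a negative return; the function and
variable names are illustrative, not musl's:

#include <errno.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static void requeue_one(int *lockword, int *mutexword)
{
	/* Wake no waiters (val=0) and move at most one waiter (val2=1)
	 * from lockword's wait queue onto mutexword's wait queue. */
	if (syscall(SYS_futex, lockword, FUTEX_REQUEUE | FUTEX_PRIVATE_FLAG,
	            0, 1, mutexword, 0) == -1 && errno == ENOSYS)
		syscall(SYS_futex, lockword, FUTEX_REQUEUE, 0, 1, mutexword, 0);
}

Kernels without private-futex support reject the flagged operation
outright, so the fallback runs at most once per call and modern kernels
never take it.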
 
@@ -62,20 +59,60 @@ enum {
        LEAVING,
 };
 
-static void unwait(void *arg)
+int __pthread_cond_timedwait(pthread_cond_t *restrict c, pthread_mutex_t *restrict m, const struct timespec *restrict ts)
 {
-       struct waiter *node = arg;
+       struct waiter node = { 0 };
+       int e, seq, clock = c->_c_clock, cs, shared=0, oldstate, tmp;
+       volatile int *fut;
 
-       if (node->shared) {
-               pthread_cond_t *c = node->cond;
-               pthread_mutex_t *m = node->mutex;
+       if ((m->_m_type&15) && (m->_m_lock&INT_MAX) != __pthread_self()->tid)
+               return EPERM;
+
+       if (ts && ts->tv_nsec >= 1000000000UL)
+               return EINVAL;
+
+       __pthread_testcancel();
+
+       if (c->_c_shared) {
+               shared = 1;
+               fut = &c->_c_seq;
+               seq = c->_c_seq;
+               a_inc(&c->_c_waiters);
+       } else {
+               lock(&c->_c_lock);
+
+               seq = node.barrier = 2;
+               fut = &node.barrier;
+               node.state = WAITING;
+               node.next = c->_c_head;
+               c->_c_head = &node;
+               if (!c->_c_tail) c->_c_tail = &node;
+               else node.next->prev = &node;
+
+               unlock(&c->_c_lock);
+       }
+
+       __pthread_mutex_unlock(m);
+
+       __pthread_setcancelstate(PTHREAD_CANCEL_MASKED, &cs);
+       if (cs == PTHREAD_CANCEL_DISABLE) __pthread_setcancelstate(cs, 0);
+
+       do e = __timedwait_cp(fut, seq, clock, ts, !shared);
+       while (*fut==seq && (!e || e==EINTR));
+       if (e == EINTR) e = 0;
+
+       if (shared) {
+               /* Suppress cancellation if a signal was potentially
+                * consumed; this is a legitimate form of spurious
+                * wake even if not. */
+               if (e == ECANCELED && c->_c_seq != seq) e = 0;
                if (a_fetch_add(&c->_c_waiters, -1) == -0x7fffffff)
                        __wake(&c->_c_waiters, 1, 0);
-               node->mutex_ret = pthread_mutex_lock(m);
-               return;
+               oldstate = WAITING;
+               goto relock;
        }
 
-       int oldstate = a_cas(&node->state, WAITING, LEAVING);
+       oldstate = a_cas(&node.state, WAITING, LEAVING);
 
        if (oldstate == WAITING) {
                /* Access to cv object is valid because this waiter was not
@@ -83,89 +120,64 @@ static void unwait(void *arg)
                 * after seeing a LEAVING waiter without getting notified
                 * via the futex notify below. */
 
-               pthread_cond_t *c = node->cond;
                lock(&c->_c_lock);
                
-               if (c->_c_head == node) c->_c_head = node->next;
-               else if (node->prev) node->prev->next = node->next;
-               if (c->_c_tail == node) c->_c_tail = node->prev;
-               else if (node->next) node->next->prev = node->prev;
+               if (c->_c_head == &node) c->_c_head = node.next;
+               else if (node.prev) node.prev->next = node.next;
+               if (c->_c_tail == &node) c->_c_tail = node.prev;
+               else if (node.next) node.next->prev = node.prev;
                
                unlock(&c->_c_lock);
 
-               if (node->notify) {
-                       if (a_fetch_add(node->notify, -1)==1)
-                               __wake(node->notify, 1, 1);
+               if (node.notify) {
+                       if (a_fetch_add(node.notify, -1)==1)
+                               __wake(node.notify, 1, 1);
                }
        } else {
                /* Lock barrier first to control wake order. */
-               lock(&node->barrier);
+               lock(&node.barrier);
        }
 
-       node->mutex_ret = pthread_mutex_lock(node->mutex);
+relock:
+       /* Errors locking the mutex override any existing error or
+        * cancellation, since the caller must see them to know the
+        * state of the mutex. */
+       if ((tmp = pthread_mutex_lock(m))) e = tmp;
 
-       if (oldstate == WAITING) return;
+       if (oldstate == WAITING) goto done;
 
-       if (!node->next) a_inc(&node->mutex->_m_waiters);
+       if (!node.next && !(m->_m_type & 8))
+               a_inc(&m->_m_waiters);
 
        /* Unlock the barrier that's holding back the next waiter, and
         * either wake it or requeue it to the mutex. */
-       if (node->prev) {
-               unlock_requeue(&node->prev->barrier,
-                       &node->mutex->_m_lock,
-                       node->mutex->_m_type & 128);
-       } else {
-               a_dec(&node->mutex->_m_waiters);
+       if (node.prev) {
+               int val = m->_m_lock;
+               if (val>0) a_cas(&m->_m_lock, val, val|0x80000000);
+               unlock_requeue(&node.prev->barrier, &m->_m_lock, m->_m_type & (8|128));
+       } else if (!(m->_m_type & 8)) {
+               a_dec(&m->_m_waiters);          
        }
-}
-
-int pthread_cond_timedwait(pthread_cond_t *restrict c, pthread_mutex_t *restrict m, const struct timespec *restrict ts)
-{
-       struct waiter node = { .cond = c, .mutex = m };
-       int e, seq, *fut, clock = c->_c_clock;
-
-       if ((m->_m_type&15) && (m->_m_lock&INT_MAX) != __pthread_self()->tid)
-               return EPERM;
 
-       if (ts && ts->tv_nsec >= 1000000000UL)
-               return EINVAL;
-
-       pthread_testcancel();
-
-       if (c->_c_shared) {
-               node.shared = 1;
-               fut = &c->_c_seq;
-               seq = c->_c_seq;
-               a_inc(&c->_c_waiters);
-       } else {
-               lock(&c->_c_lock);
+       /* Since a signal was consumed, cancellation is not permitted. */
+       if (e == ECANCELED) e = 0;
 
-               seq = node.barrier = 2;
-               fut = &node.barrier;
-               node.state = WAITING;
-               node.next = c->_c_head;
-               c->_c_head = &node;
-               if (!c->_c_tail) c->_c_tail = &node;
-               else node.next->prev = &node;
+done:
+       __pthread_setcancelstate(cs, 0);
 
-               unlock(&c->_c_lock);
+       if (e == ECANCELED) {
+               __pthread_testcancel();
+               __pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, 0);
        }
 
-       pthread_mutex_unlock(m);
-
-       do e = __timedwait(fut, seq, clock, ts, unwait, &node, !node.shared);
-       while (*fut==seq && (!e || e==EINTR));
-       if (e == EINTR) e = 0;
-
-       unwait(&node);
-
-       return node.mutex_ret ? node.mutex_ret : e;
+       return e;
 }
 
 int __private_cond_signal(pthread_cond_t *c, int n)
 {
        struct waiter *p, *first=0;
-       int ref = 0, cur;
+       volatile int ref = 0;
+       int cur;
 
        lock(&c->_c_lock);
        for (p=c->_c_tail; n && p; p=p->prev) {
@@ -197,3 +209,5 @@ int __private_cond_signal(pthread_cond_t *c, int n)
 
        return 0;
 }
+
+weak_alias(__pthread_cond_timedwait, pthread_cond_timedwait);
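
With this change the function is defined as __pthread_cond_timedwait
and the public name is provided by the weak alias above, the usual musl
idiom that lets other parts of libc call it without going through an
application-interposable public symbol. The call-site contract is
unchanged; here is a caller-side sketch of the standard wait-loop
pattern, with hypothetical names and an arbitrary 5-second timeout:

#include <errno.h>
#include <pthread.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ready;	/* predicate protected by lock */

/* Wait up to 5 seconds for ready to become nonzero.
 * Returns 0 on success or ETIMEDOUT on timeout. */
static int wait_ready(void)
{
	struct timespec ts;
	int r = 0;
	clock_gettime(CLOCK_REALTIME, &ts);	/* default cond clock */
	ts.tv_sec += 5;
	pthread_mutex_lock(&lock);
	/* Re-check the predicate on every wakeup: spurious wakes are
	 * permitted, and the mutex is always reacquired on return. */
	while (!ready && r != ETIMEDOUT)
		r = pthread_cond_timedwait(&cond, &lock, &ts);
	if (ready) r = 0;
	pthread_mutex_unlock(&lock);
	return r;
}

As the first two checks in the new function body show, the call fails
with EPERM when an error-checking or recursive mutex is not owned by
the caller and with EINVAL for an out-of-range tv_nsec; on every other
path the mutex is reacquired before returning, which is why an error
from relocking it overrides any pending timeout or cancellation status.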