1 #include "pthread_impl.h"
3 void __pthread_testcancel(void);
4 int __pthread_mutex_lock(pthread_mutex_t *);
5 int __pthread_mutex_unlock(pthread_mutex_t *);
6 int __pthread_setcancelstate(int, int *);
11 * Waiter objects have automatic storage on the waiting thread, and
12 * are used in building a linked list representing waiters currently
13 * waiting on the condition variable or a group of waiters woken
14 * together by a broadcast or signal; in the case of signal, this is a
15 * degenerate list of one member.
17 * Waiter lists attached to the condition variable itself are
18 * protected by the lock on the cv. Detached waiter lists are never
19 * modified again, but can only be traversed in reverse order, and are
20 * protected by the "barrier" locks in each node, which are unlocked
21 * in turn to control wake order.
23 * Since process-shared cond var semantics do not necessarily allow
24 * one thread to see another's automatic storage (they may be in
25 * different processes), the waiter list is not used for the
26 * process-shared case, but the structure is still used to store data
27 * needed by the cancellation cleanup handler.
	/* Doubly-linked list links: neighbors in the cv's waiter list, or
	 * in a detached group of waiters woken together. NOTE(review):
	 * the rest of the struct (state, barrier, notify fields used
	 * below) is not visible in this excerpt. */
	struct waiter *prev, *next;
36 /* Self-synchronized-destruction-safe lock functions */
/* Acquire a self-synchronized-destruction-safe lock: the contended
 * slow path stores 2 (contended) so unlock() knows a futex wake is
 * needed, and futex-waits whenever the value is nonzero.
 * NOTE(review): upstream also has a fast-path a_cas(l, 0, 1) and the
 * enclosing braces, which are missing from this excerpt — confirm
 * against the full source. */
static inline void lock(volatile int *l)
	do __wait(l, 0, 2, 1);
	while (a_cas(l, 0, 2));
/* Release a lock taken by lock(). NOTE(review): the body is not
 * visible in this excerpt; upstream swaps in 0 and wakes one futex
 * waiter when the old value showed contention (2). */
static inline void unlock(volatile int *l)
/* Release barrier lock l and either wake one of its futex waiters
 * (w nonzero) or requeue one waiter from l's futex queue onto the
 * mutex futex r, so the woken thread contends for the mutex directly.
 * The private-futex form (|128) is tried first; plain FUTEX_REQUEUE
 * is the fallback for kernels reporting ENOSYS.
 * NOTE(review): the a_store(l, 0) release and the enclosing braces
 * are not visible in this excerpt — confirm against the full source. */
static inline void unlock_requeue(volatile int *l, volatile int *r, int w)
	if (w) __wake(l, 1, 1);
	else __syscall(SYS_futex, l, FUTEX_REQUEUE|128, 0, 1, r) != -ENOSYS
		|| __syscall(SYS_futex, l, FUTEX_REQUEUE, 0, 1, r);
/* No-op cleanup callback passed to __timedwait below. */
static void dummy(void *arg)
71 int __pthread_cond_timedwait(pthread_cond_t *restrict c, pthread_mutex_t *restrict m, const struct timespec *restrict ts)
73 struct waiter node = { 0 };
74 int e, seq, *fut, clock = c->_c_clock, cs, shared=0, oldstate, tmp;
76 if ((m->_m_type&15) && (m->_m_lock&INT_MAX) != __pthread_self()->tid)
79 if (ts && ts->tv_nsec >= 1000000000UL)
82 __pthread_testcancel();
88 a_inc(&c->_c_waiters);
92 seq = node.barrier = 2;
95 node.next = c->_c_head;
97 if (!c->_c_tail) c->_c_tail = &node;
98 else node.next->prev = &node;
103 __pthread_mutex_unlock(m);
105 __pthread_setcancelstate(PTHREAD_CANCEL_MASKED, &cs);
107 do e = __timedwait(fut, seq, clock, ts, dummy, 0, !shared);
108 while (*fut==seq && (!e || e==EINTR));
109 if (e == EINTR) e = 0;
112 /* Suppress cancellation if a signal was potentially
113 * consumed; this is a legitimate form of spurious
114 * wake even if not. */
115 if (e == ECANCELED && c->_c_seq != seq) e = 0;
116 if (a_fetch_add(&c->_c_waiters, -1) == -0x7fffffff)
117 __wake(&c->_c_waiters, 1, 0);
122 oldstate = a_cas(&node.state, WAITING, LEAVING);
124 if (oldstate == WAITING) {
125 /* Access to cv object is valid because this waiter was not
126 * yet signaled and a new signal/broadcast cannot return
127 * after seeing a LEAVING waiter without getting notified
128 * via the futex notify below. */
132 if (c->_c_head == &node) c->_c_head = node.next;
133 else if (node.prev) node.prev->next = node.next;
134 if (c->_c_tail == &node) c->_c_tail = node.prev;
135 else if (node.next) node.next->prev = node.prev;
140 if (a_fetch_add(node.notify, -1)==1)
141 __wake(node.notify, 1, 1);
144 /* Lock barrier first to control wake order. */
149 /* Errors locking the mutex override any existing error or
150 * cancellation, since the caller must see them to know the
151 * state of the mutex. */
152 if ((tmp = pthread_mutex_lock(m))) e = tmp;
154 if (oldstate == WAITING) goto done;
156 if (!node.next) a_inc(&m->_m_waiters);
158 /* Unlock the barrier that's holding back the next waiter, and
159 * either wake it or requeue it to the mutex. */
161 unlock_requeue(&node.prev->barrier, &m->_m_lock, m->_m_type & 128);
163 a_dec(&m->_m_waiters);
165 /* Since a signal was consumed, cancellation is not permitted. */
166 if (e = ECANCELED) e = 0;
169 __pthread_setcancelstate(cs, 0);
171 if (e == ECANCELED) {
172 __pthread_testcancel();
173 __pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, 0);
/* Wake up to n waiters on the private (process-local) cv c; called
 * with n==1 for signal and a large n for broadcast. NOTE(review):
 * several structural lines (braces, ref/cur declarations, list-split
 * tail handling, the cv lock/unlock) are missing from this excerpt. */
int __private_cond_signal(pthread_cond_t *c, int n)
	struct waiter *p, *first=0;
	/* Walk from the tail (oldest waiter) toward the head, claiming up
	 * to n WAITING nodes; 'first' records the oldest claimed waiter,
	 * whose barrier is released last, below. A node not in WAITING
	 * state is concurrently LEAVING and must be waited for. */
	for (p=c->_c_tail; n && p; p=p->prev) {
		if (a_cas(&p->state, WAITING, SIGNALED) != WAITING) {
	/* Split the list, leaving any remainder on the cv. */
		if (p->next) p->next->prev = 0;
	/* Wait for any waiters in the LEAVING state to remove
	 * themselves from the list before returning or allowing
	 * signaled threads to proceed. */
	while ((cur = ref)) __wait(&ref, 0, cur, 1);

	/* Allow first signaled waiter, if any, to proceed. */
	if (first) unlock(&first->barrier);
/* Export the public POSIX name as a weak alias of the internal
 * implementation. */
weak_alias(__pthread_cond_timedwait, pthread_cond_timedwait);