1 #include "pthread_impl.h"
6 * Waiter objects have automatic storage on the waiting thread, and
7 * are used in building a linked list representing waiters currently
8 * waiting on the condition variable or a group of waiters woken
9 * together by a broadcast or signal; in the case of signal, this is a
10 * degenerate list of one member.
12 * Waiter lists attached to the condition variable itself are
13 * protected by the lock on the cv. Detached waiter lists are
14 * protected by the associated mutex. The hand-off between protections
15 * is handled by a "barrier" lock in each node, which disallows
16 * signaled waiters from making forward progress to the code that will
17 * access the list using the mutex until the list is in a consistent
18 * state and the cv lock has been released.
20 * Since process-shared cond var semantics do not necessarily allow
21 * one thread to see another's automatic storage (they may be in
22 * different processes), the waiter list is not used for the
23 * process-shared case, but the structure is still used to store data
24 * needed by the cancellation cleanup handler.
/* Per-waiter node with automatic storage on the waiting thread.
 * NOTE(review): the "struct waiter {" opener and some members (e.g. the
 * notify pointer, cond pointer, and shared flag used below) are elided
 * from this excerpt -- confirm against the full file. */
/* Doubly-linked list links; the list is rooted at the cv's head/tail. */
28 struct waiter *prev, *next;
/* state: WAITING/SIGNALED/LEAVING handshake word (futex-waited on);
 * barrier: per-node hand-off lock released by the signaling thread;
 * requeued: set when this waiter was requeued onto the mutex futex;
 * mutex_ret: result of re-locking the mutex in unwait(). */
29 int state, barrier, requeued, mutex_ret;
/* Mutex passed by the caller; re-acquired before the wait returns. */
31 pthread_mutex_t *mutex;
36 /* Self-synchronized-destruction-safe lock functions */

/* Acquire the lock word: 0 = unlocked, 2 = locked-contended.
 * NOTE(review): the uncontended fast path (initial a_cas) and the
 * function braces are elided from this excerpt. The visible loop is
 * the contended path: futex-wait while the word is 2, then attempt to
 * retake it 0 -> 2. */
38 static inline void lock(volatile int *l)
42 do __wait(l, 0, 2, 1);
43 while (a_cas(l, 0, 2));
/* Release a lock() lock, waking a futex waiter if the word indicated
 * contention. NOTE(review): body elided from this excerpt -- confirm
 * against the full file. */
47 static inline void unlock(volatile int *l)
/* Exit/cleanup path for a waiter leaving the wait -- whether signaled,
 * timed out, or cancelled (installed as the cleanup handler passed to
 * __timedwait in pthread_cond_timedwait). Removes the node from the
 * cv's list if it is still WAITING, re-acquires the caller's mutex,
 * and performs the deferred requeue bookkeeping for signaled waiters. */
59 static void unwait(void *arg)
61 struct waiter *node = arg, *p;
/* Process-shared case: drop the waiter count (waking a thread blocked
 * in destruction when the count hits the sentinel), re-lock the mutex,
 * and return -- the waiter list is not used for shared cvs.
 * NOTE(review): the enclosing "if (node->shared)" guard and the early
 * return appear to be elided from this excerpt -- confirm. */
64 pthread_cond_t *c = node->cond;
65 pthread_mutex_t *m = node->mutex;
66 if (a_fetch_add(&c->_c_waiters, -1) == -0x7fffffff)
67 __wake(&c->_c_waiters, 1, 0);
68 node->mutex_ret = pthread_mutex_lock(m);
/* Atomically transition WAITING -> LEAVING: either we leave on our own
 * (oldstate == WAITING) or a signaler already claimed this node. */
72 int oldstate = a_cas(&node->state, WAITING, LEAVING);
74 if (oldstate == WAITING) {
75 /* Access to cv object is valid because this waiter was not
76 * yet signaled and a new signal/broadcast cannot return
77 * after seeing a LEAVING waiter without getting notified
78 * via the futex notify below. */
80 pthread_cond_t *c = node->cond;
/* Unlink this node from the cv's waiter list.
 * NOTE(review): the lock/unlock of c->_c_lock bracketing this unlink
 * appears to be elided from this excerpt -- confirm. */
83 if (c->_c_head == node) c->_c_head = node->next;
84 else if (node->prev) node->prev->next = node->next;
85 if (c->_c_tail == node) c->_c_tail = node->prev;
86 else if (node->next) node->next->prev = node->prev;
/* Notify a signaling thread (blocked in __private_cond_signal waiting
 * for LEAVING waiters) that we are done touching the list; wake it on
 * the final decrement. NOTE(review): an "if (node->notify)" guard
 * appears to be elided here -- confirm. */
91 if (a_fetch_add(node->notify, -1)==1)
92 __wake(node->notify, 1, 1);
/* Signaled (or leaving) path: re-acquire the caller's mutex. */
96 node->mutex_ret = pthread_mutex_lock(node->mutex);
98 if (oldstate == WAITING) return;
100 /* If the mutex can't be locked, we're in big trouble because
101 * it's all that protects access to the shared list state.
102 * In order to prevent catastrophic stack corruption from
103 * unsynchronized access, simply deadlock. */
104 if (node->mutex_ret && node->mutex_ret != EOWNERDEAD)
105 for (;;) lock(&(int){0});
107 /* Wait until control of the list has been handed over from
108 * the cv lock (signaling thread) to the mutex (waiters). */
109 lock(&node->barrier);
111 /* If this thread was requeued to the mutex, undo the extra
112 * waiter count that was added to the mutex. */
113 if (node->requeued) a_dec(&node->mutex->_m_waiters);
115 /* Find a thread to requeue to the mutex, starting from the
116 * end of the list (oldest waiters). */
117 for (p=node; p->next; p=p->next);
118 if (p==node) p=node->prev;
119 for (; p && p->requeued; p=p->prev);
120 if (p==node) p=node->prev;
/* Account for the waiter about to be requeued onto the mutex futex.
 * NOTE(review): an "if (p)" guard and "p->requeued = 1;" appear to be
 * elided before this point -- confirm against the full file. */
123 a_inc(&node->mutex->_m_waiters);
124 /* The futex requeue command cannot requeue from
125 * private to shared, so for process-shared mutexes,
126 * simply wake the target. */
127 int wake = node->mutex->_m_type & 128;
/* Try the private-futex requeue first; if the kernel rejects the
 * private flag (-EINVAL), fall back to a plain FUTEX_REQUEUE. */
128 __syscall(SYS_futex, &p->state, FUTEX_REQUEUE|128,
129 wake, 1, &node->mutex->_m_lock) != -EINVAL
130 || __syscall(SYS_futex, &p->state, FUTEX_REQUEUE,
131 0, 1, &node->mutex->_m_lock);
134 /* Remove this thread from the list. */
135 if (node->next) node->next->prev = node->prev;
136 if (node->prev) node->prev->next = node->next;
/* Atomically release mutex m and wait on condition variable c, with an
 * optional absolute timeout ts (NULL = wait forever, making this the
 * backend for pthread_cond_wait as well). On return the mutex is
 * re-locked; returns 0, a timeout/validity error, or the mutex
 * re-lock error. Acts as a cancellation point. */
139 int pthread_cond_timedwait(pthread_cond_t *restrict c, pthread_mutex_t *restrict m, const struct timespec *restrict ts)
/* Stack-allocated waiter node; lives only for the duration of the wait. */
141 struct waiter node = { .cond = c, .mutex = m };
142 int e, seq, *fut, clock = c->_c_clock;
/* Error-checking/recursive mutexes must be held by the caller.
 * NOTE(review): the "return EPERM;" for this check and "return EINVAL;"
 * for the next appear to be elided from this excerpt -- confirm. */
144 if ((m->_m_type&15) && (m->_m_lock&INT_MAX) != __pthread_self()->tid)
147 if (ts && ts->tv_nsec >= 1000000000UL)
150 pthread_testcancel();
/* Process-shared branch: only a waiter count is maintained on the cv.
 * NOTE(review): the enclosing shared/private if/else (setting
 * node.shared, fut, and seq) appears to be elided here -- confirm. */
156 a_inc(&c->_c_waiters);
/* Private branch: push this node at the head of the cv's waiter list
 * and wait on the node's own state word.
 * NOTE(review): the c->_c_lock acquisition, the "fut = &node.state;"
 * assignment, and "c->_c_head = &node;" appear to be elided -- confirm. */
162 seq = node.state = WAITING;
163 node.next = c->_c_head;
165 if (!c->_c_tail) c->_c_tail = &node;
166 else node.next->prev = &node;
/* Release the mutex only after the node is enqueued, so no wakeup can
 * be missed between unlock and sleep. */
171 pthread_mutex_unlock(m);
/* Sleep until the futex word changes (signal/broadcast), the timeout
 * expires, or cancellation runs unwait(); retry on spurious wakes and
 * EINTR while the word is unchanged. */
173 do e = __timedwait(fut, seq, clock, ts, unwait, &node, !node.shared);
174 while (*fut==seq && (!e || e==EINTR));
175 if (e == EINTR) e = 0;
/* A mutex re-lock failure (set by unwait) takes precedence over the
 * wait result. NOTE(review): the unwait() call on the normal wake path
 * appears to be elided above -- confirm. */
179 return node.mutex_ret ? node.mutex_ret : e;
/* Signal (n==1) or broadcast (n==INT_MAX) implementation for
 * process-private condition variables: claim up to n WAITING nodes
 * from the tail (oldest) end of the list, detach them as a group,
 * wait out any concurrently-leaving waiters, then wake the first
 * claimed waiter and release the per-node barriers. */
182 int __private_cond_signal(pthread_cond_t *c, int n)
/* q will point at the first (oldest) successfully claimed waiter.
 * NOTE(review): declarations of ref/cur and the lock(&c->_c_lock)
 * appear to be elided from this excerpt -- confirm. */
184 struct waiter *p, *q=0;
188 for (p=c->_c_tail; n && p; p=p->prev) {
189 /* The per-waiter-node barrier lock is held at this
190 * point, so while the following CAS may allow forward
191 * progress in the target thread, it doesn't allow
192 * access to the waiter list yet. Ideally the target
193 * does not run until the futex wake anyway. */
/* CAS failed => the waiter is LEAVING; point it at ref so its unwait()
 * can notify us when it is off the list.
 * NOTE(review): the "ref++; p->notify = &ref;" body and the else
 * branch ("n--; if (!q) q=p;") appear to be elided -- confirm. */
194 if (a_cas(&p->state, WAITING, SIGNALED) != WAITING) {
202 /* Split the list, leaving any remainder on the cv. */
/* NOTE(review): the surrounding "if (p) ... else c->_c_head = 0;",
 * the c->_c_tail update, and unlock(&c->_c_lock) appear to be elided
 * from this excerpt -- confirm. */
204 if (p->next) p->next->prev = 0;
212 /* Wait for any waiters in the LEAVING state to remove
213 * themselves from the list before returning or allowing
214 * signaled threads to proceed. */
215 while ((cur = ref)) __wait(&ref, 0, cur, 1);
217 /* Wake the first signaled thread and unlock the per-waiter
218 * barriers preventing their forward progress. */
/* Only the last node in the detached chain gets an explicit futex
 * wake; the rest are handed over by requeueing in unwait().
 * NOTE(review): the enclosing "for (p=q; p; p=q)" loop, the
 * unlock(&p->barrier), and the "return 0;" appear to be elided. */
221 if (!p->next) __wake(&p->state, 1, 1);