#include "futex_impl.h"
#include "threads_impl.h"

/*
 * struct waiter
 *
 * Waiter objects have automatic storage on the waiting thread, and
 * are used in building a linked list representing waiters currently
 * waiting on the condition variable or a group of waiters woken
 * together by a broadcast or signal; in the case of signal, this is a
 * degenerate list of one member.
 *
 * Waiter lists attached to the condition variable itself are
 * protected by the lock on the cv. Detached waiter lists are never
 * modified again, but can only be traversed in reverse order, and are
 * protected by the "barrier" locks in each node, which are unlocked
 * in turn to control wake order.
 */

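/* Field summary (details in the functions below):
 *  - prev/next: doubly-linked list links; new nodes are pushed at the
 *    condvar's head, so next points toward older waiters.
 *  - state: WAITING, SIGNALED, or LEAVING (see the enum below).
 *  - barrier: the futex this waiter sleeps on; a signaler unlocks it
 *    (or requeues it onto the mutex) to release the waiter.
 *  - notify: set by __private_cond_signal() when it finds this waiter
 *    already LEAVING; the leaving waiter decrements and wakes it once
 *    it has removed itself from the list. */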
struct waiter {
    struct waiter *prev, *next;
    atomic_int state;
    atomic_int barrier;
    atomic_int* notify;
};

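/* Waiter states: a node starts out WAITING; __private_cond_signal() moves it
 * to SIGNALED when claiming it for wakeup, while a timed-out waiter moves
 * itself to LEAVING before delisting itself. Whichever side wins the atomic
 * compare-and-swap on node.state owns that transition. */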
enum {
    WAITING,
    SIGNALED,
    LEAVING,
};

int pthread_cond_timedwait(pthread_cond_t* restrict c, pthread_mutex_t* restrict m,
                           const struct timespec* restrict ts) {
    int e, clock = c->_c_clock, oldstate, tmp;

    if ((m->_m_type != PTHREAD_MUTEX_NORMAL) &&
        (m->_m_lock & PTHREAD_MUTEX_OWNED_LOCK_MASK) != __thread_get_tid())
        return EPERM;

    if (ts && (ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000L))
        return EINVAL;

    lock(&c->_c_lock);

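    /* node.barrier starts out "locked" with the value seq (2); the wait loop
     * below sleeps on it until another thread changes it, either by unlocking
     * it directly (__private_cond_signal) or via unlock_requeue() from a
     * previously woken waiter. */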
    int seq = 2;
    struct waiter node = {
        .barrier = ATOMIC_VAR_INIT(seq),
        .state = ATOMIC_VAR_INIT(WAITING),
    };
    atomic_int* fut = &node.barrier;

    /* Add our waiter node onto the condvar's list. We add the node to the
     * head of the list, but this is logically the end of the queue. */
    node.next = c->_c_head;
    c->_c_head = &node;
    if (!c->_c_tail)
        c->_c_tail = &node;
    else
        node.next->prev = &node;

    unlock(&c->_c_lock);

    pthread_mutex_unlock(m);

    /* Wait to be signaled. There are multiple ways this loop could exit:
     *  1) After being woken by __private_cond_signal().
     *  2) After being woken by pthread_mutex_unlock(), after we were
     *     requeued from the condvar's futex to the mutex's futex (by
     *     pthread_cond_timedwait() in another thread).
     *  3) After a timeout.
     *  4) On Linux, interrupted by an asynchronous signal. This does
     *     not apply on Zircon. */
    do
        e = __timedwait(fut, seq, clock, ts);
    while (*fut == seq && !e);

    oldstate = a_cas_shim(&node.state, WAITING, LEAVING);

    if (oldstate == WAITING) {
        /* The wait timed out. So far, this thread was not signaled
         * by pthread_cond_signal()/broadcast() -- this thread was
         * able to move node.state out of the WAITING state before any
         * __private_cond_signal() call could do that.
         *
         * This thread must therefore remove the waiter node from the
         * list itself. */

        /* Access to the cv object is valid because this waiter was not
         * yet signaled and a new signal/broadcast cannot return
         * after seeing a LEAVING waiter without getting notified
         * via the futex notify below. */

        lock(&c->_c_lock);

        /* Remove our waiter node from the list. */
        if (c->_c_head == &node)
            c->_c_head = node.next;
        else if (node.prev)
            node.prev->next = node.next;
        if (c->_c_tail == &node)
            c->_c_tail = node.prev;
        else if (node.next)
            node.next->prev = node.prev;

        unlock(&c->_c_lock);

        /* It is possible that __private_cond_signal() saw our waiter node
         * after we set node.state to LEAVING but before we removed the
         * node from the list. If so, it will have set node.notify and
         * will be waiting on it, and we need to wake it up.
         *
         * This is rather complex. An alternative would be to eliminate
         * the node.state field and always claim _c_lock if we could have
         * got a timeout. However, that presumably has higher overhead
         * (since it contends _c_lock and involves more atomic ops). */
        if (node.notify) {
            if (atomic_fetch_add(node.notify, -1) == 1)
                _zx_futex_wake(node.notify, 1);
        }
    } else {
        /* This thread was at least partially signaled by
         * pthread_cond_signal()/broadcast(). That might have raced
         * with a timeout, so we need to wait for this thread to be
         * fully signaled. We need to wait until another thread sets
         * node.barrier to 0. (This lock() call will also set
         * node.barrier to non-zero, but that side effect is
         * unnecessary here.) */
        lock(&node.barrier);
    }

    /* Errors locking the mutex override any existing error, since the
     * caller must see them to know the state of the mutex. */
    if ((tmp = pthread_mutex_lock(m)))
        e = tmp;

    if (oldstate == WAITING)
        goto done;

    /* By this point, our part of the waiter list cannot change further.
     * It has been unlinked from the condvar by __private_cond_signal().
     * It consists only of waiters that were woken explicitly by
     * pthread_cond_signal()/broadcast(). Any timed-out waiters would have
     * removed themselves from the list before __private_cond_signal()
     * signaled the first node.barrier in our list.
     *
     * It is therefore safe now to read node.next and node.prev without
     * holding _c_lock. */

    /* As an optimization, we only update _m_waiters at the beginning and
     * end of the woken list. */
    if (!node.next)
        atomic_fetch_add(&m->_m_waiters, 1);

    /* Unlock the barrier that's holding back the next waiter, and
     * either wake it or requeue it to the mutex. */
    if (node.prev)
        unlock_requeue(&node.prev->barrier, &m->_m_lock);
    else
        atomic_fetch_sub(&m->_m_waiters, 1);

done:

    return e;
}

/* This will wake up to |n| threads that are waiting on the condvar. This
 * is used to implement pthread_cond_signal() (for n=1) and
 * pthread_cond_broadcast() (for n=-1). */
void __private_cond_signal(void* condvar, int n) {
    pthread_cond_t* c = condvar;
    struct waiter *p, *first = 0;
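    /* |ref| counts LEAVING waiters that we must wait for (below) before
     * returning; each such waiter gets p->notify = &ref and decrements it
     * (with a futex wake) after delisting itself. */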
    atomic_int ref = ATOMIC_VAR_INIT(0);
    int cur;

    lock(&c->_c_lock);
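    /* Walk from the tail (the oldest waiter, since new waiters are pushed at
     * the head) toward the head, claiming up to |n| WAITING nodes. */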
    for (p = c->_c_tail; n && p; p = p->prev) {
        if (a_cas_shim(&p->state, WAITING, SIGNALED) != WAITING) {
            /* This waiter timed out, and it marked itself as in the
             * LEAVING state. However, it hasn't yet claimed _c_lock
             * (since we claimed the lock first) and so it hasn't yet
             * removed itself from the list. We will wait for the waiter
             * to remove itself from the list and to notify us of that. */
            atomic_fetch_add(&ref, 1);
            p->notify = &ref;
        } else {
            n--;
            if (!first)
                first = p;
        }
    }
    /* Split the list, leaving any remainder on the cv. */
    if (p) {
        if (p->next)
            p->next->prev = 0;
        p->next = 0;
    } else {
        c->_c_head = 0;
    }
    c->_c_tail = p;
    unlock(&c->_c_lock);

    /* Wait for any waiters in the LEAVING state to remove
     * themselves from the list before returning or allowing
     * signaled threads to proceed. */
    while ((cur = atomic_load(&ref)))
        __wait(&ref, 0, cur);

    /* Allow first signaled waiter, if any, to proceed. */
    if (first)
        unlock(&first->barrier);
}