#include <threads.h>

#include <errno.h>

#include <lib/sync/mutex.h>

#include "futex_impl.h"
#include "libc.h"
#include "threads_impl.h"

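/* Per-waiter bookkeeping for cnd_timedwait().  Each blocked thread links a
 * node (allocated on its own stack) into the condvar's doubly linked waiter
 * list.  "barrier" is the futex the thread sleeps on, "state" records who
 * claimed the node (the waiter on timeout, or a signaler), and "notify" is
 * set by __private_cond_signal() when it races with a timed-out waiter and
 * needs to be woken once that waiter has unlinked itself. */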
struct waiter {
    struct waiter *prev, *next;
    atomic_int state;
    atomic_int barrier;
    atomic_int* notify;
};

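/* node.state values: every waiter starts out WAITING.  Whichever of the
 * waiter itself (on timeout or error) or a signaling thread moves the state
 * out of WAITING first is responsible for unlinking the node from the
 * condvar's list. */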
enum {
    WAITING,
    LEAVING,
};

int cnd_timedwait(cnd_t* restrict c, mtx_t* restrict mutex,
                  const struct timespec* restrict ts) __TA_NO_THREAD_SAFETY_ANALYSIS {
    sync_mutex_t* m = (sync_mutex_t*)mutex;
    int e, clock = c->_c_clock, oldstate;

    if (ts && ts->tv_nsec >= 1000000000UL)
        return thrd_error;

    lock(&c->_c_lock);

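    /* Set up this thread's waiter node.  The barrier futex starts out in a
     * "locked" state (seq); the __timedwait() loop below sleeps on it until
     * a signaling thread unlocks it (or requeues this thread to the mutex),
     * or the timeout expires. */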
    int seq = 2;
    struct waiter node = {
        .barrier = ATOMIC_VAR_INIT(seq),
        .state = ATOMIC_VAR_INIT(WAITING),
    };
    atomic_int* fut = &node.barrier;
    /* Add our waiter node onto the condvar's list.  We add the node to the
     * head of the list, but this is logically the end of the queue. */
    node.next = c->_c_head;
    c->_c_head = &node;
    if (!c->_c_tail)
        c->_c_tail = &node;
    else
        node.next->prev = &node;

    unlock(&c->_c_lock);

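    /* Release the caller's mutex only after our node is queued, so that a
     * cnd_signal()/cnd_broadcast() issued once the mutex is dropped is
     * guaranteed to see this waiter. */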
    sync_mutex_unlock(m);

    /* Wait to be signaled.  There are multiple ways this loop could exit:
     *  1) After being woken by __private_cond_signal().
     *  2) After being woken by sync_mutex_unlock(), after we were
     *     requeued from the condvar's futex to the mutex's futex (by
     *     cnd_timedwait() in another thread).
     *  3) After a timeout.
     *  4) On Linux, interrupted by an asynchronous signal.  This does
     *     not apply on Zircon. */
    do
        e = __timedwait(fut, seq, clock, ts);
    while (*fut == seq && !e);

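    /* Race to claim our node: if we move node.state from WAITING to LEAVING
     * first, no signaler has selected us and we must unlink the node
     * ourselves; otherwise a signaler got there first and has taken (or will
     * take) the node off the condvar's list for us. */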
    oldstate = a_cas_shim(&node.state, WAITING, LEAVING);

    if (oldstate == WAITING) {
        /* The wait timed out.  So far, this thread was not signaled by
         * cnd_signal()/cnd_broadcast() -- this thread was able to move
         * node.state out of the WAITING state before any
         * __private_cond_signal() call could do that.
         *
         * This thread must therefore remove the waiter node from the
         * list itself. */

        /* Access to cv object is valid because this waiter was not
         * yet signaled and a new signal/broadcast cannot return
         * after seeing a LEAVING waiter without getting notified
         * via the futex notify below. */

        lock(&c->_c_lock);

        /* Remove our waiter node from the list. */
        if (c->_c_head == &node)
            c->_c_head = node.next;
        else if (node.prev)
            node.prev->next = node.next;
        if (c->_c_tail == &node)
            c->_c_tail = node.prev;
        else if (node.next)
            node.next->prev = node.prev;

        unlock(&c->_c_lock);

        /* It is possible that __private_cond_signal() saw our waiter node
         * after we set node.state to LEAVING but before we removed the
         * node from the list.  If so, it will have set node.notify and
         * will be waiting on it, and we need to wake it up.
         *
         * This is rather complex.  An alternative would be to eliminate
         * the node.state field and always claim _c_lock if we could have
         * got a timeout.  However, that presumably has higher overhead
         * (since it contends _c_lock and involves more atomic ops). */
        if (node.notify) {
            if (atomic_fetch_add(node.notify, -1) == 1)
                _zx_futex_wake(node.notify, 1);
        }
    } else {
        /* Lock barrier first to control wake order. */
        lock(&node.barrier);
    }

    /* We must leave the mutex in the "locked with waiters" state here.
     * There are two reasons for that:
     *  1) If we do the unlock_requeue() below, a condvar waiter will be
     *     requeued to the mutex's futex.  We need to ensure that it will
     *     be signaled by sync_mutex_unlock() in future.
     *  2) If the current thread was woken via an unlock_requeue() +
     *     sync_mutex_unlock(), there *might* be another thread waiting for
     *     the mutex after us in the queue.  We need to ensure that it
     *     will be signaled by sync_mutex_unlock() in future. */
    sync_mutex_lock_with_waiter(m);

    /* By this point, our part of the waiter list cannot change further.
     * It has been unlinked from the condvar by __private_cond_signal().
     * It consists only of waiters that were woken explicitly by
     * cnd_signal()/cnd_broadcast().  Any timed-out waiters would have
     * removed themselves from the list before __private_cond_signal()
     * signaled the first node.barrier in our list.
     *
     * It is therefore safe now to read node.next and node.prev without
     * holding _c_lock. */

    if (oldstate != WAITING && node.prev) {
        /* Unlock the barrier that's holding back the next waiter, and
         * requeue it to the mutex so that it will be woken when the
         * mutex is unlocked. */
        unlock_requeue(&node.prev->barrier, &m->futex);
    }

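    /* Map __timedwait()'s errno-style result onto the C11 thrd_* return
     * codes. */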
    switch (e) {
    case 0:
        return 0;
    case EINVAL:
        return thrd_error;
    case ETIMEDOUT:
        return thrd_timedout;
    default:
        // No other error values are permissible from __timedwait();
        __builtin_trap();
    }
}