1 // Copyright 2016 The Fuchsia Authors
2 //
3 // Use of this source code is governed by a MIT-style
4 // license that can be found in the LICENSE file or at
5 // https://opensource.org/licenses/MIT
6
7 #include <object/futex_context.h>
8
9 #include <assert.h>
10 #include <lib/user_copy/user_ptr.h>
11 #include <object/thread_dispatcher.h>
12 #include <trace.h>
13 #include <zircon/types.h>
14
15 #define LOCAL_TRACE 0
16
// Constructs an empty futex context (no waiters registered yet).
// LTRACE_ENTRY is a no-op unless LOCAL_TRACE is enabled above.
FutexContext::FutexContext() {
    LTRACE_ENTRY;
}
20
// Destroys the futex context. The owning process is tearing down, so the
// wait-queue hash table must already be empty; a non-empty table here means
// some thread never unqueued itself (a kernel bug).
FutexContext::~FutexContext() {
    LTRACE_ENTRY;

    // All of the threads should have removed themselves from wait queues
    // by the time the process has exited.
    DEBUG_ASSERT(futex_table_.is_empty());
}
28
// FutexWait verifies that the futex word at |value_ptr| still contains
// |current_value| and, if so, blocks the calling thread until it is woken by
// FutexWake()/FutexRequeue(), the |deadline| (adjusted by |slack|) passes, or
// the thread is killed or suspended.
//
// Parameters:
//  value_ptr       - userspace address of the futex word; must be int-aligned.
//  current_value   - expected value of the futex word; mismatch aborts the wait.
//  new_futex_owner - must be ZX_HANDLE_INVALID; ownership (PI) is not yet
//                    supported (see TODO below).
//  deadline/slack  - absolute timeout for the block.
//
// Returns:
//  ZX_OK                - woken by a wake operation (including the case where a
//                         wake raced with a timeout/kill/suspend; see the long
//                         comment at the bottom).
//  ZX_ERR_INVALID_ARGS  - misaligned futex address, or an owner handle given.
//  ZX_ERR_BAD_STATE     - the futex word no longer equals |current_value|.
//  ZX_ERR_TIMED_OUT     - deadline passed with no wake.
//  other                - copy-from-user failure, or internal interrupt
//                         statuses propagated from BlockThread().
zx_status_t FutexContext::FutexWait(user_in_ptr<const zx_futex_t> value_ptr,
                                    zx_futex_t current_value, zx_handle_t new_futex_owner,
                                    zx_time_t deadline, TimerSlack slack) {
    LTRACE_ENTRY;

    // Futex words must be naturally aligned; the address doubles as the hash key.
    uintptr_t futex_key = reinterpret_cast<uintptr_t>(value_ptr.get());
    if (futex_key % sizeof(int))
        return ZX_ERR_INVALID_ARGS;

    // TODO(johngro): When we actually support PI, look up the thread referenced
    // by this handle, if any.
    if (new_futex_owner != ZX_HANDLE_INVALID) {
        return ZX_ERR_INVALID_ARGS;
    }

    // FutexWait() checks that the address value_ptr still contains
    // current_value, and if so it sleeps awaiting a FutexWake() on value_ptr.
    // Those two steps must together be atomic with respect to FutexWake().
    // If a FutexWake() operation could occur between them, a userland mutex
    // operation built on top of futexes would have a race condition that
    // could miss wakeups.
    Guard<fbl::Mutex> guard{&lock_};

    // Re-read the futex word under the lock; this copy may fault, which is
    // why it happens before we enqueue anything.
    int value;
    zx_status_t result = value_ptr.copy_from_user(&value);
    if (result != ZX_OK) {
        return result;
    }
    if (value != current_value) {
        return ZX_ERR_BAD_STATE;
    }

    // Stack-allocated wait node: it only needs to live until this thread is
    // unblocked and removed from the queue, both of which happen before we
    // return.
    FutexNode node;
    node.set_hash_key(futex_key);
    node.SetAsSingletonList();

    QueueNodesLocked(&node);

    // Block current thread. This releases lock_ and does not reacquire it.
    result = node.BlockThread(guard.take(), deadline, slack);
    if (result == ZX_OK) {
        DEBUG_ASSERT(!node.IsInQueue());
        // All the work necessary for removing us from the hash table was done by FutexWake()
        return ZX_OK;
    }

    // The following happens if we hit the deadline (ZX_ERR_TIMED_OUT) or if
    // the thread was killed (ZX_ERR_INTERNAL_INTR_KILLED) or suspended
    // (ZX_ERR_INTERNAL_INTR_RETRY).
    //
    // We need to ensure that the thread's node is removed from the wait
    // queue, because FutexWake() probably didn't do that.
    Guard<fbl::Mutex> guard2{&lock_};
    if (UnqueueNodeLocked(&node)) {
        return result;
    }
    // The current thread was not found on the wait queue. This means
    // that, although we hit the deadline (or were suspended/killed), we
    // were *also* woken by FutexWake() (which removed the thread from the
    // wait queue) -- the two raced together.
    //
    // In this case, we want to return a success status. This preserves
    // the property that if FutexWake() is called with wake_count=1 and
    // there are waiting threads, then at least one FutexWait() call
    // returns success.
    //
    // If that property is broken, it can lead to missed wakeups in
    // concurrency constructs that are built on top of futexes. For
    // example, suppose a FutexWake() call from pthread_mutex_unlock()
    // races with a FutexWait() deadline from pthread_mutex_timedlock(). A
    // typical implementation of pthread_mutex_timedlock() will return
    // immediately without trying again to claim the mutex if this
    // FutexWait() call returns a timeout status. If that happens, and if
    // another thread is waiting on the mutex, then that thread won't get
    // woken -- the wakeup from the FutexWake() call would have got lost.
    return ZX_OK;
}
106
FutexWake(user_in_ptr<const zx_futex_t> value_ptr,uint32_t wake_count,OwnerAction owner_action)107 zx_status_t FutexContext::FutexWake(user_in_ptr<const zx_futex_t> value_ptr,
108 uint32_t wake_count,
109 OwnerAction owner_action) {
110 LTRACE_ENTRY;
111
112 DEBUG_ASSERT((owner_action != OwnerAction::ASSIGN_WOKEN) || (wake_count == 1));
113
114 if (wake_count == 0) return ZX_OK;
115
116 uintptr_t futex_key = reinterpret_cast<uintptr_t>(value_ptr.get());
117 if (futex_key % sizeof(int))
118 return ZX_ERR_INVALID_ARGS;
119
120 AutoReschedDisable resched_disable; // Must come before the Guard.
121 resched_disable.Disable();
122 Guard<fbl::Mutex> guard{&lock_};
123
124 FutexNode* node = futex_table_.erase(futex_key);
125 if (!node) {
126 // nothing blocked on this futex if we can't find it
127 return ZX_OK;
128 }
129 DEBUG_ASSERT(node->GetKey() == futex_key);
130
131 FutexNode* remaining_waiters =
132 FutexNode::WakeThreads(node, wake_count, futex_key);
133
134 if (remaining_waiters) {
135 DEBUG_ASSERT(remaining_waiters->GetKey() == futex_key);
136 futex_table_.insert(remaining_waiters);
137 }
138
139 return ZX_OK;
140 }
141
// FutexRequeue atomically checks that the futex word at |wake_ptr| still
// contains |current_value|; if so, it wakes up to |wake_count| threads waiting
// on |wake_ptr| and moves up to |requeue_count| of the remaining waiters onto
// the wait queue for |requeue_ptr|.
//
// Returns:
//  ZX_OK                - requeue performed (including when no threads were
//                         waiting on |wake_ptr|).
//  ZX_ERR_INVALID_ARGS  - null requeue pointer with nonzero count, an owner
//                         handle given (PI unsupported; see TODO), identical
//                         wake/requeue addresses, or misaligned addresses.
//  ZX_ERR_BAD_STATE     - the futex word no longer equals |current_value|.
//  other                - copy-from-user failure.
zx_status_t FutexContext::FutexRequeue(user_in_ptr<const zx_futex_t> wake_ptr,
                                       uint32_t wake_count,
                                       int current_value,
                                       OwnerAction owner_action,
                                       user_in_ptr<const zx_futex_t> requeue_ptr,
                                       uint32_t requeue_count,
                                       zx_handle_t new_requeue_owner) {
    LTRACE_ENTRY;

    // Assigning ownership only makes sense when waking exactly one thread.
    DEBUG_ASSERT((owner_action != OwnerAction::ASSIGN_WOKEN) || (wake_count == 1));

    if ((requeue_ptr.get() == nullptr) && requeue_count)
        return ZX_ERR_INVALID_ARGS;

    // TODO(johngro): When we actually support PI, look up the thread referenced
    // by this handle, if any.
    if (new_requeue_owner != ZX_HANDLE_INVALID) {
        return ZX_ERR_INVALID_ARGS;
    }

    AutoReschedDisable resched_disable; // Must come before the Guard.
    Guard<fbl::Mutex> guard{&lock_};

    // Value check must happen under the lock to be atomic with respect to
    // wakes, just as in FutexWait().
    int value;
    zx_status_t result = wake_ptr.copy_from_user(&value);
    if (result != ZX_OK) return result;
    if (value != current_value) return ZX_ERR_BAD_STATE;

    // Both futex addresses must be distinct and naturally aligned.
    uintptr_t wake_key = reinterpret_cast<uintptr_t>(wake_ptr.get());
    uintptr_t requeue_key = reinterpret_cast<uintptr_t>(requeue_ptr.get());
    if (wake_key == requeue_key) return ZX_ERR_INVALID_ARGS;
    if (wake_key % sizeof(int) || requeue_key % sizeof(int))
        return ZX_ERR_INVALID_ARGS;

    // This must happen before RemoveFromHead() calls set_hash_key() on
    // nodes below, because operations on futex_table_ look at the GetKey
    // field of the list head nodes for wake_key and requeue_key.
    FutexNode* node = futex_table_.erase(wake_key);
    if (!node) {
        // nothing blocked on this futex if we can't find it
        return ZX_OK;
    }

    // This must come before WakeThreads() to be useful, but we want to
    // avoid doing it before copy_from_user() in case that faults.
    resched_disable.Disable();

    if (wake_count > 0) {
        node = FutexNode::WakeThreads(node, wake_count, wake_key);
    }

    // node is now the head of wake_ptr futex after possibly removing some threads to wake
    if (node != nullptr) {
        if (requeue_count > 0) {
            // head and tail of list of nodes to requeue
            FutexNode* requeue_head = node;
            node = FutexNode::RemoveFromHead(node, requeue_count,
                                             wake_key, requeue_key);

            // now requeue our nodes to requeue_ptr mutex
            DEBUG_ASSERT(requeue_head->GetKey() == requeue_key);
            QueueNodesLocked(requeue_head);
        }
    }

    // add any remaining nodes back to wake_key futex
    if (node != nullptr) {
        DEBUG_ASSERT(node->GetKey() == wake_key);
        futex_table_.insert(node);
    }

    return ZX_OK;
}
215
FutexGetOwner(user_in_ptr<const zx_futex_t> wake_ptr,user_out_ptr<zx_koid_t> koid)216 zx_status_t FutexContext::FutexGetOwner(user_in_ptr<const zx_futex_t> wake_ptr,
217 user_out_ptr<zx_koid_t> koid) {
218 return koid.copy_to_user(ZX_KOID_INVALID);
219 }
220
QueueNodesLocked(FutexNode * head)221 void FutexContext::QueueNodesLocked(FutexNode* head) {
222 DEBUG_ASSERT(lock_.lock().IsHeld());
223
224 FutexNode::HashTable::iterator iter;
225
226 // Attempt to insert this FutexNode into the hash table. If the insert
227 // succeeds, then the current thread is first to block on this futex and we
228 // are finished. If the insert fails, then there is already a thread
229 // waiting on this futex. Add ourselves to that thread's list.
230 if (!futex_table_.insert_or_find(head, &iter))
231 iter->AppendList(head);
232 }
233
234 // This attempts to unqueue a thread (which may or may not be waiting on a
235 // futex), given its FutexNode. This returns whether the FutexNode was
236 // found and removed from a futex wait queue.
UnqueueNodeLocked(FutexNode * node)237 bool FutexContext::UnqueueNodeLocked(FutexNode* node) {
238 DEBUG_ASSERT(lock_.lock().IsHeld());
239
240 if (!node->IsInQueue())
241 return false;
242
243 // Note: When UnqueueNode() is called from FutexWait(), it might be
244 // tempting to reuse the futex key that was passed to FutexWait().
245 // However, that could be out of date if the thread was requeued by
246 // FutexRequeue(), so we need to re-get the hash table key here.
247 uintptr_t futex_key = node->GetKey();
248
249 FutexNode* old_head = futex_table_.erase(futex_key);
250 DEBUG_ASSERT(old_head);
251 FutexNode* new_head = FutexNode::RemoveNodeFromList(old_head, node);
252 if (new_head)
253 futex_table_.insert(new_head);
254 return true;
255 }
256