1 // Copyright 2016 The Fuchsia Authors
2 //
3 // Use of this source code is governed by a MIT-style
4 // license that can be found in the LICENSE file or at
5 // https://opensource.org/licenses/MIT
6
7 #include <object/socket_dispatcher.h>
8
9 #include <string.h>
10
11 #include <assert.h>
12 #include <err.h>
13 #include <pow2.h>
14 #include <trace.h>
15
16 #include <lib/user_copy/user_ptr.h>
17
18 #include <vm/vm_aspace.h>
19 #include <vm/vm_object.h>
20 #include <vm/vm_object_paged.h>
21 #include <object/handle.h>
22
23 #include <zircon/rights.h>
24 #include <fbl/alloc_checker.h>
25 #include <fbl/auto_lock.h>
26
27 #define LOCAL_TRACE 0
28
29 // static
Create(uint32_t flags,fbl::RefPtr<Dispatcher> * dispatcher0,fbl::RefPtr<Dispatcher> * dispatcher1,zx_rights_t * rights)30 zx_status_t SocketDispatcher::Create(uint32_t flags,
31 fbl::RefPtr<Dispatcher>* dispatcher0,
32 fbl::RefPtr<Dispatcher>* dispatcher1,
33 zx_rights_t* rights) {
34 LTRACE_ENTRY;
35
36 if (flags & ~ZX_SOCKET_CREATE_MASK)
37 return ZX_ERR_INVALID_ARGS;
38
39 fbl::AllocChecker ac;
40
41 zx_signals_t starting_signals = ZX_SOCKET_WRITABLE;
42
43 if (flags & ZX_SOCKET_HAS_ACCEPT)
44 starting_signals |= ZX_SOCKET_SHARE;
45
46 ktl::unique_ptr<ControlMsg> control0;
47 ktl::unique_ptr<ControlMsg> control1;
48
49 // TODO: use mbufs to avoid pinning control buffer memory.
50 if (flags & ZX_SOCKET_HAS_CONTROL) {
51 starting_signals |= ZX_SOCKET_CONTROL_WRITABLE;
52
53 control0.reset(new (&ac) ControlMsg());
54 if (!ac.check())
55 return ZX_ERR_NO_MEMORY;
56
57 control1.reset(new (&ac) ControlMsg());
58 if (!ac.check())
59 return ZX_ERR_NO_MEMORY;
60 }
61
62 auto holder0 = fbl::AdoptRef(new (&ac) PeerHolder<SocketDispatcher>());
63 if (!ac.check())
64 return ZX_ERR_NO_MEMORY;
65 auto holder1 = holder0;
66
67 auto socket0 = fbl::AdoptRef(new (&ac) SocketDispatcher(ktl::move(holder0), starting_signals,
68 flags, ktl::move(control0)));
69 if (!ac.check())
70 return ZX_ERR_NO_MEMORY;
71
72 auto socket1 = fbl::AdoptRef(new (&ac) SocketDispatcher(ktl::move(holder1), starting_signals,
73 flags, ktl::move(control1)));
74 if (!ac.check())
75 return ZX_ERR_NO_MEMORY;
76
77 socket0->Init(socket1);
78 socket1->Init(socket0);
79
80 *rights = default_rights();
81 *dispatcher0 = ktl::move(socket0);
82 *dispatcher1 = ktl::move(socket1);
83 return ZX_OK;
84 }
85
// Constructs one endpoint of a socket pair. |holder| is the shared peer
// holder keeping the pair's common state alive; |starting_signals| is the
// initial signal state; |flags| carries the ZX_SOCKET_* creation options;
// |control_msg| is the pinned control-message buffer (non-null only when
// created with ZX_SOCKET_HAS_CONTROL — see Create()).
SocketDispatcher::SocketDispatcher(fbl::RefPtr<PeerHolder<SocketDispatcher>> holder,
                                   zx_signals_t starting_signals, uint32_t flags,
                                   ktl::unique_ptr<ControlMsg> control_msg)
    : PeeredDispatcher(ktl::move(holder), starting_signals),
      flags_(flags),
      control_msg_(ktl::move(control_msg)),
      control_msg_len_(0),
      read_threshold_(0),
      write_threshold_(0),
      read_disabled_(false) {
}
97
~SocketDispatcher()98 SocketDispatcher::~SocketDispatcher() {
99 }
100
// This is called before either SocketDispatcher is accessible from threads other than the one
// initializing the socket, so it does not need locking.
void SocketDispatcher::Init(fbl::RefPtr<SocketDispatcher> other) TA_NO_THREAD_SAFETY_ANALYSIS {
    // Take the peer reference, then cache its koid while the reference is
    // known to be valid.
    peer_ = ktl::move(other);
    peer_koid_ = peer_->get_koid();
}
107
// Invoked (with the lock held) when the last handle to this endpoint is
// closed. No local state change is needed here; the peer's reaction is
// handled by OnPeerZeroHandlesLocked() on the other endpoint.
void SocketDispatcher::on_zero_handles_locked() {
    canary_.Assert();
}
111
// Invoked (with the lock held) when the last handle to our *peer* is closed:
// we can never be written to again, so drop WRITABLE and assert PEER_CLOSED.
void SocketDispatcher::OnPeerZeroHandlesLocked() {
    canary_.Assert();

    UpdateStateLocked(ZX_SOCKET_WRITABLE, ZX_SOCKET_PEER_CLOSED);
}
117
// Applies a user-requested signal change to this endpoint. The masks are
// assumed to have been validated by the caller — TODO confirm against the
// zx_object_signal() path.
zx_status_t SocketDispatcher::UserSignalSelfLocked(uint32_t clear_mask, uint32_t set_mask) {
    canary_.Assert();
    UpdateStateLocked(clear_mask, set_mask);
    return ZX_OK;
}
123
// Disables reading and/or writing on this endpoint per |how|
// (ZX_SOCKET_SHUTDOWN_READ / ZX_SOCKET_SHUTDOWN_WRITE), updating signals on
// both endpoints. Idempotent: re-requesting the current shutdown state
// returns ZX_OK without changing anything.
zx_status_t SocketDispatcher::Shutdown(uint32_t how) TA_NO_THREAD_SAFETY_ANALYSIS {
    canary_.Assert();

    LTRACE_ENTRY;

    const bool shutdown_read = how & ZX_SOCKET_SHUTDOWN_READ;
    const bool shutdown_write = how & ZX_SOCKET_SHUTDOWN_WRITE;

    Guard<fbl::Mutex> guard{get_lock()};

    zx_signals_t signals = GetSignalsStateLocked();
    // If we're already shut down in the requested way, return immediately.
    const uint32_t want_signals =
        (shutdown_read ? ZX_SOCKET_PEER_WRITE_DISABLED : 0) |
        (shutdown_write ? ZX_SOCKET_WRITE_DISABLED : 0);
    const uint32_t have_signals = signals & (ZX_SOCKET_PEER_WRITE_DISABLED | ZX_SOCKET_WRITE_DISABLED);
    if (want_signals == have_signals) {
        return ZX_OK;
    }
    zx_signals_t clear_mask = 0u;
    zx_signals_t set_mask = 0u;
    if (shutdown_read) {
        // Once drained, this endpoint will never become readable again.
        read_disabled_ = true;
        set_mask |= ZX_SOCKET_PEER_WRITE_DISABLED;
    }
    if (shutdown_write) {
        clear_mask |= ZX_SOCKET_WRITABLE;
        set_mask |= ZX_SOCKET_WRITE_DISABLED;
    }
    UpdateStateLocked(clear_mask, set_mask);

    // Our peer may already be closed - if so, we've already updated our own
    // bits so we are done. If the peer is still open, we need to notify them
    // of the state change.
    if (peer_ != nullptr) {
        return peer_->ShutdownOtherLocked(how);
    } else {
        return ZX_OK;
    }
}
163
ShutdownOtherLocked(uint32_t how)164 zx_status_t SocketDispatcher::ShutdownOtherLocked(uint32_t how) {
165 canary_.Assert();
166
167 const bool shutdown_read = how & ZX_SOCKET_SHUTDOWN_READ;
168 const bool shutdown_write = how & ZX_SOCKET_SHUTDOWN_WRITE;
169
170 zx_signals_t clear_mask = 0u;
171 zx_signals_t set_mask = 0u;
172 if (shutdown_read) {
173 clear_mask |= ZX_SOCKET_WRITABLE;
174 set_mask |= ZX_SOCKET_WRITE_DISABLED;
175 }
176 if (shutdown_write) {
177 read_disabled_ = true;
178 set_mask |= ZX_SOCKET_PEER_WRITE_DISABLED;
179 }
180
181 UpdateStateLocked(clear_mask, set_mask);
182 return ZX_OK;
183 }
184
Write(user_in_ptr<const void> src,size_t len,size_t * nwritten)185 zx_status_t SocketDispatcher::Write(user_in_ptr<const void> src, size_t len,
186 size_t* nwritten) TA_NO_THREAD_SAFETY_ANALYSIS {
187 canary_.Assert();
188
189 LTRACE_ENTRY;
190
191 Guard<fbl::Mutex> guard{get_lock()};
192
193 if (!peer_)
194 return ZX_ERR_PEER_CLOSED;
195 zx_signals_t signals = GetSignalsStateLocked();
196 if (signals & ZX_SOCKET_WRITE_DISABLED)
197 return ZX_ERR_BAD_STATE;
198
199 if (len == 0) {
200 *nwritten = 0;
201 return ZX_OK;
202 }
203 if (len != static_cast<size_t>(static_cast<uint32_t>(len)))
204 return ZX_ERR_INVALID_ARGS;
205
206 return peer_->WriteSelfLocked(src, len, nwritten);
207 }
208
WriteControl(user_in_ptr<const void> src,size_t len)209 zx_status_t SocketDispatcher::WriteControl(user_in_ptr<const void> src, size_t len)
210 TA_NO_THREAD_SAFETY_ANALYSIS {
211 canary_.Assert();
212
213 if ((flags_ & ZX_SOCKET_HAS_CONTROL) == 0)
214 return ZX_ERR_BAD_STATE;
215
216 if (len == 0)
217 return ZX_ERR_INVALID_ARGS;
218
219 if (len > ControlMsg::kSize)
220 return ZX_ERR_OUT_OF_RANGE;
221
222 Guard<fbl::Mutex> guard{get_lock()};
223 if (!peer_)
224 return ZX_ERR_PEER_CLOSED;
225
226 return peer_->WriteControlSelfLocked(src, len);
227 }
228
WriteControlSelfLocked(user_in_ptr<const void> src,size_t len)229 zx_status_t SocketDispatcher::WriteControlSelfLocked(user_in_ptr<const void> src,
230 size_t len) TA_NO_THREAD_SAFETY_ANALYSIS {
231 canary_.Assert();
232
233 if (control_msg_len_ != 0)
234 return ZX_ERR_SHOULD_WAIT;
235
236 if (src.copy_array_from_user(&control_msg_->msg, len) != ZX_OK)
237 return ZX_ERR_INVALID_ARGS; // Bad user buffer.
238
239 control_msg_len_ = static_cast<uint32_t>(len);
240
241 UpdateStateLocked(0u, ZX_SOCKET_CONTROL_READABLE);
242 if (peer_)
243 peer_->UpdateStateLocked(ZX_SOCKET_CONTROL_WRITABLE, 0u);
244
245 return ZX_OK;
246 }
247
WriteSelfLocked(user_in_ptr<const void> src,size_t len,size_t * written)248 zx_status_t SocketDispatcher::WriteSelfLocked(user_in_ptr<const void> src, size_t len,
249 size_t* written) TA_NO_THREAD_SAFETY_ANALYSIS {
250 canary_.Assert();
251
252 if (is_full())
253 return ZX_ERR_SHOULD_WAIT;
254
255 bool was_empty = is_empty();
256
257 size_t st = 0u;
258 zx_status_t status;
259 if (flags_ & ZX_SOCKET_DATAGRAM) {
260 status = data_.WriteDatagram(src, len, &st);
261 } else {
262 status = data_.WriteStream(src, len, &st);
263 }
264 if (status)
265 return status;
266
267 zx_signals_t clear = 0u;
268 zx_signals_t set = 0u;
269
270 if (st > 0) {
271 if (was_empty)
272 set |= ZX_SOCKET_READABLE;
273 // Assert signal if we go above the read threshold
274 if ((read_threshold_ > 0) && (data_.size() >= read_threshold_))
275 set |= ZX_SOCKET_READ_THRESHOLD;
276 if (set) {
277 UpdateStateLocked(0u, set);
278 }
279 if (peer_) {
280 size_t peer_write_threshold = peer_->write_threshold_;
281 // If free space falls below threshold, de-signal
282 if ((peer_write_threshold > 0) &&
283 ((data_.max_size() - data_.size()) < peer_write_threshold))
284 clear |= ZX_SOCKET_WRITE_THRESHOLD;
285 }
286 }
287
288 if (peer_ && is_full())
289 clear |= ZX_SOCKET_WRITABLE;
290
291 if (clear)
292 peer_->UpdateStateLocked(clear, 0u);
293
294 *written = st;
295 return status;
296 }
297
Read(user_out_ptr<void> dst,size_t len,size_t * nread)298 zx_status_t SocketDispatcher::Read(user_out_ptr<void> dst, size_t len,
299 size_t* nread) TA_NO_THREAD_SAFETY_ANALYSIS {
300 canary_.Assert();
301
302 LTRACE_ENTRY;
303
304 Guard<fbl::Mutex> guard{get_lock()};
305
306 if (len != (size_t)((uint32_t)len))
307 return ZX_ERR_INVALID_ARGS;
308
309 if (is_empty()) {
310 if (!peer_)
311 return ZX_ERR_PEER_CLOSED;
312 // If reading is disabled on our end and we're empty, we'll never become readable again.
313 // Return a different error to let the caller know.
314 if (read_disabled_)
315 return ZX_ERR_BAD_STATE;
316 return ZX_ERR_SHOULD_WAIT;
317 }
318
319 bool was_full = is_full();
320
321 auto st = data_.Read(dst, len, flags_ & ZX_SOCKET_DATAGRAM);
322
323 zx_signals_t clear = 0u;
324 zx_signals_t set = 0u;
325
326 // Deassert signal if we fell below the read threshold
327 if ((read_threshold_ > 0) && (data_.size() < read_threshold_))
328 clear |= ZX_SOCKET_READ_THRESHOLD;
329
330 if (is_empty()) {
331 clear |= ZX_SOCKET_READABLE;
332 }
333 if (set || clear) {
334 UpdateStateLocked(clear, set);
335 clear = set = 0u;
336 }
337 if (peer_) {
338 // Assert (write threshold) signal if space available is above
339 // threshold.
340 size_t peer_write_threshold = peer_->write_threshold_;
341 if (peer_write_threshold > 0 &&
342 ((data_.max_size() - data_.size()) >= peer_write_threshold))
343 set |= ZX_SOCKET_WRITE_THRESHOLD;
344 if (was_full && (st > 0))
345 set |= ZX_SOCKET_WRITABLE;
346 if (set)
347 peer_->UpdateStateLocked(0u, set);
348 }
349
350 *nread = static_cast<size_t>(st);
351 return ZX_OK;
352 }
353
ReadControl(user_out_ptr<void> dst,size_t len,size_t * nread)354 zx_status_t SocketDispatcher::ReadControl(user_out_ptr<void> dst, size_t len,
355 size_t* nread) TA_NO_THREAD_SAFETY_ANALYSIS {
356 canary_.Assert();
357
358 if ((flags_ & ZX_SOCKET_HAS_CONTROL) == 0) {
359 return ZX_ERR_BAD_STATE;
360 }
361
362 Guard<fbl::Mutex> guard{get_lock()};
363
364 if (control_msg_len_ == 0)
365 return ZX_ERR_SHOULD_WAIT;
366
367 size_t copy_len = MIN(control_msg_len_, len);
368 if (dst.copy_array_to_user(&control_msg_->msg, copy_len) != ZX_OK)
369 return ZX_ERR_INVALID_ARGS; // Invalid user buffer.
370
371 control_msg_len_ = 0;
372 UpdateStateLocked(ZX_SOCKET_CONTROL_READABLE, 0u);
373 if (peer_)
374 peer_->UpdateStateLocked(0u, ZX_SOCKET_CONTROL_WRITABLE);
375
376 *nread = copy_len;
377 return ZX_OK;
378 }
379
CheckShareable(SocketDispatcher * to_send)380 zx_status_t SocketDispatcher::CheckShareable(SocketDispatcher* to_send) {
381 // We disallow sharing of sockets that support sharing themselves
382 // and disallow sharing either end of the socket we're going to
383 // share on, thus preventing loops, etc.
384 Guard<fbl::Mutex> guard{get_lock()};
385 if ((to_send->flags_ & ZX_SOCKET_HAS_ACCEPT) ||
386 (to_send == this) || (to_send == peer_.get()))
387 return ZX_ERR_BAD_STATE;
388 return ZX_OK;
389 }
390
Share(HandleOwner h)391 zx_status_t SocketDispatcher::Share(HandleOwner h) TA_NO_THREAD_SAFETY_ANALYSIS {
392 canary_.Assert();
393
394 LTRACE_ENTRY;
395
396 if (!(flags_ & ZX_SOCKET_HAS_ACCEPT))
397 return ZX_ERR_NOT_SUPPORTED;
398
399 Guard<fbl::Mutex> guard{get_lock()};
400 if (!peer_)
401 return ZX_ERR_PEER_CLOSED;
402
403 return peer_->ShareSelfLocked(ktl::move(h));
404 }
405
ShareSelfLocked(HandleOwner h)406 zx_status_t SocketDispatcher::ShareSelfLocked(HandleOwner h) TA_NO_THREAD_SAFETY_ANALYSIS {
407 canary_.Assert();
408
409 if (accept_queue_)
410 return ZX_ERR_SHOULD_WAIT;
411
412 accept_queue_ = ktl::move(h);
413
414 UpdateStateLocked(0, ZX_SOCKET_ACCEPT);
415 if (peer_)
416 peer_->UpdateStateLocked(ZX_SOCKET_SHARE, 0);
417
418 return ZX_OK;
419 }
420
Accept(HandleOwner * h)421 zx_status_t SocketDispatcher::Accept(HandleOwner* h) TA_NO_THREAD_SAFETY_ANALYSIS {
422 canary_.Assert();
423
424 if (!(flags_ & ZX_SOCKET_HAS_ACCEPT))
425 return ZX_ERR_NOT_SUPPORTED;
426
427 Guard<fbl::Mutex> guard{get_lock()};
428
429 if (!accept_queue_)
430 return ZX_ERR_SHOULD_WAIT;
431
432 *h = ktl::move(accept_queue_);
433
434 UpdateStateLocked(ZX_SOCKET_ACCEPT, 0);
435 if (peer_)
436 peer_->UpdateStateLocked(0, ZX_SOCKET_SHARE);
437
438 return ZX_OK;
439 }
440
// NOTE(abdulla): peer_ is protected by get_lock() while peer_->data_
// is protected by peer_->get_lock(). These two locks are aliases of
// one another, so we must only acquire one of them. Thread-safety
// analysis does not know they are the same lock, so we must disable
// analysis.
GetInfo(zx_info_socket_t * info) const446 void SocketDispatcher::GetInfo(zx_info_socket_t* info) const TA_NO_THREAD_SAFETY_ANALYSIS {
447 canary_.Assert();
448 Guard<fbl::Mutex> guard{get_lock()};
449 *info = zx_info_socket_t{
450 .options = flags_,
451 .rx_buf_max = data_.max_size(),
452 .rx_buf_size = data_.size(),
453 .rx_buf_available = data_.size(flags_ & ZX_SOCKET_DATAGRAM),
454 .tx_buf_max = peer_ ? peer_->data_.max_size() : 0,
455 .tx_buf_size = peer_ ? peer_->data_.size() : 0,
456 };
457 }
458
// Returns the current read threshold (0 means read thresholding is disabled
// — see SetReadThreshold()).
size_t SocketDispatcher::GetReadThreshold() const TA_NO_THREAD_SAFETY_ANALYSIS {
    canary_.Assert();
    Guard<fbl::Mutex> guard{get_lock()};
    return read_threshold_;
}
464
// Returns the current write threshold (0 means write thresholding is
// disabled — see SetWriteThreshold()).
size_t SocketDispatcher::GetWriteThreshold() const TA_NO_THREAD_SAFETY_ANALYSIS {
    canary_.Assert();
    Guard<fbl::Mutex> guard{get_lock()};
    return write_threshold_;
}
470
SetReadThreshold(size_t value)471 zx_status_t SocketDispatcher::SetReadThreshold(size_t value) TA_NO_THREAD_SAFETY_ANALYSIS {
472 canary_.Assert();
473 Guard<fbl::Mutex> guard{get_lock()};
474 if (value > data_.max_size())
475 return ZX_ERR_INVALID_ARGS;
476 read_threshold_ = value;
477 // Setting 0 disables thresholding. Deassert signal unconditionally.
478 if (value == 0) {
479 UpdateStateLocked(ZX_SOCKET_READ_THRESHOLD, 0u);
480 } else {
481 if (data_.size() >= read_threshold_) {
482 // Assert signal if we have queued data above the read threshold
483 UpdateStateLocked(0u, ZX_SOCKET_READ_THRESHOLD);
484 } else {
485 // De-assert signal if we upped threshold and queued data drops below
486 UpdateStateLocked(ZX_SOCKET_READ_THRESHOLD, 0u);
487 }
488 }
489 return ZX_OK;
490 }
491
SetWriteThreshold(size_t value)492 zx_status_t SocketDispatcher::SetWriteThreshold(size_t value) TA_NO_THREAD_SAFETY_ANALYSIS {
493 canary_.Assert();
494 Guard<fbl::Mutex> guard{get_lock()};
495 if (peer_ == NULL)
496 return ZX_ERR_PEER_CLOSED;
497 if (value > peer_->data_.max_size())
498 return ZX_ERR_INVALID_ARGS;
499 write_threshold_ = value;
500 // Setting 0 disables thresholding. Deassert signal unconditionally.
501 if (value == 0) {
502 UpdateStateLocked(ZX_SOCKET_WRITE_THRESHOLD, 0u);
503 } else {
504 // Assert signal if we have available space above the write threshold
505 if ((peer_->data_.max_size() - peer_->data_.size()) >= write_threshold_) {
506 // Assert signal if we have available space above the write threshold
507 UpdateStateLocked(0u, ZX_SOCKET_WRITE_THRESHOLD);
508 } else {
509 // De-assert signal if we upped threshold and available space drops below
510 UpdateStateLocked(ZX_SOCKET_WRITE_THRESHOLD, 0u);
511 }
512 }
513 return ZX_OK;
514 }
515