1 // Copyright 2016 The Fuchsia Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "devhost.h"
6 
7 #include <assert.h>
8 #include <atomic>
9 #include <errno.h>
10 #include <fcntl.h>
11 #include <new>
12 #include <stdio.h>
13 #include <stdlib.h>
14 #include <string.h>
15 #include <sys/stat.h>
16 #include <threads.h>
17 #include <unistd.h>
18 #include <utility>
19 
20 #include <ddk/device.h>
21 #include <ddk/driver.h>
22 
23 #include <zircon/assert.h>
24 #include <zircon/listnode.h>
25 #include <zircon/syscalls.h>
26 #include <zircon/types.h>
27 
28 #include <fbl/auto_lock.h>
29 
30 namespace devmgr {
31 
// Set TRACE to 1 to enable verbose xprintf() debug logging in this file.
#define TRACE 0

#if TRACE
#define xprintf(fmt...) printf(fmt)
#else
// Compiled out: expands to an empty statement with no side effects.
#define xprintf(fmt...) \
    do {                \
    } while (0)
#endif

// Set to 1 to log device add/remove operations.
#define TRACE_ADD_REMOVE 0
43 
namespace internal {
// Process-wide devhost API lock and the thread id of its current owner
// (0 when unowned), used by the locking helpers declared elsewhere.
__LOCAL mtx_t devhost_api_lock = MTX_INIT;
__LOCAL std::atomic<thrd_t> devhost_api_lock_owner(0);
} // namespace internal

// Per-thread creation context; see devhost_set_creation_context() below.
static thread_local CreationContext* g_creation_context;
50 
// The creation context is setup before the bind() or create() ops are
// invoked to provide the ability to sanity check the required device_add()
// operations these hooks should be making.
void devhost_set_creation_context(CreationContext* ctx) {
    // Publishes |ctx| for the current thread; pass nullptr to clear it.
    g_creation_context = ctx;
}
57 
default_open(void * ctx,zx_device_t ** out,uint32_t flags)58 static zx_status_t default_open(void* ctx, zx_device_t** out, uint32_t flags) {
59     return ZX_OK;
60 }
61 
default_open_at(void * ctx,zx_device_t ** out,const char * path,uint32_t flags)62 static zx_status_t default_open_at(void* ctx, zx_device_t** out, const char* path, uint32_t flags) {
63     return ZX_ERR_NOT_SUPPORTED;
64 }
65 
default_close(void * ctx,uint32_t flags)66 static zx_status_t default_close(void* ctx, uint32_t flags) {
67     return ZX_OK;
68 }
69 
default_unbind(void * ctx)70 static void default_unbind(void* ctx) {
71 }
72 
default_release(void * ctx)73 static void default_release(void* ctx) {
74 }
75 
default_read(void * ctx,void * buf,size_t count,zx_off_t off,size_t * actual)76 static zx_status_t default_read(void* ctx, void* buf, size_t count, zx_off_t off, size_t* actual) {
77     return ZX_ERR_NOT_SUPPORTED;
78 }
79 
default_write(void * ctx,const void * buf,size_t count,zx_off_t off,size_t * actual)80 static zx_status_t default_write(void* ctx, const void* buf, size_t count, zx_off_t off, size_t* actual) {
81     return ZX_ERR_NOT_SUPPORTED;
82 }
83 
default_get_size(void * ctx)84 static zx_off_t default_get_size(void* ctx) {
85     return 0;
86 }
87 
default_ioctl(void * ctx,uint32_t op,const void * in_buf,size_t in_len,void * out_buf,size_t out_len,size_t * out_actual)88 static zx_status_t default_ioctl(void* ctx, uint32_t op,
89                              const void* in_buf, size_t in_len,
90                              void* out_buf, size_t out_len, size_t* out_actual) {
91     return ZX_ERR_NOT_SUPPORTED;
92 }
93 
default_suspend(void * ctx,uint32_t flags)94 static zx_status_t default_suspend(void* ctx, uint32_t flags) {
95     return ZX_ERR_NOT_SUPPORTED;
96 }
97 
default_resume(void * ctx,uint32_t flags)98 static zx_status_t default_resume(void* ctx, uint32_t flags) {
99     return ZX_ERR_NOT_SUPPORTED;
100 }
101 
default_rxrpc(void * ctx,zx_handle_t channel)102 static zx_status_t default_rxrpc(void* ctx, zx_handle_t channel) {
103     return ZX_ERR_NOT_SUPPORTED;
104 }
105 
default_message(void * ctx,fidl_msg_t * msg,fidl_txn_t * txn)106 static zx_status_t default_message(void *ctx, fidl_msg_t* msg, fidl_txn_t* txn) {
107     fidl_message_header_t* hdr = (fidl_message_header_t*) msg->bytes;
108     printf("devhost: Unsupported FIDL operation: 0x%x\n", hdr->ordinal);
109     zx_handle_close_many(msg->handles, msg->num_handles);
110     return ZX_ERR_NOT_SUPPORTED;
111 }
112 
__anon7dd01fd10102() 113 zx_protocol_device_t device_default_ops = []() {
114     zx_protocol_device_t ops = {};
115     ops.open = default_open;
116     ops.open_at = default_open_at;
117     ops.close = default_close;
118     ops.unbind = default_unbind;
119     ops.release = default_release;
120     ops.read = default_read;
121     ops.write = default_write;
122     ops.get_size = default_get_size;
123     ops.ioctl = default_ioctl;
124     ops.suspend = default_suspend;
125     ops.resume = default_resume;
126     ops.rxrpc = default_rxrpc;
127     ops.message = default_message;
128     return ops;
129 }();
130 
// Trap handler wired into every op of a destroyed device (see
// devhost_device_destroy()) so that a use-after-free is immediately fatal
// rather than silently corrupting state.
[[noreturn]]
static void device_invalid_fatal(void* ctx) {
    printf("devhost: FATAL: zx_device_t used after destruction.\n");
    __builtin_trap();
}
136 
__anon7dd01fd10202() 137 static zx_protocol_device_t device_invalid_ops = []() {
138     zx_protocol_device_t ops = {};
139     ops.open = +[](void* ctx, zx_device_t**, uint32_t) -> zx_status_t {
140         device_invalid_fatal(ctx);
141     };
142     ops.open_at = +[](void* ctx, zx_device_t**, const char*, uint32_t) -> zx_status_t {
143         device_invalid_fatal(ctx);
144     };
145     ops.close = +[](void* ctx, uint32_t) -> zx_status_t {
146         device_invalid_fatal(ctx);
147     };
148     ops.unbind = +[](void* ctx) {
149         device_invalid_fatal(ctx);
150     };
151     ops.release = +[](void* ctx) {
152         device_invalid_fatal(ctx);
153     };
154     ops.read = +[](void* ctx, void*, size_t, size_t, size_t*) -> zx_status_t {
155         device_invalid_fatal(ctx);
156     };
157     ops.write = +[](void* ctx, const void*, size_t, size_t, size_t*) -> zx_status_t {
158         device_invalid_fatal(ctx);
159     };
160     ops.get_size = +[](void* ctx) -> zx_off_t {
161         device_invalid_fatal(ctx);
162     };
163     ops.ioctl = +[](void* ctx, uint32_t, const void*, size_t, void*, size_t, size_t*)
164         -> zx_status_t {
165         device_invalid_fatal(ctx);
166     };
167     ops.suspend = +[](void* ctx, uint32_t) -> zx_status_t {
168         device_invalid_fatal(ctx);
169     };
170     ops.resume = +[](void* ctx, uint32_t) -> zx_status_t {
171         device_invalid_fatal(ctx);
172     };
173     ops.rxrpc = +[](void* ctx, zx_handle_t) -> zx_status_t {
174         device_invalid_fatal(ctx);
175     };
176     ops.message = +[](void* ctx, fidl_msg_t*, fidl_txn_t*) -> zx_status_t {
177         device_invalid_fatal(ctx);
178     };
179     return ops;
180 }();
181 
// Maximum number of dead devices to hold on the dead device list
// before we start free'ing the oldest when adding a new one.
#define DEAD_DEVICE_MAX 7

// Poisons |dev| and queues it on a small FIFO of dead devices instead of
// freeing it immediately, both to catch use-after-free (all ops now trap)
// and to keep the compiler from optimizing the poisoning away.
void devhost_device_destroy(zx_device_t* dev) REQ_DM_LOCK {
    // FIFO of poisoned-but-not-yet-freed devices, newest at the back.
    static fbl::DoublyLinkedList<zx_device*, zx_device::Node> dead_list;
    static unsigned dead_count = 0;

    // ensure any ops will be fatal
    dev->ops = &device_invalid_ops;

    // Clobber the magic so device_validate() would reject this device.
    dev->magic = 0xdeaddeaddeaddead;

    // ensure all owned handles are invalid
    dev->event.reset();
    dev->local_event.reset();

    // ensure all pointers are invalid
    dev->ctx = nullptr;
    dev->driver = nullptr;
    dev->parent.reset();
    dev->conn.store(nullptr);
    {
        fbl::AutoLock guard(&dev->proxy_ios_lock);
        dev->proxy_ios = nullptr;
    }

    // Defer destruction to help catch use-after-free and also
    // so the compiler can't (easily) optimize away the poisoning
    // we do above.
    dead_list.push_back(dev);

    // Cap the backlog: once DEAD_DEVICE_MAX devices are queued, actually
    // delete the oldest each time a new one arrives.
    if (dead_count == DEAD_DEVICE_MAX) {
        zx_device_t* to_delete = dead_list.pop_front();
        delete to_delete;
    } else {
        dead_count++;
    }
}
221 
// Deferred work list: devices whose final teardown is postponed while
// enumerators are walking the device tree (see enum_lock_* below).
fbl::DoublyLinkedList<zx_device*, zx_device::DeferNode> defer_device_list;
// Count of active enumerators; devhost_finalize() runs when it reaches zero.
int devhost_enumerators = 0;
225 
devhost_finalize()226 void devhost_finalize() {
227     // Early exit if there's no work
228     if (defer_device_list.is_empty()) {
229         return;
230     }
231 
232     // Otherwise we snapshot the list
233     auto list = std::move(defer_device_list);
234 
235     // We detach all the devices from their parents list-of-children
236     // while under the DM lock to avoid an enumerator starting to mutate
237     // things before we're done detaching them.
238     for (auto& dev : list) {
239         if (dev.parent) {
240             dev.parent->children.erase(dev);
241         }
242     }
243 
244     // Then we can get to the actual final teardown where we have
245     // to drop the lock to call the callback
246     zx_device* dev;
247     while ((dev = list.pop_front()) != nullptr) {
248         // invoke release op
249         if (dev->flags & DEV_FLAG_ADDED) {
250             ApiAutoRelock relock;
251             dev->ReleaseOp();
252         }
253 
254         if (dev->parent) {
255             // If the parent wants rebinding when its children are gone,
256             // And the parent is not dead, And this was the last child...
257             if ((dev->parent->flags & DEV_FLAG_WANTS_REBIND) &&
258                 (!(dev->parent->flags & DEV_FLAG_DEAD)) &&
259                 dev->parent->children.is_empty()) {
260                 // Clear the wants rebind flag and request the rebind
261                 dev->parent->flags &= (~DEV_FLAG_WANTS_REBIND);
262                 devhost_device_bind(dev->parent, "");
263             }
264 
265             dev->parent.reset();
266         }
267 
268         // destroy/deallocate the device
269         devhost_device_destroy(dev);
270     }
271 }
272 
273 
// enum_lock_{acquire,release}() are used whenever we're iterating
// on the device tree.  When "enum locked" it is legal to add a new
// child to the end of a device's list-of-children, but it is not
// legal to remove a child.  This avoids badness when we have to
// drop the DM lock to call into device ops while enumerating.

// Enters an enumeration region.  Re-entrant: a plain depth counter.
static void enum_lock_acquire() REQ_DM_LOCK {
    devhost_enumerators++;
}

// Leaves an enumeration region; the last one out runs the deferred
// teardown accumulated on defer_device_list in the meantime.
static void enum_lock_release() REQ_DM_LOCK {
    if (--devhost_enumerators == 0) {
        devhost_finalize();
    }
}
289 
devhost_device_create(zx_driver_t * drv,const fbl::RefPtr<zx_device_t> & parent,const char * name,void * ctx,zx_protocol_device_t * ops,fbl::RefPtr<zx_device_t> * out)290 zx_status_t devhost_device_create(zx_driver_t* drv, const fbl::RefPtr<zx_device_t>& parent,
291                                   const char* name, void* ctx,
292                                   zx_protocol_device_t* ops, fbl::RefPtr<zx_device_t>* out)
293                                   REQ_DM_LOCK {
294 
295     if (!drv) {
296         printf("devhost: device_add could not find driver!\n");
297         return ZX_ERR_INVALID_ARGS;
298     }
299 
300     fbl::RefPtr<zx_device> dev;
301     zx_status_t status = zx_device::Create(&dev);
302     if (status != ZX_OK) {
303         return status;
304     }
305 
306     dev->ops = ops;
307     dev->driver = drv;
308 
309     if (name == nullptr) {
310         printf("devhost: dev=%p has null name.\n", dev.get());
311         name = "invalid";
312         dev->magic = 0;
313     }
314 
315     size_t len = strlen(name);
316     // TODO(teisenbe): I think this is overly aggresive, and could be changed
317     // to |len > ZX_DEVICE_NAME_MAX| and |len = ZX_DEVICE_NAME_MAX|.
318     if (len >= ZX_DEVICE_NAME_MAX) {
319         printf("devhost: dev=%p name too large '%s'\n", dev.get(), name);
320         len = ZX_DEVICE_NAME_MAX - 1;
321         dev->magic = 0;
322     }
323 
324     memcpy(dev->name, name, len);
325     dev->name[len] = 0;
326     // TODO(teisenbe): Why do we default to dev.get() here?  Why not just
327     // nullptr
328     dev->ctx = ctx ? ctx : dev.get();
329     *out = std::move(dev);
330     return ZX_OK;
331 }
332 
// Fills in a missing (nullptr) hook on |ops| with the matching default_*
// implementation defined near the top of this file.
#define DEFAULT_IF_NULL(ops,method) \
    if (ops->method == nullptr) { \
        ops->method = default_##method; \
    }
337 
device_validate(const fbl::RefPtr<zx_device_t> & dev)338 static zx_status_t device_validate(const fbl::RefPtr<zx_device_t>& dev) REQ_DM_LOCK {
339     if (dev == nullptr) {
340         printf("INVAL: nullptr!\n");
341         return ZX_ERR_INVALID_ARGS;
342     }
343     if (dev->flags & DEV_FLAG_ADDED) {
344         printf("device already added: %p(%s)\n", dev.get(), dev->name);
345         return ZX_ERR_BAD_STATE;
346     }
347     if (dev->magic != DEV_MAGIC) {
348         return ZX_ERR_BAD_STATE;
349     }
350     if (dev->ops == nullptr) {
351         printf("device add: %p(%s): nullptr ops\n", dev.get(), dev->name);
352         return ZX_ERR_INVALID_ARGS;
353     }
354     if ((dev->protocol_id == ZX_PROTOCOL_MISC_PARENT) ||
355         (dev->protocol_id == ZX_PROTOCOL_ROOT)) {
356         // These protocols is only allowed for the special
357         // singleton misc or root parent devices.
358         return ZX_ERR_INVALID_ARGS;
359     }
360     // devices which do not declare a primary protocol
361     // are implied to be misc devices
362     if (dev->protocol_id == 0) {
363         dev->protocol_id = ZX_PROTOCOL_MISC;
364     }
365 
366     // install default methods if needed
367     zx_protocol_device_t* ops = dev->ops;
368     DEFAULT_IF_NULL(ops, open);
369     DEFAULT_IF_NULL(ops, open_at);
370     DEFAULT_IF_NULL(ops, close);
371     DEFAULT_IF_NULL(ops, unbind);
372     DEFAULT_IF_NULL(ops, release);
373     DEFAULT_IF_NULL(ops, read);
374     DEFAULT_IF_NULL(ops, write);
375     DEFAULT_IF_NULL(ops, get_size);
376     DEFAULT_IF_NULL(ops, ioctl);
377     DEFAULT_IF_NULL(ops, suspend);
378     DEFAULT_IF_NULL(ops, resume);
379     DEFAULT_IF_NULL(ops, rxrpc);
380     DEFAULT_IF_NULL(ops, message);
381 
382     return ZX_OK;
383 }
384 
devhost_device_add(const fbl::RefPtr<zx_device_t> & dev,const fbl::RefPtr<zx_device_t> & parent,const zx_device_prop_t * props,uint32_t prop_count,const char * proxy_args)385 zx_status_t devhost_device_add(const fbl::RefPtr<zx_device_t>& dev,
386                                const fbl::RefPtr<zx_device_t>& parent,
387                                const zx_device_prop_t* props, uint32_t prop_count,
388                                const char* proxy_args)
389                                REQ_DM_LOCK {
390     auto fail = [&dev](zx_status_t status) {
391         if (dev) {
392             dev->flags |= DEV_FLAG_DEAD | DEV_FLAG_VERY_DEAD;
393         }
394         return status;
395     };
396 
397     zx_status_t status;
398     if ((status = device_validate(dev)) < 0) {
399         return fail(status);
400     }
401     if (parent == nullptr) {
402         printf("device_add: cannot add %p(%s) to nullptr parent\n", dev.get(), dev->name);
403         return fail(ZX_ERR_NOT_SUPPORTED);
404     }
405     if (parent->flags & DEV_FLAG_DEAD) {
406         printf("device add: %p: is dead, cannot add child %p\n", parent.get(), dev.get());
407         return fail(ZX_ERR_BAD_STATE);
408     }
409 
410     CreationContext* ctx = nullptr;
411 
412     // if creation ctx (thread local) is set, we are in a thread
413     // that is handling a bind() or create() callback and if that
414     // ctx's parent matches the one provided to add we need to do
415     // some additional checking...
416     if ((g_creation_context != nullptr) && (g_creation_context->parent == parent)) {
417         ctx = g_creation_context;
418         // If the RPC channel exists, this is for create rather than bind.
419         if (ctx->rpc->is_valid()) {
420             // create() must create only one child
421             if (ctx->child != nullptr) {
422                 printf("devhost: driver attempted to create multiple proxy devices!\n");
423                 return ZX_ERR_BAD_STATE;
424             }
425         }
426     }
427 
428 #if TRACE_ADD_REMOVE
429     printf("devhost: device add: %p(%s) parent=%p(%s)\n",
430             dev.get(), dev->name, parent.get(), parent->name);
431 #endif
432 
433     // Don't create an event handle if we alredy have one
434     if (!dev->event.is_valid() &&
435         ((status = zx::eventpair::create(0, &dev->event, &dev->local_event)) < 0)) {
436         printf("device add: %p(%s): cannot create event: %d\n",
437                dev.get(), dev->name, status);
438         return fail(status);
439     }
440 
441     dev->flags |= DEV_FLAG_BUSY;
442 
443     // proxy devices are created through this handshake process
444     if (ctx && (ctx->rpc->is_valid())) {
445         if (dev->flags & DEV_FLAG_INVISIBLE) {
446             printf("devhost: driver attempted to create invisible device in create()\n");
447             return ZX_ERR_INVALID_ARGS;
448         }
449         dev->flags |= DEV_FLAG_ADDED;
450         dev->flags &= (~DEV_FLAG_BUSY);
451         dev->rpc = zx::unowned_channel(ctx->rpc);
452         ctx->child = dev;
453         return ZX_OK;
454     }
455 
456     dev->parent = parent;
457 
458     // attach to our parent
459     parent->children.push_back(dev.get());
460 
461     if (!(dev->flags & DEV_FLAG_INSTANCE)) {
462         // devhost_add always consumes the handle
463         status = devhost_add(parent, dev, proxy_args, props, prop_count);
464         if (status < 0) {
465             printf("devhost: %p(%s): remote add failed %d\n",
466                    dev.get(), dev->name, status);
467             dev->parent->children.erase(*dev);
468             dev->parent.reset();
469 
470             // since we are under the lock the whole time, we added the node
471             // to the tail and then we peeled it back off the tail when we
472             // failed, we don't need to interact with the enum lock mechanism
473             dev->flags &= (~DEV_FLAG_BUSY);
474             return status;
475         }
476     }
477     dev->flags |= DEV_FLAG_ADDED;
478     dev->flags &= (~DEV_FLAG_BUSY);
479 
480     // record this device in the creation context if there is one
481     if (ctx && (ctx->child == nullptr)) {
482         ctx->child = dev;
483     }
484     return ZX_OK;
485 }
486 
// Any of these flags makes a device ineligible for devhost_device_remove();
// removal_problem() maps them to human-readable reasons.
#define REMOVAL_BAD_FLAGS \
    (DEV_FLAG_DEAD | DEV_FLAG_BUSY |\
     DEV_FLAG_INSTANCE | DEV_FLAG_MULTI_BIND)
490 
removal_problem(uint32_t flags)491 static const char* removal_problem(uint32_t flags) {
492     if (flags & DEV_FLAG_DEAD) {
493         return "already dead";
494     }
495     if (flags & DEV_FLAG_BUSY) {
496         return "being created";
497     }
498     if (flags & DEV_FLAG_INSTANCE) {
499         return "ephemeral device";
500     }
501     if (flags & DEV_FLAG_MULTI_BIND) {
502         return "multi-bind-able device";
503     }
504     return "?";
505 }
506 
devhost_unbind_children(const fbl::RefPtr<zx_device_t> & dev)507 static void devhost_unbind_children(const fbl::RefPtr<zx_device_t>& dev) REQ_DM_LOCK {
508 #if TRACE_ADD_REMOVE
509     printf("devhost_unbind_children: %p(%s)\n", dev.get(), dev->name);
510 #endif
511     enum_lock_acquire();
512     for (auto& child : dev->children) {
513         if (!(child.flags & DEV_FLAG_DEAD)) {
514             devhost_device_unbind(fbl::WrapRefPtr(&child));
515         }
516     }
517     enum_lock_release();
518 }
519 
devhost_device_remove(fbl::RefPtr<zx_device_t> dev)520 zx_status_t devhost_device_remove(fbl::RefPtr<zx_device_t> dev) REQ_DM_LOCK {
521     if (dev->flags & REMOVAL_BAD_FLAGS) {
522         printf("device: %p(%s): cannot be removed (%s)\n",
523                dev.get(), dev->name, removal_problem(dev->flags));
524         return ZX_ERR_INVALID_ARGS;
525     }
526 #if TRACE_ADD_REMOVE
527     printf("device: %p(%s): is being removed\n", dev.get(), dev->name);
528 #endif
529     dev->flags |= DEV_FLAG_DEAD;
530 
531     devhost_unbind_children(dev);
532 
533     // cause the vfs entry to be unpublished to avoid further open() attempts
534     xprintf("device: %p: devhost->devmgr remove rpc\n", dev.get());
535     devhost_remove(dev);
536 
537     dev->flags |= DEV_FLAG_VERY_DEAD;
538     return ZX_OK;
539 }
540 
devhost_device_rebind(const fbl::RefPtr<zx_device_t> & dev)541 zx_status_t devhost_device_rebind(const fbl::RefPtr<zx_device_t>& dev) REQ_DM_LOCK {
542     // note that we want to be rebound when our children are all gone
543     dev->flags |= DEV_FLAG_WANTS_REBIND;
544 
545     // request that any existing children go away
546     devhost_unbind_children(dev);
547 
548     return ZX_OK;
549 }
550 
devhost_device_unbind(const fbl::RefPtr<zx_device_t> & dev)551 zx_status_t devhost_device_unbind(const fbl::RefPtr<zx_device_t>& dev) REQ_DM_LOCK {
552     if (!(dev->flags & DEV_FLAG_UNBOUND)) {
553         dev->flags |= DEV_FLAG_UNBOUND;
554         // Call dev's unbind op.
555         if (dev->ops->unbind) {
556 #if TRACE_ADD_REMOVE
557             printf("call unbind dev: %p(%s)\n", dev.get(), dev->name);
558 #endif
559             ApiAutoRelock relock;
560             dev->UnbindOp();
561         }
562     }
563     return ZX_OK;
564 }
565 
devhost_device_open_at(const fbl::RefPtr<zx_device_t> & dev,fbl::RefPtr<zx_device_t> * out,const char * path,uint32_t flags)566 zx_status_t devhost_device_open_at(const fbl::RefPtr<zx_device_t>& dev,
567                                    fbl::RefPtr<zx_device_t>* out,
568                                    const char* path, uint32_t flags)
569                                    REQ_DM_LOCK {
570     if (dev->flags & DEV_FLAG_DEAD) {
571         printf("device open: %p(%s) is dead!\n", dev.get(), dev->name);
572         return ZX_ERR_BAD_STATE;
573     }
574     fbl::RefPtr<zx_device_t> new_ref(dev);
575     zx_status_t r;
576     zx_device_t* opened_dev = nullptr;
577     {
578         ApiAutoRelock relock;
579         if (path) {
580             r = dev->OpenAtOp(&opened_dev, path, flags);
581         } else {
582             r = dev->OpenOp(&opened_dev, flags);
583         }
584     }
585     if (r < 0) {
586         new_ref.reset();
587     } else if (opened_dev != nullptr) {
588         // open created a per-instance device for us
589         new_ref.reset();
590         // Claim the reference from open
591         new_ref = fbl::internal::MakeRefPtrNoAdopt(opened_dev);
592 
593         if (!(opened_dev->flags & DEV_FLAG_INSTANCE)) {
594             printf("device open: %p(%s) in bad state %x\n", opened_dev, opened_dev->name, flags);
595             panic();
596         }
597     }
598     *out = std::move(new_ref);
599     return r;
600 }
601 
devhost_device_close(fbl::RefPtr<zx_device_t> dev,uint32_t flags)602 zx_status_t devhost_device_close(fbl::RefPtr<zx_device_t> dev, uint32_t flags) REQ_DM_LOCK {
603     ApiAutoRelock relock;
604     return dev->CloseOp(flags);
605 }
606 
devhost_device_suspend_locked(const fbl::RefPtr<zx_device> & dev,uint32_t flags)607 static zx_status_t devhost_device_suspend_locked(const fbl::RefPtr<zx_device>& dev,
608                                                uint32_t flags) REQ_DM_LOCK {
609     // first suspend children (so we suspend from leaf up)
610     zx_status_t st;
611     for (auto& child : dev->children) {
612         if (!(child.flags & DEV_FLAG_DEAD)) {
613             st = devhost_device_suspend(fbl::WrapRefPtr(&child), flags);
614             if (st != ZX_OK) {
615                 return st;
616             }
617         }
618     }
619 
620     // then invoke our suspend hook
621     {
622         ApiAutoRelock relock;
623         st = dev->ops->suspend(dev->ctx, flags);
624     }
625 
626     // default_suspend() returns ZX_ERR_NOT_SUPPORTED
627     if ((st != ZX_OK) && (st != ZX_ERR_NOT_SUPPORTED)) {
628         return st;
629     } else {
630         return ZX_OK;
631     }
632 }
633 
devhost_device_suspend(const fbl::RefPtr<zx_device> & dev,uint32_t flags)634 zx_status_t devhost_device_suspend(const fbl::RefPtr<zx_device>& dev,
635                                    uint32_t flags) REQ_DM_LOCK {
636     //TODO this should eventually be two-pass using SUSPENDING/SUSPENDED flags
637     enum_lock_acquire();
638     zx_status_t r = devhost_device_suspend_locked(dev, flags);
639     enum_lock_release();
640     return r;
641 }
642 
643 } // namespace devmgr
644