1 // Copyright 2017 The Fuchsia Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 // Standard Includes
6 #include <endian.h>
7 #include <inttypes.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <sys/param.h>
12 #include <threads.h>
13 #include <stdbool.h>
14 
15 // DDK Includes
16 #include <ddk/binding.h>
17 #include <ddk/device.h>
18 #include <ddk/debug.h>
19 #include <ddk/protocol/platform/device.h>
20 #include <ddk/protocol/sdmmc.h>
21 #include <ddk/trace/event.h>
22 
23 // Zircon Includes
24 #include <lib/sync/completion.h>
25 #include <pretty/hexdump.h>
26 #include <zircon/assert.h>
27 #include <zircon/process.h>
28 #include <zircon/syscalls.h>
29 #include <zircon/threads.h>
30 #include <zircon/device/block.h>
31 
32 #include "sdmmc.h"
33 
// Signal bits on dev->worker_event, used to hand work and lifecycle
// notifications between the block-protocol hooks, the worker thread, and
// sdmmc_release().
#define SDMMC_TXN_RECEIVED          ZX_EVENT_SIGNALED   // a txn was queued
#define SDMMC_SHUTDOWN              ZX_USER_SIGNAL_0    // release() asks the worker to exit
#define SDMMC_SHUTDOWN_DONE         ZX_USER_SIGNAL_1    // worker exited (or will never loop)
#define SDMMC_ADD_MMC_CHILD_DONE    ZX_USER_SIGNAL_2    // block child device was published

// No trailing ';' inside the macro body: call sites supply it.  Embedding the
// semicolon would expand to two statements and break unbraced if/else usage.
#define SDMMC_LOCK(dev)   mtx_lock(&(dev)->lock)
#define SDMMC_UNLOCK(dev) mtx_unlock(&(dev)->lock)

// Extract the opcode bits from a block_op_t command word.
#define BLOCK_OP(op)    ((op) & BLOCK_OP_MASK)
43 
// Block I/O transaction, one per client request.  Clients allocate
// sizeof(sdmmc_txn_t) per block op (the size reported by sdmmc_query()),
// giving the driver room for its bookkeeping alongside the block_op_t.
typedef struct sdmmc_txn {
    block_op_t bop;                           // client-visible block op; sdmmc_queue() recovers the txn via containerof()
    list_node_t node;                         // link in sdmmc_device_t's txn_list
    block_impl_queue_callback completion_cb;  // client completion callback; may be NULL (completion is then just logged)
    void* cookie;                             // opaque client context passed back to completion_cb
} sdmmc_txn_t;
51 
// Finish a block transaction: close the trace span opened in sdmmc_do_txn()
// (a no-op when tracing is disabled) and invoke the client's completion
// callback with |status|.  A txn without a callback is only logged.
static void block_complete(sdmmc_txn_t* txn, zx_status_t status, trace_async_id_t async_id) {
    block_op_t* bop = &txn->bop;

    if (txn->completion_cb == NULL) {
        zxlogf(TRACE, "sdmmc: block op %p completion_cb unset!\n", bop);
        return;
    }

    TRACE_ASYNC_END("sdmmc", "sdmmc_do_txn", async_id,
                    "command", TA_INT32(bop->rw.command),
                    "extra", TA_INT32(bop->rw.extra),
                    "length", TA_INT32(bop->rw.length),
                    "offset_vmo", TA_INT64(bop->rw.offset_vmo),
                    "offset_dev", TA_INT64(bop->rw.offset_dev),
                    "txn_status", TA_INT32(status));
    txn->completion_cb(txn->cookie, status, bop);
}
68 
sdmmc_get_size(void * ctx)69 static zx_off_t sdmmc_get_size(void* ctx) {
70     sdmmc_device_t* dev = ctx;
71     return dev->block_info.block_count * dev->block_info.block_size;
72 }
73 
sdmmc_ioctl(void * ctx,uint32_t op,const void * cmd,size_t cmdlen,void * reply,size_t max,size_t * out_actual)74 static zx_status_t sdmmc_ioctl(void* ctx, uint32_t op, const void* cmd,
75                                size_t cmdlen, void* reply, size_t max, size_t* out_actual) {
76     sdmmc_device_t* dev = ctx;
77     switch (op) {
78     case IOCTL_BLOCK_GET_INFO: {
79         block_info_t* info = reply;
80         if (max < sizeof(*info)) {
81             return ZX_ERR_BUFFER_TOO_SMALL;
82         }
83         memcpy(info, &dev->block_info, sizeof(*info));
84         *out_actual = sizeof(*info);
85         return ZX_OK;
86     }
87     default:
88         return ZX_ERR_NOT_SUPPORTED;
89     }
90     return 0;
91 }
92 
sdmmc_unbind(void * ctx)93 static void sdmmc_unbind(void* ctx) {
94     sdmmc_device_t* dev = ctx;
95     SDMMC_LOCK(dev);
96     if (dev->dead) {
97         //Already in middle of release.
98         SDMMC_UNLOCK(dev);
99         return;
100     }
101     dev->dead = true;
102     SDMMC_UNLOCK(dev);
103     device_remove(dev->zxdev);
104 }
105 
sdmmc_release(void * ctx)106 static void sdmmc_release(void* ctx) {
107     sdmmc_device_t* dev = ctx;
108     SDMMC_LOCK(dev);
109     dev->dead = true;
110     bool worker_thread_started = dev->worker_thread_started;
111     SDMMC_UNLOCK(dev);
112 
113     if (worker_thread_started) {
114         //Wait until the probe is done.If we know the type
115         //of the device, we can act accordingly.
116         uint32_t pending;
117         zx_object_wait_one(dev->worker_event, SDMMC_ADD_MMC_CHILD_DONE | SDMMC_SHUTDOWN_DONE,
118                            ZX_TIME_INFINITE, &pending);
119         if (pending & SDMMC_SHUTDOWN_DONE) {
120             thrd_join(dev->worker_thread, NULL);
121         } else if (pending & SDMMC_ADD_MMC_CHILD_DONE) {
122             zx_object_signal(dev->worker_event, 0, SDMMC_SHUTDOWN);
123             zx_object_wait_one(dev->worker_event, SDMMC_SHUTDOWN_DONE,
124                                ZX_TIME_INFINITE, NULL);
125             thrd_join(dev->worker_thread, NULL);
126         }
127     } else {
128         goto exit;
129     }
130 
131     // error out all pending requests
132     trace_async_id_t async_id = dev->async_id;
133     sdmmc_txn_t* txn = NULL;
134     SDMMC_LOCK(dev);
135     list_for_every_entry(&dev->txn_list, txn, sdmmc_txn_t, node) {
136         SDMMC_UNLOCK(dev);
137         block_complete(txn, ZX_ERR_BAD_STATE, async_id);
138         SDMMC_LOCK(dev);
139     }
140     SDMMC_UNLOCK(dev);
141 
142     if (dev->child_zxdev != NULL) {
143         device_remove(dev->child_zxdev);
144     }
145 
146 exit:
147     if (dev->worker_event != ZX_HANDLE_INVALID) {
148         zx_handle_close(dev->worker_event);
149     }
150 
151     free(dev);
152 }
153 
// Device ops for the "sdmmc-block" child published for SD/MMC cards.
static zx_protocol_device_t sdmmc_block_device_proto = {
    .version = DEVICE_OPS_VERSION,
    .ioctl = sdmmc_ioctl,
    .get_size = sdmmc_get_size,
};
159 
// Device ops for the "sdio" child published for SDIO cards.
static zx_protocol_device_t sdmmc_sdio_device_proto = {
    .version = DEVICE_OPS_VERSION,
    .ioctl = sdmmc_ioctl,
    .get_size = sdmmc_get_size,
};
165 
// Device protocol for the root (non-bindable) sdmmc device; owns the
// unbind/release lifecycle for the whole driver instance.
static zx_protocol_device_t sdmmc_device_proto = {
    .version = DEVICE_OPS_VERSION,
    .unbind = sdmmc_unbind,
    .release = sdmmc_release,
};
172 
// Block protocol hook: report device geometry and the per-op allocation size.
// Clients must allocate sizeof(sdmmc_txn_t) per block op so the driver can
// attach its list node and completion state.
static void sdmmc_query(void* ctx, block_info_t* info_out, size_t* block_op_size_out) {
    sdmmc_device_t* dev = ctx;
    *info_out = dev->block_info;
    *block_op_size_out = sizeof(sdmmc_txn_t);
}
178 
// Block protocol hook: validate an incoming block op and hand it to the
// worker thread.  Out-of-range, zero-length, and unsupported ops are
// completed immediately and never queued.
static void sdmmc_queue(void* ctx, block_op_t* btxn, block_impl_queue_callback completion_cb,
                        void* cookie) {
    sdmmc_device_t* dev = ctx;
    sdmmc_txn_t* txn = containerof(btxn, sdmmc_txn_t, bop);
    txn->completion_cb = completion_cb;
    txn->cookie = cookie;

    SDMMC_LOCK(dev);
    trace_async_id_t async_id = dev->async_id;
    SDMMC_UNLOCK(dev);

    uint32_t opcode = BLOCK_OP(btxn->command);
    if (opcode == BLOCK_OP_READ || opcode == BLOCK_OP_WRITE) {
        SDMMC_LOCK(dev);
        uint64_t block_count = dev->block_info.block_count;
        SDMMC_UNLOCK(dev);
        // Reject ops that start or run past the end of the device.
        if ((btxn->rw.offset_dev >= block_count) ||
            ((block_count - btxn->rw.offset_dev) < btxn->rw.length)) {
            block_complete(txn, ZX_ERR_OUT_OF_RANGE, async_id);
            return;
        }
        // A zero-length transfer is trivially complete.
        if (btxn->rw.length == 0) {
            block_complete(txn, ZX_OK, async_id);
            return;
        }
    } else if (opcode != BLOCK_OP_FLUSH) {
        block_complete(txn, ZX_ERR_NOT_SUPPORTED, async_id);
        return;
    }
    // FLUSH is queued as-is: this driver executes ops strictly in order, so
    // when the flush is processed all previous ops are already complete.

    SDMMC_LOCK(dev);
    list_add_tail(&dev->txn_list, &txn->node);
    // Signal while still holding the lock so the worker cannot clear the
    // event between our enqueue and this signal.
    zx_object_signal(dev->worker_event, 0, SDMMC_TXN_RECEIVED);
    SDMMC_UNLOCK(dev);
}
223 
// Block protocol ops exposed on the "sdmmc-block" child device.
static block_impl_protocol_ops_t block_proto = {
    .query = sdmmc_query,   // report geometry and per-op allocation size
    .queue = sdmmc_queue,   // enqueue a block op for the worker thread
};
229 
// SDIO protocol ops exposed on the "sdio" child device.  The implementations
// live elsewhere in this driver (declared in sdmmc.h).
static sdio_protocol_ops_t sdio_proto = {
    .enable_fn = sdio_enable_function,
    .disable_fn = sdio_disable_function,
    .enable_fn_intr = sdio_enable_interrupt,
    .disable_fn_intr = sdio_disable_interrupt,
    .update_block_size = sdio_modify_block_size,
    .get_block_size = sdio_get_cur_block_size,
    .do_rw_txn = sdio_rw_data,
    .do_rw_byte = sdio_rw_byte,
    .get_dev_hw_info = sdio_get_device_hw_info,
};
242 
sdmmc_wait_for_tran(sdmmc_device_t * dev)243 static zx_status_t sdmmc_wait_for_tran(sdmmc_device_t* dev) {
244     uint32_t current_state;
245     const size_t max_attempts = 10;
246     size_t attempt = 0;
247     for (; attempt <= max_attempts; attempt++) {
248         uint32_t response;
249         zx_status_t st = sdmmc_send_status(dev, &response);
250         if (st != ZX_OK) {
251             zxlogf(SPEW, "sdmmc: SDMMC_SEND_STATUS error, retcode = %d\n", st);
252             return st;
253         }
254 
255         current_state = MMC_STATUS_CURRENT_STATE(response);
256         if (current_state == MMC_STATUS_CURRENT_STATE_RECV) {
257             st = sdmmc_stop_transmission(dev);
258             continue;
259         } else if (current_state == MMC_STATUS_CURRENT_STATE_TRAN) {
260             break;
261         }
262 
263         zx_nanosleep(zx_deadline_after(ZX_MSEC(10)));
264     }
265 
266     if (attempt == max_attempts) {
267         // Too many retries, fail.
268         return ZX_ERR_TIMED_OUT;
269     } else {
270         return ZX_OK;
271     }
272 }
273 
// Execute one queued block transaction against the card: translate the block
// op into the matching SD/MMC command, run it via DMA or a PIO mapping of the
// client VMO, and complete the txn with the result.
static void sdmmc_do_txn(sdmmc_device_t* dev, sdmmc_txn_t* txn) {
    // The TRACE_*() event macros are empty if driver tracing isn't enabled.
    // But that doesn't work for our call to trace_state().
    if (TRACE_ENABLED()) {
        dev->async_id = TRACE_NONCE();
        TRACE_ASYNC_BEGIN("sdmmc","sdmmc_do_txn", dev->async_id,
            "command", TA_INT32(txn->bop.rw.command),
            "extra", TA_INT32(txn->bop.rw.extra),
            "length", TA_INT32(txn->bop.rw.length),
            "offset_vmo", TA_INT64(txn->bop.rw.offset_vmo),
            "offset_dev", TA_INT64(txn->bop.rw.offset_dev));
    }

    uint32_t cmd_idx = 0;
    uint32_t cmd_flags = 0;

    // Figure out which SD command we need to issue.
    switch (BLOCK_OP(txn->bop.command)) {
    case BLOCK_OP_READ:
        if (txn->bop.rw.length > 1) {
            cmd_idx = SDMMC_READ_MULTIPLE_BLOCK;
            cmd_flags = SDMMC_READ_MULTIPLE_BLOCK_FLAGS;
        } else {
            cmd_idx = SDMMC_READ_BLOCK;
            cmd_flags = SDMMC_READ_BLOCK_FLAGS;
        }
        break;
    case BLOCK_OP_WRITE:
        if (txn->bop.rw.length > 1) {
            cmd_idx = SDMMC_WRITE_MULTIPLE_BLOCK;
            cmd_flags = SDMMC_WRITE_MULTIPLE_BLOCK_FLAGS;
        } else {
            cmd_idx = SDMMC_WRITE_BLOCK;
            cmd_flags = SDMMC_WRITE_BLOCK_FLAGS;
        }
        break;
    case BLOCK_OP_FLUSH:
        // Because there is no out-of-order execution in this driver, all
        // previous ops are complete by the time the flush is processed.
        block_complete(txn, ZX_OK, dev->async_id);
        return;
    default:
        // Should not get here: sdmmc_queue() rejects unsupported ops.
        zxlogf(ERROR, "sdmmc: do_txn invalid block op %d\n", BLOCK_OP(txn->bop.command));
        // Fix: was ZX_DEBUG_ASSERT(true), which can never fire; the intent of
        // an unreachable-path assert is to trip in debug builds.
        ZX_DEBUG_ASSERT(false);
        block_complete(txn, ZX_ERR_INVALID_ARGS, dev->async_id);
        return;
    }

    zxlogf(TRACE, "sdmmc: do_txn blockop 0x%x offset_vmo 0x%" PRIx64 " length 0x%x blocksize 0x%x"
                  " max_transfer_size 0x%x\n",
           txn->bop.command, txn->bop.rw.offset_vmo, txn->bop.rw.length,
           dev->block_info.block_size, dev->block_info.max_transfer_size);

    sdmmc_req_t* req = &dev->req;
    memset(req, 0, sizeof(*req));
    req->cmd_idx = cmd_idx;
    req->cmd_flags = cmd_flags;
    req->arg = txn->bop.rw.offset_dev;
    req->blockcount = txn->bop.rw.length;
    req->blocksize = dev->block_info.block_size;

    // Convert offset_vmo and length from blocks to bytes (in place, in the
    // client's op -- matches the block protocol contract for this driver).
    txn->bop.rw.offset_vmo *= dev->block_info.block_size;
    txn->bop.rw.length *= dev->block_info.block_size;

    zx_status_t st = ZX_OK;
    if (sdmmc_use_dma(dev)) {
        // DMA path: hand the client VMO straight to the host controller.
        req->use_dma = true;
        req->virt_buffer = NULL;
        req->pmt = ZX_HANDLE_INVALID;
        req->dma_vmo =  txn->bop.rw.vmo;
        req->buf_offset = txn->bop.rw.offset_vmo;
    } else {
        // PIO path: map the relevant VMO range into our address space.
        req->use_dma = false;
        st = zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                         0, txn->bop.rw.vmo, txn->bop.rw.offset_vmo, txn->bop.rw.length,
                         (uintptr_t*)&req->virt_buffer);
        if (st != ZX_OK) {
            zxlogf(TRACE, "sdmmc: do_txn vmo map error %d\n", st);
            block_complete(txn, st, dev->async_id);
            return;
        }
        req->virt_size = txn->bop.rw.length;
    }

    st = sdmmc_request(&dev->host, req);
    if (st != ZX_OK) {
        zxlogf(TRACE, "sdmmc: do_txn error %d\n", st);
    } else if ((req->blockcount > 1) && !(dev->host_info.caps & SDMMC_HOST_CAP_AUTO_CMD12)) {
        // Host doesn't auto-issue CMD12 -- stop the multi-block transfer ourselves.
        st = sdmmc_stop_transmission(dev);
        if (st != ZX_OK) {
            zxlogf(TRACE, "sdmmc: do_txn stop transmission error %d\n", st);
        }
    }

    if (!req->use_dma) {
        zx_vmar_unmap(zx_vmar_root_self(), (uintptr_t)req->virt_buffer, req->virt_size);
    }
    block_complete(txn, st, dev->async_id);
    zxlogf(TRACE, "sdmmc: do_txn complete\n");
}
379 
// Worker thread entry point.  Initializes the host, probes the card type
// (SDIO, then SD, then MMC), publishes the matching child device, and -- for
// SD/MMC cards -- services queued block transactions until told to shut
// down.  Returns 0 on clean shutdown, or a zx_status_t error if setup fails.
static int sdmmc_worker_thread(void* arg) {
    zx_status_t st = ZX_OK;
    sdmmc_device_t* dev = (sdmmc_device_t*)arg;
    bool dead = false;

    // The lock is held across the whole probe/publish sequence so unbind and
    // release observe a consistent device state.
    SDMMC_LOCK(dev);
    st = sdmmc_host_info(&dev->host, &dev->host_info);
    if (st != ZX_OK) {
        zxlogf(ERROR, "sdmmc: failed to get host info\n");
        SDMMC_UNLOCK(dev);
        goto fail;
    }

    zxlogf(TRACE, "sdmmc: host caps dma %d 8-bit bus %d max_transfer_size %" PRIu64 "\n",
           sdmmc_use_dma(dev) ? 1 : 0,
           (dev->host_info.caps & SDMMC_HOST_CAP_BUS_WIDTH_8) ? 1 : 0,
           dev->host_info.max_transfer_size);

    dev->block_info.max_transfer_size = dev->host_info.max_transfer_size;

    // Reset the card.
    sdmmc_hw_reset(&dev->host);

    // No matter what state the card is in, issuing the GO_IDLE_STATE command will
    // put the card into the idle state.
    if ((st = sdmmc_go_idle(dev)) != ZX_OK) {
        zxlogf(ERROR, "sdmmc: SDMMC_GO_IDLE_STATE failed, retcode = %d\n", st);
        SDMMC_UNLOCK(dev);
        goto fail;
    }

    // Probe for SDIO, SD and then MMC
    if ((st = sdmmc_probe_sdio(dev)) != ZX_OK) {
        if ((st = sdmmc_probe_sd(dev)) != ZX_OK) {
            if ((st = sdmmc_probe_mmc(dev)) != ZX_OK) {
                zxlogf(ERROR, "sdmmc: failed to probe\n");
                SDMMC_UNLOCK(dev);
                goto fail;
            }
        }
    }

    if (dev->type == SDMMC_TYPE_SDIO) {
        zx_device_t* hci_zxdev =  device_get_parent(dev->zxdev);

        // Bind properties so SDIO function drivers can match on VID/PID.
        zx_device_prop_t props[] = {
             { BIND_SDIO_VID, 0, dev->sdio_dev.funcs[0].hw_info.manufacturer_id},
             { BIND_SDIO_PID, 0, dev->sdio_dev.funcs[0].hw_info.product_id},
        };

        device_add_args_t sdio_args = {
            .version = DEVICE_ADD_ARGS_VERSION,
            .name = "sdio",
            .ctx = dev,
            .ops = &sdmmc_sdio_device_proto,
            .proto_id = ZX_PROTOCOL_SDIO,
            .proto_ops = &sdio_proto,
            .props = props,
            .prop_count = countof(props),
        };

        // Use platform device protocol to create our SDIO device, if it is available.
        pdev_protocol_t pdev;
        st = device_get_protocol(hci_zxdev, ZX_PROTOCOL_PDEV, &pdev);
        if (st == ZX_OK) {
            st = pdev_device_add(&pdev, 0, &sdio_args, &dev->child_zxdev);
        } else {
            st = device_add(dev->zxdev, &sdio_args, &dev->child_zxdev);
        }
        if (st != ZX_OK) {
            zxlogf(ERROR, "sdmmc: Failed to add sdio device, retcode = %d\n", st);
            SDMMC_UNLOCK(dev);
            goto fail;
        }
        // SDIO has no txn loop here, so report "shutdown done" right away;
        // sdmmc_release() sees this bit and joins the thread directly.
        zx_object_signal(dev->worker_event, 0, SDMMC_SHUTDOWN_DONE);
        SDMMC_UNLOCK(dev);
    } else {
        // Device must be in TRAN state at this point
        st = sdmmc_wait_for_tran(dev);
        if (st != ZX_OK) {
            zxlogf(ERROR, "sdmmc: waiting for TRAN state failed, retcode = %d\n", st);
            SDMMC_UNLOCK(dev);
            goto fail;
        }

        device_add_args_t block_args = {
            .version = DEVICE_ADD_ARGS_VERSION,
            .name = "sdmmc-block",
            .ctx = dev,
            .ops = &sdmmc_block_device_proto,
            .proto_id = ZX_PROTOCOL_BLOCK_IMPL,
            .proto_ops = &block_proto,
        };

        st = device_add(dev->zxdev, &block_args, &dev->child_zxdev);
        if (st != ZX_OK) {
            zxlogf(ERROR, "sdmmc: Failed to add mmc device, retcode = %d\n", st);
            SDMMC_UNLOCK(dev);
            goto fail;
        }

        // Tell sdmmc_release() the block child exists and the txn loop runs;
        // release will request SDMMC_SHUTDOWN to stop it.
        zx_object_signal(dev->worker_event, 0, SDMMC_ADD_MMC_CHILD_DONE);
        SDMMC_UNLOCK(dev);
        for (;;) {
            // don't loop until txn_list is empty to check for SDMMC_SHUTDOWN
            // between each txn.
            SDMMC_LOCK(dev);
            sdmmc_txn_t* txn = list_remove_head_type(&dev->txn_list, sdmmc_txn_t, node);
            if (txn) {
                // Unlock if we execute the transaction
                SDMMC_UNLOCK(dev);
                sdmmc_do_txn(dev, txn);
            } else {
                // Stay locked if we're clearing the "RECEIVED" flag.
                zx_object_signal(dev->worker_event, SDMMC_TXN_RECEIVED, 0);
                SDMMC_UNLOCK(dev);
            }

            uint32_t pending;
            // (This `st` shadows the outer one; the outer value is only used
            // on the fail path.)
            zx_status_t st = zx_object_wait_one(dev->worker_event,
                                                SDMMC_TXN_RECEIVED | SDMMC_SHUTDOWN,
                                                ZX_TIME_INFINITE, &pending);
            if (st != ZX_OK) {
                zxlogf(ERROR, "sdmmc: worker thread wait failed, retcode = %d\n", st);
                goto fail;
            }
            if (pending & SDMMC_SHUTDOWN) {
                // Acknowledge the shutdown request so release() can join us.
                zx_object_signal(dev->worker_event, 0, SDMMC_SHUTDOWN_DONE);
                break;
            }
        }
    }

    zxlogf(TRACE, "sdmmc: worker thread terminated successfully\n");
    return 0;

fail:
    SDMMC_LOCK(dev);
    // Unblock sdmmc_release(), which waits on this bit before joining.
    zx_object_signal(dev->worker_event, 0, SDMMC_SHUTDOWN_DONE);
    dead = dev->dead;
    zx_device_t* zxdev = dev->zxdev;
    SDMMC_UNLOCK(dev);
    if (!dead) {
        // Not already in the middle of a shutdown -- trigger removal ourselves.
        device_remove(zxdev);
    }
    return st;
}
528 
sdmmc_bind(void * ctx,zx_device_t * parent)529 static zx_status_t sdmmc_bind(void* ctx, zx_device_t* parent) {
530     // Allocate the device.
531     sdmmc_device_t* dev = calloc(1, sizeof(*dev));
532     if (!dev) {
533         zxlogf(ERROR, "sdmmc: no memory to allocate sdmmc device!\n");
534         return ZX_ERR_NO_MEMORY;
535     }
536 
537     zx_status_t st = device_get_protocol(parent, ZX_PROTOCOL_SDMMC, &dev->host);
538     if (st != ZX_OK) {
539         zxlogf(ERROR, "sdmmc: failed to get sdmmc protocol\n");
540         free(dev);
541         return ZX_ERR_NOT_SUPPORTED;
542     }
543 
544     mtx_init(&dev->lock, mtx_plain);
545 
546     device_add_args_t args = {
547         .version = DEVICE_ADD_ARGS_VERSION,
548         .name = "sdmmc",
549         .ctx = dev,
550         .ops = &sdmmc_device_proto,
551         .flags = DEVICE_ADD_NON_BINDABLE,
552     };
553 
554     SDMMC_LOCK(dev);
555     st = device_add(parent, &args, &dev->zxdev);
556     if (st != ZX_OK) {
557         free(dev);
558         SDMMC_UNLOCK(dev);
559         return st;
560     }
561 
562     st = zx_event_create(0, &dev->worker_event);
563     if (st != ZX_OK) {
564         zxlogf(ERROR, "sdmmc: failed to create event, retcode = %d\n", st);
565         SDMMC_UNLOCK(dev);
566         device_remove(dev->zxdev);
567         return st;
568     }
569     list_initialize(&dev->txn_list);
570     dev->worker_thread_started = true;
571     // bootstrap in a thread
572     int rc = thrd_create_with_name(&dev->worker_thread, sdmmc_worker_thread, dev, "sdmmc-worker");
573     if (rc != thrd_success) {
574         st = thrd_status_to_zx_status(rc);
575         dev->worker_thread_started = false;
576         zx_device_t* zxdev = dev->zxdev;
577         bool dead = dev->dead;
578         SDMMC_UNLOCK(dev);
579         if (!dead) {
580             device_remove(zxdev);
581         }
582         return st;
583     }
584     SDMMC_UNLOCK(dev);
585     return ZX_OK;
586 }
587 
// Driver entry points; the bind rule itself lives in the ZIRCON_DRIVER
// declaration at the bottom of this file.
static zx_driver_ops_t sdmmc_driver_ops = {
    .version = DRIVER_OPS_VERSION,
    .bind = sdmmc_bind,
};
592 
// The formatter does not play nice with these macros.
// clang-format off
// Bind whenever the parent device exposes ZX_PROTOCOL_SDMMC.
ZIRCON_DRIVER_BEGIN(sdmmc, sdmmc_driver_ops, "zircon", "0.1", 1)
    BI_MATCH_IF(EQ, BIND_PROTOCOL, ZX_PROTOCOL_SDMMC),
ZIRCON_DRIVER_END(sdmmc)
// clang-format on
599