1 /*
2 * Copyright (c) 2006-2023, RT-Thread Development Team
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 * Change Logs:
7 * Date Author Notes
8 * 2011-07-25 weety first version
9 */
10
11 #include <rtthread.h>
12 #include <drivers/dev_mmcsd_core.h>
13 #include <drivers/dev_sd.h>
14 #include <drivers/dev_mmc.h>
15 #include <drivers/dev_sdio.h>
16 #include <string.h>
17
18 #define DBG_TAG "SDIO"
19 #ifdef RT_SDIO_DEBUG
20 #define DBG_LVL DBG_LOG
21 #else
22 #define DBG_LVL DBG_INFO
23 #endif /* RT_SDIO_DEBUG */
24 #include <rtdbg.h>
25
26 #ifndef RT_MMCSD_STACK_SIZE
27 #define RT_MMCSD_STACK_SIZE 1024
28 #endif
29 #ifndef RT_MMCSD_THREAD_PRIORITY
30 #if (RT_THREAD_PRIORITY_MAX == 32)
31 #define RT_MMCSD_THREAD_PRIORITY 0x16
32 #else
33 #define RT_MMCSD_THREAD_PRIORITY 0x40
34 #endif
35 #endif
36
//static struct rt_semaphore mmcsd_sem;
/* Background thread that services card insert/remove events. */
static struct rt_thread mmcsd_detect_thread;
static rt_uint8_t mmcsd_stack[RT_MMCSD_STACK_SIZE];
/* Mailbox carrying host pointers from mmcsd_change() to the detect thread. */
static struct rt_mailbox mmcsd_detect_mb;
static rt_uint32_t mmcsd_detect_mb_pool[4];
/* Mailbox notifying mmcsd_wait_cd_changed() callers of hotplug results. */
static struct rt_mailbox mmcsd_hotpluge_mb;
static rt_uint32_t mmcsd_hotpluge_mb_pool[4];
44
/* Acquire exclusive access to the host's bus before issuing commands. */
void mmcsd_host_lock(struct rt_mmcsd_host *host)
{
    rt_mutex_take(&host->bus_lock, RT_WAITING_FOREVER);
}
49
/* Release bus access taken by mmcsd_host_lock(). */
void mmcsd_host_unlock(struct rt_mmcsd_host *host)
{
    rt_mutex_release(&host->bus_lock);
}
54
/*
 * Called by host controller drivers when a request finishes;
 * wakes the waiter blocked in mmcsd_send_request().
 */
void mmcsd_req_complete(struct rt_mmcsd_host *host)
{
    rt_sem_release(&host->sem_ack);
}
59
/*
 * Submit a request to the host driver and block until completion.
 *
 * The command is retried while it keeps failing and cmd->retries
 * (set by the caller) has not been exhausted. Each attempt clears the
 * error fields and re-links cmd/data/stop back to the request so the
 * host driver sees a fresh, consistent request every time.
 *
 * Blocks on host->sem_ack, which is released by mmcsd_req_complete().
 */
void mmcsd_send_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req)
{
    do
    {
        /* one attempt consumed; reset per-attempt state */
        req->cmd->retries--;
        req->cmd->err = 0;
        req->cmd->mrq = req;
        if (req->data)
        {
            req->cmd->data = req->data;
            req->data->err = 0;
            req->data->mrq = req;
            if (req->stop)
            {
                req->data->stop = req->stop;
                req->stop->err = 0;
                req->stop->mrq = req;
            }
        }
        /* hand the request to the controller driver (asynchronous) */
        host->ops->request(host, req);

        /* wait for mmcsd_req_complete() from the driver's ISR/worker */
        rt_sem_take(&host->sem_ack, RT_WAITING_FOREVER);

    }
    while (req->cmd->err && (req->cmd->retries > 0));


}
88
mmcsd_send_cmd(struct rt_mmcsd_host * host,struct rt_mmcsd_cmd * cmd,int retries)89 rt_int32_t mmcsd_send_cmd(struct rt_mmcsd_host *host,
90 struct rt_mmcsd_cmd *cmd,
91 int retries)
92 {
93 struct rt_mmcsd_req req;
94
95 rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
96 rt_memset(cmd->resp, 0, sizeof(cmd->resp));
97 cmd->retries = retries;
98
99 req.cmd = cmd;
100 cmd->data = RT_NULL;
101
102 mmcsd_send_request(host, &req);
103
104 return cmd->err;
105 }
106
mmcsd_go_idle(struct rt_mmcsd_host * host)107 rt_int32_t mmcsd_go_idle(struct rt_mmcsd_host *host)
108 {
109 rt_int32_t err;
110 struct rt_mmcsd_cmd cmd;
111
112 if (!controller_is_spi(host))
113 {
114 mmcsd_set_chip_select(host, MMCSD_CS_HIGH);
115 rt_thread_mdelay(1);
116 }
117
118 rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
119
120 cmd.cmd_code = GO_IDLE_STATE;
121 cmd.arg = 0;
122 cmd.flags = RESP_SPI_R1 | RESP_NONE | CMD_BC;
123
124 err = mmcsd_send_cmd(host, &cmd, 0);
125
126 rt_thread_mdelay(1);
127
128 if (!controller_is_spi(host))
129 {
130 mmcsd_set_chip_select(host, MMCSD_CS_IGNORE);
131 rt_thread_mdelay(1);
132 }
133
134 return err;
135 }
136
/*
 * SPI-mode READ_OCR: fetch the card's OCR register.
 * 'high_capacity' sets the HCS bit (bit 30) in the command argument.
 * The OCR value is written to *ocr; the command status is returned.
 */
rt_int32_t mmcsd_spi_read_ocr(struct rt_mmcsd_host *host,
                              rt_int32_t high_capacity,
                              rt_uint32_t *ocr)
{
    rt_int32_t ret;
    struct rt_mmcsd_cmd ocr_cmd;

    rt_memset(&ocr_cmd, 0, sizeof(ocr_cmd));

    ocr_cmd.cmd_code = SPI_READ_OCR;
    ocr_cmd.arg      = high_capacity ? (1 << 30) : 0;
    ocr_cmd.flags    = RESP_SPI_R3;

    ret  = mmcsd_send_cmd(host, &ocr_cmd, 0);
    *ocr = ocr_cmd.resp[1];

    return ret;
}
156
mmcsd_all_get_cid(struct rt_mmcsd_host * host,rt_uint32_t * cid)157 rt_int32_t mmcsd_all_get_cid(struct rt_mmcsd_host *host, rt_uint32_t *cid)
158 {
159 rt_int32_t err;
160 struct rt_mmcsd_cmd cmd;
161
162 rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
163
164 cmd.cmd_code = ALL_SEND_CID;
165 cmd.arg = 0;
166 cmd.flags = RESP_R2 | CMD_BCR;
167
168 err = mmcsd_send_cmd(host, &cmd, 3);
169 if (err)
170 return err;
171
172 rt_memcpy(cid, cmd.resp, sizeof(rt_uint32_t) * 4);
173
174 return 0;
175 }
176
/*
 * Read the CID register of the addressed card into cid[4].
 *
 * Native (non-SPI) hosts use SEND_CID addressed by the card's RCA and
 * receive the 128-bit CID in the R2 response. SPI hosts instead read
 * the CID as a 16-byte data block following an R1 response.
 *
 * Returns 0 on success, a command error, or -RT_ERROR / -RT_ENOMEM.
 */
rt_int32_t mmcsd_get_cid(struct rt_mmcsd_host *host, rt_uint32_t *cid)
{
    rt_int32_t err, i;
    struct rt_mmcsd_req req;
    struct rt_mmcsd_cmd cmd;
    struct rt_mmcsd_data data;
    rt_uint32_t *buf = RT_NULL;

    if (!controller_is_spi(host))
    {
        /* native mode requires an attached, addressed card */
        if (!host->card)
            return -RT_ERROR;
        rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));

        cmd.cmd_code = SEND_CID;
        cmd.arg = host->card->rca << 16;
        cmd.flags = RESP_R2 | CMD_AC;
        err = mmcsd_send_cmd(host, &cmd, 3);
        if (err)
            return err;

        rt_memcpy(cid, cmd.resp, sizeof(rt_uint32_t) * 4);

        return 0;
    }

    /* SPI mode: CID arrives as a 16-byte data block, use a bounce buffer */
    buf = (rt_uint32_t *)rt_malloc(16);
    if (!buf)
    {
        LOG_E("allocate memory failed!");

        return -RT_ENOMEM;
    }

    rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
    rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));

    req.cmd = &cmd;
    req.data = &data;

    cmd.cmd_code = SEND_CID;
    cmd.arg = 0;

    /* NOTE HACK: the RESP_SPI_R1 is always correct here, but we
     * rely on callers to never use this with "native" calls for reading
     * CSD or CID. Native versions of those commands use the R2 type,
     * not R1 plus a data block.
     */
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;

    data.blksize = 16;
    data.blks = 1;
    data.flags = DATA_DIR_READ;
    data.buf = buf;
    /*
     * The spec states that CSD and CID accesses have a timeout
     * of 64 clock cycles.
     */
    data.timeout_ns = 0;
    data.timeout_clks = 64;

    mmcsd_send_request(host, &req);

    if (cmd.err || data.err)
    {
        rt_free(buf);

        return -RT_ERROR;
    }

    /* copy the four CID words out before releasing the bounce buffer */
    for (i = 0; i < 4; i++)
        cid[i] = buf[i];
    rt_free(buf);

    return 0;
}
254
/*
 * Read the CSD register of the card into csd[4].
 *
 * Native (non-SPI) hosts use SEND_CSD addressed by the card's RCA and
 * receive the 128-bit CSD in the R2 response. SPI hosts instead read
 * the CSD as a 16-byte data block following an R1 response.
 *
 * Returns 0 on success, a command error, or -RT_ERROR / -RT_ENOMEM.
 */
rt_int32_t mmcsd_get_csd(struct rt_mmcsd_card *card, rt_uint32_t *csd)
{
    rt_int32_t err, i;
    struct rt_mmcsd_req req;
    struct rt_mmcsd_cmd cmd;
    struct rt_mmcsd_data data;
    rt_uint32_t *buf = RT_NULL;

    if (!controller_is_spi(card->host))
    {
        rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));

        cmd.cmd_code = SEND_CSD;
        cmd.arg = card->rca << 16;
        cmd.flags = RESP_R2 | CMD_AC;
        err = mmcsd_send_cmd(card->host, &cmd, 3);
        if (err)
            return err;

        rt_memcpy(csd, cmd.resp, sizeof(rt_uint32_t) * 4);

        return 0;
    }

    /* SPI mode: CSD arrives as a 16-byte data block, use a bounce buffer */
    buf = (rt_uint32_t *)rt_malloc(16);
    if (!buf)
    {
        LOG_E("allocate memory failed!");

        return -RT_ENOMEM;
    }

    rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
    rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));

    req.cmd = &cmd;
    req.data = &data;

    cmd.cmd_code = SEND_CSD;
    cmd.arg = 0;

    /* NOTE HACK: the RESP_SPI_R1 is always correct here, but we
     * rely on callers to never use this with "native" calls for reading
     * CSD or CID. Native versions of those commands use the R2 type,
     * not R1 plus a data block.
     */
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;

    data.blksize = 16;
    data.blks = 1;
    data.flags = DATA_DIR_READ;
    data.buf = buf;

    /*
     * The spec states that CSD and CID accesses have a timeout
     * of 64 clock cycles.
     */
    data.timeout_ns = 0;
    data.timeout_clks = 64;

    mmcsd_send_request(card->host, &req);

    if (cmd.err || data.err)
    {
        rt_free(buf);

        return -RT_ERROR;
    }

    /* copy the four CSD words out before releasing the bounce buffer */
    for (i = 0; i < 4; i++)
        csd[i] = buf[i];
    rt_free(buf);

    return 0;
}
331
_mmcsd_select_card(struct rt_mmcsd_host * host,struct rt_mmcsd_card * card)332 static rt_int32_t _mmcsd_select_card(struct rt_mmcsd_host *host,
333 struct rt_mmcsd_card *card)
334 {
335 rt_int32_t err;
336 struct rt_mmcsd_cmd cmd;
337
338 rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
339
340 cmd.cmd_code = SELECT_CARD;
341
342 if (card)
343 {
344 cmd.arg = card->rca << 16;
345 cmd.flags = RESP_R1 | CMD_AC;
346 }
347 else
348 {
349 cmd.arg = 0;
350 cmd.flags = RESP_NONE | CMD_AC;
351 }
352
353 err = mmcsd_send_cmd(host, &cmd, 3);
354 if (err)
355 return err;
356
357 return 0;
358 }
359
/* Select the given card on its host bus (CMD7). */
rt_int32_t mmcsd_select_card(struct rt_mmcsd_card *card)
{
    return _mmcsd_select_card(card->host, card);
}
364
/* Deselect all cards on the given card's host bus (CMD7 with RCA 0). */
rt_int32_t mmcsd_deselect_cards(struct rt_mmcsd_card *card)
{
    return _mmcsd_select_card(card->host, RT_NULL);
}
369
/*
 * SPI-mode CRC_ON_OFF: enable or disable CRC checking on the card.
 * On success the host's spi_use_crc flag is updated to match.
 * Returns 0 on success or the command error code.
 */
rt_int32_t mmcsd_spi_use_crc(struct rt_mmcsd_host *host, rt_int32_t use_crc)
{
    rt_int32_t ret;
    struct rt_mmcsd_cmd crc_cmd;

    rt_memset(&crc_cmd, 0, sizeof(crc_cmd));

    crc_cmd.cmd_code = SPI_CRC_ON_OFF;
    crc_cmd.arg      = use_crc;
    crc_cmd.flags    = RESP_SPI_R1;

    ret = mmcsd_send_cmd(host, &crc_cmd, 0);
    if (ret == 0)
    {
        host->spi_use_crc = use_crc;
    }

    return ret;
}
387
/*
 * Push the host's current io_cfg (clock, bus mode, power, CS, vdd,
 * bus width) down to the controller driver via ops->set_iocfg.
 */
rt_inline void mmcsd_set_iocfg(struct rt_mmcsd_host *host)
{
    struct rt_mmcsd_io_cfg *io_cfg = &host->io_cfg;

    mmcsd_dbg("clock %uHz busmode %u powermode %u cs %u Vdd %u "
              "width %u \n",
              io_cfg->clock, io_cfg->bus_mode,
              io_cfg->power_mode, io_cfg->chip_select, io_cfg->vdd,
              io_cfg->bus_width);

    host->ops->set_iocfg(host, io_cfg);
}
400
401 /*
402 * Control chip select pin on a host.
403 */
mmcsd_set_chip_select(struct rt_mmcsd_host * host,rt_int32_t mode)404 void mmcsd_set_chip_select(struct rt_mmcsd_host *host, rt_int32_t mode)
405 {
406 host->io_cfg.chip_select = mode;
407 mmcsd_set_iocfg(host);
408 }
409
410 /*
411 * Sets the host clock to the highest possible frequency that
412 * is below "hz".
413 */
mmcsd_set_clock(struct rt_mmcsd_host * host,rt_uint32_t clk)414 void mmcsd_set_clock(struct rt_mmcsd_host *host, rt_uint32_t clk)
415 {
416 if (clk < host->freq_min)
417 {
418 LOG_W("clock too low!");
419 }
420
421 host->io_cfg.clock = clk;
422 mmcsd_set_iocfg(host);
423 }
424
425 /*
426 * Change the bus mode (open drain/push-pull) of a host.
427 */
mmcsd_set_bus_mode(struct rt_mmcsd_host * host,rt_uint32_t mode)428 void mmcsd_set_bus_mode(struct rt_mmcsd_host *host, rt_uint32_t mode)
429 {
430 host->io_cfg.bus_mode = mode;
431 mmcsd_set_iocfg(host);
432 }
433
434 /*
435 * Change data bus width of a host.
436 */
mmcsd_set_bus_width(struct rt_mmcsd_host * host,rt_uint32_t width)437 void mmcsd_set_bus_width(struct rt_mmcsd_host *host, rt_uint32_t width)
438 {
439 host->io_cfg.bus_width = width;
440 mmcsd_set_iocfg(host);
441 }
442
/* Change the bus timing mode (e.g. high speed) of a host. */
void mmcsd_set_timing(struct rt_mmcsd_host *host, rt_uint32_t timing)
{
    host->io_cfg.timing = timing;
    mmcsd_set_iocfg(host);
}
448
/*
 * Compute the data transfer timeout (timeout_ns / timeout_clks) for a
 * transfer on the given card, based on the card's CSD access time and
 * the direction of the transfer.
 */
void mmcsd_set_data_timeout(struct rt_mmcsd_data *data,
                            const struct rt_mmcsd_card *card)
{
    rt_uint32_t mult;

    if (card->card_type == CARD_TYPE_SDIO)
    {
        data->timeout_ns = 1000000000; /* SDIO card 1s */
        data->timeout_clks = 0;

        return;
    }

    /*
     * SD cards use a 100 multiplier rather than 10
     */
    mult = (card->card_type == CARD_TYPE_SD) ? 100 : 10;

    /*
     * Scale up the multiplier (and therefore the timeout) by
     * the r2w factor for writes.
     */
    if (data->flags & DATA_DIR_WRITE)
        mult <<= card->csd.r2w_factor;

    data->timeout_ns = card->tacc_ns * mult;
    data->timeout_clks = card->tacc_clks * mult;

    /*
     * SD cards also have an upper limit on the timeout.
     */
    if (card->card_type == CARD_TYPE_SD)
    {
        rt_uint32_t timeout_us, limit_us;

        /* convert both components to microseconds for the comparison */
        timeout_us = data->timeout_ns / 1000;
        timeout_us += data->timeout_clks * 1000 /
                      (card->host->io_cfg.clock / 1000);

        if (data->flags & DATA_DIR_WRITE)
            /*
             * The limit is really 250 ms, but that is
             * insufficient for some crappy cards.
             */
            limit_us = 300000;
        else
            limit_us = 100000;

        /*
         * SDHC cards always use these fixed values.
         */
        if (timeout_us > limit_us || card->flags & CARD_FLAG_SDHC)
        {
            data->timeout_ns = limit_us * 1000; /* clamp to the fixed limit (300ms write / 100ms read) */
            data->timeout_clks = 0;
        }
    }

    /* SPI hosts enforce larger minimum timeouts */
    if (controller_is_spi(card->host))
    {
        if (data->flags & DATA_DIR_WRITE)
        {
            if (data->timeout_ns < 1000000000)
                data->timeout_ns = 1000000000; /* 1s */
        }
        else
        {
            if (data->timeout_ns < 100000000)
                data->timeout_ns = 100000000; /* 100ms */
        }
    }
}
521
522 /*
523 * Mask off any voltages we don't support and select
524 * the lowest voltage
525 */
mmcsd_select_voltage(struct rt_mmcsd_host * host,rt_uint32_t ocr)526 rt_uint32_t mmcsd_select_voltage(struct rt_mmcsd_host *host, rt_uint32_t ocr)
527 {
528 int bit;
529 extern int __rt_ffs(int value);
530
531 ocr &= host->valid_ocr;
532
533 bit = __rt_ffs(ocr);
534 if (bit)
535 {
536 bit -= 1;
537
538 ocr &= 3 << bit;
539
540 host->io_cfg.vdd = bit;
541 mmcsd_set_iocfg(host);
542 }
543 else
544 {
545 LOG_W("host doesn't support card's voltages!");
546 ocr = 0;
547 }
548
549 return ocr;
550 }
551
/*
 * Power the host up in two stages (POWER_UP then POWER_ON), starting
 * at the minimum bus clock and 1-bit width, with settle delays.
 * Vdd is set to the highest voltage the host supports.
 */
static void mmcsd_power_up(struct rt_mmcsd_host *host)
{
    /* highest supported voltage bit (1-based fls, hence -1) */
    int bit = __rt_fls(host->valid_ocr) - 1;

    host->io_cfg.vdd = bit;
    if (controller_is_spi(host))
    {
        host->io_cfg.chip_select = MMCSD_CS_HIGH;
        host->io_cfg.bus_mode = MMCSD_BUSMODE_PUSHPULL;
    }
    else
    {
        host->io_cfg.chip_select = MMCSD_CS_IGNORE;
        host->io_cfg.bus_mode = MMCSD_BUSMODE_OPENDRAIN;
    }
    host->io_cfg.power_mode = MMCSD_POWER_UP;
    host->io_cfg.bus_width = MMCSD_BUS_WIDTH_1;
    mmcsd_set_iocfg(host);

    /*
     * This delay should be sufficient to allow the power supply
     * to reach the minimum voltage.
     */
    rt_thread_mdelay(10);

    host->io_cfg.clock = host->freq_min;
    host->io_cfg.power_mode = MMCSD_POWER_ON;
    mmcsd_set_iocfg(host);

    /*
     * This delay must be at least 74 clock cycles, or 1 ms, or the
     * time required to reach a stable voltage.
     */
    rt_thread_mdelay(10);
}
587
mmcsd_power_off(struct rt_mmcsd_host * host)588 static void mmcsd_power_off(struct rt_mmcsd_host *host)
589 {
590 host->io_cfg.clock = 0;
591 host->io_cfg.vdd = 0;
592 if (!controller_is_spi(host))
593 {
594 host->io_cfg.bus_mode = MMCSD_BUSMODE_OPENDRAIN;
595 host->io_cfg.chip_select = MMCSD_CS_IGNORE;
596 }
597 host->io_cfg.power_mode = MMCSD_POWER_OFF;
598 host->io_cfg.bus_width = MMCSD_BUS_WIDTH_1;
599 mmcsd_set_iocfg(host);
600 }
601
/*
 * Block until a card-detect change is reported (or 'timeout' ticks pass).
 * Returns MMCSD_HOST_PLUGED / MMCSD_HOST_UNPLUGED depending on whether
 * the reporting host now has a card, or -RT_ETIMEOUT on timeout.
 */
int mmcsd_wait_cd_changed(rt_int32_t timeout)
{
    struct rt_mmcsd_host *host;
    if (rt_mb_recv(&mmcsd_hotpluge_mb, (rt_ubase_t *)&host, timeout) == RT_EOK)
    {
        if (host->card == RT_NULL)
        {
            return MMCSD_HOST_UNPLUGED;
        }
        else
        {
            return MMCSD_HOST_PLUGED;
        }
    }
    return -RT_ETIMEOUT;
}
RTM_EXPORT(mmcsd_wait_cd_changed);
619
/*
 * Notify the detect thread that the given host's card-detect state
 * changed (insert or remove); typically called from a CD interrupt.
 */
void mmcsd_change(struct rt_mmcsd_host *host)
{
    rt_mb_send(&mmcsd_detect_mb, (rt_ubase_t)host);
}
624
/*
 * Card detect thread entry: waits for host pointers posted by
 * mmcsd_change() and performs card probing or removal.
 *
 * On insert it powers the host up and probes, in order: SDIO, SD, MMC.
 * On removal it tears down the block device and frees the card.
 * Plug/unplug results are posted to mmcsd_hotpluge_mb for
 * mmcsd_wait_cd_changed() waiters.
 */
void mmcsd_detect(void *param)
{
    struct rt_mmcsd_host *host;
    rt_uint32_t ocr;
    rt_int32_t err;

    while (1)
    {
        if (rt_mb_recv(&mmcsd_detect_mb, (rt_ubase_t *)&host, RT_WAITING_FOREVER) == RT_EOK)
        {
            if (host->card == RT_NULL)
            {
                /* card inserted: power up and identify the card type */
                mmcsd_host_lock(host);
                mmcsd_power_up(host);
                mmcsd_go_idle(host);

                /* CMD8: announce host voltage, required before ACMD41 on SD 2.0 */
                mmcsd_send_if_cond(host, host->valid_ocr);

                /* try SDIO first (CMD5 responds only on SDIO cards) */
                err = sdio_io_send_op_cond(host, 0, &ocr);
                if (!err)
                {
                    if (init_sdio(host, ocr))
                        mmcsd_power_off(host);
                    /* NOTE(review): unlike the SD/MMC paths below, no
                     * hotplug notification is posted here — verify whether
                     * SDIO waiters are notified elsewhere. */
                    mmcsd_host_unlock(host);
                    continue;
                }

                /*
                 * detect SD card
                 */
                err = mmcsd_send_app_op_cond(host, 0, &ocr);
                if (!err)
                {
                    if (init_sd(host, ocr))
                        mmcsd_power_off(host);
                    mmcsd_host_unlock(host);
                    rt_mb_send(&mmcsd_hotpluge_mb, (rt_ubase_t)host);
                    continue;
                }

                /*
                 * detect mmc card
                 */
                err = mmc_send_op_cond(host, 0, &ocr);
                if (!err)
                {
                    if (init_mmc(host, ocr))
                        mmcsd_power_off(host);
                    mmcsd_host_unlock(host);
                    rt_mb_send(&mmcsd_hotpluge_mb, (rt_ubase_t)host);
                    continue;
                }
                /* nothing responded: leave the host idle */
                mmcsd_host_unlock(host);
            }
            else
            {
                /* card removed */
                mmcsd_host_lock(host);
                if (host->card->sdio_function_num != 0)
                {
                    LOG_W("unsupport sdio card plug out!");
                }
                else
                {
                    /* detach the block device, then release the card object */
                    rt_mmcsd_blk_remove(host->card);
                    rt_free(host->card);

                    host->card = RT_NULL;
                }
                mmcsd_host_unlock(host);
                rt_mb_send(&mmcsd_hotpluge_mb, (rt_ubase_t)host);
            }
        }
    }
}
700
mmcsd_host_init(struct rt_mmcsd_host * host)701 void mmcsd_host_init(struct rt_mmcsd_host *host)
702 {
703 rt_memset(host, 0, sizeof(struct rt_mmcsd_host));
704 strncpy(host->name, "sd", sizeof(host->name) - 1);
705 host->max_seg_size = 65535;
706 host->max_dma_segs = 1;
707 host->max_blk_size = 512;
708 host->max_blk_count = 4096;
709
710 rt_mutex_init(&host->bus_lock, "sd_bus_lock", RT_IPC_FLAG_FIFO);
711 rt_sem_init(&host->sem_ack, "sd_ack", 0, RT_IPC_FLAG_FIFO);
712 }
713
mmcsd_alloc_host(void)714 struct rt_mmcsd_host *mmcsd_alloc_host(void)
715 {
716 struct rt_mmcsd_host *host;
717
718 host = rt_malloc(sizeof(struct rt_mmcsd_host));
719 if (!host)
720 {
721 LOG_E("alloc host failed");
722
723 return RT_NULL;
724 }
725
726 mmcsd_host_init(host);
727
728 return host;
729 }
730
/* Detach the host's IPC objects and release a host allocated by mmcsd_alloc_host(). */
void mmcsd_free_host(struct rt_mmcsd_host *host)
{
    rt_mutex_detach(&host->bus_lock);
    rt_sem_detach(&host->sem_ack);
    rt_free(host);
}
737
mmcsd_excute_tuning(struct rt_mmcsd_card * card)738 rt_int32_t mmcsd_excute_tuning(struct rt_mmcsd_card *card)
739 {
740 struct rt_mmcsd_host *host = card->host;
741 rt_int32_t opcode;
742
743 if (!host->ops->execute_tuning)
744 return RT_EOK;
745
746 if (card->card_type == CARD_TYPE_MMC)
747 opcode = SEND_TUNING_BLOCK_HS200;
748 else
749 opcode = SEND_TUNING_BLOCK;
750
751 return host->ops->execute_tuning(host, opcode);;
752 }
753
rt_mmcsd_core_init(void)754 int rt_mmcsd_core_init(void)
755 {
756 rt_err_t ret;
757
758 /* initialize detect SD cart thread */
759 /* initialize mailbox and create detect SD card thread */
760 ret = rt_mb_init(&mmcsd_detect_mb, "mmcsdmb",
761 &mmcsd_detect_mb_pool[0], sizeof(mmcsd_detect_mb_pool) / sizeof(mmcsd_detect_mb_pool[0]),
762 RT_IPC_FLAG_FIFO);
763 RT_ASSERT(ret == RT_EOK);
764
765 ret = rt_mb_init(&mmcsd_hotpluge_mb, "mmcsdhotplugmb",
766 &mmcsd_hotpluge_mb_pool[0], sizeof(mmcsd_hotpluge_mb_pool) / sizeof(mmcsd_hotpluge_mb_pool[0]),
767 RT_IPC_FLAG_FIFO);
768 RT_ASSERT(ret == RT_EOK);
769 ret = rt_thread_init(&mmcsd_detect_thread, "mmcsd_detect", mmcsd_detect, RT_NULL,
770 &mmcsd_stack[0], RT_MMCSD_STACK_SIZE, RT_MMCSD_THREAD_PRIORITY, 20);
771 if (ret == RT_EOK)
772 {
773 rt_thread_startup(&mmcsd_detect_thread);
774 }
775
776 rt_sdio_init();
777
778 return 0;
779 }
780 INIT_PREV_EXPORT(rt_mmcsd_core_init);
781
782