1 /*
2 * Copyright (c) 2006-2023, RT-Thread Development Team
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 * Change Logs:
7 * Date Author Notes
8 * 2011-07-25 weety first version
9 * 2023-08-08 GuEe-GUI port to the block
10 */
11
12 #include <rtthread.h>
13 #include <drivers/blk.h>
14 #include <drivers/misc.h>
15 #include <drivers/dev_mmcsd_core.h>
16
17 #define DBG_TAG "SDIO"
18 #ifdef RT_SDIO_DEBUG
19 #define DBG_LVL DBG_LOG
20 #else
21 #define DBG_LVL DBG_INFO
22 #endif /* RT_SDIO_DEBUG */
23 #include <rtdbg.h>
24
25 #ifndef RT_MMCSD_MAX_PARTITION
26 #define RT_MMCSD_MAX_PARTITION 16
27 #endif
28
/*
 * Per-card block device context: couples the generic block disk layer
 * (rt_blk_disk) with the underlying MMC/SD card.
 */
struct mmcsd_blk_device
{
    struct rt_blk_disk parent;              /* generic disk base; raw_to_mmcsd_blk() recovers this struct from it */
    struct rt_mmcsd_card *card;             /* underlying MMC/SD card handle */

    rt_size_t max_req_size;                 /* largest single transfer, in 512-byte sectors (set in probe) */
    struct rt_device_blk_geometry geometry; /* cached geometry returned by mmcsd_blk_getgeome() */
};
37
38 #define raw_to_mmcsd_blk(raw) rt_container_of(raw, struct mmcsd_blk_device, parent)
39
40 #ifdef RT_USING_DM
41 static struct rt_dm_ida sdio_ida = RT_DM_IDA_INIT(SDIO);
42 #endif
43
/*
 * Issue CMD13 (SEND_STATUS) to the card and, on success, optionally
 * store the R1 status word through *status. Returns 0 or a negative
 * error from mmcsd_send_cmd().
 */
static int __send_status(struct rt_mmcsd_card *card, rt_uint32_t *status, unsigned retries)
{
    struct rt_mmcsd_cmd cmd;
    int ret;

    cmd.busy_timeout = 0;
    cmd.cmd_code = SEND_STATUS;
    cmd.arg = card->rca << 16;
    cmd.flags = RESP_R1 | CMD_AC;

    ret = mmcsd_send_cmd(card->host, &cmd, retries);
    if (!ret && status)
    {
        *status = cmd.resp[0];
    }

    return ret;
}
62
/*
 * Poll the card with CMD13 until it reports READY_FOR_DATA and has left
 * the programming state (state 7), or until timeout_ms elapses.
 * Any status bits observed along the way are OR-ed into *resp_errs
 * when the caller supplies it.
 */
static int card_busy_detect(struct rt_mmcsd_card *card, unsigned int timeout_ms,
                            rt_uint32_t *resp_errs)
{
    rt_tick_t begin = rt_tick_get();
    int timeout_ticks = rt_tick_from_millisecond(timeout_ms);
    rt_uint32_t status;
    int ret;

    for (;;)
    {
        /* Sample the deadline before the (possibly slow) status poll */
        rt_bool_t expired = (int)(rt_tick_get() - begin) > timeout_ticks;

        ret = __send_status(card, &status, 5);
        if (ret)
        {
            LOG_E("error %d requesting status", ret);
            return ret;
        }

        /* Accumulate any response error bits seen */
        if (resp_errs)
        {
            *resp_errs |= status;
        }

        if (expired)
        {
            LOG_E("wait card busy timeout");
            return -RT_ETIMEOUT;
        }

        /*
         * Some cards mishandle the status bits, so make sure to check
         * both the busy indication and the card state.
         */
        if ((status & R1_READY_FOR_DATA) && (R1_CURRENT_STATE(status) != 7))
        {
            break;
        }
    }

    return ret;
}
103
/*
 * Ask an SD card how many blocks were well written before a write error,
 * via ACMD22 (SD_APP_SEND_NUM_WR_BLKS).
 *
 * Returns the block count on success, or -RT_ERROR on any command or
 * data failure.
 *
 * NOTE(review): the 4-byte result is read raw into a host rt_uint32_t;
 * presumably the SD spec defines its byte order — confirm whether a
 * byte swap is needed on this platform.
 */
rt_int32_t mmcsd_num_wr_blocks(struct rt_mmcsd_card *card)
{
    rt_int32_t err;
    rt_uint32_t blocks;

    struct rt_mmcsd_req req;
    struct rt_mmcsd_cmd cmd;
    struct rt_mmcsd_data data;
    rt_uint32_t timeout_us;

    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));

    /* CMD55 first: the next command is application specific */
    cmd.cmd_code = APP_CMD;
    cmd.arg = card->rca << 16;
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_AC;

    err = mmcsd_send_cmd(card->host, &cmd, 0);
    if (err)
        return -RT_ERROR;
    /* In SD (non-SPI) mode the card must acknowledge APP_CMD in R1 */
    if (!controller_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
        return -RT_ERROR;

    rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));

    /* ACMD22: read back the well-written block count as 4 data bytes */
    cmd.cmd_code = SD_APP_SEND_NUM_WR_BLKS;
    cmd.arg = 0;
    cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;

    rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));

    /* Allow 100x the card's nominal access time for this transfer */
    data.timeout_ns = card->tacc_ns * 100;
    data.timeout_clks = card->tacc_clks * 100;

    timeout_us = data.timeout_ns / 1000;
    timeout_us += data.timeout_clks * 1000 /
        (card->host->io_cfg.clock / 1000);

    /* Cap the overall data timeout at 100 ms */
    if (timeout_us > 100000)
    {
        data.timeout_ns = 100000000;
        data.timeout_clks = 0;
    }

    data.blksize = 4;
    data.blks = 1;
    data.flags = DATA_DIR_READ;
    data.buf = &blocks;

    rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));

    req.cmd = &cmd;
    req.data = &data;

    mmcsd_send_request(card->host, &req);

    if (cmd.err || data.err)
        return -RT_ERROR;

    return blocks;
}
164
rt_mmcsd_req_blk(struct rt_mmcsd_card * card,rt_uint32_t sector,void * buf,rt_size_t blks,rt_uint8_t dir)165 static rt_err_t rt_mmcsd_req_blk(struct rt_mmcsd_card *card,
166 rt_uint32_t sector,
167 void *buf,
168 rt_size_t blks,
169 rt_uint8_t dir)
170 {
171 struct rt_mmcsd_cmd cmd, stop;
172 struct rt_mmcsd_data data;
173 struct rt_mmcsd_req req;
174 struct rt_mmcsd_host *host = card->host;
175 rt_uint32_t r_cmd, w_cmd;
176
177 mmcsd_host_lock(host);
178 rt_memset(&req, 0, sizeof(struct rt_mmcsd_req));
179 rt_memset(&cmd, 0, sizeof(struct rt_mmcsd_cmd));
180 rt_memset(&stop, 0, sizeof(struct rt_mmcsd_cmd));
181 rt_memset(&data, 0, sizeof(struct rt_mmcsd_data));
182 req.cmd = &cmd;
183 req.data = &data;
184
185 cmd.arg = sector;
186 if (!(card->flags & CARD_FLAG_SDHC))
187 {
188 cmd.arg <<= 9;
189 }
190 cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_ADTC;
191
192 data.blksize = SECTOR_SIZE;
193 data.blks = blks;
194
195 if (blks > 1)
196 {
197 if (!controller_is_spi(card->host) || !dir)
198 {
199 req.stop = &stop;
200 stop.cmd_code = STOP_TRANSMISSION;
201 stop.arg = 0;
202 stop.flags = RESP_SPI_R1B | RESP_R1B | CMD_AC;
203 }
204 r_cmd = READ_MULTIPLE_BLOCK;
205 w_cmd = WRITE_MULTIPLE_BLOCK;
206 }
207 else
208 {
209 req.stop = RT_NULL;
210 r_cmd = READ_SINGLE_BLOCK;
211 w_cmd = WRITE_BLOCK;
212 }
213
214 if (!controller_is_spi(card->host) && (card->flags & 0x8000))
215 {
216 /* last request is WRITE,need check busy */
217 card_busy_detect(card, 10000, RT_NULL);
218 }
219
220 if (!dir)
221 {
222 cmd.cmd_code = r_cmd;
223 data.flags |= DATA_DIR_READ;
224 card->flags &= 0x7fff;
225 }
226 else
227 {
228 cmd.cmd_code = w_cmd;
229 data.flags |= DATA_DIR_WRITE;
230 card->flags |= 0x8000;
231 }
232
233 mmcsd_set_data_timeout(&data, card);
234 data.buf = buf;
235
236 mmcsd_send_request(host, &req);
237
238 mmcsd_host_unlock(host);
239
240 if (cmd.err || data.err || stop.err)
241 {
242 LOG_E("mmcsd request blocks error");
243 LOG_E("%d,%d,%d, 0x%08x,0x%08x",
244 cmd.err, data.err, stop.err, data.flags, sector);
245
246 return -RT_ERROR;
247 }
248
249 return RT_EOK;
250 }
251
mmcsd_set_blksize(struct rt_mmcsd_card * card)252 static rt_int32_t mmcsd_set_blksize(struct rt_mmcsd_card *card)
253 {
254 struct rt_mmcsd_cmd cmd;
255 int err;
256
257 /* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
258 if (card->flags & CARD_FLAG_SDHC)
259 return 0;
260
261 mmcsd_host_lock(card->host);
262 cmd.cmd_code = SET_BLOCKLEN;
263 cmd.arg = 512;
264 cmd.flags = RESP_SPI_R1 | RESP_R1 | CMD_AC;
265 err = mmcsd_send_cmd(card->host, &cmd, 5);
266 mmcsd_host_unlock(card->host);
267
268 if (err)
269 {
270 LOG_E("MMCSD: unable to set block size to %d: %d", cmd.arg, err);
271
272 return -RT_ERROR;
273 }
274
275 return 0;
276 }
277
mmcsd_blk_read(struct rt_blk_disk * disk,rt_off_t sector,void * buffer,rt_size_t sector_count)278 static rt_ssize_t mmcsd_blk_read(struct rt_blk_disk *disk, rt_off_t sector,
279 void *buffer, rt_size_t sector_count)
280 {
281 rt_err_t err;
282 rt_size_t offset = 0;
283 rt_size_t req_size = 0;
284 rt_size_t remain_size = sector_count;
285 void *rd_ptr = (void *)buffer;
286 struct mmcsd_blk_device *blk_dev = raw_to_mmcsd_blk(disk);
287
288 while (remain_size)
289 {
290 req_size = rt_min_t(rt_size_t, remain_size, blk_dev->max_req_size);
291
292 err = rt_mmcsd_req_blk(blk_dev->card, sector + offset, rd_ptr, req_size, 0);
293
294 if (err)
295 {
296 return err;
297 }
298
299 offset += req_size;
300 rd_ptr = (void *)((rt_uint8_t *)rd_ptr + (req_size << 9));
301 remain_size -= req_size;
302 }
303
304 return sector_count - remain_size;
305 }
306
mmcsd_blk_write(struct rt_blk_disk * disk,rt_off_t sector,const void * buffer,rt_size_t sector_count)307 static rt_ssize_t mmcsd_blk_write(struct rt_blk_disk *disk, rt_off_t sector,
308 const void *buffer, rt_size_t sector_count)
309 {
310 rt_err_t err;
311 rt_size_t offset = 0;
312 rt_size_t req_size = 0;
313 rt_size_t remain_size = sector_count;
314 void *wr_ptr = (void *)buffer;
315 struct mmcsd_blk_device *blk_dev = raw_to_mmcsd_blk(disk);
316
317 while (remain_size)
318 {
319 req_size = rt_min_t(rt_size_t, remain_size, blk_dev->max_req_size);
320
321 err = rt_mmcsd_req_blk(blk_dev->card, sector + offset, wr_ptr, req_size, 1);
322
323 if (err)
324 {
325 return err;
326 }
327
328 offset += req_size;
329 wr_ptr = (void *)((rt_uint8_t *)wr_ptr + (req_size << 9));
330 remain_size -= req_size;
331 }
332
333 return sector_count - remain_size;
334 }
335
mmcsd_blk_getgeome(struct rt_blk_disk * disk,struct rt_device_blk_geometry * geometry)336 static rt_err_t mmcsd_blk_getgeome(struct rt_blk_disk *disk,
337 struct rt_device_blk_geometry *geometry)
338 {
339 struct mmcsd_blk_device *blk_dev = raw_to_mmcsd_blk(disk);
340
341 rt_memcpy(geometry, &blk_dev->geometry, sizeof(*geometry));
342
343 return RT_EOK;
344 }
345
/* Disk operations handed to the generic block disk layer at registration */
static const struct rt_blk_disk_ops mmcsd_blk_ops =
{
    .read = mmcsd_blk_read,
    .write = mmcsd_blk_write,
    .getgeome = mmcsd_blk_getgeome,
};
352
rt_mmcsd_blk_probe(struct rt_mmcsd_card * card)353 rt_int32_t rt_mmcsd_blk_probe(struct rt_mmcsd_card *card)
354 {
355 rt_err_t err;
356 struct rt_mmcsd_host *host = card->host;
357 struct mmcsd_blk_device *blk_dev = rt_calloc(1, sizeof(*blk_dev));
358
359 if (!blk_dev)
360 {
361 return -RT_ENOMEM;
362 }
363 card->blk_dev = blk_dev;
364
365 #ifdef RT_USING_DM
366 blk_dev->parent.ida = &sdio_ida;
367 #endif
368 blk_dev->parent.parallel_io = RT_FALSE;
369 blk_dev->parent.removable = controller_is_removable(host);
370 blk_dev->parent.ops = &mmcsd_blk_ops;
371 blk_dev->parent.max_partitions = RT_MMCSD_MAX_PARTITION;
372
373 blk_dev->card = card;
374 blk_dev->max_req_size = rt_min_t(rt_size_t,
375 host->max_dma_segs * host->max_seg_size,
376 host->max_blk_count * host->max_blk_size) >> 9;
377 blk_dev->geometry.bytes_per_sector = 1 << 9;
378 blk_dev->geometry.block_size = card->card_blksize;
379 blk_dev->geometry.sector_count = card->card_capacity * (1024 / 512);
380
381 /* Set blk size before partitions probe, Why? */
382 if ((err = mmcsd_set_blksize(card)))
383 {
384 goto _fail;
385 }
386 rt_thread_mdelay(1);
387
388 #ifdef RT_USING_DM
389 rt_dm_dev_set_name(&blk_dev->parent.parent, host->name);
390 #else
391 rt_strncpy(blk_dev->parent.parent.parent.name, host->name, RT_NAME_MAX);
392 #endif
393
394 if ((err = rt_hw_blk_disk_register(&blk_dev->parent)))
395 {
396 goto _fail;
397 }
398
399 return RT_EOK;
400
401 _fail:
402 card->blk_dev = RT_NULL;
403 free(blk_dev);
404
405 return err;
406 }
407
rt_mmcsd_blk_remove(struct rt_mmcsd_card * card)408 void rt_mmcsd_blk_remove(struct rt_mmcsd_card *card)
409 {
410 struct mmcsd_blk_device *blk_dev = card->blk_dev;
411
412 if (!blk_dev)
413 {
414 return;
415 }
416
417 if (!rt_hw_blk_disk_unregister(&blk_dev->parent))
418 {
419 card->blk_dev = RT_NULL;
420 rt_free(blk_dev);
421 }
422 }
423