/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author      Notes
 * 2017-08-08     Yang        the first version
 * 2019-07-19     Magicoe     The first version for LPC55S6x
 * 2023-02-0      Alex Yang   update driver
 */

#include "board.h"
#include <rtdevice.h>
#include "fsl_sdif.h"

#ifdef RT_USING_SDIO

//#define MMCSD_DEBUG

#ifdef MMCSD_DEBUG
#define MMCSD_DGB rt_kprintf
#else
#define MMCSD_DGB(fmt, ...)
#endif

#define SDMMCHOST_RESET_TIMEOUT_VALUE (1000000U)

struct lpc_mmcsd
{
    struct rt_mmcsd_host *host;
    SDIF_Type *SDIFx;
    uint32_t sdmmcHostDmaBuffer[0x40];
};

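/* Recover the SDIF host after a failed transfer: reset the controller and
   re-enable the card clock that the reset disables. */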
static void SDMMCHOST_ErrorRecovery(SDIF_Type *base)
{
    (void)SDIF_Reset(base, kSDIF_ResetAll, SDMMCHOST_RESET_TIMEOUT_VALUE);
    /* the host controller clock will be disabled by the reset operation, so re-send the clock sync command to enable
       the output clock */
    sdif_command_t clockSync = {
        .flags = kSDIF_WaitPreTransferComplete | kSDIF_CmdUpdateClockRegisterOnly, .index = 0U, .argument = 0U};
    (void)SDIF_SendCommand(base, &clockSync, 0U);
}

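/* Handle a request from the RT-Thread MMC/SD core: translate the command and
   optional data phase into SDK structures and run a blocking DMA transfer. */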
static void lpc_sdmmc_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req)
{
    struct lpc_mmcsd *mmcsd;
    struct rt_mmcsd_cmd *cmd;
    struct rt_mmcsd_data *data;
    /* optional bounce buffer; it is never allocated in this driver, so the
       transfer works directly on data->buf */
    rt_uint32_t *buf = NULL;

    status_t error;

    mmcsd = (struct lpc_mmcsd *) host->private_data;
    cmd = req->cmd;
    data = cmd->data;

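    /* Describe the DMA descriptor chain used by the SDIF controller; the
       descriptors live in the per-host sdmmcHostDmaBuffer. */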
    sdif_dma_config_t dmaConfig;

    dmaConfig.enableFixBurstLen = false;
    dmaConfig.mode = kSDIF_ChainDMAMode;
    dmaConfig.dmaDesBufferStartAddr = mmcsd->sdmmcHostDmaBuffer;
    dmaConfig.dmaDesBufferLen = 0x40;
    dmaConfig.dmaDesSkipLen = 0U;

    sdif_transfer_t fsl_content = {0};
    sdif_command_t fsl_command = {0};
    sdif_data_t fsl_data = {0};

    fsl_content.command = &fsl_command;
    fsl_content.data = &fsl_data;

    // MMCSD_DGB("ARG:0x%X, CODE:0x%X\r\n", cmd->arg, cmd->cmd_code);

    fsl_command.index = cmd->cmd_code;
    fsl_command.argument = cmd->arg;

    if (cmd->cmd_code == STOP_TRANSMISSION)
        fsl_command.type = kCARD_CommandTypeAbort;
    else
        fsl_command.type = kCARD_CommandTypeNormal;

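    /* Map the RT-Thread response flags onto the SDK response types. */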
    switch (cmd->flags & RESP_MASK)
    {
    case RESP_NONE:
        fsl_command.responseType = kCARD_ResponseTypeNone;
        break;
    case RESP_R1:
        fsl_command.responseType = kCARD_ResponseTypeR1;
        break;
    case RESP_R1B:
        fsl_command.responseType = kCARD_ResponseTypeR1b;
        break;
    case RESP_R2:
        fsl_command.responseType = kCARD_ResponseTypeR2;
        break;
    case RESP_R3:
        fsl_command.responseType = kCARD_ResponseTypeR3;
        break;
    case RESP_R4:
        fsl_command.responseType = kCARD_ResponseTypeR4;
        break;
    case RESP_R6:
        fsl_command.responseType = kCARD_ResponseTypeR6;
        break;
    case RESP_R7:
        fsl_command.responseType = kCARD_ResponseTypeR7;
        break;
    case RESP_R5:
        fsl_command.responseType = kCARD_ResponseTypeR5;
        break;
    default:
        RT_ASSERT(NULL);
    }

    fsl_command.flags = 0;
    fsl_content.command = &fsl_command;

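    /* Set up the data phase (block size/count, direction, auto CMD12) when
       the request carries one. */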
    if (data)
    {
        if (req->stop != NULL)
            fsl_data.enableAutoCommand12 = true;
        else
            fsl_data.enableAutoCommand12 = false;

        fsl_data.enableIgnoreError = false;
        fsl_data.blockSize = data->blksize;
        fsl_data.blockCount = data->blks;

        if ((cmd->cmd_code == WRITE_BLOCK) || (cmd->cmd_code == WRITE_MULTIPLE_BLOCK))
        {
            if (buf)
            {
                MMCSD_DGB(" write(data->buf to buf) ");
                rt_memcpy(buf, data->buf, fsl_data.blockSize * fsl_data.blockCount);
                fsl_data.txData = (uint32_t const *)buf;
            }
            else
            {
                fsl_data.txData = (uint32_t const *)data->buf;
            }

            fsl_data.rxData = NULL;
        }
        else
        {
            if (buf)
            {
                fsl_data.rxData = (uint32_t *)buf;
            }
            else
            {
                fsl_data.rxData = (uint32_t *)data->buf;
            }

            fsl_data.txData = NULL;
        }

        fsl_content.data = &fsl_data;
    }
    else
    {
        fsl_content.data = NULL;
    }

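    /* Run the transfer synchronously; on failure, reset the controller and
       report the error back to the MMC/SD core. */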
    error = SDIF_TransferBlocking(mmcsd->SDIFx, &dmaConfig, &fsl_content);
    if (error != kStatus_Success)
    {
        SDMMCHOST_ErrorRecovery(mmcsd->SDIFx);
        MMCSD_DGB(" ***SDIF_TransferBlocking error: %d*** --> \n", error);
        cmd->err = -RT_ERROR;
    }

    if (buf)
    {
        if (fsl_data.rxData)
        {
            MMCSD_DGB("read copy buf to data->buf ");
            rt_memcpy(data->buf, buf, fsl_data.blockSize * fsl_data.blockCount);
        }

        rt_free_align(buf);
    }

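    /* Copy the response back; for long (R2) responses the four 32-bit words
       are reversed so that resp[0] holds the most significant word, as the
       RT-Thread MMC/SD core expects. */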
    if ((cmd->flags & RESP_MASK) == RESP_R2)
    {
        cmd->resp[3] = fsl_command.response[0];
        cmd->resp[2] = fsl_command.response[1];
        cmd->resp[1] = fsl_command.response[2];
        cmd->resp[0] = fsl_command.response[3];
        // MMCSD_DGB(" resp 0x%08X 0x%08X 0x%08X 0x%08X\n", cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
    }
    else
    {
        cmd->resp[0] = fsl_command.response[0];
        // MMCSD_DGB(" resp 0x%08X\n", cmd->resp[0]);
    }

    mmcsd_req_complete(host);
}

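/* Apply the bus configuration requested by the MMC/SD core: card clock and bus width. */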
static void lpc_sdmmc_set_iocfg(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *io_cfg)
{
    //rt_kprintf("%s\r\n", __FUNCTION__);
    struct lpc_mmcsd *mmcsd;

    mmcsd = (struct lpc_mmcsd *) host->private_data;

    uint32_t sdxc_clock = io_cfg->clock;

    MMCSD_DGB("sdxc_clock:%d\r\n", sdxc_clock);
    MMCSD_DGB("bus_width:%d\r\n", io_cfg->bus_width);

    if (sdxc_clock != 0U)
    {
        SDIF_SetCardClock(mmcsd->SDIFx, CLOCK_GetSdioClkFreq(), sdxc_clock);

        switch (io_cfg->bus_width)
        {
        case MMCSD_BUS_WIDTH_4:
            SDIF_SetCardBusWidth(mmcsd->SDIFx, kSDIF_Bus4BitWidth);
            break;
        case MMCSD_BUS_WIDTH_8:
            SDIF_SetCardBusWidth(mmcsd->SDIFx, kSDIF_Bus8BitWidth);
            break;
        default:
            SDIF_SetCardBusWidth(mmcsd->SDIFx, kSDIF_Bus1BitWidth);
            break;
        }
    }

    rt_thread_mdelay(20);
}

static const struct rt_mmcsd_host_ops lpc_mmcsd_host_ops =
{
    .request = lpc_sdmmc_request,
    .set_iocfg = lpc_sdmmc_set_iocfg,
    .get_card_status = NULL,
    .enable_sdio_irq = NULL, // Do not use the interrupt mode, use DMA instead
};

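/* Register the SDIF controller with the RT-Thread MMC/SD core at boot time
   (via INIT_DEVICE_EXPORT below). */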
int rt_hw_sdio_init(void)
{
    struct rt_mmcsd_host *host = NULL;
    struct lpc_mmcsd *mmcsd = NULL;

    host = mmcsd_alloc_host();
    if (!host)
    {
        return -RT_ERROR;
    }

    mmcsd = rt_malloc(sizeof(struct lpc_mmcsd));
    if (!mmcsd)
    {
        rt_kprintf("alloc mci failed\n");
        goto err;
    }

    rt_memset(mmcsd, 0, sizeof(struct lpc_mmcsd));
    mmcsd->SDIFx = SDIF;

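    /* Advertise the controller's capabilities and transfer limits to the MMC/SD core. */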
    host->ops = &lpc_mmcsd_host_ops;
    host->freq_min = 375000;
    host->freq_max = 50000000;
    host->valid_ocr = VDD_30_31 | VDD_31_32 | VDD_32_33 | VDD_33_34;
    host->flags = MMCSD_MUTBLKWRITE | MMCSD_BUSWIDTH_4 | MMCSD_SUP_HIGHSPEED | MMCSD_SUP_SDIO_IRQ;

    host->max_seg_size = 65535;
    host->max_dma_segs = 2;
    host->max_blk_size = 512;
    host->max_blk_count = 4096;

    mmcsd->host = host;

    /* Route the main clock to the SDIO clock and divide it down to a rate the SDIF can accept. */
    CLOCK_AttachClk(kMAIN_CLK_to_SDIO_CLK);
    CLOCK_SetClkDiv(kCLOCK_DivSdioClk, (uint32_t)(SystemCoreClock / FSL_FEATURE_SDIF_MAX_SOURCE_CLOCK + 1U), true);

    MMCSD_DGB("SDIO clock:%dHz\r\n", CLOCK_GetSdioClkFreq());

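    /* Initialise the SDIF controller with long response/data timeouts and
       card-detect debounce. */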
    sdif_config_t sdif_config = {0};

    sdif_config.responseTimeout = 0xFFU;
    sdif_config.cardDetDebounce_Clock = 0xFFFFFFU;
    sdif_config.dataTimeout = 0xFFFFFFU;
    SDIF_Init(mmcsd->SDIFx, &sdif_config);

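    /* Power-cycle the card slot so the card starts from a known state. */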
    SDIF_EnableCardPower(mmcsd->SDIFx, false);
    SDIF_EnableCardPower(mmcsd->SDIFx, true);

    host->private_data = mmcsd;

    mmcsd_change(host);

    return 0;

err:
    mmcsd_free_host(host);

    return -RT_ENOMEM;
}
INIT_DEVICE_EXPORT(rt_hw_sdio_init);

#endif /* RT_USING_SDIO */