/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2017-10-10     Tanek        first version
 * 2021-07-07     linzhenxing  add sd card drivers in mmu
 * 2021-07-14     linzhenxing  add emmc
 */

#include <rtthread.h>
#include <rthw.h>
#include <drivers/dev_mmcsd_core.h>
#include <ioremap.h>

#include <board.h>
#include <fsl_usdhc.h>
#include <fsl_gpio.h>
#include <fsl_iomuxc.h>

#include <string.h>

#define DBG_TAG "drv_sdio"
#ifdef RT_SDIO_DEBUG
#define DBG_LVL DBG_LOG
#else
#define DBG_LVL DBG_INFO
#endif /* RT_SDIO_DEBUG */
#include <rtdbg.h>

#define CACHE_LINESIZE              (32)

#define USDHC_ADMA_TABLE_WORDS      (8U)   /* define the ADMA descriptor table length */
#define USDHC_ADMA2_ADDR_ALIGN      (4U)   /* define the ADMA2 descriptor table addr align size */
#define IMXRT_MAX_FREQ              (52UL * 1000UL * 1000UL)

#define USDHC_READ_BURST_LEN        (8U)   /*!< number of words USDHC read in a single burst */
#define USDHC_WRITE_BURST_LEN       (8U)   /*!< number of words USDHC write in a single burst */
#define USDHC_DATA_TIMEOUT          (0xFU) /*!< data timeout counter value */

/* Read/write watermark level. The bigger value indicates DMA has higher read/write performance. */
#define USDHC_READ_WATERMARK_LEVEL  (0x80U)
#define USDHC_WRITE_WATERMARK_LEVEL (0x80U)

/* DMA mode */
#define USDHC_DMA_MODE              kUSDHC_DmaModeAdma2

/* Endian mode. */
#define USDHC_ENDIAN_MODE           kUSDHC_EndianModeLittle

static uint32_t g_usdhcAdma2Table[USDHC_ADMA_TABLE_WORDS];
struct rt_mmcsd_host *host1;
struct rt_mmcsd_host *host2;
static rt_mutex_t mmcsd_mutex = RT_NULL;

void host_change(void);

struct imxrt_mmcsd
{
    struct rt_mmcsd_host *host;
    struct rt_mmcsd_req *req;
    struct rt_mmcsd_cmd *cmd;

    struct rt_timer timer;

    rt_uint32_t *buf;

    usdhc_host_t usdhc_host;
    clock_div_t usdhc_div;
    clock_ip_name_t ip_clock;

    uint32_t *usdhc_adma2_table;
};

/*! @name Configuration */
/*@{*/

/*!
 * @brief Sets the IOMUXC pin mux mode.
 * @note The first five parameters can be filled with the pin function ID macros.
 *
 * This is an example to set the ENET1_RX_DATA0 Pad as FLEXCAN1_TX:
 * @code
 * IOMUXC_SetPinMux(IOMUXC_ENET1_RX_DATA0_FLEXCAN1_TX, 0);
 * @endcode
 *
 * This is an example to set the GPIO1_IO02 Pad as I2C1_SCL:
 * @code
 * IOMUXC_SetPinMux(IOMUXC_GPIO1_IO02_I2C1_SCL, 0);
 * @endcode
 *
 * @param muxRegister    The pin mux register.
 * @param muxMode        The pin mux mode.
 * @param inputRegister  The select input register.
 * @param inputDaisy     The input daisy.
 * @param configRegister The config register.
 * @param inputOnfield   Software input on field.
 */
static inline void _IOMUXC_SetPinMux(uint32_t muxRegister,
                                     uint32_t muxMode,
                                     uint32_t inputRegister,
                                     uint32_t inputDaisy,
                                     uint32_t configRegister,
                                     uint32_t inputOnfield)
{
    *((volatile uint32_t *)rt_ioremap((void *)muxRegister, 0x4)) =
        IOMUXC_SW_MUX_CTL_PAD_MUX_MODE(muxMode) | IOMUXC_SW_MUX_CTL_PAD_SION(inputOnfield);

    if (inputRegister)
    {
        *((volatile uint32_t *)rt_ioremap((void *)inputRegister, 0x4)) = IOMUXC_SELECT_INPUT_DAISY(inputDaisy);
    }
}

/*!
 * @brief Sets the IOMUXC pin configuration.
 * @note The previous five parameters can be filled with the pin function ID macros.
 *
 * This is an example to set pin configuration for IOMUXC_GPIO1_IO02_I2C1_SCL:
 * @code
 * IOMUXC_SetPinConfig(IOMUXC_GPIO1_IO02_I2C1_SCL, IOMUXC_SW_PAD_CTL_PAD_PUE_MASK | IOMUXC_SW_PAD_CTL_PAD_PUS(2U));
 * @endcode
 *
 * @param muxRegister    The pin mux register.
 * @param muxMode        The pin mux mode.
 * @param inputRegister  The select input register.
 * @param inputDaisy     The input daisy.
 * @param configRegister The config register.
 * @param configValue    The pin config value.
 */
static inline void _IOMUXC_SetPinConfig(uint32_t muxRegister,
                                        uint32_t muxMode,
                                        uint32_t inputRegister,
                                        uint32_t inputDaisy,
                                        uint32_t configRegister,
                                        uint32_t configValue)
{
    if (configRegister)
    {
        *((volatile uint32_t *)rt_ioremap((void *)configRegister, 0x4)) = configValue;
    }
}

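/*!
 * @brief Configures the IOMUXC pad settings for the enabled uSDHC instances.
 *
 * Routes the uSDHC1 (SD card) pads when RT_USING_SDIO1 is defined and the
 * uSDHC2 (eMMC) pads when RT_USING_SDIO2 is defined.
 *
 * @param mmcsd Unused; kept for a uniform init interface.
 */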
static void _mmcsd_gpio_init(struct imxrt_mmcsd *mmcsd)
{
    CLOCK_EnableClock(kCLOCK_Iomuxc);   /* iomuxc clock (iomuxc_clk_enable): 0x03u */

#ifdef RT_USING_SDIO1
    /* uSDHC1 pins start */
    _IOMUXC_SetPinMux(IOMUXC_UART1_RTS_B_USDHC1_CD_B, 0U);
    _IOMUXC_SetPinConfig(IOMUXC_UART1_RTS_B_USDHC1_CD_B,
                         IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_DSE(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_HYS_MASK);

    _IOMUXC_SetPinMux(IOMUXC_SD1_CLK_USDHC1_CLK, 0U);
    _IOMUXC_SetPinConfig(IOMUXC_SD1_CLK_USDHC1_CLK,
                         IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_DSE(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_SPEED(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_PKE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUS(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_HYS_MASK);

    _IOMUXC_SetPinMux(IOMUXC_SD1_CMD_USDHC1_CMD, 0U);
    _IOMUXC_SetPinConfig(IOMUXC_SD1_CMD_USDHC1_CMD,
                         IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_DSE(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_SPEED(2U) |
                         IOMUXC_SW_PAD_CTL_PAD_PKE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUS(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_HYS_MASK);

    _IOMUXC_SetPinMux(IOMUXC_SD1_DATA0_USDHC1_DATA0, 0U);
    _IOMUXC_SetPinConfig(IOMUXC_SD1_DATA0_USDHC1_DATA0,
                         IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_DSE(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_SPEED(2U) |
                         IOMUXC_SW_PAD_CTL_PAD_PKE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUS(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_HYS_MASK);

    _IOMUXC_SetPinMux(IOMUXC_SD1_DATA1_USDHC1_DATA1, 0U);
    _IOMUXC_SetPinConfig(IOMUXC_SD1_DATA1_USDHC1_DATA1,
                         IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_DSE(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_SPEED(2U) |
                         IOMUXC_SW_PAD_CTL_PAD_PKE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUS(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_HYS_MASK);

    _IOMUXC_SetPinMux(IOMUXC_SD1_DATA2_USDHC1_DATA2, 0U);
    _IOMUXC_SetPinConfig(IOMUXC_SD1_DATA2_USDHC1_DATA2,
                         IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_DSE(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_SPEED(2U) |
                         IOMUXC_SW_PAD_CTL_PAD_PKE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUS(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_HYS_MASK);

    _IOMUXC_SetPinMux(IOMUXC_SD1_DATA3_USDHC1_DATA3, 0U);
    _IOMUXC_SetPinConfig(IOMUXC_SD1_DATA3_USDHC1_DATA3,
                         IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_DSE(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_SPEED(2U) |
                         IOMUXC_SW_PAD_CTL_PAD_PKE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUS(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_HYS_MASK);

    /* uSDHC1 pins end */
#endif

#ifdef RT_USING_SDIO2
    /* uSDHC2 pins start */
    _IOMUXC_SetPinMux(IOMUXC_NAND_WE_B_USDHC2_CMD, 0U);
    _IOMUXC_SetPinConfig(IOMUXC_NAND_WE_B_USDHC2_CMD,
                         IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_DSE(7U) |
                         IOMUXC_SW_PAD_CTL_PAD_SPEED(2U) |
                         IOMUXC_SW_PAD_CTL_PAD_PKE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUS(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_HYS_MASK);

    _IOMUXC_SetPinMux(IOMUXC_NAND_RE_B_USDHC2_CLK, 0U);
    _IOMUXC_SetPinConfig(IOMUXC_NAND_RE_B_USDHC2_CLK,
                         IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_DSE(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_SPEED(2U) |
                         IOMUXC_SW_PAD_CTL_PAD_PKE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUS(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_HYS_MASK);

    _IOMUXC_SetPinMux(IOMUXC_NAND_ALE_USDHC2_RESET_B, 0U);
    _IOMUXC_SetPinConfig(IOMUXC_NAND_ALE_USDHC2_RESET_B,
                         IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_DSE(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_SPEED(2U) |
                         IOMUXC_SW_PAD_CTL_PAD_PKE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUS(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_HYS_MASK);

    _IOMUXC_SetPinMux(IOMUXC_NAND_DATA00_USDHC2_DATA0, 0U);
    _IOMUXC_SetPinConfig(IOMUXC_NAND_DATA00_USDHC2_DATA0,
                         IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_DSE(7U) |
                         IOMUXC_SW_PAD_CTL_PAD_SPEED(2U) |
                         IOMUXC_SW_PAD_CTL_PAD_PKE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUS(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_HYS_MASK);

    _IOMUXC_SetPinMux(IOMUXC_NAND_DATA01_USDHC2_DATA1, 0U);
    _IOMUXC_SetPinConfig(IOMUXC_NAND_DATA01_USDHC2_DATA1,
                         IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_DSE(7U) |
                         IOMUXC_SW_PAD_CTL_PAD_SPEED(2U) |
                         IOMUXC_SW_PAD_CTL_PAD_PKE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUS(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_HYS_MASK);

    _IOMUXC_SetPinMux(IOMUXC_NAND_DATA02_USDHC2_DATA2, 0U);
    _IOMUXC_SetPinConfig(IOMUXC_NAND_DATA02_USDHC2_DATA2,
                         IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_DSE(7U) |
                         IOMUXC_SW_PAD_CTL_PAD_SPEED(2U) |
                         IOMUXC_SW_PAD_CTL_PAD_PKE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUS(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_HYS_MASK);

    _IOMUXC_SetPinMux(IOMUXC_NAND_DATA03_USDHC2_DATA3, 0U);
    _IOMUXC_SetPinConfig(IOMUXC_NAND_DATA03_USDHC2_DATA3,
                         IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_DSE(7U) |
                         IOMUXC_SW_PAD_CTL_PAD_SPEED(2U) |
                         IOMUXC_SW_PAD_CTL_PAD_PKE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUS(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_HYS_MASK);

    _IOMUXC_SetPinMux(IOMUXC_NAND_DATA04_USDHC2_DATA4, 0U);
    _IOMUXC_SetPinConfig(IOMUXC_NAND_DATA04_USDHC2_DATA4,
                         IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_DSE(7U) |
                         IOMUXC_SW_PAD_CTL_PAD_SPEED(2U) |
                         IOMUXC_SW_PAD_CTL_PAD_PKE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUS(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_HYS_MASK);

    _IOMUXC_SetPinMux(IOMUXC_NAND_DATA05_USDHC2_DATA5, 0U);
    _IOMUXC_SetPinConfig(IOMUXC_NAND_DATA05_USDHC2_DATA5,
                         IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_DSE(7U) |
                         IOMUXC_SW_PAD_CTL_PAD_SPEED(2U) |
                         IOMUXC_SW_PAD_CTL_PAD_PKE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUS(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_HYS_MASK);

    _IOMUXC_SetPinMux(IOMUXC_NAND_DATA06_USDHC2_DATA6, 0U);
    _IOMUXC_SetPinConfig(IOMUXC_NAND_DATA06_USDHC2_DATA6,
                         IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_DSE(7U) |
                         IOMUXC_SW_PAD_CTL_PAD_SPEED(2U) |
                         IOMUXC_SW_PAD_CTL_PAD_PKE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUS(7U) |
                         IOMUXC_SW_PAD_CTL_PAD_HYS_MASK);

    _IOMUXC_SetPinMux(IOMUXC_NAND_DATA07_USDHC2_DATA7, 0U);
    _IOMUXC_SetPinConfig(IOMUXC_NAND_DATA07_USDHC2_DATA7,
                         IOMUXC_SW_PAD_CTL_PAD_SRE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_DSE(7U) |
                         IOMUXC_SW_PAD_CTL_PAD_SPEED(2U) |
                         IOMUXC_SW_PAD_CTL_PAD_PKE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUE_MASK |
                         IOMUXC_SW_PAD_CTL_PAD_PUS(1U) |
                         IOMUXC_SW_PAD_CTL_PAD_HYS_MASK);

    /* uSDHC2 pins end */
#endif
}

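/*!
 * @brief Recovers the uSDHC controller after a failed transfer.
 *
 * Resets the command and/or data line when the corresponding inhibit flag
 * is still set, so that the next request can be issued.
 */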
static void SDMMCHOST_ErrorRecovery(USDHC_Type *base)
{
    uint32_t status = 0U;

    /* get host present status */
    status = USDHC_GetPresentStatusFlags(base);
    /* check command inhibit status flag */
    if ((status & kUSDHC_CommandInhibitFlag) != 0U)
    {
        /* reset command line */
        USDHC_Reset(base, kUSDHC_ResetCommand, 1000U);
    }
    /* check data inhibit status flag */
    if ((status & kUSDHC_DataInhibitFlag) != 0U)
    {
        /* reset data line */
        USDHC_Reset(base, kUSDHC_ResetData, 1000U);
    }
}

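/*!
 * @brief Initializes the uSDHC peripheral with the driver's default timeout,
 *        endianness, watermark and burst length settings.
 */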
static void _mmcsd_host_init(struct imxrt_mmcsd *mmcsd)
{
    usdhc_host_t *usdhc_host = &mmcsd->usdhc_host;

    /* Initializes SDHC. */
    usdhc_host->config.dataTimeout         = USDHC_DATA_TIMEOUT;
    usdhc_host->config.endianMode          = USDHC_ENDIAN_MODE;
    usdhc_host->config.readWatermarkLevel  = USDHC_READ_WATERMARK_LEVEL;
    usdhc_host->config.writeWatermarkLevel = USDHC_WRITE_WATERMARK_LEVEL;
    usdhc_host->config.readBurstLen        = USDHC_READ_BURST_LEN;
    usdhc_host->config.writeBurstLen       = USDHC_WRITE_BURST_LEN;

    USDHC_Init(usdhc_host->base, &(usdhc_host->config));
}

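/*!
 * @brief Enables the uSDHC IP clock and sets its root clock divider.
 */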
static void _mmcsd_clk_init(struct imxrt_mmcsd *mmcsd)
{
    CLOCK_EnableClock(mmcsd->ip_clock);
    CLOCK_SetDiv(mmcsd->usdhc_div, 5U);
}

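/*!
 * @brief Handles a request from the MMC/SD core layer.
 *
 * Translates the rt_mmcsd_req into a uSDHC ADMA2 transfer and issues it with
 * USDHC_TransferBlocking(). Buffers that are not cache-line aligned (or that
 * sit in a region the DMA cannot reach) are copied through a temporary
 * cache-line aligned bounce buffer.
 */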
static void _mmc_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req)
{
    struct imxrt_mmcsd *mmcsd;
    struct rt_mmcsd_cmd *cmd;
    struct rt_mmcsd_data *data;
    status_t error;
    usdhc_adma_config_t dmaConfig;
    usdhc_transfer_t fsl_content = {0};
    usdhc_command_t fsl_command = {0};
    usdhc_data_t fsl_data = {0};
    rt_uint32_t *buf = NULL;

    rt_mutex_take(mmcsd_mutex, RT_WAITING_FOREVER);

    RT_ASSERT(host != RT_NULL);
    RT_ASSERT(req != RT_NULL);

    mmcsd = (struct imxrt_mmcsd *)host->private_data;
    RT_ASSERT(mmcsd != RT_NULL);

    cmd = req->cmd;
    RT_ASSERT(cmd != RT_NULL);

    LOG_D("\tcmd->cmd_code: %02d, cmd->arg: %08x, cmd->flags: %08x --> ", cmd->cmd_code, cmd->arg, cmd->flags);

    data = cmd->data;

    memset(&dmaConfig, 0, sizeof(usdhc_adma_config_t));
    /* config adma */
    dmaConfig.dmaMode = USDHC_DMA_MODE;
    dmaConfig.burstLen = kUSDHC_EnBurstLenForINCR;
    dmaConfig.admaTable = mmcsd->usdhc_adma2_table;
    dmaConfig.admaTableWords = USDHC_ADMA_TABLE_WORDS;

    fsl_command.index = cmd->cmd_code;
    fsl_command.argument = cmd->arg;

    if (cmd->cmd_code == STOP_TRANSMISSION)
        fsl_command.type = kCARD_CommandTypeAbort;
    else
        fsl_command.type = kCARD_CommandTypeNormal;

    switch (cmd->flags & RESP_MASK)
    {
    case RESP_NONE:
        fsl_command.responseType = kCARD_ResponseTypeNone;
        break;
    case RESP_R1:
        fsl_command.responseType = kCARD_ResponseTypeR1;
        break;
    case RESP_R1B:
        fsl_command.responseType = kCARD_ResponseTypeR1b;
        break;
    case RESP_R2:
        fsl_command.responseType = kCARD_ResponseTypeR2;
        break;
    case RESP_R3:
        fsl_command.responseType = kCARD_ResponseTypeR3;
        break;
    case RESP_R4:
        fsl_command.responseType = kCARD_ResponseTypeR4;
        break;
    case RESP_R6:
        fsl_command.responseType = kCARD_ResponseTypeR6;
        break;
    case RESP_R7:
        fsl_command.responseType = kCARD_ResponseTypeR7;
        break;
    case RESP_R5:
        fsl_command.responseType = kCARD_ResponseTypeR5;
        break;
    default:
        RT_ASSERT(NULL);
    }

    fsl_command.flags = 0;
    fsl_content.command = &fsl_command;

    if (data)
    {
        if (req->stop != NULL)
            fsl_data.enableAutoCommand12 = true;
        else
            fsl_data.enableAutoCommand12 = false;

        fsl_data.enableAutoCommand23 = false;

        fsl_data.enableIgnoreError = false;
        fsl_data.blockSize = data->blksize;
        fsl_data.blockCount = data->blks;

        LOG_D(" blksize:%d, blks:%d ", fsl_data.blockSize, fsl_data.blockCount);

        if (((rt_uint32_t)data->buf & (CACHE_LINESIZE - 1)) ||                               // align cache(32byte)
            ((rt_uint32_t)data->buf > 0x00000000 && (rt_uint32_t)data->buf < 0x00080000) /*|| // ITCM
            ((rt_uint32_t)data->buf >= 0x20000000 && (rt_uint32_t)data->buf < 0x20080000)*/)  // DTCM
        {
            buf = rt_malloc_align(fsl_data.blockSize * fsl_data.blockCount, CACHE_LINESIZE);
            RT_ASSERT(buf != RT_NULL);

            LOG_D(" malloc buf: %p, data->buf:%p, %d ", buf, data->buf, fsl_data.blockSize * fsl_data.blockCount);
        }

        if ((cmd->cmd_code == WRITE_BLOCK) || (cmd->cmd_code == WRITE_MULTIPLE_BLOCK))
        {
            if (buf)
            {
                LOG_D(" write(data->buf to buf) ");
                rt_memcpy(buf, data->buf, fsl_data.blockSize * fsl_data.blockCount);
                fsl_data.txData = (uint32_t const *)buf;
            }
            else
            {
                fsl_data.txData = (uint32_t const *)data->buf;
            }

            fsl_data.rxData = NULL;
        }
        else
        {
            if (buf)
            {
                fsl_data.rxData = (uint32_t *)buf;
            }
            else
            {
                fsl_data.rxData = (uint32_t *)data->buf;
            }

            fsl_data.txData = NULL;
        }

        fsl_content.data = &fsl_data;
    }
    else
    {
        fsl_content.data = NULL;
    }

    error = USDHC_TransferBlocking(mmcsd->usdhc_host.base, &dmaConfig, &fsl_content);
    if (error != kStatus_Success)
    {
        /* the transfer API reports specific kStatus_USDHC_* codes on failure,
           so treat anything other than kStatus_Success as an error */
        SDMMCHOST_ErrorRecovery(mmcsd->usdhc_host.base);
        LOG_D(" ***USDHC_TransferBlocking error: %d*** --> \n", error);
        cmd->err = -RT_ERROR;
    }

    if (buf)
    {
        if (fsl_data.rxData)
        {
            LOG_D("read copy buf to data->buf ");
            rt_memcpy(data->buf, buf, fsl_data.blockSize * fsl_data.blockCount);
        }

        rt_free_align(buf);
    }

    if ((cmd->flags & RESP_MASK) == RESP_R2)
    {
        cmd->resp[3] = fsl_command.response[0];
        cmd->resp[2] = fsl_command.response[1];
        cmd->resp[1] = fsl_command.response[2];
        cmd->resp[0] = fsl_command.response[3];
        LOG_D(" resp 0x%08X 0x%08X 0x%08X 0x%08X\n",
              cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
    }
    else
    {
        cmd->resp[0] = fsl_command.response[0];
        LOG_D(" resp 0x%08X\n", cmd->resp[0]);
    }

    mmcsd_req_complete(host);

    rt_mutex_release(mmcsd_mutex);
    return;
}

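/*!
 * @brief Applies the bus configuration (clock frequency and bus width)
 *        requested by the MMC/SD core layer.
 */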
static void _mmc_set_iocfg(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *io_cfg)
{
    struct imxrt_mmcsd *mmcsd;
    unsigned int usdhc_clk;
    unsigned int bus_width;
    uint32_t src_clk;

    RT_ASSERT(host != RT_NULL);
    RT_ASSERT(host->private_data != RT_NULL);
    RT_ASSERT(io_cfg != RT_NULL);

    mmcsd = (struct imxrt_mmcsd *)host->private_data;
    usdhc_clk = io_cfg->clock;
    bus_width = io_cfg->bus_width;

    if (usdhc_clk > IMXRT_MAX_FREQ)
        usdhc_clk = IMXRT_MAX_FREQ;
    src_clk = (CLOCK_GetSysPfdFreq(kCLOCK_Pfd2) / (CLOCK_GetDiv(mmcsd->usdhc_div) + 1U));

    LOG_D("\tsrc_clk: %d, usdhc_clk: %d, bus_width: %d\n", src_clk, usdhc_clk, bus_width);

    if (usdhc_clk)
    {
        USDHC_SetSdClock(mmcsd->usdhc_host.base, src_clk, usdhc_clk);
        /* Change bus width */
        if (bus_width == MMCSD_BUS_WIDTH_8)
            USDHC_SetDataBusWidth(mmcsd->usdhc_host.base, kUSDHC_DataBusWidth8Bit);
        else if (bus_width == MMCSD_BUS_WIDTH_4)
            USDHC_SetDataBusWidth(mmcsd->usdhc_host.base, kUSDHC_DataBusWidth4Bit);
        else if (bus_width == MMCSD_BUS_WIDTH_1)
            USDHC_SetDataBusWidth(mmcsd->usdhc_host.base, kUSDHC_DataBusWidth1Bit);
        else
            RT_ASSERT(RT_NULL);
    }
}

static const struct rt_mmcsd_host_ops ops =
{
    _mmc_request,
    _mmc_set_iocfg,
    RT_NULL, /* _mmc_get_card_status */
    RT_NULL, /* _mmc_enable_sdio_irq */
};

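/*!
 * @brief Registers the enabled uSDHC hosts ("sd" on uSDHC1, "emmc" on uSDHC2)
 *        with the MMC/SD core layer.
 *
 * Called automatically through INIT_DEVICE_EXPORT().
 *
 * @return 0 on success, a negative error code otherwise.
 */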
rt_int32_t imxrt_mci_init(void)
{
    /* Create the request mutex before any host is registered: the mmcsd
     * detect thread may call _mmc_request as soon as mmcsd_change() is
     * signalled. */
    mmcsd_mutex = rt_mutex_create("mmutex", RT_IPC_FLAG_FIFO);
    if (mmcsd_mutex == RT_NULL)
    {
        LOG_E("create mmcsd mutex failed.\n");
        return -RT_ERROR;
    }

#ifdef RT_USING_SDIO1
    struct imxrt_mmcsd *mmcsd1;

    host1 = mmcsd_alloc_host();
    if (!host1)
    {
        return -RT_ERROR;
    }

    mmcsd1 = rt_malloc(sizeof(struct imxrt_mmcsd));
    if (!mmcsd1)
    {
        LOG_E("alloc mci failed\n");
        goto err;
    }

    rt_memset(mmcsd1, 0, sizeof(struct imxrt_mmcsd));
    mmcsd1->usdhc_host.base = (USDHC_Type *)rt_ioremap((void *)USDHC1_BASE, 0x1000);
    mmcsd1->usdhc_div = kCLOCK_Usdhc1Div;
    mmcsd1->usdhc_adma2_table = g_usdhcAdma2Table;

    strncpy(host1->name, "sd", sizeof(host1->name) - 1);
    host1->ops = &ops;
    host1->freq_min = 375000;
    host1->freq_max = 25000000;
    host1->valid_ocr = VDD_32_33 | VDD_33_34;
    host1->flags = MMCSD_BUSWIDTH_4 | MMCSD_MUTBLKWRITE | \
                   MMCSD_SUP_HIGHSPEED | MMCSD_SUP_SDIO_IRQ;
    host1->max_seg_size = 65535;
    host1->max_dma_segs = 2;
    host1->max_blk_size = 512;
    host1->max_blk_count = 4096;

    mmcsd1->host = host1;
    _mmcsd_clk_init(mmcsd1);
    _mmcsd_gpio_init(mmcsd1);
    _mmcsd_host_init(mmcsd1);

    host1->private_data = mmcsd1;

    mmcsd_change(host1);
#endif

#ifdef RT_USING_SDIO2
    struct imxrt_mmcsd *mmcsd2;

    host2 = mmcsd_alloc_host();
    if (!host2)
    {
        return -RT_ERROR;
    }

    mmcsd2 = rt_malloc(sizeof(struct imxrt_mmcsd));
    if (!mmcsd2)
    {
        LOG_E("alloc mci failed\n");
        goto err;
    }

    rt_memset(mmcsd2, 0, sizeof(struct imxrt_mmcsd));
    mmcsd2->usdhc_host.base = (USDHC_Type *)rt_ioremap((void *)USDHC2_BASE, 0x1000);
    mmcsd2->usdhc_div = kCLOCK_Usdhc1Div;
    mmcsd2->usdhc_adma2_table = g_usdhcAdma2Table;

    strncpy(host2->name, "emmc", sizeof(host2->name) - 1);
    host2->ops = &ops;
    host2->freq_min = 375000;
    host2->freq_max = 52000000;
    host2->valid_ocr = VDD_35_36;
    host2->flags = MMCSD_BUSWIDTH_4 | MMCSD_MUTBLKWRITE | \
                   MMCSD_SUP_HIGHSPEED | MMCSD_SUP_SDIO_IRQ;
    host2->max_seg_size = 65535;
    host2->max_dma_segs = 2;
    host2->max_blk_size = 512;
    host2->max_blk_count = 4096;

    mmcsd2->host = host2;
    _mmcsd_clk_init(mmcsd2);
    _mmcsd_gpio_init(mmcsd2);
    _mmcsd_host_init(mmcsd2);

    host2->private_data = mmcsd2;
    mmcsd_change(host2);
#endif
    return 0;

err:
#ifdef RT_USING_SDIO1
    mmcsd_free_host(host1);
#endif
#ifdef RT_USING_SDIO2
    mmcsd_free_host(host2);
#endif
    return -RT_ENOMEM;
}

INIT_DEVICE_EXPORT(imxrt_mci_init);

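/*!
 * @brief Notifies the MMC/SD core layer that the card on host1 may have
 *        changed, triggering re-detection.
 */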
void host_change(void)
{
    mmcsd_change(host1);
}