/*
 * Copyright (c) 2006-2024 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024-08-16     zhujiale     first version
 */
#include <rtthread.h>
#include "sdhci.h"
#include <rtdbg.h>
#include <mmu.h>
#include <drivers/core/dm.h>

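/*
 * Forward an RT-Thread MMC/SD request to the SDHCI layer, translating the
 * RT-Thread response flags (RESP_*) into the matching MMC_RSP_* flags.
 */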
static void plat_request(struct rt_mmcsd_host *host, struct rt_mmcsd_req *req)
{
    struct mmc_host *mmc = (struct mmc_host *)host;
    rt_uint32_t flags = req->cmd->flags;

    switch (flags & RESP_MASK)
    {
    case RESP_NONE:
        flags |= MMC_RSP_NONE;
        break;
    case RESP_R1:
        flags |= MMC_RSP_R1;
        break;
    case RESP_R1B:
        flags |= MMC_RSP_R1B;
        break;
    case RESP_R2:
        flags |= MMC_RSP_R2;
        break;
    case RESP_R3:
        flags |= MMC_RSP_R3;
        break;
    case RESP_R4:
        flags |= MMC_RSP_R4;
        break;
    case RESP_R5:
        flags |= MMC_RSP_R5;
        break;
    case RESP_R6:
        flags |= MMC_RSP_R6;
        break;
    case RESP_R7:
        flags |= MMC_RSP_R7;
        break;
    }
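    /*
     * Buffers whose physical address lies above the 32-bit range are staged
     * through a bounce buffer before being handed to the controller.
     */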
    if (req->data)
    {
        rt_size_t size = req->data->blks * req->data->blksize;

        if ((rt_uint64_t)rt_kmem_v2p(req->data->buf) > 0xffffffff)
        {
            /* Allocate the bounce buffer in one shot, large enough for the whole transfer */
            void *dma_buffer = rt_malloc(size > ARCH_PAGE_SIZE ? size : ARCH_PAGE_SIZE);
            void *req_buf = RT_NULL;

            if (!dma_buffer)
            {
                /* Fail the request and signal completion so the core does not block */
                LOG_E("no memory for DMA bounce buffer");
                req->cmd->err = -RT_ENOMEM;
                mmcsd_req_complete(host);
                return;
            }

            if (req->data->flags & DATA_DIR_WRITE)
            {
                rt_memcpy(dma_buffer, req->data->buf, size);
                req->data->buf = dma_buffer;
            }
            else if (req->data->flags & DATA_DIR_READ)
            {
                req_buf = req->data->buf;
                req->data->buf = dma_buffer;
            }
            req->cmd->flags |= flags;
            mmc->ops->request(mmc, req);

            /* Wait for mmc_request_done() before touching the bounce buffer */
            rt_sem_take(&host->sem_ack, RT_WAITING_FOREVER);

            if (req->data->flags & DATA_DIR_READ)
            {
                rt_memcpy(req_buf, dma_buffer, size);
                req->data->buf = req_buf;
            }

            rt_free(dma_buffer);
            rt_sem_release(&host->sem_ack);
        }
        else
        {
            req->cmd->flags |= flags;
            mmc->ops->request(mmc, req);
        }
    }
    else
    {
        req->cmd->flags |= flags;
        mmc->ops->request(mmc, req);
    }
}

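/* Apply the I/O settings (clock, bus width, power mode, vdd, timing) requested by the MMC/SD core */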
static void plat_set_ioconfig(struct rt_mmcsd_host *host, struct rt_mmcsd_io_cfg *iocfg)
{
    struct mmc_host *mmc = (struct mmc_host *)host;

    LOG_D("clock:%d,width:%d,power:%d,vdd:%d,timing:%d\n",
          iocfg->clock, iocfg->bus_width,
          iocfg->power_mode, iocfg->vdd, iocfg->timing);

    mmc->ops->set_ios(mmc, iocfg);
}

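/* Report card presence as seen by the SDHCI driver */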
static rt_int32_t plat_get_card_status(struct rt_mmcsd_host *host)
{
    struct mmc_host *mmc = (struct mmc_host *)host;

    return mmc->ops->get_cd(mmc);
}

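/* Run the controller's tuning procedure for the given command opcode */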
static rt_int32_t plat_execute_tuning(struct rt_mmcsd_host *host, rt_int32_t opcode)
{
    struct mmc_host *mmc = (struct mmc_host *)host;

    return mmc->ops->execute_tuning(mmc, opcode);
}

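/* Enable or disable the SDIO card interrupt */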
static void plat_enable_sdio_irq(struct rt_mmcsd_host *host, rt_int32_t en)
{
    struct mmc_host *mmc = (struct mmc_host *)host;

    mmc->ops->enable_sdio_irq(mmc, en);
}

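/* Host operations exposed to the RT-Thread MMC/SD core */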
static const struct rt_mmcsd_host_ops rt_mmcsd_ops = {
    .request         = plat_request,
    .set_iocfg       = plat_set_ioconfig,
    .get_card_status = plat_get_card_status,
    .enable_sdio_irq = plat_enable_sdio_irq,
    .execute_tuning  = plat_execute_tuning,
};

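/* Completion callback invoked by the SDHCI driver when a request finishes; notifies the RT-Thread MMC/SD core */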
void mmc_request_done(struct mmc_host *host, struct rt_mmcsd_req *mrq)
{
    mmcsd_req_complete(&host->rthost);
}

/* Register the host with the RT-Thread MMC/SD stack once SDHCI setup is complete */
int mmc_add_host(struct mmc_host *mmc)
{
    mmc->rthost.ops = &rt_mmcsd_ops;
    mmc->rthost.flags = mmc->caps;
    mmc->rthost.freq_max = mmc->f_max;
    mmc->rthost.freq_min = 400000;
    mmc->rthost.max_dma_segs = mmc->max_segs;
    mmc->rthost.max_seg_size = mmc->max_seg_size;
    mmc->rthost.max_blk_size = mmc->max_blk_size;
    mmc->rthost.max_blk_count = mmc->max_blk_count;
    mmc->rthost.valid_ocr = VDD_33_34 | VDD_32_33 | VDD_31_32 | VDD_30_31 | VDD_165_195 | VDD_20_21;

    mmcsd_change(&mmc->rthost);
    return 0;
}

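/* Allocate a struct mmc_host followed by 'extra' bytes of driver private data */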
struct mmc_host *mmc_alloc_host(int extra, struct rt_device *dev)
{
    struct mmc_host *mmc;

    mmc = rt_malloc(sizeof(*mmc) + extra);
    if (mmc)
    {
        rt_memset(mmc, 0, sizeof(*mmc) + extra);
        mmc->parent = dev;
        mmcsd_host_init(&mmc->rthost);
    }

    return mmc;
}

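/* Release a host previously allocated by mmc_alloc_host() */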
void mmc_remove_host(struct mmc_host *host)
{
    rt_free(host);
}

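/* The helpers below are currently stubs in this port */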
int mmc_abort_tuning(struct mmc_host *host, rt_uint32_t opcode)
{
    return 0;
}

int mmc_gpio_get_cd(struct mmc_host *host)
{
    return -ENOSYS;
}

void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
}

int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct rt_mmcsd_io_cfg *ios)
{
    return 0;
}

rt_bool_t mmc_can_gpio_ro(struct mmc_host *host)
{
    return RT_FALSE;
}

int mmc_gpio_get_ro(struct mmc_host *host)
{
    return 0;
}

int mmc_send_abort_tuning(struct mmc_host *host, rt_uint32_t opcode)
{
    return 0;
}

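/*
 * Parse common MMC properties from the device tree node of the host's
 * parent device and translate them into host capability flags.
 */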
int mmc_of_parse(struct mmc_host *host)
{
    struct rt_device *dev = host->parent;
    rt_uint32_t bus_width;

    if (!dev || !dev->ofw_node)
    {
        return 0;
    }

    /* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
    if (rt_dm_dev_prop_read_u32(dev, "bus-width", &bus_width) < 0)
    {
        bus_width = 1;
    }

    switch (bus_width)
    {
    case 8:
        host->caps |= MMC_CAP_8_BIT_DATA;
        break; /* Hosts capable of 8-bit can also do 4 bits */
    case 4:
        host->caps |= MMC_CAP_4_BIT_DATA;
        break;
    case 1:
        break;
    default:
        return -EINVAL;
    }

    /* f_max is obtained from the optional "max-frequency" property */
    rt_dm_dev_prop_read_u32(dev, "max-frequency", &host->f_max);

    if (rt_dm_dev_prop_read_bool(dev, "cap-mmc-highspeed"))
    {
        host->caps |= MMC_CAP_MMC_HIGHSPEED;
    }

    if (rt_dm_dev_prop_read_bool(dev, "mmc-hs200-1_8v"))
    {
        host->caps |= MMC_CAP2_HS200_1_8V_SDR;
    }

    if (rt_dm_dev_prop_read_bool(dev, "non-removable"))
    {
        host->caps |= MMC_CAP_NONREMOVABLE;
    }

    if (rt_dm_dev_prop_read_bool(dev, "no-sdio"))
    {
        host->caps2 |= MMC_CAP2_NO_SDIO;
    }

    if (rt_dm_dev_prop_read_bool(dev, "no-sd"))
    {
        host->caps2 |= MMC_CAP2_NO_SD;
    }

    if (rt_dm_dev_prop_read_bool(dev, "mmc-ddr-3_3v"))
    {
        host->caps |= MMC_CAP_3_3V_DDR;
    }

    if (rt_dm_dev_prop_read_bool(dev, "mmc-ddr-1_8v"))
    {
        host->caps |= MMC_CAP_1_8V_DDR;
    }

    if (rt_dm_dev_prop_read_bool(dev, "mmc-ddr-1_2v"))
    {
        host->caps |= MMC_CAP_1_2V_DDR;
    }

    return 0;
}

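/* No dedicated cleanup: the host memory is freed in mmc_remove_host() */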
void mmc_free_host(struct mmc_host *host)
{
}

rt_bool_t mmc_can_gpio_cd(struct mmc_host *host)
{
    return RT_FALSE;
}