// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Standard Includes
6 #include <endian.h>
7 #include <inttypes.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11
12 #include <ddk/protocol/sdmmc.h>
13 #include <ddk/debug.h>
14 #include <hw/sdio.h>
15
16 #include <pretty/hexdump.h>
17
18 #include "sdmmc.h"
19
// Place the device's relative card address (RCA) in the upper 16 bits of a
// command argument, as SD/MMC addressed commands require.
// Cast before shifting: a uint16_t promotes to (signed) int, so shifting an
// rca >= 0x8000 left by 16 would shift into the sign bit — undefined behavior.
#define RCA_ARG(dev) (((uint32_t)(dev)->rca) << 16)
21
sdmmc_request_helper(sdmmc_device_t * dev,sdmmc_req_t * req,uint8_t retries,uint32_t wait_time)22 zx_status_t sdmmc_request_helper(sdmmc_device_t* dev, sdmmc_req_t* req,
23 uint8_t retries, uint32_t wait_time) {
24 zx_status_t st;
25 while (((st = sdmmc_request(&dev->host, req)) != ZX_OK) && retries > 0) {
26 retries--;
27 zx_nanosleep(zx_deadline_after(ZX_MSEC(wait_time)));
28 }
29 return st;
30 }
31
32 // SD/MMC shared ops
33
sdmmc_go_idle(sdmmc_device_t * dev)34 zx_status_t sdmmc_go_idle(sdmmc_device_t* dev) {
35 sdmmc_req_t req = {
36 .cmd_idx = SDMMC_GO_IDLE_STATE,
37 .arg = 0,
38 .cmd_flags = SDMMC_GO_IDLE_STATE_FLAGS,
39 .use_dma = sdmmc_use_dma(dev),
40 };
41 return sdmmc_request(&dev->host, &req);
42 }
43
sdmmc_send_status(sdmmc_device_t * dev,uint32_t * response)44 zx_status_t sdmmc_send_status(sdmmc_device_t* dev, uint32_t* response) {
45 sdmmc_req_t req = {
46 .cmd_idx = SDMMC_SEND_STATUS,
47 .arg = RCA_ARG(dev),
48 .cmd_flags = SDMMC_SEND_STATUS_FLAGS,
49 .use_dma = sdmmc_use_dma(dev),
50 };
51 zx_status_t st = sdmmc_request(&dev->host, &req);
52 if (st == ZX_OK) {
53 *response = req.response[0];
54 }
55 return st;
56 }
57
sdmmc_stop_transmission(sdmmc_device_t * dev)58 zx_status_t sdmmc_stop_transmission(sdmmc_device_t* dev) {
59 sdmmc_req_t req = {
60 .cmd_idx = SDMMC_STOP_TRANSMISSION,
61 .arg = 0,
62 .cmd_flags = SDMMC_STOP_TRANSMISSION_FLAGS,
63 .use_dma = sdmmc_use_dma(dev),
64 };
65 return sdmmc_request(&dev->host, &req);
66 }
67
68 // SD ops
69
sd_send_if_cond(sdmmc_device_t * dev)70 zx_status_t sd_send_if_cond(sdmmc_device_t* dev) {
71 // TODO what is this parameter?
72 uint32_t arg = 0x1aa;
73 sdmmc_req_t req = {
74 .cmd_idx = SD_SEND_IF_COND,
75 .arg = arg,
76 .cmd_flags = SD_SEND_IF_COND_FLAGS,
77 .use_dma = sdmmc_use_dma(dev),
78 };
79 zx_status_t st = sdmmc_request(&dev->host, &req);
80 if (st != ZX_OK) {
81 zxlogf(TRACE, "sd: SD_SEND_IF_COND failed, retcode = %d\n", st);
82 return st;
83 }
84 if ((req.response[0] & 0xfff) != arg) {
85 // The card should have replied with the pattern that we sent.
86 zxlogf(TRACE, "sd: SDMMC_SEND_IF_COND got bad reply = %"PRIu32"\n",
87 req.response[0]);
88 return ZX_ERR_BAD_STATE;
89 } else {
90 return ZX_OK;
91 }
92 }
93
sd_send_relative_addr(sdmmc_device_t * dev,uint16_t * rca)94 zx_status_t sd_send_relative_addr(sdmmc_device_t* dev, uint16_t *rca) {
95 sdmmc_req_t req = {
96 .cmd_idx = SD_SEND_RELATIVE_ADDR,
97 .arg = 0,
98 .cmd_flags = SD_SEND_RELATIVE_ADDR_FLAGS,
99 .use_dma = sdmmc_use_dma(dev),
100 };
101
102 zx_status_t st = sdmmc_request(&dev->host, &req);
103 if (st != ZX_OK) {
104 zxlogf(TRACE, "sd: SD_SEND_RELATIVE_ADDR failed, retcode = %d\n", st);
105 return st;
106 }
107
108 if (rca != NULL) {
109 *rca = (req.response[0]) >> 16;
110 }
111 return st;
112 }
113
sd_switch_uhs_voltage(sdmmc_device_t * dev,uint32_t ocr)114 zx_status_t sd_switch_uhs_voltage(sdmmc_device_t *dev, uint32_t ocr) {
115 zx_status_t st = ZX_OK;
116 sdmmc_req_t req = {
117 .cmd_idx = SD_VOLTAGE_SWITCH,
118 .arg = ocr,
119 .cmd_flags = SD_VOLTAGE_SWITCH_FLAGS,
120 .use_dma = sdmmc_use_dma(dev),
121 };
122
123 if (dev->signal_voltage == SDMMC_VOLTAGE_V180) {
124 return ZX_OK;
125 }
126
127 st = sdmmc_request(&dev->host, &req);
128 if (st != ZX_OK) {
129 zxlogf(TRACE, "sd: SD_VOLTAGE_SWITCH failed, retcode = %d\n", st);
130 return st;
131 }
132 zx_nanosleep(zx_deadline_after(ZX_MSEC(20)));
133 //TODO: clock gating while switching voltage
134 st = sdmmc_set_signal_voltage(&dev->host, SDMMC_VOLTAGE_V180);
135 if (st != ZX_OK) {
136 zxlogf(TRACE, "sd: SD_VOLTAGE_SWITCH failed, retcode = %d\n", st);
137 return st;
138 }
139 return ZX_OK;
140 }
141
142 // SDIO specific ops
143
sdio_send_op_cond(sdmmc_device_t * dev,uint32_t ocr,uint32_t * rocr)144 zx_status_t sdio_send_op_cond(sdmmc_device_t* dev, uint32_t ocr, uint32_t* rocr) {
145 zx_status_t st = ZX_OK;
146 sdmmc_req_t req = {
147 .cmd_idx = SDIO_SEND_OP_COND,
148 .arg = ocr,
149 .cmd_flags = SDIO_SEND_OP_COND_FLAGS,
150 .use_dma = sdmmc_use_dma(dev),
151 };
152 for (size_t i = 0; i < 100; i++) {
153 if ((st = sdmmc_request_helper(dev, &req, 3, 10)) != ZX_OK) {
154 // fail on request error
155 break;
156 }
157 // No need to wait for busy clear if probing
158 if ((ocr == 0) || (req.response[0] & MMC_OCR_BUSY)) {
159 *rocr = req.response[0];
160 break;
161 }
162 zx_nanosleep(zx_deadline_after(ZX_MSEC(10)));
163 }
164 return st;
165 }
166
sdio_io_rw_direct(sdmmc_device_t * dev,bool write,uint32_t fn_idx,uint32_t reg_addr,uint8_t write_byte,uint8_t * read_byte)167 zx_status_t sdio_io_rw_direct(sdmmc_device_t* dev, bool write, uint32_t fn_idx,
168 uint32_t reg_addr, uint8_t write_byte, uint8_t *read_byte) {
169 uint32_t cmd_arg = 0;
170 if (write) {
171 cmd_arg |= SDIO_IO_RW_DIRECT_RW_FLAG;
172 if (read_byte) {
173 cmd_arg |= SDIO_IO_RW_DIRECT_RAW_FLAG;
174 }
175 }
176 update_bits(&cmd_arg, SDIO_IO_RW_DIRECT_FN_IDX_MASK, SDIO_IO_RW_DIRECT_FN_IDX_LOC,
177 fn_idx);
178 update_bits(&cmd_arg, SDIO_IO_RW_DIRECT_REG_ADDR_MASK, SDIO_IO_RW_DIRECT_REG_ADDR_LOC,
179 reg_addr);
180 update_bits(&cmd_arg, SDIO_IO_RW_DIRECT_WRITE_BYTE_MASK, SDIO_IO_RW_DIRECT_WRITE_BYTE_LOC,
181 write_byte);
182 sdmmc_req_t req = {
183 .cmd_idx = SDIO_IO_RW_DIRECT,
184 .arg = cmd_arg,
185 .cmd_flags = SDIO_IO_RW_DIRECT_FLAGS,
186 .use_dma = sdmmc_use_dma(dev),
187 };
188 zx_status_t st = sdmmc_request(&dev->host, &req);
189 if (st != ZX_OK) {
190 zxlogf(ERROR, "sdio: SDIO_IO_RW_DIRECT failed, retcode = %d\n", st);
191 return st;
192 }
193 if (read_byte) {
194 *read_byte = get_bits(req.response[0], SDIO_IO_RW_DIRECT_RESP_READ_BYTE_MASK,
195 SDIO_IO_RW_DIRECT_RESP_READ_BYTE_LOC);
196 }
197 return ZX_OK;
198 }
199
// IO_RW_EXTENDED (CMD53): multi-byte / multi-block data transfer on SDIO
// function |fn_idx| starting at register |reg_addr|.
//
//   write     - transfer direction (true = host to card).
//   incr      - OP code bit: card auto-increments the register address.
//   blk_count - number of blocks; > 1 requires the card's multi-block cap,
//               otherwise ZX_ERR_NOT_SUPPORTED is returned.
//   blk_size  - bytes per block (a byte count of 512 is encoded as 0,
//               per SDIO Spec Table 5-3).
//   use_dma   - data buffer is |dma_vmo| + |buf_offset| if true, otherwise
//               |buf| + |buf_offset|.
zx_status_t sdio_io_rw_extended(sdmmc_device_t *dev, bool write, uint32_t fn_idx,
                                uint32_t reg_addr, bool incr, uint32_t blk_count,
                                uint32_t blk_size, bool use_dma, uint8_t *buf,
                                zx_handle_t dma_vmo, uint64_t buf_offset) {

    uint32_t cmd_arg = 0;
    if (write) {
        cmd_arg |= SDIO_IO_RW_EXTD_RW_FLAG;
    }
    update_bits(&cmd_arg, SDIO_IO_RW_EXTD_FN_IDX_MASK, SDIO_IO_RW_EXTD_FN_IDX_LOC,
                fn_idx);
    update_bits(&cmd_arg, SDIO_IO_RW_EXTD_REG_ADDR_MASK, SDIO_IO_RW_EXTD_REG_ADDR_LOC,
                reg_addr);
    if (incr) {
        cmd_arg |= SDIO_IO_RW_EXTD_OP_CODE_INCR;
    }

    if (blk_count > 1) {
        if (dev->sdio_dev.hw_info.caps & SDIO_CARD_MULTI_BLOCK) {
            cmd_arg |= SDIO_IO_RW_EXTD_BLOCK_MODE;
            update_bits(&cmd_arg, SDIO_IO_RW_EXTD_BYTE_BLK_COUNT_MASK,
                        SDIO_IO_RW_EXTD_BYTE_BLK_COUNT_LOC, blk_count);
        } else {
            //Convert the request into byte mode?
            return ZX_ERR_NOT_SUPPORTED;
        }
    } else {
        //SDIO Spec Table 5-3
        uint32_t arg_blk_size = (blk_size == 512) ? 0 : blk_size;
        update_bits(&cmd_arg, SDIO_IO_RW_EXTD_BYTE_BLK_COUNT_MASK,
                    SDIO_IO_RW_EXTD_BYTE_BLK_COUNT_LOC, arg_blk_size);
    }
    sdmmc_req_t req = {
        .cmd_idx = SDIO_IO_RW_DIRECT_EXTENDED,
        // Reads additionally carry SDMMC_CMD_READ so the host runs the data
        // stage in the card-to-host direction.
        .cmd_flags = write ? (SDIO_IO_RW_DIRECT_EXTENDED_FLAGS) :
                           (SDIO_IO_RW_DIRECT_EXTENDED_FLAGS | SDMMC_CMD_READ),
        .arg = cmd_arg,
        .blockcount = blk_count,
        .blocksize = blk_size,
    };

    if (use_dma) {
        req.virt_buffer = NULL;
        req.dma_vmo = dma_vmo;
        req.buf_offset = buf_offset;
    } else {
        req.virt_buffer = buf + buf_offset;
        // NOTE(review): this is the size of a single block, but a multi-block
        // PIO transfer (blk_count > 1, use_dma false) moves
        // blk_count * blk_size bytes — confirm whether that path is reachable
        // or virt_size should account for blk_count.
        req.virt_size = blk_size;
    }
    req.use_dma = use_dma;

    zx_status_t st = sdmmc_request(&dev->host, &req);
    if (st != ZX_OK) {
        zxlogf(ERROR, "sdio: SDIO_IO_RW_DIRECT_EXTENDED failed, retcode = %d\n", st);
        return st;
    }
    return ZX_OK;
}
258
259 // MMC ops
260
mmc_send_op_cond(sdmmc_device_t * dev,uint32_t ocr,uint32_t * rocr)261 zx_status_t mmc_send_op_cond(sdmmc_device_t* dev, uint32_t ocr, uint32_t* rocr) {
262 // Request sector addressing if not probing
263 uint32_t arg = (ocr == 0) ? ocr : ((1 << 30) | ocr);
264 sdmmc_req_t req = {
265 .cmd_idx = MMC_SEND_OP_COND,
266 .arg = arg,
267 .cmd_flags = MMC_SEND_OP_COND_FLAGS,
268 .use_dma = sdmmc_use_dma(dev),
269 };
270 zx_status_t st;
271 for (int i = 100; i; i--) {
272 if ((st = sdmmc_request(&dev->host, &req)) != ZX_OK) {
273 // fail on request error
274 break;
275 }
276 // No need to wait for busy clear if probing
277 if ((arg == 0) || (req.response[0] & MMC_OCR_BUSY)) {
278 *rocr = req.response[0];
279 break;
280 }
281 zx_nanosleep(zx_deadline_after(ZX_MSEC(10)));
282 }
283 return st;
284 }
285
mmc_all_send_cid(sdmmc_device_t * dev,uint32_t cid[4])286 zx_status_t mmc_all_send_cid(sdmmc_device_t* dev, uint32_t cid[4]) {
287 sdmmc_req_t req = {
288 .cmd_idx = SDMMC_ALL_SEND_CID,
289 .arg = 0,
290 .cmd_flags = SDMMC_ALL_SEND_CID_FLAGS,
291 .use_dma = sdmmc_use_dma(dev),
292 };
293 zx_status_t st = sdmmc_request(&dev->host, &req);
294 if (st == ZX_OK) {
295 cid[0] = req.response[0];
296 cid[1] = req.response[1];
297 cid[2] = req.response[2];
298 cid[3] = req.response[3];
299 }
300 return st;
301 }
302
mmc_set_relative_addr(sdmmc_device_t * dev,uint16_t rca)303 zx_status_t mmc_set_relative_addr(sdmmc_device_t* dev, uint16_t rca) {
304 sdmmc_req_t req = {
305 .cmd_idx = MMC_SET_RELATIVE_ADDR,
306 .arg = (rca << 16),
307 .cmd_flags = MMC_SET_RELATIVE_ADDR_FLAGS,
308 .use_dma = sdmmc_use_dma(dev),
309 };
310 return sdmmc_request(&dev->host, &req);
311 }
312
mmc_send_csd(sdmmc_device_t * dev,uint32_t csd[4])313 zx_status_t mmc_send_csd(sdmmc_device_t* dev, uint32_t csd[4]) {
314 sdmmc_req_t req = {
315 .cmd_idx = SDMMC_SEND_CSD,
316 .arg = RCA_ARG(dev),
317 .cmd_flags = SDMMC_SEND_CSD_FLAGS,
318 .use_dma = sdmmc_use_dma(dev),
319 };
320 zx_status_t st = sdmmc_request(&dev->host, &req);
321 if (st == ZX_OK) {
322 csd[0] = req.response[0];
323 csd[1] = req.response[1];
324 csd[2] = req.response[2];
325 csd[3] = req.response[3];
326 }
327 return st;
328 }
329
mmc_send_ext_csd(sdmmc_device_t * dev,uint8_t ext_csd[512])330 zx_status_t mmc_send_ext_csd(sdmmc_device_t* dev, uint8_t ext_csd[512]) {
331 // EXT_CSD is send in a data stage
332 sdmmc_req_t req = {
333 .cmd_idx = MMC_SEND_EXT_CSD,
334 .arg = 0,
335 .blockcount = 1,
336 .blocksize = 512,
337 .use_dma = false,
338 .virt_buffer = ext_csd,
339 .virt_size = 512,
340 .cmd_flags = MMC_SEND_EXT_CSD_FLAGS,
341 };
342 zx_status_t st = sdmmc_request(&dev->host, &req);
343 if ((st == ZX_OK) && (driver_get_log_flags() & DDK_LOG_SPEW)) {
344 zxlogf(SPEW, "EXT_CSD:\n");
345 hexdump8_ex(ext_csd, 512, 0);
346 }
347 return st;
348 }
349
mmc_select_card(sdmmc_device_t * dev)350 zx_status_t mmc_select_card(sdmmc_device_t* dev) {
351 sdmmc_req_t req = {
352 .cmd_idx = MMC_SELECT_CARD,
353 .arg = RCA_ARG(dev),
354 .cmd_flags = MMC_SELECT_CARD_FLAGS,
355 .use_dma = sdmmc_use_dma(dev),
356 };
357 return sdmmc_request(&dev->host, &req);
358 }
359
mmc_switch(sdmmc_device_t * dev,uint8_t index,uint8_t value)360 zx_status_t mmc_switch(sdmmc_device_t* dev, uint8_t index, uint8_t value) {
361 // Send the MMC_SWITCH command
362 uint32_t arg = (3 << 24) | // write byte
363 (index << 16) | (value << 8);
364 sdmmc_req_t req = {
365 .cmd_idx = MMC_SWITCH,
366 .arg = arg,
367 .cmd_flags = MMC_SWITCH_FLAGS,
368 .use_dma = sdmmc_use_dma(dev),
369 };
370 return sdmmc_request(&dev->host, &req);
371 }
372