1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright 2008, Freescale Semiconductor, Inc
4 * Copyright 2020 NXP
5 * Andy Fleming
6 *
7 * Based vaguely on the Linux code
8 */
9
10 #include <config.h>
11 #include <common.h>
12 #include <blk.h>
13 #include <command.h>
14 #include <dm.h>
15 #include <log.h>
16 #include <dm/device-internal.h>
17 #include <errno.h>
18 #include <mmc.h>
19 #include <part.h>
20 #include <linux/bitops.h>
21 #include <linux/delay.h>
22 #include <power/regulator.h>
23 #include <malloc.h>
24 #include <memalign.h>
25 #include <linux/list.h>
26 #include <div64.h>
27 #include "mmc_private.h"
28
29 #define DEFAULT_CMD6_TIMEOUT_MS 500
30
31 static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
32
33 #if !CONFIG_IS_ENABLED(DM_MMC)
34
mmc_wait_dat0(struct mmc * mmc,int state,int timeout_us)35 static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
36 {
37 if (mmc->cfg->ops->wait_dat0)
38 return mmc->cfg->ops->wait_dat0(mmc, state, timeout_us);
39
40 return -ENOSYS;
41 }
42
board_mmc_getwp(struct mmc * mmc)43 __weak int board_mmc_getwp(struct mmc *mmc)
44 {
45 return -1;
46 }
47
mmc_getwp(struct mmc * mmc)48 int mmc_getwp(struct mmc *mmc)
49 {
50 int wp;
51
52 wp = board_mmc_getwp(mmc);
53
54 if (wp < 0) {
55 if (mmc->cfg->ops->getwp)
56 wp = mmc->cfg->ops->getwp(mmc);
57 else
58 wp = 0;
59 }
60
61 return wp;
62 }
63
board_mmc_getcd(struct mmc * mmc)64 __weak int board_mmc_getcd(struct mmc *mmc)
65 {
66 return -1;
67 }
68 #endif
69
70 #ifdef CONFIG_MMC_TRACE
/* Trace hook: log command index and argument before it goes on the bus. */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
}
76
mmmc_trace_after_send(struct mmc * mmc,struct mmc_cmd * cmd,int ret)77 void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
78 {
79 int i;
80 u8 *ptr;
81
82 if (ret) {
83 printf("\t\tRET\t\t\t %d\n", ret);
84 } else {
85 switch (cmd->resp_type) {
86 case MMC_RSP_NONE:
87 printf("\t\tMMC_RSP_NONE\n");
88 break;
89 case MMC_RSP_R1:
90 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
91 cmd->response[0]);
92 break;
93 case MMC_RSP_R1b:
94 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
95 cmd->response[0]);
96 break;
97 case MMC_RSP_R2:
98 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
99 cmd->response[0]);
100 printf("\t\t \t\t 0x%08x \n",
101 cmd->response[1]);
102 printf("\t\t \t\t 0x%08x \n",
103 cmd->response[2]);
104 printf("\t\t \t\t 0x%08x \n",
105 cmd->response[3]);
106 printf("\n");
107 printf("\t\t\t\t\tDUMPING DATA\n");
108 for (i = 0; i < 4; i++) {
109 int j;
110 printf("\t\t\t\t\t%03d - ", i*4);
111 ptr = (u8 *)&cmd->response[i];
112 ptr += 3;
113 for (j = 0; j < 4; j++)
114 printf("%02x ", *ptr--);
115 printf("\n");
116 }
117 break;
118 case MMC_RSP_R3:
119 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
120 cmd->response[0]);
121 break;
122 default:
123 printf("\t\tERROR MMC rsp not supported\n");
124 break;
125 }
126 }
127 }
128
mmc_trace_state(struct mmc * mmc,struct mmc_cmd * cmd)129 void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
130 {
131 int status;
132
133 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
134 printf("CURR STATE:%d\n", status);
135 }
136 #endif
137
138 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG) || CONFIG_VAL(LOGLEVEL) >= LOGL_DEBUG
mmc_mode_name(enum bus_mode mode)139 const char *mmc_mode_name(enum bus_mode mode)
140 {
141 static const char *const names[] = {
142 [MMC_LEGACY] = "MMC legacy",
143 [MMC_HS] = "MMC High Speed (26MHz)",
144 [SD_HS] = "SD High Speed (50MHz)",
145 [UHS_SDR12] = "UHS SDR12 (25MHz)",
146 [UHS_SDR25] = "UHS SDR25 (50MHz)",
147 [UHS_SDR50] = "UHS SDR50 (100MHz)",
148 [UHS_SDR104] = "UHS SDR104 (208MHz)",
149 [UHS_DDR50] = "UHS DDR50 (50MHz)",
150 [MMC_HS_52] = "MMC High Speed (52MHz)",
151 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
152 [MMC_HS_200] = "HS200 (200MHz)",
153 [MMC_HS_400] = "HS400 (200MHz)",
154 [MMC_HS_400_ES] = "HS400ES (200MHz)",
155 };
156
157 if (mode >= MMC_MODES_END)
158 return "Unknown mode";
159 else
160 return names[mode];
161 }
162 #endif
163
mmc_mode2freq(struct mmc * mmc,enum bus_mode mode)164 static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
165 {
166 static const int freqs[] = {
167 [MMC_LEGACY] = 25000000,
168 [MMC_HS] = 26000000,
169 [SD_HS] = 50000000,
170 [MMC_HS_52] = 52000000,
171 [MMC_DDR_52] = 52000000,
172 [UHS_SDR12] = 25000000,
173 [UHS_SDR25] = 50000000,
174 [UHS_SDR50] = 100000000,
175 [UHS_DDR50] = 50000000,
176 [UHS_SDR104] = 208000000,
177 [MMC_HS_200] = 200000000,
178 [MMC_HS_400] = 200000000,
179 [MMC_HS_400_ES] = 200000000,
180 };
181
182 if (mode == MMC_LEGACY)
183 return mmc->legacy_speed;
184 else if (mode >= MMC_MODES_END)
185 return 0;
186 else
187 return freqs[mode];
188 }
189
mmc_select_mode(struct mmc * mmc,enum bus_mode mode)190 static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
191 {
192 mmc->selected_mode = mode;
193 mmc->tran_speed = mmc_mode2freq(mmc, mode);
194 mmc->ddr_mode = mmc_is_mode_ddr(mode);
195 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
196 mmc->tran_speed / 1000000);
197 return 0;
198 }
199
200 #if !CONFIG_IS_ENABLED(DM_MMC)
mmc_send_cmd(struct mmc * mmc,struct mmc_cmd * cmd,struct mmc_data * data)201 int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
202 {
203 int ret;
204
205 mmmc_trace_before_send(mmc, cmd);
206 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
207 mmmc_trace_after_send(mmc, cmd, ret);
208
209 return ret;
210 }
211 #endif
212
213 /**
214 * mmc_send_cmd_retry() - send a command to the mmc device, retrying on error
215 *
216 * @dev: device to receive the command
217 * @cmd: command to send
218 * @data: additional data to send/receive
219 * @retries: how many times to retry; mmc_send_cmd is always called at least
220 * once
221 * Return: 0 if ok, -ve on error
222 */
static int mmc_send_cmd_retry(struct mmc *mmc, struct mmc_cmd *cmd,
			      struct mmc_data *data, uint retries)
{
	/* Always issue the command once; then retry up to @retries times */
	int err = mmc_send_cmd(mmc, cmd, data);

	while (err && retries--)
		err = mmc_send_cmd(mmc, cmd, data);

	return err;
}
234
235 /**
236 * mmc_send_cmd_quirks() - send a command to the mmc device, retrying if a
237 * specific quirk is enabled
238 *
239 * @dev: device to receive the command
240 * @cmd: command to send
241 * @data: additional data to send/receive
242 * @quirk: retry only if this quirk is enabled
243 * @retries: how many times to retry; mmc_send_cmd is always called at least
244 * once
245 * Return: 0 if ok, -ve on error
246 */
static int mmc_send_cmd_quirks(struct mmc *mmc, struct mmc_cmd *cmd,
			       struct mmc_data *data, u32 quirk, uint retries)
{
	/* Retry only when quirk handling is built in and this card needs it */
	bool retry = IS_ENABLED(CONFIG_MMC_QUIRKS) && (mmc->quirks & quirk);

	return retry ? mmc_send_cmd_retry(mmc, cmd, data, retries)
		     : mmc_send_cmd(mmc, cmd, data);
}
255
mmc_send_status(struct mmc * mmc,unsigned int * status)256 int mmc_send_status(struct mmc *mmc, unsigned int *status)
257 {
258 struct mmc_cmd cmd;
259 int ret;
260
261 cmd.cmdidx = MMC_CMD_SEND_STATUS;
262 cmd.resp_type = MMC_RSP_R1;
263 if (!mmc_host_is_spi(mmc))
264 cmd.cmdarg = mmc->rca << 16;
265
266 ret = mmc_send_cmd_retry(mmc, &cmd, NULL, 4);
267 mmc_trace_state(mmc, &cmd);
268 if (!ret)
269 *status = cmd.response[0];
270
271 return ret;
272 }
273
/*
 * Wait until the card leaves its busy/programming state.
 *
 * Prefers hardware DAT0 polling; if the host lacks that op (-ENOSYS),
 * falls back to polling CMD13 status once per millisecond.
 *
 * @mmc:        card to poll
 * @timeout_ms: maximum time to wait, in milliseconds
 * Return: 0 when ready, -ECOMM on a status error, -ETIMEDOUT on timeout,
 *         or another -ve error from the status command
 */
int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
{
	unsigned int status;
	int err;

	/* Fast path: let the controller watch DAT0 directly */
	err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
	if (err != -ENOSYS)
		return err;

	/* Fallback: poll the status register once per ms */
	while (1) {
		err = mmc_send_status(mmc, &status);
		if (err)
			return err;

		/* Done once the card is ready and out of programming state */
		if ((status & MMC_STATUS_RDY_FOR_DATA) &&
		    (status & MMC_STATUS_CURR_STATE) !=
		     MMC_STATE_PRG)
			break;

		if (status & MMC_STATUS_MASK) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("Status Error: 0x%08x\n", status);
#endif
			return -ECOMM;
		}

		/* Leaves timeout_ms <= 0 as the timeout marker checked below */
		if (timeout_ms-- <= 0)
			break;

		udelay(1000);
	}

	if (timeout_ms <= 0) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("Timeout waiting card ready\n");
#endif
		return -ETIMEDOUT;
	}

	return 0;
}
315
mmc_set_blocklen(struct mmc * mmc,int len)316 int mmc_set_blocklen(struct mmc *mmc, int len)
317 {
318 struct mmc_cmd cmd;
319
320 if (mmc->ddr_mode)
321 return 0;
322
323 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
324 cmd.resp_type = MMC_RSP_R1;
325 cmd.cmdarg = len;
326
327 return mmc_send_cmd_quirks(mmc, &cmd, NULL,
328 MMC_QUIRK_RETRY_SET_BLOCKLEN, 4);
329 }
330
331 #ifdef MMC_SUPPORTS_TUNING
/* Standard 64-byte tuning block pattern for 4-bit bus tuning (CMD19/CMD21) */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

/* Standard 128-byte tuning block pattern for 8-bit bus tuning */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
361
/*
 * Execute one tuning round: read the tuning block for the current bus
 * width and compare it against the expected pattern.
 *
 * @mmc:       card under tuning
 * @opcode:    tuning command (CMD19 for SD, CMD21 for eMMC)
 * @cmd_error: unused here; kept for API compatibility
 * Return: 0 if the pattern matched, -EINVAL for an unsupported bus width,
 *         -EIO on mismatch, or a -ve transfer error
 */
int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	const u8 *pattern;
	int size;
	int err;

	switch (mmc->bus_width) {
	case 8:
		pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
		break;
	case 4:
		pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
		break;
	default:
		return -EINVAL;
	}

	ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);

	cmd.cmdidx = opcode;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	data.dest = (void *)data_buf;
	data.blocks = 1;
	data.blocksize = size;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err)
		return err;

	return memcmp(data_buf, pattern, size) ? -EIO : 0;
}
399 #endif
400
/*
 * Read @blkcnt blocks starting at @start into @dst.  Multi-block reads
 * are closed with CMD12 (STOP_TRANSMISSION).
 *
 * Return: number of blocks read, or 0 on any failure.
 */
static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
			   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	bool multi = blkcnt > 1;

	cmd.cmdidx = multi ? MMC_CMD_READ_MULTIPLE_BLOCK :
			     MMC_CMD_READ_SINGLE_BLOCK;
	/* High-capacity cards address by block, others by byte */
	cmd.cmdarg = mmc->high_capacity ? start : start * mmc->read_bl_len;
	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd(mmc, &cmd, &data))
		return 0;

	if (multi) {
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("mmc fail to send stop cmd\n");
#endif
			return 0;
		}
	}

	return blkcnt;
}
441
442 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Max blocks per transfer: ask the driver if it has an op, else use config. */
static int mmc_get_b_max(struct mmc *mmc, void *dst, lbaint_t blkcnt)
{
	if (!mmc->cfg->ops->get_b_max)
		return mmc->cfg->b_max;

	return mmc->cfg->ops->get_b_max(mmc, dst, blkcnt);
}
450 #endif
451
452 #if CONFIG_IS_ENABLED(BLK)
/*
 * Block-device read entry point: read @blkcnt blocks at @start into @dst.
 *
 * Validates the range against the device size, selects the requested
 * hardware partition, sets the block length and then reads in chunks no
 * larger than the host's per-transfer maximum.
 *
 * Return: number of blocks read, or 0 on any failure (blk API convention).
 */
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_plat(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;
	uint b_max;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	/* Make sure the requested hardware partition is selected */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		pr_debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	b_max = mmc_get_b_max(mmc, dst, blkcnt);

	/* Split the request into chunks the host can handle */
	do {
		cur = (blocks_todo > b_max) ? b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			pr_debug("%s: Failed to read blocks\n", __func__);
			return 0;
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}
510
mmc_go_idle(struct mmc * mmc)511 static int mmc_go_idle(struct mmc *mmc)
512 {
513 struct mmc_cmd cmd;
514 int err;
515
516 udelay(1000);
517
518 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
519 cmd.cmdarg = 0;
520 cmd.resp_type = MMC_RSP_NONE;
521
522 err = mmc_send_cmd(mmc, &cmd, NULL);
523
524 if (err)
525 return err;
526
527 udelay(2000);
528
529 return 0;
530 }
531
532 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * Perform the CMD11 voltage-switch sequence to move an SD card's
 * signalling to 1.8V (or just set the regulator for 3.3V).
 *
 * @mmc:            card to switch
 * @signal_voltage: MMC_SIGNAL_VOLTAGE_330 or MMC_SIGNAL_VOLTAGE_180
 * Return: 0 on success, -EIO if the card rejects the switch,
 *         -ETIMEDOUT if DAT0 does not follow the expected sequence
 */
static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
{
	struct mmc_cmd cmd;
	int err = 0;

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return mmc_set_signal_voltage(mmc, signal_voltage);

	cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 100 us to be sure
	 */
	err = mmc_wait_dat0(mmc, 0, 100);
	if (err == -ENOSYS)
		udelay(100);
	else if (err)
		return -ETIMEDOUT;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);

	err = mmc_set_signal_voltage(mmc, signal_voltage);
	if (err)
		return err;

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mdelay(10);
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low. Wait for at least 1 ms according to spec
	 */
	err = mmc_wait_dat0(mmc, 1, 1000);
	if (err == -ENOSYS)
		udelay(1000);
	else if (err)
		return -ETIMEDOUT;

	return 0;
}
592 #endif
593
/*
 * SD initialization: repeat ACMD41 until the card reports power-up done,
 * then record OCR, capacity class and (optionally) switch to 1.8V UHS
 * signalling when both host and card agree.
 *
 * @mmc:    card being initialized
 * @uhs_en: request 1.8V signalling (S18R) in ACMD41
 * Return: 0 on success, -EOPNOTSUPP if the card never leaves busy,
 *         or another -ve error
 */
static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		/* ACMD41 must be preceded by CMD55 (APP_CMD) */
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, Some controller
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low voltages SD card is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		if (uhs_en)
			cmd.cmdarg |= OCR_S18R;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		/* Card sets the busy bit once its power-up is complete */
		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* 0x41000000 = power-up done + S18A: card accepted 1.8V switching */
	if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
	    == 0x41000000) {
		err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
		if (err)
			return err;
	}
#endif

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}
673
mmc_send_op_cond_iter(struct mmc * mmc,int use_arg)674 static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
675 {
676 struct mmc_cmd cmd;
677 int err;
678
679 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
680 cmd.resp_type = MMC_RSP_R3;
681 cmd.cmdarg = 0;
682 if (use_arg && !mmc_host_is_spi(mmc))
683 cmd.cmdarg = OCR_HCS |
684 (mmc->cfg->voltages &
685 (mmc->ocr & OCR_VOLTAGE_MASK)) |
686 (mmc->ocr & OCR_ACCESS_MODE);
687
688 err = mmc_send_cmd(mmc, &cmd, NULL);
689 if (err)
690 return err;
691 mmc->ocr = cmd.response[0];
692 return 0;
693 }
694
mmc_send_op_cond(struct mmc * mmc)695 static int mmc_send_op_cond(struct mmc *mmc)
696 {
697 int err, i;
698 int timeout = 1000;
699 uint start;
700
701 /* Some cards seem to need this */
702 mmc_go_idle(mmc);
703
704 start = get_timer(0);
705 /* Asking to the card its capabilities */
706 for (i = 0; ; i++) {
707 err = mmc_send_op_cond_iter(mmc, i != 0);
708 if (err)
709 return err;
710
711 /* exit if not busy (flag seems to be inverted) */
712 if (mmc->ocr & OCR_BUSY)
713 break;
714
715 if (get_timer(start) > timeout)
716 return -ETIMEDOUT;
717 udelay(100);
718 }
719 mmc->op_cond_pending = 1;
720 return 0;
721 }
722
/*
 * Finish the eMMC CMD1 handshake started by mmc_send_op_cond(): keep
 * polling until the card reports power-up complete, read the OCR over
 * SPI if needed, and record capacity class and default RCA.
 *
 * Return: 0 on success, -EOPNOTSUPP if the card never leaves busy,
 *         or another -ve error.
 */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	ulong start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	/* Exact version is read from EXT_CSD later in init */
	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}
768
769
/*
 * Read the 512-byte Extended CSD register (CMD8) into @ext_csd.
 * The buffer must be at least MMC_MAX_BLOCK_LEN bytes.
 */
int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)ext_csd;
	data.blocks = 1;
	data.blocksize = MMC_MAX_BLOCK_LEN;
	data.flags = MMC_DATA_READ;

	return mmc_send_cmd(mmc, &cmd, &data);
}
790
/*
 * Write one EXT_CSD byte via CMD6 and wait for the card to finish.
 *
 * @mmc:         card to modify
 * @set:         command set (normally EXT_CSD_CMD_SET_NORMAL)
 * @index:       EXT_CSD byte index
 * @value:       value to write
 * @send_status: when true, additionally poll CMD13 until the card is back
 *               in TRANS state or reports a switch error
 * Return: 0 on success, -EIO on switch error, -ETIMEDOUT on timeout,
 *         or another -ve error.
 */
static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
			bool send_status)
{
	unsigned int status, start;
	struct mmc_cmd cmd;
	int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
	/* Partition switches have their own, typically longer, timeout */
	bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
			      (index == EXT_CSD_PART_CONF);
	int ret;

	/* EXT_CSD timeouts are given in units of 10 ms */
	if (mmc->gen_cmd6_time)
		timeout_ms = mmc->gen_cmd6_time * 10;

	if (is_part_switch && mmc->part_switch_time)
		timeout_ms = mmc->part_switch_time * 10;

	cmd.cmdidx = MMC_CMD_SWITCH;
	cmd.resp_type = MMC_RSP_R1b;
	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		     (index << 16) |
		     (value << 8);

	ret = mmc_send_cmd_retry(mmc, &cmd, NULL, 3);
	if (ret)
		return ret;

	start = get_timer(0);

	/* poll dat0 for ready/busy status */
	ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
	if (ret && ret != -ENOSYS)
		return ret;

	/*
	 * In cases when neither allowed to poll by using CMD13 nor we are
	 * capable of polling by using mmc_wait_dat0, then rely on waiting the
	 * stated timeout to be sufficient.
	 */
	if (ret == -ENOSYS && !send_status) {
		mdelay(timeout_ms);
		return 0;
	}

	if (!send_status)
		return 0;

	/* Finally wait until the card is ready or indicates a failure
	 * to switch. It doesn't hurt to use CMD13 here even if send_status
	 * is false, because by now (after 'timeout_ms' ms) the bus should be
	 * reliable.
	 */
	do {
		ret = mmc_send_status(mmc, &status);

		if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
			pr_debug("switch failed %d/%d/0x%x !\n", set, index,
				 value);
			return -EIO;
		}
		if (!ret && (status & MMC_STATUS_RDY_FOR_DATA) &&
		    (status & MMC_STATUS_CURR_STATE) == MMC_STATE_TRANS)
			return 0;
		udelay(100);
	} while (get_timer(start) < timeout_ms);

	return -ETIMEDOUT;
}
858
/* Write one EXT_CSD byte and poll CMD13 until the card is ready again. */
int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
{
	return __mmc_switch(mmc, set, index, value, true);
}
863
mmc_boot_wp(struct mmc * mmc)864 int mmc_boot_wp(struct mmc *mmc)
865 {
866 return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
867 }
868
mmc_boot_wp_single_partition(struct mmc * mmc,int partition)869 int mmc_boot_wp_single_partition(struct mmc *mmc, int partition)
870 {
871 u8 value;
872 int ret;
873
874 value = EXT_CSD_BOOT_WP_B_PWR_WP_EN;
875
876 if (partition == 0) {
877 value |= EXT_CSD_BOOT_WP_B_SEC_WP_SEL;
878 ret = mmc_switch(mmc,
879 EXT_CSD_CMD_SET_NORMAL,
880 EXT_CSD_BOOT_WP,
881 value);
882 } else if (partition == 1) {
883 value |= EXT_CSD_BOOT_WP_B_SEC_WP_SEL;
884 value |= EXT_CSD_BOOT_WP_B_PWR_WP_SEC_SEL;
885 ret = mmc_switch(mmc,
886 EXT_CSD_CMD_SET_NORMAL,
887 EXT_CSD_BOOT_WP,
888 value);
889 } else {
890 ret = mmc_boot_wp(mmc);
891 }
892
893 return ret;
894 }
895
896 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * Program the card's HS_TIMING byte for the requested bus mode.
 *
 * @mmc:         card to configure
 * @mode:        target bus mode
 * @hsdowngrade: true when downgrading from HS200/HS400 to HS; skips CMD13
 *               status polling and immediately drops the host clock so the
 *               EXT_CSD read-back below is reliable
 * Return: 0 on success, -EINVAL for an unsupported mode, -ENOTSUPP if the
 *         card did not take the high-speed timing, or another -ve error.
 */
static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
			      bool hsdowngrade)
{
	int err;
	int speed_bits;

	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	switch (mode) {
	case MMC_HS:
	case MMC_HS_52:
	case MMC_DDR_52:
		speed_bits = EXT_CSD_TIMING_HS;
		break;
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	case MMC_HS_200:
		speed_bits = EXT_CSD_TIMING_HS200;
		break;
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	case MMC_HS_400:
		speed_bits = EXT_CSD_TIMING_HS400;
		break;
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	case MMC_HS_400_ES:
		speed_bits = EXT_CSD_TIMING_HS400;
		break;
#endif
	case MMC_LEGACY:
		speed_bits = EXT_CSD_TIMING_LEGACY;
		break;
	default:
		return -EINVAL;
	}

	err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			   speed_bits, !hsdowngrade);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	/*
	 * In case the eMMC is in HS200/HS400 mode and we are downgrading
	 * to HS mode, the card clock are still running much faster than
	 * the supported HS mode clock, so we can not reliably read out
	 * Extended CSD. Reconfigure the controller to run at HS mode.
	 */
	if (hsdowngrade) {
		mmc_select_mode(mmc, MMC_HS);
		mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
	}
#endif

	if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
		/* Now check to see that it worked */
		err = mmc_send_ext_csd(mmc, test_csd);
		if (err)
			return err;

		/* No high-speed support */
		if (!test_csd[EXT_CSD_HS_TIMING])
			return -ENOTSUPP;
	}

	return 0;
}
965
/*
 * Populate mmc->card_caps from the card's EXT_CSD CARD_TYPE byte:
 * bus widths plus every speed mode both the card and the build support.
 *
 * Return: 0 on success, -ENOTSUPP if EXT_CSD was never read.
 */
static int mmc_get_capabilities(struct mmc *mmc)
{
	u8 *ext_csd = mmc->ext_csd;
	char cardtype;

	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);

	/* SPI hosts only do legacy transfers */
	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!ext_csd) {
		pr_err("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	cardtype = ext_csd[EXT_CSD_CARD_TYPE];
	mmc->cardtype = cardtype;

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
			EXT_CSD_CARD_TYPE_HS200_1_8V)) {
		mmc->card_caps |= MMC_MODE_HS200;
	}
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
	CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
			EXT_CSD_CARD_TYPE_HS400_1_8V)) {
		mmc->card_caps |= MMC_MODE_HS400;
	}
#endif
	if (cardtype & EXT_CSD_CARD_TYPE_52) {
		if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
			mmc->card_caps |= MMC_MODE_DDR_52MHz;
		mmc->card_caps |= MMC_MODE_HS_52MHz;
	}
	if (cardtype & EXT_CSD_CARD_TYPE_26)
		mmc->card_caps |= MMC_MODE_HS;

#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	/* Enhanced strobe only matters when HS400 itself is possible */
	if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
	    (mmc->card_caps & MMC_MODE_HS400)) {
		mmc->card_caps |= MMC_MODE_HS400_ES;
	}
#endif

	return 0;
}
1020 #endif
1021
mmc_set_capacity(struct mmc * mmc,int part_num)1022 static int mmc_set_capacity(struct mmc *mmc, int part_num)
1023 {
1024 switch (part_num) {
1025 case 0:
1026 mmc->capacity = mmc->capacity_user;
1027 break;
1028 case 1:
1029 case 2:
1030 mmc->capacity = mmc->capacity_boot;
1031 break;
1032 case 3:
1033 mmc->capacity = mmc->capacity_rpmb;
1034 break;
1035 case 4:
1036 case 5:
1037 case 6:
1038 case 7:
1039 mmc->capacity = mmc->capacity_gp[part_num - 4];
1040 break;
1041 default:
1042 return -1;
1043 }
1044
1045 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1046
1047 return 0;
1048 }
1049
mmc_switch_part(struct mmc * mmc,unsigned int part_num)1050 int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
1051 {
1052 int ret;
1053 int retry = 3;
1054
1055 do {
1056 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1057 EXT_CSD_PART_CONF,
1058 (mmc->part_config & ~PART_ACCESS_MASK)
1059 | (part_num & PART_ACCESS_MASK));
1060 } while (ret && retry--);
1061
1062 /*
1063 * Set the capacity if the switch succeeded or was intended
1064 * to return to representing the raw device.
1065 */
1066 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1067 ret = mmc_set_capacity(mmc, part_num);
1068 mmc_get_blk_desc(mmc)->hwpart = part_num;
1069 }
1070
1071 return ret;
1072 }
1073
1074 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
mmc_hwpart_config(struct mmc * mmc,const struct mmc_hwpart_conf * conf,enum mmc_hwpart_conf_mode mode)1075 int mmc_hwpart_config(struct mmc *mmc,
1076 const struct mmc_hwpart_conf *conf,
1077 enum mmc_hwpart_conf_mode mode)
1078 {
1079 u8 part_attrs = 0;
1080 u32 enh_size_mult;
1081 u32 enh_start_addr;
1082 u32 gp_size_mult[4];
1083 u32 max_enh_size_mult;
1084 u32 tot_enh_size_mult = 0;
1085 u8 wr_rel_set;
1086 int i, pidx, err;
1087 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1088
1089 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1090 return -EINVAL;
1091
1092 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1093 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1094 return -EMEDIUMTYPE;
1095 }
1096
1097 if (!(mmc->part_support & PART_SUPPORT)) {
1098 pr_err("Card does not support partitioning\n");
1099 return -EMEDIUMTYPE;
1100 }
1101
1102 if (!mmc->hc_wp_grp_size) {
1103 pr_err("Card does not define HC WP group size\n");
1104 return -EMEDIUMTYPE;
1105 }
1106
1107 /* check partition alignment and total enhanced size */
1108 if (conf->user.enh_size) {
1109 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1110 conf->user.enh_start % mmc->hc_wp_grp_size) {
1111 pr_err("User data enhanced area not HC WP group "
1112 "size aligned\n");
1113 return -EINVAL;
1114 }
1115 part_attrs |= EXT_CSD_ENH_USR;
1116 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1117 if (mmc->high_capacity) {
1118 enh_start_addr = conf->user.enh_start;
1119 } else {
1120 enh_start_addr = (conf->user.enh_start << 9);
1121 }
1122 } else {
1123 enh_size_mult = 0;
1124 enh_start_addr = 0;
1125 }
1126 tot_enh_size_mult += enh_size_mult;
1127
1128 for (pidx = 0; pidx < 4; pidx++) {
1129 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1130 pr_err("GP%i partition not HC WP group size "
1131 "aligned\n", pidx+1);
1132 return -EINVAL;
1133 }
1134 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1135 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1136 part_attrs |= EXT_CSD_ENH_GP(pidx);
1137 tot_enh_size_mult += gp_size_mult[pidx];
1138 }
1139 }
1140
1141 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1142 pr_err("Card does not support enhanced attribute\n");
1143 return -EMEDIUMTYPE;
1144 }
1145
1146 err = mmc_send_ext_csd(mmc, ext_csd);
1147 if (err)
1148 return err;
1149
1150 max_enh_size_mult =
1151 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1152 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1153 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1154 if (tot_enh_size_mult > max_enh_size_mult) {
1155 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1156 tot_enh_size_mult, max_enh_size_mult);
1157 return -EMEDIUMTYPE;
1158 }
1159
1160 /* The default value of EXT_CSD_WR_REL_SET is device
1161 * dependent, the values can only be changed if the
1162 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
1163 * changed only once and before partitioning is completed. */
1164 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1165 if (conf->user.wr_rel_change) {
1166 if (conf->user.wr_rel_set)
1167 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1168 else
1169 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1170 }
1171 for (pidx = 0; pidx < 4; pidx++) {
1172 if (conf->gp_part[pidx].wr_rel_change) {
1173 if (conf->gp_part[pidx].wr_rel_set)
1174 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1175 else
1176 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1177 }
1178 }
1179
1180 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1181 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1182 puts("Card does not support host controlled partition write "
1183 "reliability settings\n");
1184 return -EMEDIUMTYPE;
1185 }
1186
1187 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1188 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1189 pr_err("Card already partitioned\n");
1190 return -EPERM;
1191 }
1192
1193 if (mode == MMC_HWPART_CONF_CHECK)
1194 return 0;
1195
1196 /* Partitioning requires high-capacity size definitions */
1197 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1198 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1199 EXT_CSD_ERASE_GROUP_DEF, 1);
1200
1201 if (err)
1202 return err;
1203
1204 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1205
1206 #if CONFIG_IS_ENABLED(MMC_WRITE)
1207 /* update erase group size to be high-capacity */
1208 mmc->erase_grp_size =
1209 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1210 #endif
1211
1212 }
1213
1214 /* all OK, write the configuration */
1215 for (i = 0; i < 4; i++) {
1216 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1217 EXT_CSD_ENH_START_ADDR+i,
1218 (enh_start_addr >> (i*8)) & 0xFF);
1219 if (err)
1220 return err;
1221 }
1222 for (i = 0; i < 3; i++) {
1223 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1224 EXT_CSD_ENH_SIZE_MULT+i,
1225 (enh_size_mult >> (i*8)) & 0xFF);
1226 if (err)
1227 return err;
1228 }
1229 for (pidx = 0; pidx < 4; pidx++) {
1230 for (i = 0; i < 3; i++) {
1231 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1232 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1233 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1234 if (err)
1235 return err;
1236 }
1237 }
1238 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1239 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1240 if (err)
1241 return err;
1242
1243 if (mode == MMC_HWPART_CONF_SET)
1244 return 0;
1245
1246 /* The WR_REL_SET is a write-once register but shall be
1247 * written before setting PART_SETTING_COMPLETED. As it is
1248 * write-once we can only write it when completing the
1249 * partitioning. */
1250 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1251 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1252 EXT_CSD_WR_REL_SET, wr_rel_set);
1253 if (err)
1254 return err;
1255 }
1256
1257 /* Setting PART_SETTING_COMPLETED confirms the partition
1258 * configuration but it only becomes effective after power
1259 * cycle, so we do not adjust the partition related settings
1260 * in the mmc struct. */
1261
1262 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1263 EXT_CSD_PARTITION_SETTING,
1264 EXT_CSD_PARTITION_SETTING_COMPLETED);
1265 if (err)
1266 return err;
1267
1268 return 0;
1269 }
1270 #endif
1271
1272 #if !CONFIG_IS_ENABLED(DM_MMC)
mmc_getcd(struct mmc * mmc)1273 int mmc_getcd(struct mmc *mmc)
1274 {
1275 int cd;
1276
1277 cd = board_mmc_getcd(mmc);
1278
1279 if (cd < 0) {
1280 if (mmc->cfg->ops->getcd)
1281 cd = mmc->cfg->ops->getcd(mmc);
1282 else
1283 cd = 1;
1284 }
1285
1286 return cd;
1287 }
1288 #endif
1289
1290 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * sd_switch() - issue CMD6 (SWITCH_FUNC) and read the 64-byte status block
 *
 * @mode:  SD_SWITCH_CHECK to query, SD_SWITCH_SWITCH to actually switch
 * @group: function group to modify (0-based)
 * @value: function number to select within @group
 * @resp:  destination for the 64-byte switch status
 *
 * Return: result of the command transfer.
 */
static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
{
	u32 arg = (mode << 31) | 0xffffff;
	struct mmc_cmd cmd;
	struct mmc_data data = {
		.dest = (char *)resp,
		.blocksize = 64,
		.blocks = 1,
		.flags = MMC_DATA_READ,
	};

	/* replace the nibble of the requested group with @value */
	arg &= ~(0xf << (group * 4));
	arg |= value << (group * 4);

	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = arg;

	return mmc_send_cmd(mmc, &cmd, &data);
}
1310
/*
 * sd_get_capabilities() - probe an SD card's supported widths and bus modes
 *
 * Reads the SCR (via ACMD51) to determine the spec version and 4-bit
 * support, then uses CMD6 in check mode to detect high-speed and, when
 * enabled, UHS bus speeds.  The result is accumulated in mmc->card_caps.
 * SPI hosts keep only the default 1-bit/legacy capabilities.
 *
 * Return: 0 on success, or a negative error from the command transfers.
 */
static int sd_get_capabilities(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
	struct mmc_data data;
	int timeout;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	u32 sd3_bus_mode;
#endif

	/* every card can at least do 1-bit legacy transfers */
	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd_retry(mmc, &cmd, &data, 3);

	if (err)
		return err;

	/* the SCR arrives big-endian on the wire */
	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* decode the spec version from SCR word 0, bits 27:24 */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		/* bit 15 of word 0 promotes the card to version 3 */
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	/* poll CMD6 in check mode until the high-speed function is ready */
	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy.  Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* record high-speed capability if function group 1 advertises it */
	if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
		mmc->card_caps |= MMC_CAP(SD_HS);

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* Versions before 3.0 don't support UHS modes */
	if (mmc->version < SD_VERSION_3)
		return 0;

	sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
	if (sd3_bus_mode & SD_MODE_UHS_SDR104)
		mmc->card_caps |= MMC_CAP(UHS_SDR104);
	if (sd3_bus_mode & SD_MODE_UHS_SDR50)
		mmc->card_caps |= MMC_CAP(UHS_SDR50);
	if (sd3_bus_mode & SD_MODE_UHS_SDR25)
		mmc->card_caps |= MMC_CAP(UHS_SDR25);
	if (sd3_bus_mode & SD_MODE_UHS_SDR12)
		mmc->card_caps |= MMC_CAP(UHS_SDR12);
	if (sd3_bus_mode & SD_MODE_UHS_DDR50)
		mmc->card_caps |= MMC_CAP(UHS_DDR50);
#endif

	return 0;
}
1416
/*
 * sd_set_card_speed() - switch the SD card to the bus speed for @mode
 *
 * Maps @mode to a CMD6 group-1 function number and issues the switch,
 * then checks the response to confirm the card actually selected the
 * requested function.
 *
 * Return: 0 on success, -EINVAL for an unknown mode, -ENOTSUPP if the
 * card refused the speed, or the error from the transfer.
 */
static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
{
	int err;

	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
	int speed;

	/* SD version 1.00 and 1.01 does not support CMD 6 */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	switch (mode) {
	case MMC_LEGACY:
		speed = UHS_SDR12_BUS_SPEED;
		break;
	case SD_HS:
		speed = HIGH_SPEED_BUS_SPEED;
		break;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	case UHS_SDR12:
		speed = UHS_SDR12_BUS_SPEED;
		break;
	case UHS_SDR25:
		speed = UHS_SDR25_BUS_SPEED;
		break;
	case UHS_SDR50:
		speed = UHS_SDR50_BUS_SPEED;
		break;
	case UHS_DDR50:
		speed = UHS_DDR50_BUS_SPEED;
		break;
	case UHS_SDR104:
		speed = UHS_SDR104_BUS_SPEED;
		break;
#endif
	default:
		return -EINVAL;
	}

	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
	if (err)
		return err;

	/* the group-1 result field must echo the function we asked for */
	if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
		return -ENOTSUPP;

	return 0;
}
1465
sd_select_bus_width(struct mmc * mmc,int w)1466 static int sd_select_bus_width(struct mmc *mmc, int w)
1467 {
1468 int err;
1469 struct mmc_cmd cmd;
1470
1471 if ((w != 4) && (w != 1))
1472 return -EINVAL;
1473
1474 cmd.cmdidx = MMC_CMD_APP_CMD;
1475 cmd.resp_type = MMC_RSP_R1;
1476 cmd.cmdarg = mmc->rca << 16;
1477
1478 err = mmc_send_cmd(mmc, &cmd, NULL);
1479 if (err)
1480 return err;
1481
1482 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1483 cmd.resp_type = MMC_RSP_R1;
1484 if (w == 4)
1485 cmd.cmdarg = 2;
1486 else if (w == 1)
1487 cmd.cmdarg = 0;
1488 err = mmc_send_cmd(mmc, &cmd, NULL);
1489 if (err)
1490 return err;
1491
1492 return 0;
1493 }
1494 #endif
1495
1496 #if CONFIG_IS_ENABLED(MMC_WRITE)
/*
 * sd_read_ssr() - read the SD status register (ACMD13) and cache the
 * allocation unit size and erase timing parameters in mmc->ssr.
 *
 * Return: 0 on success (an out-of-range AU size code is only logged),
 * or a negative error from the command transfers.
 */
static int sd_read_ssr(struct mmc *mmc)
{
	/* AU_SIZE code -> allocation unit size in 512-byte sectors */
	static const unsigned int sd_au_size[] = {
		0, SZ_16K / 512, SZ_32K / 512,
		SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
		SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
		SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
		SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
		SZ_64M / 512,
	};
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	unsigned int au, eo, et, es;

	/* announce the following application-specific command */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd_quirks(mmc, &cmd, NULL, MMC_QUIRK_RETRY_APP_CMD, 4);
	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd_retry(mmc, &cmd, &data, 3);
	if (err)
		return err;

	/* the 64-byte status block arrives big-endian */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	/* AU size codes above 9 are only valid for version-3 cards */
	au = (ssr[2] >> 12) & 0xF;
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			eo = (ssr[3] >> 16) & 0x3;
			/* scale erase timeout/offset to milliseconds */
			mmc->ssr.erase_timeout = (et * 1000) / es;
			mmc->ssr.erase_offset = eo * 1000;
		}
	} else {
		pr_debug("Invalid Allocation Unit Size.\n");
	}

	return 0;
}
1554 #endif
/* frequency bases for decoding the CSD TRAN_SPEED field */
/* divided by 10 to be nice to platforms without floating point */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};

/* Multiplier values for TRAN_SPEED.  Multiplied by 10 to be nice
 * to platforms without floating point.  Indexed by the time value
 * code; index 0 is reserved by the spec.
 */
static const u8 multipliers[] = {
	0,	/* reserved */
	10,
	12,
	13,
	15,
	20,
	25,
	30,
	35,
	40,
	45,
	50,
	55,
	60,
	70,
	80,
};
1585
/*
 * bus_width() - translate a single MMC_MODE_xBIT capability bit into
 * the corresponding bus width in data lines.
 *
 * @cap: exactly one of MMC_MODE_8BIT/4BIT/1BIT
 * Return: 8, 4 or 1, or 0 (with a warning) for any other value.
 */
static inline int bus_width(uint cap)
{
	if (cap == MMC_MODE_8BIT)
		return 8;
	if (cap == MMC_MODE_4BIT)
		return 4;
	if (cap == MMC_MODE_1BIT)
		return 1;
	/* fix typo in the original warning text ("witdh") */
	pr_warn("invalid bus width capability 0x%x\n", cap);
	return 0;
}
1597
1598 #if !CONFIG_IS_ENABLED(DM_MMC)
1599 #ifdef MMC_SUPPORTS_TUNING
/* Tuning is not implemented for the non-DM (legacy) MMC stack */
static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
{
	return -ENOTSUPP;
}
1604 #endif
1605
mmc_set_ios(struct mmc * mmc)1606 static int mmc_set_ios(struct mmc *mmc)
1607 {
1608 int ret = 0;
1609
1610 if (mmc->cfg->ops->set_ios)
1611 ret = mmc->cfg->ops->set_ios(mmc);
1612
1613 return ret;
1614 }
1615
mmc_host_power_cycle(struct mmc * mmc)1616 static int mmc_host_power_cycle(struct mmc *mmc)
1617 {
1618 int ret = 0;
1619
1620 if (mmc->cfg->ops->host_power_cycle)
1621 ret = mmc->cfg->ops->host_power_cycle(mmc);
1622
1623 return ret;
1624 }
1625 #endif
1626
/*
 * mmc_set_clock() - set (or gate) the bus clock and apply it to the host
 *
 * @clock:   requested frequency in Hz; clamped to the host's f_min/f_max
 *           unless the clock is being disabled
 * @disable: true to gate the clock, false to enable it
 *
 * Return: result of pushing the new settings through set_ios.
 */
int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
{
	if (!disable) {
		const uint fmax = mmc->cfg->f_max;
		const uint fmin = mmc->cfg->f_min;

		/* clamp the request into the host's supported range */
		if (clock > fmax)
			clock = fmax;
		if (clock < fmin)
			clock = fmin;
	}

	mmc->clock = clock;
	mmc->clk_disable = disable;

	debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);

	return mmc_set_ios(mmc);
}
1644
/* Record the new bus width and apply it through the host's set_ios op */
static int mmc_set_bus_width(struct mmc *mmc, uint width)
{
	mmc->bus_width = width;

	return mmc_set_ios(mmc);
}
1651
1652 #if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1653 /*
1654 * helper function to display the capabilities in a human
1655 * friendly manner. The capabilities include bus width and
1656 * supported modes.
1657 */
/*
 * mmc_dump_capabilities() - pretty-print a capability mask
 *
 * @text: label prefixed to the dump
 * @caps: OR of MMC_MODE_xBIT width bits and MMC_CAP(mode) mode bits
 *
 * Output goes through pr_debug; widths are listed widest-first, modes
 * in enum order.
 */
void mmc_dump_capabilities(const char *text, uint caps)
{
	static const struct {
		uint mask;
		const char *label;
	} width_tab[] = {
		{ MMC_MODE_8BIT, "8, " },
		{ MMC_MODE_4BIT, "4, " },
		{ MMC_MODE_1BIT, "1, " },
	};
	enum bus_mode mode;
	int i;

	pr_debug("%s: widths [", text);
	for (i = 0; i < ARRAY_SIZE(width_tab); i++)
		if (caps & width_tab[i].mask)
			pr_debug("%s", width_tab[i].label);
	pr_debug("\b\b] modes [");
	for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
		if (MMC_CAP(mode) & caps)
			pr_debug("%s, ", mmc_mode_name(mode));
	pr_debug("\b\b]\n");
}
1675 #endif
1676
/* One entry of a preference-ordered bus mode negotiation table */
struct mode_width_tuning {
	enum bus_mode mode;	/* bus speed mode to attempt */
	uint widths;		/* OR-mask of MMC_MODE_xBIT widths usable in this mode */
#ifdef MMC_SUPPORTS_TUNING
	uint tuning;		/* tuning command opcode; 0 means no tuning needed */
#endif
};
1684
1685 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
mmc_voltage_to_mv(enum mmc_voltage voltage)1686 int mmc_voltage_to_mv(enum mmc_voltage voltage)
1687 {
1688 switch (voltage) {
1689 case MMC_SIGNAL_VOLTAGE_000: return 0;
1690 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1691 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1692 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1693 }
1694 return -EINVAL;
1695 }
1696
/*
 * mmc_set_signal_voltage() - change the I/O signal voltage
 *
 * Records the new voltage and pushes it to the host via set_ios.
 * A request for the already-active voltage is a successful no-op.
 *
 * Return: 0 on success or the error from set_ios.
 */
static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
{
	int ret;

	if (signal_voltage == mmc->signal_voltage)
		return 0;

	mmc->signal_voltage = signal_voltage;
	ret = mmc_set_ios(mmc);
	if (ret)
		pr_debug("unable to set voltage (err %d)\n", ret);

	return ret;
}
1711 #else
/* I/O voltage switching compiled out: treat every request as successful */
static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
{
	return 0;
}
1716 #endif
1717
1718 #if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * SD bus modes ordered from most to least preferred; the selection loop
 * tries entries top-down, so faster modes come first.
 */
static const struct mode_width_tuning sd_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
#ifdef MMC_SUPPORTS_TUNING
	{
		.mode = UHS_SDR104,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK
	},
#endif
	{
		.mode = UHS_SDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_DDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_SDR25,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_HS,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	{
		.mode = UHS_SDR12,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = MMC_LEGACY,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};

/* Visit each sd_modes_by_pref entry whose mode bit is set in @caps */
#define for_each_sd_mode_by_pref(caps, mwt) \
	for (mwt = sd_modes_by_pref;\
	    mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
	    mwt++) \
	if (caps & MMC_CAP(mwt->mode))
1762
/*
 * sd_select_mode_and_width() - negotiate the best working SD bus config
 *
 * Walks the preference-ordered mode table and, within each mode, tries
 * the widest supported bus width first.  Each candidate is programmed
 * on both card and host and validated (tuning and/or an SSR read where
 * enabled).  On failure the bus is dropped back to legacy speed before
 * the next candidate is tried.
 *
 * @card_caps: capability mask reported by the card
 * Return: 0 once a configuration works (SPI hosts return 0 immediately
 * with a fixed 1-bit legacy setup), -ENOTSUPP if nothing works.
 */
static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
	const struct mode_width_tuning *mwt;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* UHS is only usable if the card acknowledged the 1.8V request */
	bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
#else
	bool uhs_en = false;
#endif
	uint caps;

#ifdef DEBUG
	mmc_dump_capabilities("sd card", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* SPI hosts have a fixed 1-bit legacy configuration */
	if (mmc_host_is_spi(mmc)) {
		mmc_set_bus_width(mmc, 1);
		mmc_select_mode(mmc, MMC_LEGACY);
		mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
#if CONFIG_IS_ENABLED(MMC_WRITE)
		err = sd_read_ssr(mmc);
		if (err)
			pr_warn("unable to read ssr\n");
#endif
		return 0;
	}

	/* Restrict card's capabilities by what the host can do */
	caps = card_caps & mmc->host_caps;

	if (!uhs_en)
		caps &= ~UHS_CAPS;

	for_each_sd_mode_by_pref(caps, mwt) {
		uint *w;

		for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
			if (*w & caps & mwt->widths) {
				pr_debug("trying mode %s width %d (at %d MHz)\n",
					 mmc_mode_name(mwt->mode),
					 bus_width(*w),
					 mmc_mode2freq(mmc, mwt->mode) / 1000000);

				/* configure the bus width (card + host) */
				err = sd_select_bus_width(mmc, bus_width(*w));
				if (err)
					goto error;
				mmc_set_bus_width(mmc, bus_width(*w));

				/* configure the bus mode (card) */
				err = sd_set_card_speed(mmc, mwt->mode);
				if (err)
					goto error;

				/* configure the bus mode (host) */
				mmc_select_mode(mmc, mwt->mode);
				mmc_set_clock(mmc, mmc->tran_speed,
						MMC_CLK_ENABLE);

#ifdef MMC_SUPPORTS_TUNING
				/* execute tuning if needed */
				if (mwt->tuning && !mmc_host_is_spi(mmc)) {
					err = mmc_execute_tuning(mmc,
								 mwt->tuning);
					if (err) {
						pr_debug("tuning failed\n");
						goto error;
					}
				}
#endif

#if CONFIG_IS_ENABLED(MMC_WRITE)
				err = sd_read_ssr(mmc);
				if (err)
					pr_warn("unable to read ssr\n");
#endif
				if (!err)
					return 0;

error:
				/* revert to a safer bus speed */
				mmc_select_mode(mmc, MMC_LEGACY);
				mmc_set_clock(mmc, mmc->tran_speed,
						MMC_CLK_ENABLE);
			}
		}
	}

	pr_err("unable to select a mode\n");
	return -ENOTSUPP;
}
1856
1857 /*
 * Read and compare the part of the ext csd that is constant.
1859 * This can be used to check that the transfer is working
1860 * as expected.
1861 */
mmc_read_and_compare_ext_csd(struct mmc * mmc)1862 static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1863 {
1864 int err;
1865 const u8 *ext_csd = mmc->ext_csd;
1866 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1867
1868 if (mmc->version < MMC_VERSION_4)
1869 return 0;
1870
1871 err = mmc_send_ext_csd(mmc, test_csd);
1872 if (err)
1873 return err;
1874
1875 /* Only compare read only fields */
1876 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1877 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1878 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1879 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1880 ext_csd[EXT_CSD_REV]
1881 == test_csd[EXT_CSD_REV] &&
1882 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1883 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1884 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1885 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1886 return 0;
1887
1888 return -EBADMSG;
1889 }
1890
1891 #if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * mmc_set_lowest_voltage() - switch to the lowest signal voltage both
 * the card (for @mode) and @allowed_mask permit.
 *
 * Builds the voltage set the card's device type advertises for @mode,
 * then tries candidates from the lowest set bit upward until the host
 * accepts one.
 *
 * Return: 0 when a voltage was accepted, -ENOTSUPP when card and host
 * share no workable voltage.
 */
static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
				  uint32_t allowed_mask)
{
	u32 card_mask = 0;

	/* collect the voltages the card allows for the requested mode */
	switch (mode) {
	case MMC_HS_400_ES:
	case MMC_HS_400:
	case MMC_HS_200:
		if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
		    EXT_CSD_CARD_TYPE_HS400_1_8V))
			card_mask |= MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
		    EXT_CSD_CARD_TYPE_HS400_1_2V))
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	case MMC_DDR_52:
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_330 |
				     MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	default:
		card_mask |= MMC_SIGNAL_VOLTAGE_330;
		break;
	}

	/* try candidates lowest-bit-first until the host accepts one */
	while (card_mask & allowed_mask) {
		enum mmc_voltage best_match;

		best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
		if (!mmc_set_signal_voltage(mmc, best_match))
			return 0;

		/* host refused it: drop this candidate and retry */
		allowed_mask &= ~best_match;
	}

	return -ENOTSUPP;
}
1932 #else
/* I/O voltage switching compiled out: accept any request unchanged */
static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
					 uint32_t allowed_mask)
{
	return 0;
}
1938 #endif
1939
/*
 * eMMC bus modes ordered from most to least preferred; the selection
 * loop tries entries top-down, so faster modes come first.
 */
static const struct mode_width_tuning mmc_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	{
		.mode = MMC_HS_400_ES,
		.widths = MMC_MODE_8BIT,
	},
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	{
		.mode = MMC_HS_400,
		.widths = MMC_MODE_8BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
	},
#endif
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	{
		.mode = MMC_HS_200,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
	},
#endif
	{
		.mode = MMC_DDR_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
	},
	{
		.mode = MMC_HS_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_HS,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_LEGACY,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};

/* Visit each mmc_modes_by_pref entry whose mode bit is set in @caps */
#define for_each_mmc_mode_by_pref(caps, mwt) \
	for (mwt = mmc_modes_by_pref;\
	    mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
	    mwt++) \
	if (caps & MMC_CAP(mwt->mode))
1984
/*
 * Mapping from a width capability bit plus DDR flag to the value to
 * program into EXT_CSD_BUS_WIDTH, ordered widest/DDR-first so the
 * negotiation loop prefers faster configurations.
 */
static const struct ext_csd_bus_width {
	uint cap;		/* MMC_MODE_xBIT capability bit */
	bool is_ddr;		/* true for the dual-data-rate variants */
	uint ext_csd_bits;	/* EXT_CSD_BUS_WIDTH register value */
} ext_csd_bus_width[] = {
	{MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
	{MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
	{MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
	{MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
	{MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
};
1996
1997 #if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * mmc_select_hs400() - bring an eMMC up in HS400 mode
 *
 * HS400 cannot be entered directly: the card is first tuned in HS200,
 * dropped back to HS, switched to an 8-bit DDR bus and only then moved
 * to HS400 timing.
 *
 * Return: 0 on success or the first error hit in the sequence.
 */
static int mmc_select_hs400(struct mmc *mmc)
{
	int err;

	/* Set timing to HS200 for tuning */
	err = mmc_set_card_speed(mmc, MMC_HS_200, false);
	if (err)
		return err;

	/* configure the bus mode (host) */
	mmc_select_mode(mmc, MMC_HS_200);
	mmc_set_clock(mmc, mmc->tran_speed, false);

	/* execute tuning if needed */
	mmc->hs400_tuning = 1;
	err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
	mmc->hs400_tuning = 0;
	if (err) {
		debug("tuning failed\n");
		return err;
	}

	/* Set back to HS */
	mmc_set_card_speed(mmc, MMC_HS, true);

	err = mmc_hs400_prepare_ddr(mmc);
	if (err)
		return err;

	/* HS400 requires the 8-bit DDR bus */
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
	if (err)
		return err;

	err = mmc_set_card_speed(mmc, MMC_HS_400, false);
	if (err)
		return err;

	/* finally apply HS400 timing on the host side */
	mmc_select_mode(mmc, MMC_HS_400);
	err = mmc_set_clock(mmc, mmc->tran_speed, false);
	if (err)
		return err;

	return 0;
}
2043 #else
/* HS400 support compiled out: report the mode as unavailable */
static int mmc_select_hs400(struct mmc *mmc)
{
	return -ENOTSUPP;
}
2048 #endif
2049
2050 #if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
2051 #if !CONFIG_IS_ENABLED(DM_MMC)
/* Non-DM fallback: enhanced strobe cannot be enabled on legacy hosts */
static int mmc_set_enhanced_strobe(struct mmc *mmc)
{
	return -ENOTSUPP;
}
2056 #endif
/*
 * mmc_select_hs400es() - bring an eMMC up in HS400 Enhanced Strobe mode
 *
 * Drops to HS, enables the 8-bit DDR bus with the strobe flag, switches
 * the card to HS400ES timing, and finally asks the host to enable
 * enhanced strobe.  No tuning step is involved in this mode.
 *
 * Return: 0 on success or the first error hit in the sequence.
 */
static int mmc_select_hs400es(struct mmc *mmc)
{
	int err;

	err = mmc_set_card_speed(mmc, MMC_HS, true);
	if (err)
		return err;

	/* 8-bit DDR bus plus the enhanced-strobe flag */
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
			 EXT_CSD_BUS_WIDTH_STROBE);
	if (err) {
		printf("switch to bus width for hs400 failed\n");
		return err;
	}
	/* TODO: driver strength */
	err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
	if (err)
		return err;

	/* apply HS400ES timing on the host side */
	mmc_select_mode(mmc, MMC_HS_400_ES);
	err = mmc_set_clock(mmc, mmc->tran_speed, false);
	if (err)
		return err;

	return mmc_set_enhanced_strobe(mmc);
}
2084 #else
/* HS400ES support compiled out: report the mode as unavailable */
static int mmc_select_hs400es(struct mmc *mmc)
{
	return -ENOTSUPP;
}
2089 #endif
2090
/*
 * Visit each ext_csd_bus_width entry whose DDR type matches @ddr and
 * whose width capability bit is present in @caps.
 */
#define for_each_supported_width(caps, ddr, ecbv) \
	for (ecbv = ext_csd_bus_width;\
	    ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
	    ecbv++) \
	if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
2096
/*
 * mmc_select_mode_and_width() - negotiate the best working eMMC bus config
 *
 * Walks the preference-ordered mode table and the width table, trying
 * each (mode, width) pair: voltage is lowered first, then the bus width
 * and speed are programmed on card and host, tuning runs where needed,
 * and the result is validated by re-reading the EXT_CSD.  Any failure
 * reverts to a safe 1-bit legacy configuration and continues with the
 * next candidate.
 *
 * @card_caps: capability mask derived from the card's registers
 * Return: 0 once a configuration works (SPI hosts return 0 immediately),
 * -ENOTSUPP if no candidate works or EXT_CSD is missing.
 */
static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err = 0;
	const struct mode_width_tuning *mwt;
	const struct ext_csd_bus_width *ecbw;

#ifdef DEBUG
	mmc_dump_capabilities("mmc", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* SPI hosts have a fixed 1-bit legacy configuration */
	if (mmc_host_is_spi(mmc)) {
		mmc_set_bus_width(mmc, 1);
		mmc_select_mode(mmc, MMC_LEGACY);
		mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
		return 0;
	}

	/* Restrict card's capabilities by what the host can do */
	card_caps &= mmc->host_caps;

	/* Only version 4 of MMC supports wider bus widths */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd) {
		pr_debug("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	/*
	 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
	 * before doing anything else, since a transition from either of
	 * the HS200/HS400 mode directly to legacy mode is not supported.
	 */
	if (mmc->selected_mode == MMC_HS_200 ||
	    mmc->selected_mode == MMC_HS_400 ||
	    mmc->selected_mode == MMC_HS_400_ES)
		mmc_set_card_speed(mmc, MMC_HS, true);
	else
#endif
		mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);

	for_each_mmc_mode_by_pref(card_caps, mwt) {
		for_each_supported_width(card_caps & mwt->widths,
					 mmc_is_mode_ddr(mwt->mode), ecbw) {
			enum mmc_voltage old_voltage;
			pr_debug("trying mode %s width %d (at %d MHz)\n",
				 mmc_mode_name(mwt->mode),
				 bus_width(ecbw->cap),
				 mmc_mode2freq(mmc, mwt->mode) / 1000000);
			/* remember the voltage so failure can restore it */
			old_voltage = mmc->signal_voltage;
			err = mmc_set_lowest_voltage(mmc, mwt->mode,
						     MMC_ALL_SIGNAL_VOLTAGE);
			if (err)
				continue;

			/* configure the bus width (card + host) */
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				    EXT_CSD_BUS_WIDTH,
				    ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
			if (err)
				goto error;
			mmc_set_bus_width(mmc, bus_width(ecbw->cap));

			if (mwt->mode == MMC_HS_400) {
				err = mmc_select_hs400(mmc);
				if (err) {
					printf("Select HS400 failed %d\n", err);
					goto error;
				}
			} else if (mwt->mode == MMC_HS_400_ES) {
				err = mmc_select_hs400es(mmc);
				if (err) {
					printf("Select HS400ES failed %d\n",
					       err);
					goto error;
				}
			} else {
				/* configure the bus speed (card) */
				err = mmc_set_card_speed(mmc, mwt->mode, false);
				if (err)
					goto error;

				/*
				 * configure the bus width AND the ddr mode
				 * (card). The host side will be taken care
				 * of in the next step
				 */
				if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
					err = mmc_switch(mmc,
							 EXT_CSD_CMD_SET_NORMAL,
							 EXT_CSD_BUS_WIDTH,
							 ecbw->ext_csd_bits);
					if (err)
						goto error;
				}

				/* configure the bus mode (host) */
				mmc_select_mode(mmc, mwt->mode);
				mmc_set_clock(mmc, mmc->tran_speed,
					      MMC_CLK_ENABLE);
#ifdef MMC_SUPPORTS_TUNING

				/* execute tuning if needed */
				if (mwt->tuning) {
					err = mmc_execute_tuning(mmc,
								 mwt->tuning);
					if (err) {
						pr_debug("tuning failed : %d\n", err);
						goto error;
					}
				}
#endif
			}

			/* do a transfer to check the configuration */
			err = mmc_read_and_compare_ext_csd(mmc);
			if (!err)
				return 0;
error:
			mmc_set_signal_voltage(mmc, old_voltage);
			/* if an error occurred, revert to a safer bus mode */
			mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
			mmc_select_mode(mmc, MMC_LEGACY);
			mmc_set_bus_width(mmc, 1);
		}
	}

	pr_err("unable to select a mode : %d\n", err);

	return -ENOTSUPP;
}
2234 #endif
2235
#if CONFIG_IS_ENABLED(MMC_TINY)
/* static EXT_CSD backup buffer used instead of malloc under MMC_TINY */
DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
#endif
2239
mmc_startup_v4(struct mmc * mmc)2240 static int mmc_startup_v4(struct mmc *mmc)
2241 {
2242 int err, i;
2243 u64 capacity;
2244 bool has_parts = false;
2245 bool part_completed;
2246 static const u32 mmc_versions[] = {
2247 MMC_VERSION_4,
2248 MMC_VERSION_4_1,
2249 MMC_VERSION_4_2,
2250 MMC_VERSION_4_3,
2251 MMC_VERSION_4_4,
2252 MMC_VERSION_4_41,
2253 MMC_VERSION_4_5,
2254 MMC_VERSION_5_0,
2255 MMC_VERSION_5_1
2256 };
2257
2258 #if CONFIG_IS_ENABLED(MMC_TINY)
2259 u8 *ext_csd = ext_csd_bkup;
2260
2261 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2262 return 0;
2263
2264 if (!mmc->ext_csd)
2265 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2266
2267 err = mmc_send_ext_csd(mmc, ext_csd);
2268 if (err)
2269 goto error;
2270
2271 /* store the ext csd for future reference */
2272 if (!mmc->ext_csd)
2273 mmc->ext_csd = ext_csd;
2274 #else
2275 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2276
2277 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2278 return 0;
2279
2280 /* check ext_csd version and capacity */
2281 err = mmc_send_ext_csd(mmc, ext_csd);
2282 if (err)
2283 goto error;
2284
2285 /* store the ext csd for future reference */
2286 if (!mmc->ext_csd)
2287 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2288 if (!mmc->ext_csd)
2289 return -ENOMEM;
2290 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
2291 #endif
2292 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2293 return -EINVAL;
2294
2295 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2296
2297 if (mmc->version >= MMC_VERSION_4_2) {
2298 /*
2299 * According to the JEDEC Standard, the value of
2300 * ext_csd's capacity is valid if the value is more
2301 * than 2GB
2302 */
2303 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2304 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2305 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2306 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2307 capacity *= MMC_MAX_BLOCK_LEN;
2308 if ((capacity >> 20) > 2 * 1024)
2309 mmc->capacity_user = capacity;
2310 }
2311
2312 if (mmc->version >= MMC_VERSION_4_5)
2313 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2314
2315 /* The partition data may be non-zero but it is only
2316 * effective if PARTITION_SETTING_COMPLETED is set in
2317 * EXT_CSD, so ignore any data if this bit is not set,
2318 * except for enabling the high-capacity group size
2319 * definition (see below).
2320 */
2321 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2322 EXT_CSD_PARTITION_SETTING_COMPLETED);
2323
2324 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2325 /* Some eMMC set the value too low so set a minimum */
2326 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2327 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2328
2329 /* store the partition info of emmc */
2330 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2331 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2332 ext_csd[EXT_CSD_BOOT_MULT])
2333 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2334 if (part_completed &&
2335 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2336 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
2337
2338 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2339
2340 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2341
2342 for (i = 0; i < 4; i++) {
2343 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2344 uint mult = (ext_csd[idx + 2] << 16) +
2345 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2346 if (mult)
2347 has_parts = true;
2348 if (!part_completed)
2349 continue;
2350 mmc->capacity_gp[i] = mult;
2351 mmc->capacity_gp[i] *=
2352 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2353 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2354 mmc->capacity_gp[i] <<= 19;
2355 }
2356
2357 #ifndef CONFIG_SPL_BUILD
2358 if (part_completed) {
2359 mmc->enh_user_size =
2360 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2361 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2362 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2363 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2364 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2365 mmc->enh_user_size <<= 19;
2366 mmc->enh_user_start =
2367 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2368 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2369 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2370 ext_csd[EXT_CSD_ENH_START_ADDR];
2371 if (mmc->high_capacity)
2372 mmc->enh_user_start <<= 9;
2373 }
2374 #endif
2375
2376 /*
2377 * Host needs to enable ERASE_GRP_DEF bit if device is
2378 * partitioned. This bit will be lost every time after a reset
2379 * or power off. This will affect erase size.
2380 */
2381 if (part_completed)
2382 has_parts = true;
2383 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2384 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2385 has_parts = true;
2386 if (has_parts) {
2387 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2388 EXT_CSD_ERASE_GROUP_DEF, 1);
2389
2390 if (err)
2391 goto error;
2392
2393 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2394 }
2395
2396 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2397 #if CONFIG_IS_ENABLED(MMC_WRITE)
2398 /* Read out group size from ext_csd */
2399 mmc->erase_grp_size =
2400 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2401 #endif
2402 /*
2403 * if high capacity and partition setting completed
2404 * SEC_COUNT is valid even if it is smaller than 2 GiB
2405 * JEDEC Standard JESD84-B45, 6.2.4
2406 */
2407 if (mmc->high_capacity && part_completed) {
2408 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2409 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2410 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2411 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2412 capacity *= MMC_MAX_BLOCK_LEN;
2413 mmc->capacity_user = capacity;
2414 }
2415 }
2416 #if CONFIG_IS_ENABLED(MMC_WRITE)
2417 else {
2418 /* Calculate the group size from the csd value. */
2419 int erase_gsz, erase_gmul;
2420
2421 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2422 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2423 mmc->erase_grp_size = (erase_gsz + 1)
2424 * (erase_gmul + 1);
2425 }
2426 #endif
2427 #if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2428 mmc->hc_wp_grp_size = 1024
2429 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2430 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2431 #endif
2432
2433 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2434
2435 mmc->can_trim =
2436 !!(ext_csd[EXT_CSD_SEC_FEATURE] & EXT_CSD_SEC_FEATURE_TRIM_EN);
2437
2438 return 0;
2439 error:
2440 if (mmc->ext_csd) {
2441 #if !CONFIG_IS_ENABLED(MMC_TINY)
2442 free(mmc->ext_csd);
2443 #endif
2444 mmc->ext_csd = NULL;
2445 }
2446 return err;
2447 }
2448
/*
 * Bring a freshly-identified card out of Identification Mode, read its
 * CID/CSD (and, for MMC v4+, EXT_CSD via mmc_startup_v4()), select the
 * final bus mode/width and fill in the block device descriptor.
 *
 * Returns 0 on success or a negative error code from the first failing
 * command.
 */
static int mmc_startup(struct mmc *mmc)
{
	int err, i;
	uint mult, freq;
	u64 cmult, csize;
	struct mmc_cmd cmd;
	struct blk_desc *bdesc;

#ifdef CONFIG_MMC_SPI_CRC_ON
	if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 1;
		err = mmc_send_cmd(mmc, &cmd, NULL);
		if (err)
			return err;
	}
#endif

	/* Put the Card in Identify Mode */
	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
		MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = 0;

	/* Some cards need the CID command retried, hence the quirk + 4 tries */
	err = mmc_send_cmd_quirks(mmc, &cmd, NULL, MMC_QUIRK_RETRY_SEND_CID, 4);
	if (err)
		return err;

	memcpy(mmc->cid, cmd.response, 16);

	/*
	 * For MMC cards, set the Relative Address.
	 * For SD cards, get the Relative Address.
	 * This also puts the cards into Standby State
	 */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
		cmd.cmdarg = mmc->rca << 16;
		cmd.resp_type = MMC_RSP_R6;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		/* SD cards assign their own RCA; MMC uses the one we sent */
		if (IS_SD(mmc))
			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
	}

	/* Get the Card-Specific Data */
	cmd.cmdidx = MMC_CMD_SEND_CSD;
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	mmc->csd[0] = cmd.response[0];
	mmc->csd[1] = cmd.response[1];
	mmc->csd[2] = cmd.response[2];
	mmc->csd[3] = cmd.response[3];

	/* Derive the MMC spec version from the CSD if not already known */
	if (mmc->version == MMC_VERSION_UNKNOWN) {
		int version = (cmd.response[0] >> 26) & 0xf;

		switch (version) {
		case 0:
			mmc->version = MMC_VERSION_1_2;
			break;
		case 1:
			mmc->version = MMC_VERSION_1_4;
			break;
		case 2:
			mmc->version = MMC_VERSION_2_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_3;
			break;
		case 4:
			mmc->version = MMC_VERSION_4;
			break;
		default:
			mmc->version = MMC_VERSION_1_2;
			break;
		}
	}

	/* divide frequency by 10, since the mults are 10x bigger */
	freq = fbase[(cmd.response[0] & 0x7)];
	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];

	mmc->legacy_speed = freq * mult;
	mmc_select_mode(mmc, MMC_LEGACY);

	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
#if CONFIG_IS_ENABLED(MMC_WRITE)

	if (IS_SD(mmc))
		mmc->write_bl_len = mmc->read_bl_len;
	else
		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
#endif

	/* C_SIZE/C_SIZE_MULT are encoded differently for high-capacity cards */
	if (mmc->high_capacity) {
		csize = (mmc->csd[1] & 0x3f) << 16
			| (mmc->csd[2] & 0xffff0000) >> 16;
		cmult = 8;
	} else {
		csize = (mmc->csd[1] & 0x3ff) << 2
			| (mmc->csd[2] & 0xc0000000) >> 30;
		cmult = (mmc->csd[2] & 0x00038000) >> 15;
	}

	mmc->capacity_user = (csize + 1) << (cmult + 2);
	mmc->capacity_user *= mmc->read_bl_len;
	mmc->capacity_boot = 0;
	mmc->capacity_rpmb = 0;
	for (i = 0; i < 4; i++)
		mmc->capacity_gp[i] = 0;

	/* Clamp block lengths to what the framework handles */
	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;

#if CONFIG_IS_ENABLED(MMC_WRITE)
	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
#endif

	/* Program the DSR if the card implements one and a value was set */
	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
		cmd.cmdidx = MMC_CMD_SET_DSR;
		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
		cmd.resp_type = MMC_RSP_NONE;
		if (mmc_send_cmd(mmc, &cmd, NULL))
			pr_warn("MMC: SET_DSR failed\n");
	}

	/* Select the card, and put it into Transfer Mode */
	if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
		cmd.cmdidx = MMC_CMD_SELECT_CARD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	/*
	 * For SD, its erase group is always one sector
	 */
#if CONFIG_IS_ENABLED(MMC_WRITE)
	mmc->erase_grp_size = 1;
#endif
	mmc->part_config = MMCPART_NOAVAILABLE;

	/* For MMC >= v4 this reads EXT_CSD and refines capacity/partitions */
	err = mmc_startup_v4(mmc);
	if (err)
		return err;

	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(MMC_TINY)
	/* MMC_TINY: stay in 1-bit legacy mode, skip capability probing */
	mmc_set_clock(mmc, mmc->legacy_speed, false);
	mmc_select_mode(mmc, MMC_LEGACY);
	mmc_set_bus_width(mmc, 1);
#else
	if (IS_SD(mmc)) {
		err = sd_get_capabilities(mmc);
		if (err)
			return err;
		err = sd_select_mode_and_width(mmc, mmc->card_caps);
	} else {
		err = mmc_get_capabilities(mmc);
		if (err)
			return err;
		err = mmc_select_mode_and_width(mmc, mmc->card_caps);
	}
#endif
	if (err)
		return err;

	mmc->best_mode = mmc->selected_mode;

	/* Fix the block length for DDR mode */
	if (mmc->ddr_mode) {
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
#if CONFIG_IS_ENABLED(MMC_WRITE)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
#endif
	}

	/* fill in device description */
	bdesc = mmc_get_blk_desc(mmc);
	bdesc->lun = 0;
	bdesc->hwpart = 0;
	bdesc->type = 0;
	bdesc->blksz = mmc->read_bl_len;
	bdesc->log2blksz = LOG2(bdesc->blksz);
	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
#if !defined(CONFIG_SPL_BUILD) || \
		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
		!CONFIG_IS_ENABLED(USE_TINY_PRINTF))
	/* Decode manufacturer/product/revision strings from the CID */
	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
		(mmc->cid[3] >> 16) & 0xffff);
	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
		(mmc->cid[2] >> 24) & 0xff);
	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
		(mmc->cid[2] >> 16) & 0xf);
#else
	bdesc->vendor[0] = 0;
	bdesc->product[0] = 0;
	bdesc->revision[0] = 0;
#endif

#if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
	part_init(bdesc);
#endif

	return 0;
}
2678
mmc_send_if_cond(struct mmc * mmc)2679 static int mmc_send_if_cond(struct mmc *mmc)
2680 {
2681 struct mmc_cmd cmd;
2682 int err;
2683
2684 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2685 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
2686 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2687 cmd.resp_type = MMC_RSP_R7;
2688
2689 err = mmc_send_cmd(mmc, &cmd, NULL);
2690
2691 if (err)
2692 return err;
2693
2694 if ((cmd.response[0] & 0xff) != 0xaa)
2695 return -EOPNOTSUPP;
2696 else
2697 mmc->version = SD_VERSION_2;
2698
2699 return 0;
2700 }
2701
2702 #if !CONFIG_IS_ENABLED(DM_MMC)
2703 /* board-specific MMC power initializations. */
__weak void board_mmc_power_init(void)
{
	/* Default: nothing to do; boards override this to switch card power */
}
2707 #endif
2708
/*
 * Locate the card's power supplies.
 *
 * With DM_MMC + DM_REGULATOR this resolves the "vmmc-supply" (card power)
 * and "vqmmc-supply" (I/O voltage) regulators from the device tree; a
 * missing supply only produces a debug message. Without driver model it
 * defers to the board hook instead. Always returns 0.
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if CONFIG_IS_ENABLED(DM_REGULATOR)
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &mmc->vmmc_supply);
	if (ret)
		pr_debug("%s: No vmmc supply\n", mmc->dev->name);

	ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
					  &mmc->vqmmc_supply);
	if (ret)
		pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}
2734
2735 /*
2736 * put the host in the initial state:
2737 * - turn on Vdd (card power supply)
2738 * - configure the bus width and clock to minimal values
2739 */
mmc_set_initial_state(struct mmc * mmc)2740 static void mmc_set_initial_state(struct mmc *mmc)
2741 {
2742 int err;
2743
2744 /* First try to set 3.3V. If it fails set to 1.8V */
2745 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2746 if (err != 0)
2747 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2748 if (err != 0)
2749 pr_warn("mmc: failed to set signal voltage\n");
2750
2751 mmc_select_mode(mmc, MMC_LEGACY);
2752 mmc_set_bus_width(mmc, 1);
2753 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
2754 }
2755
/*
 * Enable the card's VMMC supply, if one was found by mmc_power_init().
 * Returns 0 on success or the regulator error code.
 */
static int mmc_power_on(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vmmc_supply) {
		int ret = regulator_set_enable(mmc->vmmc_supply, true);

		/*
		 * NOTE(review): -EACCES is tolerated — presumably a fixed
		 * (always-on) regulator that cannot be toggled; confirm
		 * against the regulator uclass semantics.
		 */
		if (ret && ret != -EACCES) {
			printf("Error enabling VMMC supply : %d\n", ret);
			return ret;
		}
	}
#endif
	return 0;
}
2770
/*
 * Gate the bus clock and disable the card's VMMC supply.
 * Returns 0 on success or the regulator error code.
 */
static int mmc_power_off(struct mmc *mmc)
{
	mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vmmc_supply) {
		int ret = regulator_set_enable(mmc->vmmc_supply, false);

		/*
		 * NOTE(review): -EACCES is tolerated here as in
		 * mmc_power_on() — presumably an always-on regulator;
		 * confirm against the regulator uclass semantics.
		 */
		if (ret && ret != -EACCES) {
			pr_debug("Error disabling VMMC supply : %d\n", ret);
			return ret;
		}
	}
#endif
	return 0;
}
2786
/*
 * Fully power cycle the card: supply off, host-level cycle, settle delay,
 * supply back on. Returns 0 on success or the first error encountered.
 */
static int mmc_power_cycle(struct mmc *mmc)
{
	int err;

	err = mmc_power_off(mmc);
	if (!err)
		err = mmc_host_power_cycle(mmc);
	if (err)
		return err;

	/*
	 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
	 * to be on the safer side.
	 */
	udelay(2000);

	return mmc_power_on(mmc);
}
2806
/*
 * Power the card up and negotiate its operating conditions (OCR).
 *
 * Tries the SD path first (CMD8 + ACMD41, optionally with UHS); on
 * timeout, falls back to the eMMC path (CMD1). If a UHS attempt fails,
 * the card is power cycled and the sequence retried without UHS.
 *
 * @quiet: suppress the "did not respond to voltage select" error message
 * Returns 0 on success, -EOPNOTSUPP if the card answers neither command
 * set, or another negative error code.
 */
int mmc_get_op_cond(struct mmc *mmc, bool quiet)
{
	bool uhs_en = supports_uhs(mmc->cfg->host_caps);
	int err;

	if (mmc->has_init)
		return 0;

	err = mmc_power_init(mmc);
	if (err)
		return err;

#ifdef CONFIG_MMC_QUIRKS
	mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
		      MMC_QUIRK_RETRY_SEND_CID |
		      MMC_QUIRK_RETRY_APP_CMD;
#endif

	err = mmc_power_cycle(mmc);
	if (err) {
		/*
		 * if power cycling is not supported, we should not try
		 * to use the UHS modes, because we wouldn't be able to
		 * recover from an error during the UHS initialization.
		 */
		pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
		uhs_en = false;
		mmc->host_caps &= ~UHS_CAPS;
		err = mmc_power_on(mmc);
	}
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/*
	 * Re-initialization is needed to clear old configuration for
	 * mmc rescan.
	 */
	err = mmc_reinit(mmc);
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
#endif
	if (err)
		return err;
	mmc->ddr_mode = 0;

retry:
	mmc_set_initial_state(mmc);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition reset to user partition(0) at every CMD0 */
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/*
	 * Test for SD version 2. The return value is deliberately ignored
	 * (overwritten just below): pre-v2 SD cards and eMMC simply do not
	 * answer CMD8, which is not an error at this point.
	 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc, uhs_en);
	if (err && uhs_en) {
		/* UHS negotiation failed: power cycle and retry without UHS */
		uhs_en = false;
		mmc_power_cycle(mmc);
		goto retry;
	}

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			if (!quiet)
				pr_err("Card did not respond to voltage select! : %d\n", err);
#endif
			return -EOPNOTSUPP;
		}
	}

	return err;
}
2892
/*
 * Begin card initialization: build the host capability mask, apply any
 * user-requested speed mode, check card detect, then run the OCR
 * negotiation. Completion is deferred to mmc_complete_init() (reached
 * via mmc_init()) so the op-cond polling can overlap other boot work.
 *
 * Returns 0 on success, -ENOMEDIUM if no card is present, -EINVAL for an
 * unsupported requested bus mode, or a negative error code.
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	int err = 0;

	/*
	 * all hosts are capable of 1 bit bus-width and able to use the legacy
	 * timings.
	 */
	mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
			 MMC_MODE_1BIT;

	if (IS_ENABLED(CONFIG_MMC_SPEED_MODE_SET)) {
		if (mmc->user_speed_mode != MMC_MODES_END) {
			int i;
			/* set host caps */
			if (mmc->host_caps & MMC_CAP(mmc->user_speed_mode)) {
				/* Remove all existing speed capabilities */
				for (i = MMC_LEGACY; i < MMC_MODES_END; i++)
					mmc->host_caps &= ~MMC_CAP(i);
				mmc->host_caps |= (MMC_CAP(mmc->user_speed_mode)
						   | MMC_CAP(MMC_LEGACY) |
						   MMC_MODE_1BIT);
			} else {
				pr_err("bus_mode requested is not supported\n");
				return -EINVAL;
			}
		}
	}
#if CONFIG_IS_ENABLED(DM_MMC)
	mmc_deferred_probe(mmc);
#endif
#if !defined(CONFIG_MMC_BROKEN_CD)
	no_card = mmc_getcd(mmc) == 0;
#else
	/* Broken card-detect: always assume a card is inserted */
	no_card = 0;
#endif
#if !CONFIG_IS_ENABLED(DM_MMC)
	/* we pretend there's no card when init is NULL */
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	err = mmc_get_op_cond(mmc, false);

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}
2949
mmc_complete_init(struct mmc * mmc)2950 static int mmc_complete_init(struct mmc *mmc)
2951 {
2952 int err = 0;
2953
2954 mmc->init_in_progress = 0;
2955 if (mmc->op_cond_pending)
2956 err = mmc_complete_op_cond(mmc);
2957
2958 if (!err)
2959 err = mmc_startup(mmc);
2960 if (err)
2961 mmc->has_init = 0;
2962 else
2963 mmc->has_init = 1;
2964 return err;
2965 }
2966
/*
 * Fully initialize a card: start (or resume) the init sequence and
 * complete it. Returns immediately once the card has been initialized;
 * logs the elapsed time on failure.
 */
int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused ulong start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	start = get_timer(0);

	/* Resume an init already started via mmc_start_init()/preinit */
	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}
2991
2992 #if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2993 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2994 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
mmc_deinit(struct mmc * mmc)2995 int mmc_deinit(struct mmc *mmc)
2996 {
2997 u32 caps_filtered;
2998
2999 if (!mmc->has_init)
3000 return 0;
3001
3002 if (IS_SD(mmc)) {
3003 caps_filtered = mmc->card_caps &
3004 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
3005 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
3006 MMC_CAP(UHS_SDR104));
3007
3008 return sd_select_mode_and_width(mmc, caps_filtered);
3009 } else {
3010 caps_filtered = mmc->card_caps &
3011 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400) | MMC_CAP(MMC_HS_400_ES));
3012
3013 return mmc_select_mode_and_width(mmc, caps_filtered);
3014 }
3015 }
3016 #endif
3017
/* Record the DSR value to be programmed into the card during startup */
int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;

	return 0;
}
3023
3024 /* CPU-specific MMC initializations */
__weak int cpu_mmc_init(struct bd_info *bis)
{
	/* Default stub: no CPU-level MMC setup; SoC code may override */
	return -1;
}
3029
3030 /* board-specific MMC initializations. */
__weak int board_mmc_init(struct bd_info *bis)
{
	/* Default stub: no board-level MMC setup; board code may override */
	return -1;
}
3035
mmc_set_preinit(struct mmc * mmc,int preinit)3036 void mmc_set_preinit(struct mmc *mmc, int preinit)
3037 {
3038 mmc->preinit = preinit;
3039 }
3040
3041 #if CONFIG_IS_ENABLED(DM_MMC)
mmc_probe(struct bd_info * bis)3042 static int mmc_probe(struct bd_info *bis)
3043 {
3044 int ret, i;
3045 struct uclass *uc;
3046 struct udevice *dev;
3047
3048 ret = uclass_get(UCLASS_MMC, &uc);
3049 if (ret)
3050 return ret;
3051
3052 /*
3053 * Try to add them in sequence order. Really with driver model we
3054 * should allow holes, but the current MMC list does not allow that.
3055 * So if we request 0, 1, 3 we will get 0, 1, 2.
3056 */
3057 for (i = 0; ; i++) {
3058 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
3059 if (ret == -ENODEV)
3060 break;
3061 }
3062 uclass_foreach_dev(dev, uc) {
3063 ret = device_probe(dev);
3064 if (ret)
3065 pr_err("%s - probe failed: %d\n", dev->name, ret);
3066 }
3067
3068 return 0;
3069 }
3070 #else
/* Legacy (non-DM) probe: board hook first, CPU fallback if it declines */
static int mmc_probe(struct bd_info *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
3078 #endif
3079
mmc_initialize(struct bd_info * bis)3080 int mmc_initialize(struct bd_info *bis)
3081 {
3082 static int initialized = 0;
3083 int ret;
3084 if (initialized) /* Avoid initializing mmc multiple times */
3085 return 0;
3086 initialized = 1;
3087
3088 #if !CONFIG_IS_ENABLED(BLK)
3089 #if !CONFIG_IS_ENABLED(MMC_TINY)
3090 mmc_list_init();
3091 #endif
3092 #endif
3093 ret = mmc_probe(bis);
3094 if (ret)
3095 return ret;
3096
3097 #ifndef CONFIG_SPL_BUILD
3098 print_mmc_devices(',');
3099 #endif
3100
3101 mmc_do_preinit();
3102 return 0;
3103 }
3104
3105 #if CONFIG_IS_ENABLED(DM_MMC)
mmc_init_device(int num)3106 int mmc_init_device(int num)
3107 {
3108 struct udevice *dev;
3109 struct mmc *m;
3110 int ret;
3111
3112 if (uclass_get_device_by_seq(UCLASS_MMC, num, &dev)) {
3113 ret = uclass_get_device(UCLASS_MMC, num, &dev);
3114 if (ret)
3115 return ret;
3116 }
3117
3118 m = mmc_get_mmc_dev(dev);
3119 if (!m)
3120 return 0;
3121
3122 /* Initialising user set speed mode */
3123 m->user_speed_mode = MMC_MODES_END;
3124
3125 if (m->preinit)
3126 mmc_start_init(m);
3127
3128 return 0;
3129 }
3130 #endif
3131
3132 #ifdef CONFIG_CMD_BKOPS_ENABLE
/*
 * Enable or disable background operations on an eMMC device.
 *
 * @autobkops: operate on the AUTO_EN bit (BIT(1)) instead of the manual
 *             enable bit (BIT(0)) of EXT_CSD[BKOPS_EN]
 * @enable:    true to set the bit, false to clear the field
 * Returns 0 on success, -EMEDIUMTYPE if the device lacks BKOPS support,
 * or the error from reading EXT_CSD / switching the register.
 */
int mmc_set_bkops_enable(struct mmc *mmc, bool autobkops, bool enable)
{
	u32 bit = autobkops ? BIT(1) : BIT(0);
	int ret;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	ret = mmc_send_ext_csd(mmc, ext_csd);
	if (ret) {
		puts("Could not get ext_csd register values\n");
		return ret;
	}

	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
		puts("Background operations not supported on device\n");
		return -EMEDIUMTYPE;
	}

	if (enable && (ext_csd[EXT_CSD_BKOPS_EN] & bit)) {
		puts("Background operations already enabled\n");
		return 0;
	}

	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN,
			 enable ? bit : 0);
	if (ret) {
		printf("Failed to %sable manual background operations\n",
		       enable ? "en" : "dis");
		return ret;
	}

	printf("%sabled %s background operations\n",
	       enable ? "En" : "Dis", autobkops ? "auto" : "manual");

	return 0;
}
3168 #endif
3169
/* Default MMC device number holding the environment; boards may override */
__weak int mmc_get_env_dev(void)
{
#ifdef CONFIG_SYS_MMC_ENV_DEV
	return CONFIG_SYS_MMC_ENV_DEV;
#else
	return 0;
#endif
}
3178