#include <config.h>
#include <common.h>
#include <command.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <errno.h>
#include <mmc.h>
#include <part.h>
#include <power/regulator.h>
#include <malloc.h>
#include <memalign.h>
#include <linux/list.h>
#include <div64.h>
#include "mmc_private.h"

#define DEFAULT_CMD6_TIMEOUT_MS 500

static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);

#if !CONFIG_IS_ENABLED(DM_MMC)

static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
{
	return -ENOSYS;
}
34
35__weak int board_mmc_getwp(struct mmc *mmc)
36{
37 return -1;
38}
39
40int mmc_getwp(struct mmc *mmc)
41{
42 int wp;
43
44 wp = board_mmc_getwp(mmc);
45
46 if (wp < 0) {
47 if (mmc->cfg->ops->getwp)
48 wp = mmc->cfg->ops->getwp(mmc);
49 else
50 wp = 0;
51 }
52
53 return wp;
54}
55
56__weak int board_mmc_getcd(struct mmc *mmc)
57{
58 return -1;
59}
60#endif
61
62#ifdef CONFIG_MMC_TRACE
63void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
64{
65 printf("CMD_SEND:%d\n", cmd->cmdidx);
66 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
67}
68
69void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
70{
71 int i;
72 u8 *ptr;
73
74 if (ret) {
75 printf("\t\tRET\t\t\t %d\n", ret);
76 } else {
77 switch (cmd->resp_type) {
78 case MMC_RSP_NONE:
79 printf("\t\tMMC_RSP_NONE\n");
80 break;
81 case MMC_RSP_R1:
82 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
83 cmd->response[0]);
84 break;
85 case MMC_RSP_R1b:
86 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
87 cmd->response[0]);
88 break;
89 case MMC_RSP_R2:
90 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
91 cmd->response[0]);
92 printf("\t\t \t\t 0x%08x \n",
93 cmd->response[1]);
94 printf("\t\t \t\t 0x%08x \n",
95 cmd->response[2]);
96 printf("\t\t \t\t 0x%08x \n",
97 cmd->response[3]);
98 printf("\n");
99 printf("\t\t\t\t\tDUMPING DATA\n");
100 for (i = 0; i < 4; i++) {
101 int j;
102 printf("\t\t\t\t\t%03d - ", i*4);
103 ptr = (u8 *)&cmd->response[i];
104 ptr += 3;
105 for (j = 0; j < 4; j++)
106 printf("%02x ", *ptr--);
107 printf("\n");
108 }
109 break;
110 case MMC_RSP_R3:
111 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
112 cmd->response[0]);
113 break;
114 default:
115 printf("\t\tERROR MMC rsp not supported\n");
116 break;
117 }
118 }
119}
120
121void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
122{
123 int status;
124
125 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
126 printf("CURR STATE:%d\n", status);
127}
128#endif
129
130#if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
131const char *mmc_mode_name(enum bus_mode mode)
132{
133 static const char *const names[] = {
134 [MMC_LEGACY] = "MMC legacy",
135 [MMC_HS] = "MMC High Speed (26MHz)",
136 [SD_HS] = "SD High Speed (50MHz)",
137 [UHS_SDR12] = "UHS SDR12 (25MHz)",
138 [UHS_SDR25] = "UHS SDR25 (50MHz)",
139 [UHS_SDR50] = "UHS SDR50 (100MHz)",
140 [UHS_SDR104] = "UHS SDR104 (208MHz)",
141 [UHS_DDR50] = "UHS DDR50 (50MHz)",
142 [MMC_HS_52] = "MMC High Speed (52MHz)",
143 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
144 [MMC_HS_200] = "HS200 (200MHz)",
145 [MMC_HS_400] = "HS400 (200MHz)",
146 [MMC_HS_400_ES] = "HS400ES (200MHz)",
147 };
148
149 if (mode >= MMC_MODES_END)
150 return "Unknown mode";
151 else
152 return names[mode];
153}
154#endif
155
156static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
157{
158 static const int freqs[] = {
159 [MMC_LEGACY] = 25000000,
160 [MMC_HS] = 26000000,
161 [SD_HS] = 50000000,
162 [MMC_HS_52] = 52000000,
163 [MMC_DDR_52] = 52000000,
164 [UHS_SDR12] = 25000000,
165 [UHS_SDR25] = 50000000,
166 [UHS_SDR50] = 100000000,
167 [UHS_DDR50] = 50000000,
168 [UHS_SDR104] = 208000000,
169 [MMC_HS_200] = 200000000,
170 [MMC_HS_400] = 200000000,
171 [MMC_HS_400_ES] = 200000000,
172 };
173
174 if (mode == MMC_LEGACY)
175 return mmc->legacy_speed;
176 else if (mode >= MMC_MODES_END)
177 return 0;
178 else
179 return freqs[mode];
180}
181
182static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
183{
184 mmc->selected_mode = mode;
185 mmc->tran_speed = mmc_mode2freq(mmc, mode);
186 mmc->ddr_mode = mmc_is_mode_ddr(mode);
187 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
188 mmc->tran_speed / 1000000);
189 return 0;
190}
191
192#if !CONFIG_IS_ENABLED(DM_MMC)
193int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
194{
195 int ret;
196
197 mmmc_trace_before_send(mmc, cmd);
198 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
199 mmmc_trace_after_send(mmc, cmd, ret);
200
201 return ret;
202}
203#endif
204
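/*
 * mmc_send_status() - read the card status register via CMD13
 *
 * Retries the command up to five times and, on success, stores the raw
 * R1 status word in *status.  Returns 0 on success or -ECOMM if the
 * command never completes.
 */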
205int mmc_send_status(struct mmc *mmc, unsigned int *status)
206{
207 struct mmc_cmd cmd;
208 int err, retries = 5;
209
210 cmd.cmdidx = MMC_CMD_SEND_STATUS;
211 cmd.resp_type = MMC_RSP_R1;
212 if (!mmc_host_is_spi(mmc))
213 cmd.cmdarg = mmc->rca << 16;
214
215 while (retries--) {
216 err = mmc_send_cmd(mmc, &cmd, NULL);
217 if (!err) {
218 mmc_trace_state(mmc, &cmd);
219 *status = cmd.response[0];
220 return 0;
221 }
222 }
223 mmc_trace_state(mmc, &cmd);
224 return -ECOMM;
225}
226
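/*
 * mmc_poll_for_busy() - wait for the card to leave the busy state
 *
 * Prefers hardware DAT0 busy detection (mmc_wait_dat0); if the host
 * driver does not implement it (-ENOSYS), falls back to polling CMD13
 * roughly once per millisecond until the card reports ready-for-data
 * and is no longer in the programming state, or until timeout_ms
 * expires.
 */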
int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
{
	unsigned int status;
	int err;

	err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
	if (err != -ENOSYS)
		return err;

	while (1) {
		err = mmc_send_status(mmc, &status);
		if (err)
			return err;

		if ((status & MMC_STATUS_RDY_FOR_DATA) &&
		    (status & MMC_STATUS_CURR_STATE) !=
		     MMC_STATE_PRG)
			break;

		if (status & MMC_STATUS_MASK) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("Status Error: 0x%08x\n", status);
#endif
			return -ECOMM;
		}

		if (timeout_ms-- <= 0)
			break;

		udelay(1000);
	}

	if (timeout_ms <= 0) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("Timeout waiting card ready\n");
#endif
		return -ETIMEDOUT;
	}

	return 0;
}
268
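/*
 * mmc_set_blocklen() - set the card block length with CMD16
 *
 * Skipped entirely in DDR modes, where the block length is fixed and
 * SET_BLOCKLEN is not permitted.  An optional quirk retries the command
 * a few times for cards that fail the first attempt.
 */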
269int mmc_set_blocklen(struct mmc *mmc, int len)
270{
271 struct mmc_cmd cmd;
272 int err;
273
274 if (mmc->ddr_mode)
275 return 0;
276
277 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
278 cmd.resp_type = MMC_RSP_R1;
279 cmd.cmdarg = len;
280
281 err = mmc_send_cmd(mmc, &cmd, NULL);
282
#ifdef CONFIG_MMC_QUIRKS
	if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
		int retries = 4;

		/*
		 * It has been seen that SET_BLOCKLEN may fail on the first
		 * attempt, let's try a few more times.
		 */
		do {
			err = mmc_send_cmd(mmc, &cmd, NULL);
			if (!err)
				break;
		} while (retries--);
	}
#endif
297
298 return err;
299}
300
301#ifdef MMC_SUPPORTS_TUNING
302static const u8 tuning_blk_pattern_4bit[] = {
303 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
304 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
305 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
306 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
307 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
308 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
309 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
310 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
311};
312
313static const u8 tuning_blk_pattern_8bit[] = {
314 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
315 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
316 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
317 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
318 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
319 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
320 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
321 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
322 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
323 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
324 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
325 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
326 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
327 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
328 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
329 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
330};
331
332int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
333{
334 struct mmc_cmd cmd;
335 struct mmc_data data;
336 const u8 *tuning_block_pattern;
337 int size, err;
338
339 if (mmc->bus_width == 8) {
340 tuning_block_pattern = tuning_blk_pattern_8bit;
341 size = sizeof(tuning_blk_pattern_8bit);
342 } else if (mmc->bus_width == 4) {
343 tuning_block_pattern = tuning_blk_pattern_4bit;
344 size = sizeof(tuning_blk_pattern_4bit);
345 } else {
346 return -EINVAL;
347 }
348
349 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
350
351 cmd.cmdidx = opcode;
352 cmd.cmdarg = 0;
353 cmd.resp_type = MMC_RSP_R1;
354
355 data.dest = (void *)data_buf;
356 data.blocks = 1;
357 data.blocksize = size;
358 data.flags = MMC_DATA_READ;
359
360 err = mmc_send_cmd(mmc, &cmd, &data);
361 if (err)
362 return err;
363
364 if (memcmp(data_buf, tuning_block_pattern, size))
365 return -EIO;
366
367 return 0;
368}
369#endif
370
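/*
 * mmc_read_blocks() - read one or more blocks from the card
 *
 * Issues CMD17 for a single block or CMD18 for multiple blocks, using
 * block addressing for high-capacity cards and byte addressing
 * otherwise, and closes a multi-block transfer with CMD12
 * (STOP_TRANSMISSION).  Returns the number of blocks read, or 0 on
 * error.
 */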
371static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
372 lbaint_t blkcnt)
373{
374 struct mmc_cmd cmd;
375 struct mmc_data data;
376
377 if (blkcnt > 1)
378 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
379 else
380 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
381
382 if (mmc->high_capacity)
383 cmd.cmdarg = start;
384 else
385 cmd.cmdarg = start * mmc->read_bl_len;
386
387 cmd.resp_type = MMC_RSP_R1;
388
389 data.dest = dst;
390 data.blocks = blkcnt;
391 data.blocksize = mmc->read_bl_len;
392 data.flags = MMC_DATA_READ;
393
394 if (mmc_send_cmd(mmc, &cmd, &data))
395 return 0;
396
397 if (blkcnt > 1) {
398 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
399 cmd.cmdarg = 0;
400 cmd.resp_type = MMC_RSP_R1b;
401 if (mmc_send_cmd(mmc, &cmd, NULL)) {
402#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
403 pr_err("mmc fail to send stop cmd\n");
404#endif
405 return 0;
406 }
407 }
408
409 return blkcnt;
410}
411
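/*
 * mmc_bread() - block-layer read entry point
 *
 * Selects the requested hardware partition, bounds-checks the request
 * against the partition size, programs the block length and then reads
 * the range in chunks of at most cfg->b_max blocks.  Callers normally
 * reach this through the generic block read interface rather than
 * calling it directly.
 */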
412#if CONFIG_IS_ENABLED(BLK)
413ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
414#else
415ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
416 void *dst)
417#endif
418{
419#if CONFIG_IS_ENABLED(BLK)
420 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
421#endif
422 int dev_num = block_dev->devnum;
423 int err;
424 lbaint_t cur, blocks_todo = blkcnt;
425
426 if (blkcnt == 0)
427 return 0;
428
429 struct mmc *mmc = find_mmc_device(dev_num);
430 if (!mmc)
431 return 0;
432
433 if (CONFIG_IS_ENABLED(MMC_TINY))
434 err = mmc_switch_part(mmc, block_dev->hwpart);
435 else
436 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
437
438 if (err < 0)
439 return 0;
440
441 if ((start + blkcnt) > block_dev->lba) {
442#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
443 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
444 start + blkcnt, block_dev->lba);
445#endif
446 return 0;
447 }
448
449 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
450 pr_debug("%s: Failed to set blocklen\n", __func__);
451 return 0;
452 }
453
454 do {
455 cur = (blocks_todo > mmc->cfg->b_max) ?
456 mmc->cfg->b_max : blocks_todo;
457 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
458 pr_debug("%s: Failed to read blocks\n", __func__);
459 return 0;
460 }
461 blocks_todo -= cur;
462 start += cur;
463 dst += cur * mmc->read_bl_len;
464 } while (blocks_todo > 0);
465
466 return blkcnt;
467}
468
469static int mmc_go_idle(struct mmc *mmc)
470{
471 struct mmc_cmd cmd;
472 int err;
473
474 udelay(1000);
475
476 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
477 cmd.cmdarg = 0;
478 cmd.resp_type = MMC_RSP_NONE;
479
480 err = mmc_send_cmd(mmc, &cmd, NULL);
481
482 if (err)
483 return err;
484
485 udelay(2000);
486
487 return 0;
488}
489
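/*
 * mmc_switch_voltage() - switch SD card signalling to 1.8V with CMD11
 *
 * Implements the voltage switch sequence: send CMD11, wait for the card
 * to pull DAT[0:3] low, gate the clock while the host changes its I/O
 * voltage, then re-enable the clock and wait for the card to release
 * DAT0.  A request for 3.3V signalling only updates the host setting.
 */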
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
{
	struct mmc_cmd cmd;
	int err = 0;

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return mmc_set_signal_voltage(mmc, signal_voltage);

	cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of CMD11, but wait 100 us to be sure.
	 */
	err = mmc_wait_dat0(mmc, 0, 100);
	if (err == -ENOSYS)
		udelay(100);
	else if (err)
		return -ETIMEDOUT;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec.
	 */
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);

	err = mmc_set_signal_voltage(mmc, signal_voltage);
	if (err)
		return err;

	/* Keep the clock gated for at least 10 ms */
	mdelay(10);
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low. Wait for at least 1 ms according to the spec.
	 */
	err = mmc_wait_dat0(mmc, 1, 1000);
	if (err == -ENOSYS)
		udelay(1000);
	else if (err)
		return -ETIMEDOUT;

	return 0;
}
#endif
551
552static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
553{
554 int timeout = 1000;
555 int err;
556 struct mmc_cmd cmd;
557
558 while (1) {
559 cmd.cmdidx = MMC_CMD_APP_CMD;
560 cmd.resp_type = MMC_RSP_R1;
561 cmd.cmdarg = 0;
562
563 err = mmc_send_cmd(mmc, &cmd, NULL);
564
565 if (err)
566 return err;
567
		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Advertise only the voltage window supported by the
		 * host (OCR bits 15:23); SPI hosts send a zero
		 * argument here.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);
580
581 if (mmc->version == SD_VERSION_2)
582 cmd.cmdarg |= OCR_HCS;
583
584 if (uhs_en)
585 cmd.cmdarg |= OCR_S18R;
586
587 err = mmc_send_cmd(mmc, &cmd, NULL);
588
589 if (err)
590 return err;
591
592 if (cmd.response[0] & OCR_BUSY)
593 break;
594
595 if (timeout-- <= 0)
596 return -EOPNOTSUPP;
597
598 udelay(1000);
599 }
600
601 if (mmc->version != SD_VERSION_2)
602 mmc->version = SD_VERSION_1_0;
603
604 if (mmc_host_is_spi(mmc)) {
605 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
606 cmd.resp_type = MMC_RSP_R3;
607 cmd.cmdarg = 0;
608
609 err = mmc_send_cmd(mmc, &cmd, NULL);
610
611 if (err)
612 return err;
613 }
614
615 mmc->ocr = cmd.response[0];
616
617#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
618 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
619 == 0x41000000) {
620 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
621 if (err)
622 return err;
623 }
624#endif
625
626 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
627 mmc->rca = 0;
628
629 return 0;
630}
631
632static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
633{
634 struct mmc_cmd cmd;
635 int err;
636
637 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
638 cmd.resp_type = MMC_RSP_R3;
639 cmd.cmdarg = 0;
640 if (use_arg && !mmc_host_is_spi(mmc))
641 cmd.cmdarg = OCR_HCS |
642 (mmc->cfg->voltages &
643 (mmc->ocr & OCR_VOLTAGE_MASK)) |
644 (mmc->ocr & OCR_ACCESS_MODE);
645
646 err = mmc_send_cmd(mmc, &cmd, NULL);
647 if (err)
648 return err;
649 mmc->ocr = cmd.response[0];
650 return 0;
651}
652
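/*
 * mmc_send_op_cond() - start (e)MMC operating-condition negotiation
 *
 * Resets the card with CMD0 and issues a couple of CMD1 iterations: the
 * first without an argument to learn the card's OCR, the second with
 * the host's voltage window and the HCS bit set.  Completion of the
 * negotiation is deferred to mmc_complete_op_cond().
 */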
653static int mmc_send_op_cond(struct mmc *mmc)
654{
655 int err, i;
656
657
658 mmc_go_idle(mmc);
659
660
661 for (i = 0; i < 2; i++) {
662 err = mmc_send_op_cond_iter(mmc, i != 0);
663 if (err)
664 return err;
665
666
667 if (mmc->ocr & OCR_BUSY)
668 break;
669 }
670 mmc->op_cond_pending = 1;
671 return 0;
672}
673
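/*
 * mmc_complete_op_cond() - finish the CMD1 negotiation started earlier
 *
 * If the card has not yet reported power-up completion, re-issues CMD1
 * until the busy bit clears or the 1 s timeout expires.  For SPI hosts
 * the OCR is then re-read with the SPI READ_OCR command.  Finally
 * records high-capacity addressing and assigns the default RCA of 1.
 */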
674static int mmc_complete_op_cond(struct mmc *mmc)
675{
676 struct mmc_cmd cmd;
677 int timeout = 1000;
678 ulong start;
679 int err;
680
681 mmc->op_cond_pending = 0;
682 if (!(mmc->ocr & OCR_BUSY)) {
683
684 mmc_go_idle(mmc);
685
686 start = get_timer(0);
687 while (1) {
688 err = mmc_send_op_cond_iter(mmc, 1);
689 if (err)
690 return err;
691 if (mmc->ocr & OCR_BUSY)
692 break;
693 if (get_timer(start) > timeout)
694 return -EOPNOTSUPP;
695 udelay(100);
696 }
697 }
698
699 if (mmc_host_is_spi(mmc)) {
700 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
701 cmd.resp_type = MMC_RSP_R3;
702 cmd.cmdarg = 0;
703
704 err = mmc_send_cmd(mmc, &cmd, NULL);
705
706 if (err)
707 return err;
708
709 mmc->ocr = cmd.response[0];
710 }
711
712 mmc->version = MMC_VERSION_UNKNOWN;
713
714 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
715 mmc->rca = 1;
716
717 return 0;
718}
719
720
721static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
722{
723 struct mmc_cmd cmd;
724 struct mmc_data data;
725 int err;
726
727
728 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
729 cmd.resp_type = MMC_RSP_R1;
730 cmd.cmdarg = 0;
731
732 data.dest = (char *)ext_csd;
733 data.blocks = 1;
734 data.blocksize = MMC_MAX_BLOCK_LEN;
735 data.flags = MMC_DATA_READ;
736
737 err = mmc_send_cmd(mmc, &cmd, &data);
738
739 return err;
740}
741
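/*
 * __mmc_switch() - change a single EXT_CSD byte with CMD6
 *
 * The timeout is derived from GENERIC_CMD6_TIME (or PARTITION_SWITCH_TIME
 * for partition switches).  After the command is accepted the routine
 * waits for DAT0 to go high where the host supports it, otherwise it
 * polls CMD13 until the card reports ready-for-data or a switch error.
 */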
static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
			bool send_status)
{
	unsigned int status, start;
	struct mmc_cmd cmd;
	int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
	bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
			      (index == EXT_CSD_PART_CONF);
	int retries = 3;
	int ret;

	if (mmc->gen_cmd6_time)
		timeout_ms = mmc->gen_cmd6_time * 10;

	if (is_part_switch && mmc->part_switch_time)
		timeout_ms = mmc->part_switch_time * 10;

	cmd.cmdidx = MMC_CMD_SWITCH;
	cmd.resp_type = MMC_RSP_R1b;
	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
				 (index << 16) |
				 (value << 8);

	do {
		ret = mmc_send_cmd(mmc, &cmd, NULL);
	} while (ret && retries-- > 0);

	if (ret)
		return ret;

	start = get_timer(0);

	/* poll dat0 for rdy/busy status */
	ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
	if (ret && ret != -ENOSYS)
		return ret;

	/*
	 * In cases when we cannot poll by using CMD13 and are not
	 * capable of polling by using mmc_wait_dat0, rely on waiting
	 * the stated timeout to be sufficient.
	 */
	if (ret == -ENOSYS && !send_status)
		mdelay(timeout_ms);

	/*
	 * Finally wait until the card is ready or indicates a failure
	 * to switch. It doesn't hurt to use CMD13 here even if send_status
	 * is false, because by now (after 'timeout_ms' ms) the bus should
	 * be reliable.
	 */
	do {
		ret = mmc_send_status(mmc, &status);

		if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
			pr_debug("switch failed %d/%d/0x%x !\n", set, index,
				 value);
			return -EIO;
		}
		if (!ret && (status & MMC_STATUS_RDY_FOR_DATA))
			return 0;
		udelay(100);
	} while (get_timer(start) < timeout_ms);

	return -ETIMEDOUT;
}
807
808int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
809{
810 return __mmc_switch(mmc, set, index, value, true);
811}
812
813#if !CONFIG_IS_ENABLED(MMC_TINY)
814static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
815 bool hsdowngrade)
816{
817 int err;
818 int speed_bits;
819
820 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
821
822 switch (mode) {
823 case MMC_HS:
824 case MMC_HS_52:
825 case MMC_DDR_52:
826 speed_bits = EXT_CSD_TIMING_HS;
827 break;
828#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
829 case MMC_HS_200:
830 speed_bits = EXT_CSD_TIMING_HS200;
831 break;
832#endif
833#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
834 case MMC_HS_400:
835 speed_bits = EXT_CSD_TIMING_HS400;
836 break;
837#endif
838#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
839 case MMC_HS_400_ES:
840 speed_bits = EXT_CSD_TIMING_HS400;
841 break;
842#endif
843 case MMC_LEGACY:
844 speed_bits = EXT_CSD_TIMING_LEGACY;
845 break;
846 default:
847 return -EINVAL;
848 }
849
850 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
851 speed_bits, !hsdowngrade);
852 if (err)
853 return err;
854
855#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
856 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
857
858
859
860
861
862
863 if (hsdowngrade) {
864 mmc_select_mode(mmc, MMC_HS);
865 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
866 }
867#endif
868
869 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
870
871 err = mmc_send_ext_csd(mmc, test_csd);
872 if (err)
873 return err;
874
875
876 if (!test_csd[EXT_CSD_HS_TIMING])
877 return -ENOTSUPP;
878 }
879
880 return 0;
881}
882
883static int mmc_get_capabilities(struct mmc *mmc)
884{
885 u8 *ext_csd = mmc->ext_csd;
886 char cardtype;
887
888 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
889
890 if (mmc_host_is_spi(mmc))
891 return 0;
892
893
894 if (mmc->version < MMC_VERSION_4)
895 return 0;
896
897 if (!ext_csd) {
898 pr_err("No ext_csd found!\n");
899 return -ENOTSUPP;
900 }
901
902 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
903
904 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
905 mmc->cardtype = cardtype;
906
907#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
908 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
909 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
910 mmc->card_caps |= MMC_MODE_HS200;
911 }
912#endif
913#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
914 CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
915 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
916 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
917 mmc->card_caps |= MMC_MODE_HS400;
918 }
919#endif
920 if (cardtype & EXT_CSD_CARD_TYPE_52) {
921 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
922 mmc->card_caps |= MMC_MODE_DDR_52MHz;
923 mmc->card_caps |= MMC_MODE_HS_52MHz;
924 }
925 if (cardtype & EXT_CSD_CARD_TYPE_26)
926 mmc->card_caps |= MMC_MODE_HS;
927
928#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
929 if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
930 (mmc->card_caps & MMC_MODE_HS400)) {
931 mmc->card_caps |= MMC_MODE_HS400_ES;
932 }
933#endif
934
935 return 0;
936}
937#endif
938
939static int mmc_set_capacity(struct mmc *mmc, int part_num)
940{
941 switch (part_num) {
942 case 0:
943 mmc->capacity = mmc->capacity_user;
944 break;
945 case 1:
946 case 2:
947 mmc->capacity = mmc->capacity_boot;
948 break;
949 case 3:
950 mmc->capacity = mmc->capacity_rpmb;
951 break;
952 case 4:
953 case 5:
954 case 6:
955 case 7:
956 mmc->capacity = mmc->capacity_gp[part_num - 4];
957 break;
958 default:
959 return -1;
960 }
961
962 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
963
964 return 0;
965}
966
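/*
 * mmc_switch_part() - select a hardware partition via PARTITION_CONFIG
 *
 * part_num 0 is the user area, 1 and 2 the boot partitions, 3 the RPMB
 * partition and 4-7 the general-purpose partitions, matching
 * mmc_set_capacity().  The switch is retried a few times and, on
 * success, the block descriptor's capacity and hwpart are updated.
 */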
967int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
968{
969 int ret;
970 int retry = 3;
971
972 do {
973 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
974 EXT_CSD_PART_CONF,
975 (mmc->part_config & ~PART_ACCESS_MASK)
976 | (part_num & PART_ACCESS_MASK));
977 } while (ret && retry--);
978
979
980
981
982
983 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
984 ret = mmc_set_capacity(mmc, part_num);
985 mmc_get_blk_desc(mmc)->hwpart = part_num;
986 }
987
988 return ret;
989}
990
991#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
992int mmc_hwpart_config(struct mmc *mmc,
993 const struct mmc_hwpart_conf *conf,
994 enum mmc_hwpart_conf_mode mode)
995{
996 u8 part_attrs = 0;
997 u32 enh_size_mult;
998 u32 enh_start_addr;
999 u32 gp_size_mult[4];
1000 u32 max_enh_size_mult;
1001 u32 tot_enh_size_mult = 0;
1002 u8 wr_rel_set;
1003 int i, pidx, err;
1004 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1005
1006 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1007 return -EINVAL;
1008
1009 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1010 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1011 return -EMEDIUMTYPE;
1012 }
1013
1014 if (!(mmc->part_support & PART_SUPPORT)) {
1015 pr_err("Card does not support partitioning\n");
1016 return -EMEDIUMTYPE;
1017 }
1018
1019 if (!mmc->hc_wp_grp_size) {
1020 pr_err("Card does not define HC WP group size\n");
1021 return -EMEDIUMTYPE;
1022 }
1023
1024
1025 if (conf->user.enh_size) {
1026 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1027 conf->user.enh_start % mmc->hc_wp_grp_size) {
1028 pr_err("User data enhanced area not HC WP group "
1029 "size aligned\n");
1030 return -EINVAL;
1031 }
1032 part_attrs |= EXT_CSD_ENH_USR;
1033 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1034 if (mmc->high_capacity) {
1035 enh_start_addr = conf->user.enh_start;
1036 } else {
1037 enh_start_addr = (conf->user.enh_start << 9);
1038 }
1039 } else {
1040 enh_size_mult = 0;
1041 enh_start_addr = 0;
1042 }
1043 tot_enh_size_mult += enh_size_mult;
1044
1045 for (pidx = 0; pidx < 4; pidx++) {
1046 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1047 pr_err("GP%i partition not HC WP group size "
1048 "aligned\n", pidx+1);
1049 return -EINVAL;
1050 }
1051 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1052 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1053 part_attrs |= EXT_CSD_ENH_GP(pidx);
1054 tot_enh_size_mult += gp_size_mult[pidx];
1055 }
1056 }
1057
1058 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1059 pr_err("Card does not support enhanced attribute\n");
1060 return -EMEDIUMTYPE;
1061 }
1062
1063 err = mmc_send_ext_csd(mmc, ext_csd);
1064 if (err)
1065 return err;
1066
1067 max_enh_size_mult =
1068 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1069 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1070 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1071 if (tot_enh_size_mult > max_enh_size_mult) {
1072 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1073 tot_enh_size_mult, max_enh_size_mult);
1074 return -EMEDIUMTYPE;
1075 }
1076
1077
1078
1079
1080
1081 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1082 if (conf->user.wr_rel_change) {
1083 if (conf->user.wr_rel_set)
1084 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1085 else
1086 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1087 }
1088 for (pidx = 0; pidx < 4; pidx++) {
1089 if (conf->gp_part[pidx].wr_rel_change) {
1090 if (conf->gp_part[pidx].wr_rel_set)
1091 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1092 else
1093 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1094 }
1095 }
1096
1097 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1098 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1099 puts("Card does not support host controlled partition write "
1100 "reliability settings\n");
1101 return -EMEDIUMTYPE;
1102 }
1103
1104 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1105 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1106 pr_err("Card already partitioned\n");
1107 return -EPERM;
1108 }
1109
1110 if (mode == MMC_HWPART_CONF_CHECK)
1111 return 0;
1112
1113
1114 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1115 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1116 EXT_CSD_ERASE_GROUP_DEF, 1);
1117
1118 if (err)
1119 return err;
1120
1121 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1122
1123#if CONFIG_IS_ENABLED(MMC_WRITE)
1124
1125 mmc->erase_grp_size =
1126 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1127#endif
1128
1129 }
1130
1131
1132 for (i = 0; i < 4; i++) {
1133 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1134 EXT_CSD_ENH_START_ADDR+i,
1135 (enh_start_addr >> (i*8)) & 0xFF);
1136 if (err)
1137 return err;
1138 }
1139 for (i = 0; i < 3; i++) {
1140 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1141 EXT_CSD_ENH_SIZE_MULT+i,
1142 (enh_size_mult >> (i*8)) & 0xFF);
1143 if (err)
1144 return err;
1145 }
1146 for (pidx = 0; pidx < 4; pidx++) {
1147 for (i = 0; i < 3; i++) {
1148 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1149 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1150 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1151 if (err)
1152 return err;
1153 }
1154 }
1155 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1156 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1157 if (err)
1158 return err;
1159
1160 if (mode == MMC_HWPART_CONF_SET)
1161 return 0;
1162
1163
1164
1165
1166
1167 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1168 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1169 EXT_CSD_WR_REL_SET, wr_rel_set);
1170 if (err)
1171 return err;
1172 }
1173
1174
1175
1176
1177
1178
1179 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1180 EXT_CSD_PARTITION_SETTING,
1181 EXT_CSD_PARTITION_SETTING_COMPLETED);
1182 if (err)
1183 return err;
1184
1185 return 0;
1186}
1187#endif
1188
1189#if !CONFIG_IS_ENABLED(DM_MMC)
1190int mmc_getcd(struct mmc *mmc)
1191{
1192 int cd;
1193
1194 cd = board_mmc_getcd(mmc);
1195
1196 if (cd < 0) {
1197 if (mmc->cfg->ops->getcd)
1198 cd = mmc->cfg->ops->getcd(mmc);
1199 else
1200 cd = 1;
1201 }
1202
1203 return cd;
1204}
1205#endif
1206
1207#if !CONFIG_IS_ENABLED(MMC_TINY)
1208static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1209{
1210 struct mmc_cmd cmd;
1211 struct mmc_data data;
1212
1213
1214 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1215 cmd.resp_type = MMC_RSP_R1;
1216 cmd.cmdarg = (mode << 31) | 0xffffff;
1217 cmd.cmdarg &= ~(0xf << (group * 4));
1218 cmd.cmdarg |= value << (group * 4);
1219
1220 data.dest = (char *)resp;
1221 data.blocksize = 64;
1222 data.blocks = 1;
1223 data.flags = MMC_DATA_READ;
1224
1225 return mmc_send_cmd(mmc, &cmd, &data);
1226}
1227
1228static int sd_get_capabilities(struct mmc *mmc)
1229{
1230 int err;
1231 struct mmc_cmd cmd;
1232 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1233 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1234 struct mmc_data data;
1235 int timeout;
1236#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1237 u32 sd3_bus_mode;
1238#endif
1239
1240 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
1241
1242 if (mmc_host_is_spi(mmc))
1243 return 0;
1244
1245
1246 cmd.cmdidx = MMC_CMD_APP_CMD;
1247 cmd.resp_type = MMC_RSP_R1;
1248 cmd.cmdarg = mmc->rca << 16;
1249
1250 err = mmc_send_cmd(mmc, &cmd, NULL);
1251
1252 if (err)
1253 return err;
1254
1255 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1256 cmd.resp_type = MMC_RSP_R1;
1257 cmd.cmdarg = 0;
1258
1259 timeout = 3;
1260
1261retry_scr:
1262 data.dest = (char *)scr;
1263 data.blocksize = 8;
1264 data.blocks = 1;
1265 data.flags = MMC_DATA_READ;
1266
1267 err = mmc_send_cmd(mmc, &cmd, &data);
1268
1269 if (err) {
1270 if (timeout--)
1271 goto retry_scr;
1272
1273 return err;
1274 }
1275
1276 mmc->scr[0] = __be32_to_cpu(scr[0]);
1277 mmc->scr[1] = __be32_to_cpu(scr[1]);
1278
1279 switch ((mmc->scr[0] >> 24) & 0xf) {
1280 case 0:
1281 mmc->version = SD_VERSION_1_0;
1282 break;
1283 case 1:
1284 mmc->version = SD_VERSION_1_10;
1285 break;
1286 case 2:
1287 mmc->version = SD_VERSION_2;
1288 if ((mmc->scr[0] >> 15) & 0x1)
1289 mmc->version = SD_VERSION_3;
1290 break;
1291 default:
1292 mmc->version = SD_VERSION_1_0;
1293 break;
1294 }
1295
1296 if (mmc->scr[0] & SD_DATA_4BIT)
1297 mmc->card_caps |= MMC_MODE_4BIT;
1298
1299
1300 if (mmc->version == SD_VERSION_1_0)
1301 return 0;
1302
1303 timeout = 4;
1304 while (timeout--) {
1305 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1306 (u8 *)switch_status);
1307
1308 if (err)
1309 return err;
1310
1311
1312 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1313 break;
1314 }
1315
1316
1317 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1318 mmc->card_caps |= MMC_CAP(SD_HS);
1319
1320#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1321
1322 if (mmc->version < SD_VERSION_3)
1323 return 0;
1324
1325 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1326 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1327 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1328 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1329 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1330 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1331 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1332 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1333 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1334 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1335 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1336#endif
1337
1338 return 0;
1339}
1340
1341static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1342{
1343 int err;
1344
1345 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1346 int speed;
1347
1348
1349 if (mmc->version == SD_VERSION_1_0)
1350 return 0;
1351
1352 switch (mode) {
1353 case MMC_LEGACY:
1354 speed = UHS_SDR12_BUS_SPEED;
1355 break;
1356 case SD_HS:
1357 speed = HIGH_SPEED_BUS_SPEED;
1358 break;
1359#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1360 case UHS_SDR12:
1361 speed = UHS_SDR12_BUS_SPEED;
1362 break;
1363 case UHS_SDR25:
1364 speed = UHS_SDR25_BUS_SPEED;
1365 break;
1366 case UHS_SDR50:
1367 speed = UHS_SDR50_BUS_SPEED;
1368 break;
1369 case UHS_DDR50:
1370 speed = UHS_DDR50_BUS_SPEED;
1371 break;
1372 case UHS_SDR104:
1373 speed = UHS_SDR104_BUS_SPEED;
1374 break;
1375#endif
1376 default:
1377 return -EINVAL;
1378 }
1379
1380 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1381 if (err)
1382 return err;
1383
1384 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1385 return -ENOTSUPP;
1386
1387 return 0;
1388}
1389
1390static int sd_select_bus_width(struct mmc *mmc, int w)
1391{
1392 int err;
1393 struct mmc_cmd cmd;
1394
1395 if ((w != 4) && (w != 1))
1396 return -EINVAL;
1397
1398 cmd.cmdidx = MMC_CMD_APP_CMD;
1399 cmd.resp_type = MMC_RSP_R1;
1400 cmd.cmdarg = mmc->rca << 16;
1401
1402 err = mmc_send_cmd(mmc, &cmd, NULL);
1403 if (err)
1404 return err;
1405
1406 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1407 cmd.resp_type = MMC_RSP_R1;
1408 if (w == 4)
1409 cmd.cmdarg = 2;
1410 else if (w == 1)
1411 cmd.cmdarg = 0;
1412 err = mmc_send_cmd(mmc, &cmd, NULL);
1413 if (err)
1414 return err;
1415
1416 return 0;
1417}
1418#endif
1419
1420#if CONFIG_IS_ENABLED(MMC_WRITE)
1421static int sd_read_ssr(struct mmc *mmc)
1422{
1423 static const unsigned int sd_au_size[] = {
1424 0, SZ_16K / 512, SZ_32K / 512,
1425 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1426 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1427 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1428 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1429 SZ_64M / 512,
1430 };
1431 int err, i;
1432 struct mmc_cmd cmd;
1433 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1434 struct mmc_data data;
1435 int timeout = 3;
1436 unsigned int au, eo, et, es;
1437
1438 cmd.cmdidx = MMC_CMD_APP_CMD;
1439 cmd.resp_type = MMC_RSP_R1;
1440 cmd.cmdarg = mmc->rca << 16;
1441
1442 err = mmc_send_cmd(mmc, &cmd, NULL);
1443#ifdef CONFIG_MMC_QUIRKS
1444 if (err && (mmc->quirks & MMC_QUIRK_RETRY_APP_CMD)) {
1445 int retries = 4;
1446
1447
1448
1449
1450 do {
1451 err = mmc_send_cmd(mmc, &cmd, NULL);
1452 if (!err)
1453 break;
1454 } while (retries--);
1455 }
1456#endif
1457 if (err)
1458 return err;
1459
1460 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1461 cmd.resp_type = MMC_RSP_R1;
1462 cmd.cmdarg = 0;
1463
1464retry_ssr:
1465 data.dest = (char *)ssr;
1466 data.blocksize = 64;
1467 data.blocks = 1;
1468 data.flags = MMC_DATA_READ;
1469
1470 err = mmc_send_cmd(mmc, &cmd, &data);
1471 if (err) {
1472 if (timeout--)
1473 goto retry_ssr;
1474
1475 return err;
1476 }
1477
1478 for (i = 0; i < 16; i++)
1479 ssr[i] = be32_to_cpu(ssr[i]);
1480
1481 au = (ssr[2] >> 12) & 0xF;
1482 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1483 mmc->ssr.au = sd_au_size[au];
1484 es = (ssr[3] >> 24) & 0xFF;
1485 es |= (ssr[2] & 0xFF) << 8;
1486 et = (ssr[3] >> 18) & 0x3F;
1487 if (es && et) {
1488 eo = (ssr[3] >> 16) & 0x3;
1489 mmc->ssr.erase_timeout = (et * 1000) / es;
1490 mmc->ssr.erase_offset = eo * 1000;
1491 }
1492 } else {
1493 pr_debug("Invalid Allocation Unit Size.\n");
1494 }
1495
1496 return 0;
1497}
#endif

/*
 * Frequency bases, divided by 10 to be nice to platforms without
 * floating point.
 */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};

/* Multiplier values for TRAN_SPEED.  Multiplied by 10 to be nice
 * to platforms without floating point.
 */
static const u8 multipliers[] = {
	0,
	10,
	12,
	13,
	15,
	20,
	25,
	30,
	35,
	40,
	45,
	50,
	55,
	60,
	70,
	80,
};
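/*
 * mmc_startup() combines these tables to decode the CSD TRAN_SPEED
 * byte: bits 2:0 index fbase[] and bits 6:3 index multipliers[].  For
 * example, the common value 0x32 gives fbase[2] * multipliers[6] =
 * 1000000 * 25 = 25000000, i.e. a 25 MHz legacy clock.
 */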
1529
static inline int bus_width(uint cap)
{
	if (cap == MMC_MODE_8BIT)
		return 8;
	if (cap == MMC_MODE_4BIT)
		return 4;
	if (cap == MMC_MODE_1BIT)
		return 1;
	pr_warn("invalid bus width capability 0x%x\n", cap);
	return 0;
}
1541
1542#if !CONFIG_IS_ENABLED(DM_MMC)
1543#ifdef MMC_SUPPORTS_TUNING
1544static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1545{
1546 return -ENOTSUPP;
1547}
1548#endif
1549
1550static int mmc_set_ios(struct mmc *mmc)
1551{
1552 int ret = 0;
1553
1554 if (mmc->cfg->ops->set_ios)
1555 ret = mmc->cfg->ops->set_ios(mmc);
1556
1557 return ret;
1558}
1559
1560static int mmc_host_power_cycle(struct mmc *mmc)
1561{
1562 int ret = 0;
1563
1564 if (mmc->cfg->ops->host_power_cycle)
1565 ret = mmc->cfg->ops->host_power_cycle(mmc);
1566
1567 return ret;
1568}
1569#endif
1570
1571int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1572{
1573 if (!disable) {
1574 if (clock > mmc->cfg->f_max)
1575 clock = mmc->cfg->f_max;
1576
1577 if (clock < mmc->cfg->f_min)
1578 clock = mmc->cfg->f_min;
1579 }
1580
1581 mmc->clock = clock;
1582 mmc->clk_disable = disable;
1583
1584 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1585
1586 return mmc_set_ios(mmc);
1587}
1588
1589static int mmc_set_bus_width(struct mmc *mmc, uint width)
1590{
1591 mmc->bus_width = width;
1592
1593 return mmc_set_ios(mmc);
1594}
1595
1596#if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1597
1598
1599
1600
1601
1602void mmc_dump_capabilities(const char *text, uint caps)
1603{
1604 enum bus_mode mode;
1605
1606 pr_debug("%s: widths [", text);
1607 if (caps & MMC_MODE_8BIT)
1608 pr_debug("8, ");
1609 if (caps & MMC_MODE_4BIT)
1610 pr_debug("4, ");
1611 if (caps & MMC_MODE_1BIT)
1612 pr_debug("1, ");
1613 pr_debug("\b\b] modes [");
1614 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1615 if (MMC_CAP(mode) & caps)
1616 pr_debug("%s, ", mmc_mode_name(mode));
1617 pr_debug("\b\b]\n");
1618}
1619#endif
1620
1621struct mode_width_tuning {
1622 enum bus_mode mode;
1623 uint widths;
1624#ifdef MMC_SUPPORTS_TUNING
1625 uint tuning;
1626#endif
1627};
1628
1629#if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1630int mmc_voltage_to_mv(enum mmc_voltage voltage)
1631{
1632 switch (voltage) {
1633 case MMC_SIGNAL_VOLTAGE_000: return 0;
1634 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1635 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1636 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1637 }
1638 return -EINVAL;
1639}
1640
1641static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1642{
1643 int err;
1644
1645 if (mmc->signal_voltage == signal_voltage)
1646 return 0;
1647
1648 mmc->signal_voltage = signal_voltage;
1649 err = mmc_set_ios(mmc);
1650 if (err)
1651 pr_debug("unable to set voltage (err %d)\n", err);
1652
1653 return err;
1654}
1655#else
1656static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1657{
1658 return 0;
1659}
1660#endif
1661
1662#if !CONFIG_IS_ENABLED(MMC_TINY)
1663static const struct mode_width_tuning sd_modes_by_pref[] = {
1664#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1665#ifdef MMC_SUPPORTS_TUNING
1666 {
1667 .mode = UHS_SDR104,
1668 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1669 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1670 },
1671#endif
1672 {
1673 .mode = UHS_SDR50,
1674 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1675 },
1676 {
1677 .mode = UHS_DDR50,
1678 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1679 },
1680 {
1681 .mode = UHS_SDR25,
1682 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1683 },
1684#endif
1685 {
1686 .mode = SD_HS,
1687 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1688 },
1689#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1690 {
1691 .mode = UHS_SDR12,
1692 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1693 },
1694#endif
1695 {
1696 .mode = MMC_LEGACY,
1697 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1698 }
1699};
1700
1701#define for_each_sd_mode_by_pref(caps, mwt) \
1702 for (mwt = sd_modes_by_pref;\
1703 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1704 mwt++) \
1705 if (caps & MMC_CAP(mwt->mode))
1706
1707static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1708{
1709 int err;
1710 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1711 const struct mode_width_tuning *mwt;
1712#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1713 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1714#else
1715 bool uhs_en = false;
1716#endif
1717 uint caps;
1718
1719#ifdef DEBUG
1720 mmc_dump_capabilities("sd card", card_caps);
1721 mmc_dump_capabilities("host", mmc->host_caps);
1722#endif
1723
1724 if (mmc_host_is_spi(mmc)) {
1725 mmc_set_bus_width(mmc, 1);
1726 mmc_select_mode(mmc, MMC_LEGACY);
1727 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1728 return 0;
1729 }
1730
1731
1732 caps = card_caps & mmc->host_caps;
1733
1734 if (!uhs_en)
1735 caps &= ~UHS_CAPS;
1736
1737 for_each_sd_mode_by_pref(caps, mwt) {
1738 uint *w;
1739
1740 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1741 if (*w & caps & mwt->widths) {
1742 pr_debug("trying mode %s width %d (at %d MHz)\n",
1743 mmc_mode_name(mwt->mode),
1744 bus_width(*w),
1745 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1746
1747
1748 err = sd_select_bus_width(mmc, bus_width(*w));
1749 if (err)
1750 goto error;
1751 mmc_set_bus_width(mmc, bus_width(*w));
1752
1753
1754 err = sd_set_card_speed(mmc, mwt->mode);
1755 if (err)
1756 goto error;
1757
1758
1759 mmc_select_mode(mmc, mwt->mode);
1760 mmc_set_clock(mmc, mmc->tran_speed,
1761 MMC_CLK_ENABLE);
1762
1763#ifdef MMC_SUPPORTS_TUNING
1764
1765 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1766 err = mmc_execute_tuning(mmc,
1767 mwt->tuning);
1768 if (err) {
1769 pr_debug("tuning failed\n");
1770 goto error;
1771 }
1772 }
1773#endif
1774
1775#if CONFIG_IS_ENABLED(MMC_WRITE)
1776 err = sd_read_ssr(mmc);
1777 if (err)
1778 pr_warn("unable to read ssr\n");
1779#endif
1780 if (!err)
1781 return 0;
1782
1783error:
1784
1785 mmc_select_mode(mmc, MMC_LEGACY);
1786 mmc_set_clock(mmc, mmc->tran_speed,
1787 MMC_CLK_ENABLE);
1788 }
1789 }
1790 }
1791
1792 pr_err("unable to select a mode\n");
1793 return -ENOTSUPP;
1794}
1795
1796
1797
1798
1799
1800
1801static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1802{
1803 int err;
1804 const u8 *ext_csd = mmc->ext_csd;
1805 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1806
1807 if (mmc->version < MMC_VERSION_4)
1808 return 0;
1809
1810 err = mmc_send_ext_csd(mmc, test_csd);
1811 if (err)
1812 return err;
1813
1814
1815 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1816 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1817 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1818 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1819 ext_csd[EXT_CSD_REV]
1820 == test_csd[EXT_CSD_REV] &&
1821 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1822 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1823 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1824 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1825 return 0;
1826
1827 return -EBADMSG;
1828}
1829
1830#if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1831static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1832 uint32_t allowed_mask)
1833{
1834 u32 card_mask = 0;
1835
1836 switch (mode) {
1837 case MMC_HS_400_ES:
1838 case MMC_HS_400:
1839 case MMC_HS_200:
1840 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1841 EXT_CSD_CARD_TYPE_HS400_1_8V))
1842 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1843 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1844 EXT_CSD_CARD_TYPE_HS400_1_2V))
1845 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1846 break;
1847 case MMC_DDR_52:
1848 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1849 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1850 MMC_SIGNAL_VOLTAGE_180;
1851 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1852 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1853 break;
1854 default:
1855 card_mask |= MMC_SIGNAL_VOLTAGE_330;
1856 break;
1857 }
1858
1859 while (card_mask & allowed_mask) {
1860 enum mmc_voltage best_match;
1861
1862 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1863 if (!mmc_set_signal_voltage(mmc, best_match))
1864 return 0;
1865
1866 allowed_mask &= ~best_match;
1867 }
1868
1869 return -ENOTSUPP;
1870}
1871#else
1872static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1873 uint32_t allowed_mask)
1874{
1875 return 0;
1876}
1877#endif
1878
1879static const struct mode_width_tuning mmc_modes_by_pref[] = {
1880#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1881 {
1882 .mode = MMC_HS_400_ES,
1883 .widths = MMC_MODE_8BIT,
1884 },
1885#endif
1886#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1887 {
1888 .mode = MMC_HS_400,
1889 .widths = MMC_MODE_8BIT,
1890 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1891 },
1892#endif
1893#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1894 {
1895 .mode = MMC_HS_200,
1896 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1897 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1898 },
1899#endif
1900 {
1901 .mode = MMC_DDR_52,
1902 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1903 },
1904 {
1905 .mode = MMC_HS_52,
1906 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1907 },
1908 {
1909 .mode = MMC_HS,
1910 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1911 },
1912 {
1913 .mode = MMC_LEGACY,
1914 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1915 }
1916};
1917
1918#define for_each_mmc_mode_by_pref(caps, mwt) \
1919 for (mwt = mmc_modes_by_pref;\
1920 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1921 mwt++) \
1922 if (caps & MMC_CAP(mwt->mode))
1923
1924static const struct ext_csd_bus_width {
1925 uint cap;
1926 bool is_ddr;
1927 uint ext_csd_bits;
1928} ext_csd_bus_width[] = {
1929 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1930 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1931 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1932 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1933 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1934};
1935
1936#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1937static int mmc_select_hs400(struct mmc *mmc)
1938{
1939 int err;
1940
1941
1942 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
1943 if (err)
1944 return err;
1945
1946
1947 mmc_select_mode(mmc, MMC_HS_200);
1948 mmc_set_clock(mmc, mmc->tran_speed, false);
1949
1950
1951 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
1952 if (err) {
1953 debug("tuning failed\n");
1954 return err;
1955 }
1956
1957
1958 mmc_set_card_speed(mmc, MMC_HS, true);
1959
1960 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1961 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1962 if (err)
1963 return err;
1964
1965 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
1966 if (err)
1967 return err;
1968
1969 mmc_select_mode(mmc, MMC_HS_400);
1970 err = mmc_set_clock(mmc, mmc->tran_speed, false);
1971 if (err)
1972 return err;
1973
1974 return 0;
1975}
1976#else
1977static int mmc_select_hs400(struct mmc *mmc)
1978{
1979 return -ENOTSUPP;
1980}
1981#endif
1982
1983#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1984#if !CONFIG_IS_ENABLED(DM_MMC)
1985static int mmc_set_enhanced_strobe(struct mmc *mmc)
1986{
1987 return -ENOTSUPP;
1988}
1989#endif
1990static int mmc_select_hs400es(struct mmc *mmc)
1991{
1992 int err;
1993
1994 err = mmc_set_card_speed(mmc, MMC_HS, true);
1995 if (err)
1996 return err;
1997
1998 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1999 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
2000 EXT_CSD_BUS_WIDTH_STROBE);
2001 if (err) {
2002 printf("switch to bus width for hs400 failed\n");
2003 return err;
2004 }
2005
2006 err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
2007 if (err)
2008 return err;
2009
2010 mmc_select_mode(mmc, MMC_HS_400_ES);
2011 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2012 if (err)
2013 return err;
2014
2015 return mmc_set_enhanced_strobe(mmc);
2016}
2017#else
2018static int mmc_select_hs400es(struct mmc *mmc)
2019{
2020 return -ENOTSUPP;
2021}
2022#endif
2023
2024#define for_each_supported_width(caps, ddr, ecbv) \
2025 for (ecbv = ext_csd_bus_width;\
2026 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
2027 ecbv++) \
2028 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
2029
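/*
 * mmc_select_mode_and_width() - negotiate the best eMMC bus mode
 *
 * Walks mmc_modes_by_pref[] from fastest to slowest and, for each mode
 * the card and host both support, tries the widest allowed bus width:
 * set the I/O voltage, program the EXT_CSD bus width and timing, raise
 * the clock, run tuning where required, and confirm the link by
 * re-reading EXT_CSD.  On failure it drops back to 1-bit legacy mode
 * and tries the next combination.
 */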
2030static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
2031{
2032 int err;
2033 const struct mode_width_tuning *mwt;
2034 const struct ext_csd_bus_width *ecbw;
2035
2036#ifdef DEBUG
2037 mmc_dump_capabilities("mmc", card_caps);
2038 mmc_dump_capabilities("host", mmc->host_caps);
2039#endif
2040
2041 if (mmc_host_is_spi(mmc)) {
2042 mmc_set_bus_width(mmc, 1);
2043 mmc_select_mode(mmc, MMC_LEGACY);
2044 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
2045 return 0;
2046 }
2047
2048
2049 card_caps &= mmc->host_caps;
2050
2051
2052 if (mmc->version < MMC_VERSION_4)
2053 return 0;
2054
2055 if (!mmc->ext_csd) {
2056 pr_debug("No ext_csd found!\n");
2057 return -ENOTSUPP;
2058 }
2059
2060#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2061 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
2062
2063
2064
2065
2066
2067 if (mmc->selected_mode == MMC_HS_200 ||
2068 mmc->selected_mode == MMC_HS_400)
2069 mmc_set_card_speed(mmc, MMC_HS, true);
2070 else
2071#endif
2072 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
2073
2074 for_each_mmc_mode_by_pref(card_caps, mwt) {
2075 for_each_supported_width(card_caps & mwt->widths,
2076 mmc_is_mode_ddr(mwt->mode), ecbw) {
2077 enum mmc_voltage old_voltage;
2078 pr_debug("trying mode %s width %d (at %d MHz)\n",
2079 mmc_mode_name(mwt->mode),
2080 bus_width(ecbw->cap),
2081 mmc_mode2freq(mmc, mwt->mode) / 1000000);
2082 old_voltage = mmc->signal_voltage;
2083 err = mmc_set_lowest_voltage(mmc, mwt->mode,
2084 MMC_ALL_SIGNAL_VOLTAGE);
2085 if (err)
2086 continue;
2087
2088
2089 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2090 EXT_CSD_BUS_WIDTH,
2091 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2092 if (err)
2093 goto error;
2094 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
2095
2096 if (mwt->mode == MMC_HS_400) {
2097 err = mmc_select_hs400(mmc);
2098 if (err) {
2099 printf("Select HS400 failed %d\n", err);
2100 goto error;
2101 }
2102 } else if (mwt->mode == MMC_HS_400_ES) {
2103 err = mmc_select_hs400es(mmc);
2104 if (err) {
2105 printf("Select HS400ES failed %d\n",
2106 err);
2107 goto error;
2108 }
2109 } else {
2110
2111 err = mmc_set_card_speed(mmc, mwt->mode, false);
2112 if (err)
2113 goto error;
2114
2115
2116
2117
2118
2119
2120 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2121 err = mmc_switch(mmc,
2122 EXT_CSD_CMD_SET_NORMAL,
2123 EXT_CSD_BUS_WIDTH,
2124 ecbw->ext_csd_bits);
2125 if (err)
2126 goto error;
2127 }
2128
2129
2130 mmc_select_mode(mmc, mwt->mode);
2131 mmc_set_clock(mmc, mmc->tran_speed,
2132 MMC_CLK_ENABLE);
2133#ifdef MMC_SUPPORTS_TUNING
2134
2135
2136 if (mwt->tuning) {
2137 err = mmc_execute_tuning(mmc,
2138 mwt->tuning);
2139 if (err) {
2140 pr_debug("tuning failed\n");
2141 goto error;
2142 }
2143 }
2144#endif
2145 }
2146
2147
2148 err = mmc_read_and_compare_ext_csd(mmc);
2149 if (!err)
2150 return 0;
2151error:
2152 mmc_set_signal_voltage(mmc, old_voltage);
2153
2154 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2155 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2156 mmc_select_mode(mmc, MMC_LEGACY);
2157 mmc_set_bus_width(mmc, 1);
2158 }
2159 }
2160
2161 pr_err("unable to select a mode\n");
2162
2163 return -ENOTSUPP;
2164}
2165#endif
2166
2167#if CONFIG_IS_ENABLED(MMC_TINY)
2168DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
2169#endif
2170
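/*
 * mmc_startup_v4() - parse EXT_CSD for eMMC version 4.0 and later
 *
 * Reads the EXT_CSD register and derives the device version, sector
 * count, CMD6 and partition-switch timeouts, boot/RPMB/GP partition
 * sizes, erase group size and write-reliability settings.  SD cards and
 * pre-4.0 MMC devices return immediately.
 */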
2171static int mmc_startup_v4(struct mmc *mmc)
2172{
2173 int err, i;
2174 u64 capacity;
2175 bool has_parts = false;
2176 bool part_completed;
2177 static const u32 mmc_versions[] = {
2178 MMC_VERSION_4,
2179 MMC_VERSION_4_1,
2180 MMC_VERSION_4_2,
2181 MMC_VERSION_4_3,
2182 MMC_VERSION_4_4,
2183 MMC_VERSION_4_41,
2184 MMC_VERSION_4_5,
2185 MMC_VERSION_5_0,
2186 MMC_VERSION_5_1
2187 };
2188
2189#if CONFIG_IS_ENABLED(MMC_TINY)
2190 u8 *ext_csd = ext_csd_bkup;
2191
2192 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2193 return 0;
2194
2195 if (!mmc->ext_csd)
2196 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2197
2198 err = mmc_send_ext_csd(mmc, ext_csd);
2199 if (err)
2200 goto error;
2201
2202
2203 if (!mmc->ext_csd)
2204 mmc->ext_csd = ext_csd;
2205#else
2206 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2207
2208 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2209 return 0;
2210
2211
2212 err = mmc_send_ext_csd(mmc, ext_csd);
2213 if (err)
2214 goto error;
2215
2216
2217 if (!mmc->ext_csd)
2218 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2219 if (!mmc->ext_csd)
2220 return -ENOMEM;
2221 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
2222#endif
2223 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2224 return -EINVAL;
2225
2226 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2227
2228 if (mmc->version >= MMC_VERSION_4_2) {
2229
2230
2231
2232
2233
2234 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2235 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2236 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2237 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2238 capacity *= MMC_MAX_BLOCK_LEN;
2239 if ((capacity >> 20) > 2 * 1024)
2240 mmc->capacity_user = capacity;
2241 }
2242
2243 if (mmc->version >= MMC_VERSION_4_5)
2244 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2245
2246
2247
2248
2249
2250
2251
2252 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2253 EXT_CSD_PARTITION_SETTING_COMPLETED);
2254
2255 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2256
2257 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2258 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2259
2260
2261 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2262 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2263 ext_csd[EXT_CSD_BOOT_MULT])
2264 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2265 if (part_completed &&
2266 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2267 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
2268
2269 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2270
2271 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2272
2273 for (i = 0; i < 4; i++) {
2274 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2275 uint mult = (ext_csd[idx + 2] << 16) +
2276 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2277 if (mult)
2278 has_parts = true;
2279 if (!part_completed)
2280 continue;
2281 mmc->capacity_gp[i] = mult;
2282 mmc->capacity_gp[i] *=
2283 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2284 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2285 mmc->capacity_gp[i] <<= 19;
2286 }
2287
2288#ifndef CONFIG_SPL_BUILD
2289 if (part_completed) {
2290 mmc->enh_user_size =
2291 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2292 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2293 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2294 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2295 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2296 mmc->enh_user_size <<= 19;
2297 mmc->enh_user_start =
2298 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2299 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2300 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2301 ext_csd[EXT_CSD_ENH_START_ADDR];
2302 if (mmc->high_capacity)
2303 mmc->enh_user_start <<= 9;
2304 }
2305#endif
2306
2307
2308
2309
2310
2311
2312 if (part_completed)
2313 has_parts = true;
2314 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2315 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2316 has_parts = true;
2317 if (has_parts) {
2318 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2319 EXT_CSD_ERASE_GROUP_DEF, 1);
2320
2321 if (err)
2322 goto error;
2323
2324 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2325 }
2326
2327 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2328#if CONFIG_IS_ENABLED(MMC_WRITE)
2329
2330 mmc->erase_grp_size =
2331 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2332#endif
2333
2334
2335
2336
2337
2338 if (mmc->high_capacity && part_completed) {
2339 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2340 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2341 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2342 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2343 capacity *= MMC_MAX_BLOCK_LEN;
2344 mmc->capacity_user = capacity;
2345 }
2346 }
2347#if CONFIG_IS_ENABLED(MMC_WRITE)
2348 else {
2349
2350 int erase_gsz, erase_gmul;
2351
2352 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2353 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2354 mmc->erase_grp_size = (erase_gsz + 1)
2355 * (erase_gmul + 1);
2356 }
2357#endif
2358#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2359 mmc->hc_wp_grp_size = 1024
2360 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2361 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2362#endif
2363
2364 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2365
2366 return 0;
2367error:
2368 if (mmc->ext_csd) {
2369#if !CONFIG_IS_ENABLED(MMC_TINY)
2370 free(mmc->ext_csd);
2371#endif
2372 mmc->ext_csd = NULL;
2373 }
2374 return err;
2375}
2376
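/*
 * mmc_startup() - bring an identified card to the transfer state
 *
 * Runs the standard identification sequence (CID, relative address,
 * CSD), decodes the legacy clock and geometry from the CSD, selects the
 * card, performs the version-4 EXT_CSD setup and finally negotiates the
 * bus width and speed mode before filling in the block descriptor.
 */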
2377static int mmc_startup(struct mmc *mmc)
2378{
2379 int err, i;
2380 uint mult, freq;
2381 u64 cmult, csize;
2382 struct mmc_cmd cmd;
2383 struct blk_desc *bdesc;
2384
2385#ifdef CONFIG_MMC_SPI_CRC_ON
2386 if (mmc_host_is_spi(mmc)) {
2387 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2388 cmd.resp_type = MMC_RSP_R1;
2389 cmd.cmdarg = 1;
2390 err = mmc_send_cmd(mmc, &cmd, NULL);
2391 if (err)
2392 return err;
2393 }
2394#endif
2395
2396
2397 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2398 MMC_CMD_ALL_SEND_CID;
2399 cmd.resp_type = MMC_RSP_R2;
2400 cmd.cmdarg = 0;
2401
2402 err = mmc_send_cmd(mmc, &cmd, NULL);
2403
2404#ifdef CONFIG_MMC_QUIRKS
2405 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2406 int retries = 4;
2407
2408
2409
2410
2411 do {
2412 err = mmc_send_cmd(mmc, &cmd, NULL);
2413 if (!err)
2414 break;
2415 } while (retries--);
2416 }
2417#endif
2418
2419 if (err)
2420 return err;
2421
2422 memcpy(mmc->cid, cmd.response, 16);
2423
2424
2425
2426
2427
2428
2429 if (!mmc_host_is_spi(mmc)) {
2430 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2431 cmd.cmdarg = mmc->rca << 16;
2432 cmd.resp_type = MMC_RSP_R6;
2433
2434 err = mmc_send_cmd(mmc, &cmd, NULL);
2435
2436 if (err)
2437 return err;
2438
2439 if (IS_SD(mmc))
2440 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2441 }
2442
2443
2444 cmd.cmdidx = MMC_CMD_SEND_CSD;
2445 cmd.resp_type = MMC_RSP_R2;
2446 cmd.cmdarg = mmc->rca << 16;
2447
2448 err = mmc_send_cmd(mmc, &cmd, NULL);
2449
2450 if (err)
2451 return err;
2452
2453 mmc->csd[0] = cmd.response[0];
2454 mmc->csd[1] = cmd.response[1];
2455 mmc->csd[2] = cmd.response[2];
2456 mmc->csd[3] = cmd.response[3];

	if (mmc->version == MMC_VERSION_UNKNOWN) {
		int version = (cmd.response[0] >> 26) & 0xf;

		switch (version) {
		case 0:
			mmc->version = MMC_VERSION_1_2;
			break;
		case 1:
			mmc->version = MMC_VERSION_1_4;
			break;
		case 2:
			mmc->version = MMC_VERSION_2_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_3;
			break;
		case 4:
			mmc->version = MMC_VERSION_4;
			break;
		default:
			mmc->version = MMC_VERSION_1_2;
			break;
		}
	}

	/* Divide frequency by 10, since the mults are 10x bigger */
	freq = fbase[(cmd.response[0] & 0x7)];
	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];

	mmc->legacy_speed = freq * mult;
	mmc_select_mode(mmc, MMC_LEGACY);

	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
#if CONFIG_IS_ENABLED(MMC_WRITE)
	if (IS_SD(mmc))
		mmc->write_bl_len = mmc->read_bl_len;
	else
		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
#endif

	if (mmc->high_capacity) {
		csize = (mmc->csd[1] & 0x3f) << 16
			| (mmc->csd[2] & 0xffff0000) >> 16;
		cmult = 8;
	} else {
		csize = (mmc->csd[1] & 0x3ff) << 2
			| (mmc->csd[2] & 0xc0000000) >> 30;
		cmult = (mmc->csd[2] & 0x00038000) >> 15;
	}

	mmc->capacity_user = (csize + 1) << (cmult + 2);
	mmc->capacity_user *= mmc->read_bl_len;
	mmc->capacity_boot = 0;
	mmc->capacity_rpmb = 0;
	for (i = 0; i < 4; i++)
		mmc->capacity_gp[i] = 0;

	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;

#if CONFIG_IS_ENABLED(MMC_WRITE)
	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
#endif

	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
		cmd.cmdidx = MMC_CMD_SET_DSR;
		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
		cmd.resp_type = MMC_RSP_NONE;
		if (mmc_send_cmd(mmc, &cmd, NULL))
			pr_warn("MMC: SET_DSR failed\n");
	}

	/* Select the card, and put it into Transfer Mode */
	if (!mmc_host_is_spi(mmc)) {
		cmd.cmdidx = MMC_CMD_SELECT_CARD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	/* For SD, the erase group is always one sector */
#if CONFIG_IS_ENABLED(MMC_WRITE)
	mmc->erase_grp_size = 1;
#endif
	mmc->part_config = MMCPART_NOAVAILABLE;

	err = mmc_startup_v4(mmc);
	if (err)
		return err;

	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(MMC_TINY)
	mmc_set_clock(mmc, mmc->legacy_speed, false);
	mmc_select_mode(mmc, MMC_LEGACY);
	mmc_set_bus_width(mmc, 1);
#else
	if (IS_SD(mmc)) {
		err = sd_get_capabilities(mmc);
		if (err)
			return err;
		err = sd_select_mode_and_width(mmc, mmc->card_caps);
	} else {
		err = mmc_get_capabilities(mmc);
		if (err)
			return err;
		err = mmc_select_mode_and_width(mmc, mmc->card_caps);
	}
#endif
	if (err)
		return err;

	mmc->best_mode = mmc->selected_mode;

	/* Fix the block length for DDR mode */
	if (mmc->ddr_mode) {
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
#if CONFIG_IS_ENABLED(MMC_WRITE)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
#endif
	}

	/* Fill in the block device description */
	bdesc = mmc_get_blk_desc(mmc);
	bdesc->lun = 0;
	bdesc->hwpart = 0;
	bdesc->type = 0;
	bdesc->blksz = mmc->read_bl_len;
	bdesc->log2blksz = LOG2(bdesc->blksz);
	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
#if !defined(CONFIG_SPL_BUILD) || \
		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
		 !CONFIG_IS_ENABLED(USE_TINY_PRINTF))
	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
		(mmc->cid[3] >> 16) & 0xffff);
	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
		(mmc->cid[2] >> 24) & 0xff);
	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
		(mmc->cid[2] >> 16) & 0xf);
#else
	bdesc->vendor[0] = 0;
	bdesc->product[0] = 0;
	bdesc->revision[0] = 0;
#endif

#if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
	part_init(bdesc);
#endif

	return 0;
}

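/*
 * Send CMD8 (SEND_IF_COND) to check whether the card is SD version 2.0 or
 * later and supports the supplied voltage range; the card must echo back
 * the 0xaa check pattern.
 */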
static int mmc_send_if_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = SD_CMD_SEND_IF_COND;
	/* Set the bit if the host supports voltages between 2.7 and 3.6 V */
	cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
	cmd.resp_type = MMC_RSP_R7;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	if ((cmd.response[0] & 0xff) != 0xaa)
		return -EOPNOTSUPP;
	else
		mmc->version = SD_VERSION_2;

	return 0;
}

#if !CONFIG_IS_ENABLED(DM_MMC)
/* Board-specific MMC power initialization */
__weak void board_mmc_power_init(void)
{
}
#endif

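/*
 * Look up the vmmc/vqmmc supply regulators (driver model) or fall back to the
 * board hook so that card power can be controlled before initialization.
 */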
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if CONFIG_IS_ENABLED(DM_REGULATOR)
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &mmc->vmmc_supply);
	if (ret)
		pr_debug("%s: No vmmc supply\n", mmc->dev->name);

	ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
					  &mmc->vqmmc_supply);
	if (ret)
		pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
#endif
#else /* !CONFIG_IS_ENABLED(DM_MMC) */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}

/*
 * Put the host in its initial state:
 * - turn on Vdd (card power supply)
 * - configure the bus width and clock to minimal values
 */
static void mmc_set_initial_state(struct mmc *mmc)
{
	int err;

	/* First try to set 3.3V; if that fails, fall back to 1.8V */
	err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
	if (err != 0)
		err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
	if (err != 0)
		pr_warn("mmc: failed to set signal voltage\n");

	mmc_select_mode(mmc, MMC_LEGACY);
	mmc_set_bus_width(mmc, 1);
	mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
}

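/*
 * Turn on the card's Vdd supply via the vmmc regulator when driver model
 * regulators are enabled; otherwise this is a no-op.
 */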
static int mmc_power_on(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vmmc_supply) {
		int ret = regulator_set_enable(mmc->vmmc_supply, true);

		if (ret) {
			puts("Error enabling VMMC supply\n");
			return ret;
		}
	}
#endif
	return 0;
}

static int mmc_power_off(struct mmc *mmc)
{
	mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vmmc_supply) {
		int ret = regulator_set_enable(mmc->vmmc_supply, false);

		if (ret) {
			pr_debug("Error disabling VMMC supply\n");
			return ret;
		}
	}
#endif
	return 0;
}

static int mmc_power_cycle(struct mmc *mmc)
{
	int ret;

	ret = mmc_power_off(mmc);
	if (ret)
		return ret;

	ret = mmc_host_power_cycle(mmc);
	if (ret)
		return ret;

	/*
	 * Give the card time to see a clean power-off before powering it
	 * back on; wait 2 ms to be on the safe side.
	 */
	udelay(2000);
	return mmc_power_on(mmc);
}

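/*
 * Power up the card and run the initial handshake (CMD0, CMD8, then the SD or
 * MMC operating-condition command) to discover the card type and its
 * operating conditions.
 */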
int mmc_get_op_cond(struct mmc *mmc)
{
	bool uhs_en = supports_uhs(mmc->cfg->host_caps);
	int err;

	if (mmc->has_init)
		return 0;

#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_adapter_card_type_ident();
#endif
	err = mmc_power_init(mmc);
	if (err)
		return err;

#ifdef CONFIG_MMC_QUIRKS
	mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
		      MMC_QUIRK_RETRY_SEND_CID |
		      MMC_QUIRK_RETRY_APP_CMD;
#endif

	err = mmc_power_cycle(mmc);
	if (err) {
		/*
		 * If power cycling is not supported, do not try to use the
		 * UHS modes: we would not be able to recover from an error
		 * during the UHS initialization.
		 */
		pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
		uhs_en = false;
		mmc->host_caps &= ~UHS_CAPS;
		err = mmc_power_on(mmc);
	}
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed ready for use */
#else
	/* We made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
	if (err)
		return err;
#endif
	mmc->ddr_mode = 0;

retry:
	mmc_set_initial_state(mmc);

	/* Reset the card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition resets to user partition(0) at every CMD0 */
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/* Test for SD version 2 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc, uhs_en);
	if (err && uhs_en) {
		uhs_en = false;
		mmc_power_cycle(mmc);
		goto retry;
	}

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("Card did not respond to voltage select!\n");
#endif
			return -EOPNOTSUPP;
		}
	}

	return err;
}

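/*
 * Begin card initialization: check card detect, apply power and send the
 * operating-condition commands. mmc_init() later completes the job via
 * mmc_complete_init().
 */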
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	int err = 0;

	/*
	 * All hosts are capable of 1-bit bus width and are able to use the
	 * legacy timings.
	 */
	mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
			 MMC_MODE_1BIT;
#if CONFIG_IS_ENABLED(DM_MMC)
	mmc_deferred_probe(mmc);
#endif
#if !defined(CONFIG_MMC_BROKEN_CD)
	no_card = mmc_getcd(mmc) == 0;
#else
	no_card = 0;
#endif
#if !CONFIG_IS_ENABLED(DM_MMC)
	/* We pretend there's no card when init is NULL */
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	err = mmc_get_op_cond(mmc);

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}

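/*
 * Finish a previously started initialization: complete any pending OCR
 * negotiation and run mmc_startup() to identify and configure the card.
 */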
static int mmc_complete_init(struct mmc *mmc)
{
	int err = 0;

	mmc->init_in_progress = 0;
	if (mmc->op_cond_pending)
		err = mmc_complete_op_cond(mmc);

	if (!err)
		err = mmc_startup(mmc);
	if (err)
		mmc->has_init = 0;
	else
		mmc->has_init = 1;
	return err;
}

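/*
 * Fully initialize an MMC device: start initialization if it is not already
 * in progress, then complete it. Returns 0 on success or a negative error
 * code, logging the elapsed time on failure.
 */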
int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused ulong start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	start = get_timer(0);

	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
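/*
 * Switch the card back from UHS (SD) or HS200/HS400 (eMMC) to a basic mode by
 * reselecting mode and width with the high-speed capabilities masked out.
 */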
int mmc_deinit(struct mmc *mmc)
{
	u32 caps_filtered;

	if (!mmc->has_init)
		return 0;

	if (IS_SD(mmc)) {
		caps_filtered = mmc->card_caps &
			~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
			  MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
			  MMC_CAP(UHS_SDR104));

		return sd_select_mode_and_width(mmc, caps_filtered);
	} else {
		caps_filtered = mmc->card_caps &
			~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));

		return mmc_select_mode_and_width(mmc, caps_filtered);
	}
}
#endif

int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}

/* CPU-specific MMC initializations */
__weak int cpu_mmc_init(bd_t *bis)
{
	return -1;
}

/* Board-specific MMC initializations */
__weak int board_mmc_init(bd_t *bis)
{
	return -1;
}

void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}

#if CONFIG_IS_ENABLED(DM_MMC)
static int mmc_probe(bd_t *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add the devices in sequence order. Really with driver model
	 * we should allow holes, but the current MMC list does not allow
	 * that, so requesting 0, 1, 3 yields 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			pr_err("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
#else
static int mmc_probe(bd_t *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif

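/*
 * One-time MMC subsystem setup: initialize the device list (non-BLK builds),
 * probe all controllers, print the detected devices and run any requested
 * pre-initialization.
 */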
int mmc_initialize(bd_t *bis)
{
	static int initialized = 0;
	int ret;

	if (initialized)	/* Avoid initializing mmc multiple times */
		return 0;
	initialized = 1;

#if !CONFIG_IS_ENABLED(BLK)
#if !CONFIG_IS_ENABLED(MMC_TINY)
	mmc_list_init();
#endif
#endif
	ret = mmc_probe(bis);
	if (ret)
		return ret;

#ifndef CONFIG_SPL_BUILD
	print_mmc_devices(',');
#endif

	mmc_do_preinit();
	return 0;
}

#if CONFIG_IS_ENABLED(DM_MMC)
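/*
 * Probe the MMC controller with the given sequence number and, if the device
 * requests pre-initialization, start card init early.
 */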
int mmc_init_device(int num)
{
	struct udevice *dev;
	struct mmc *m;
	int ret;

	ret = uclass_get_device(UCLASS_MMC, num, &dev);
	if (ret)
		return ret;

	m = mmc_get_mmc_dev(dev);
	if (!m)
		return 0;
#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
	mmc_set_preinit(m, 1);
#endif
	if (m->preinit)
		mmc_start_init(m);

	return 0;
}
#endif

#ifdef CONFIG_CMD_BKOPS_ENABLE
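/*
 * Enable manual background operations (BKOPS) on an eMMC device by setting
 * the BKOPS_EN bit in EXT_CSD, after checking that the device supports BKOPS
 * and that the bit is not already set.
 */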
int mmc_set_bkops_enable(struct mmc *mmc)
{
	int err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err) {
		puts("Could not get ext_csd register values\n");
		return err;
	}

	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
		puts("Background operations not supported on device\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
		puts("Background operations already enabled\n");
		return 0;
	}

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
	if (err) {
		puts("Failed to enable manual background operations\n");
		return err;
	}

	puts("Enabled manual background operations\n");

	return 0;
}
#endif