1
2
3
4
5
6
7
8
9
10#include <config.h>
11#include <common.h>
12#include <blk.h>
13#include <command.h>
14#include <dm.h>
15#include <log.h>
16#include <dm/device-internal.h>
17#include <errno.h>
18#include <mmc.h>
19#include <part.h>
20#include <linux/bitops.h>
21#include <linux/delay.h>
22#include <power/regulator.h>
23#include <malloc.h>
24#include <memalign.h>
25#include <linux/list.h>
26#include <div64.h>
27#include "mmc_private.h"
28
29#define DEFAULT_CMD6_TIMEOUT_MS 500
30
31static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
32
33#if !CONFIG_IS_ENABLED(DM_MMC)
34
/*
 * Non-DM fallback: hardware DAT0 busy detection is unavailable, callers
 * must fall back to CMD13 polling when they see -ENOSYS.
 */
static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
{
	return -ENOSYS;
}
39
/*
 * Board hook for the write-protect state; the default weak version
 * returns -1 ("not implemented") so mmc_getwp() falls through to the
 * controller callback.
 */
__weak int board_mmc_getwp(struct mmc *mmc)
{
	return -1;
}
44
45int mmc_getwp(struct mmc *mmc)
46{
47 int wp;
48
49 wp = board_mmc_getwp(mmc);
50
51 if (wp < 0) {
52 if (mmc->cfg->ops->getwp)
53 wp = mmc->cfg->ops->getwp(mmc);
54 else
55 wp = 0;
56 }
57
58 return wp;
59}
60
/*
 * Board hook for card-detect; -1 from this default weak version means
 * "not implemented", letting mmc_getcd() use the controller callback.
 */
__weak int board_mmc_getcd(struct mmc *mmc)
{
	return -1;
}
65#endif
66
67#ifdef CONFIG_MMC_TRACE
/* Trace helper: log the command index and argument before submission. */
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
}
73
/*
 * Trace helper: log the command outcome.  On error only the return code
 * is printed; otherwise the response words are dumped in a layout that
 * depends on the response type.  R2 responses additionally dump the raw
 * 16 response bytes, most-significant byte first within each word.
 */
void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
			       cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
			       cmd->response[0]);
			break;
		case MMC_RSP_R2:
			printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
			       cmd->response[0]);
			printf("\t\t \t\t 0x%08x \n",
			       cmd->response[1]);
			printf("\t\t \t\t 0x%08x \n",
			       cmd->response[2]);
			printf("\t\t \t\t 0x%08x \n",
			       cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			/* Print each response word byte-by-byte, MSB first */
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02x ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
			       cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}
125
126void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
127{
128 int status;
129
130 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
131 printf("CURR STATE:%d\n", status);
132}
133#endif
134
135#if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
136const char *mmc_mode_name(enum bus_mode mode)
137{
138 static const char *const names[] = {
139 [MMC_LEGACY] = "MMC legacy",
140 [MMC_HS] = "MMC High Speed (26MHz)",
141 [SD_HS] = "SD High Speed (50MHz)",
142 [UHS_SDR12] = "UHS SDR12 (25MHz)",
143 [UHS_SDR25] = "UHS SDR25 (50MHz)",
144 [UHS_SDR50] = "UHS SDR50 (100MHz)",
145 [UHS_SDR104] = "UHS SDR104 (208MHz)",
146 [UHS_DDR50] = "UHS DDR50 (50MHz)",
147 [MMC_HS_52] = "MMC High Speed (52MHz)",
148 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
149 [MMC_HS_200] = "HS200 (200MHz)",
150 [MMC_HS_400] = "HS400 (200MHz)",
151 [MMC_HS_400_ES] = "HS400ES (200MHz)",
152 };
153
154 if (mode >= MMC_MODES_END)
155 return "Unknown mode";
156 else
157 return names[mode];
158}
159#endif
160
161static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
162{
163 static const int freqs[] = {
164 [MMC_LEGACY] = 25000000,
165 [MMC_HS] = 26000000,
166 [SD_HS] = 50000000,
167 [MMC_HS_52] = 52000000,
168 [MMC_DDR_52] = 52000000,
169 [UHS_SDR12] = 25000000,
170 [UHS_SDR25] = 50000000,
171 [UHS_SDR50] = 100000000,
172 [UHS_DDR50] = 50000000,
173 [UHS_SDR104] = 208000000,
174 [MMC_HS_200] = 200000000,
175 [MMC_HS_400] = 200000000,
176 [MMC_HS_400_ES] = 200000000,
177 };
178
179 if (mode == MMC_LEGACY)
180 return mmc->legacy_speed;
181 else if (mode >= MMC_MODES_END)
182 return 0;
183 else
184 return freqs[mode];
185}
186
/*
 * Record the newly selected bus mode and derive the transfer clock and
 * DDR flag from it.  Always returns 0.
 */
static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
{
	mmc->selected_mode = mode;
	mmc->tran_speed = mmc_mode2freq(mmc, mode);
	mmc->ddr_mode = mmc_is_mode_ddr(mode);
	pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
		 mmc->tran_speed / 1000000);
	return 0;
}
196
197#if !CONFIG_IS_ENABLED(DM_MMC)
/*
 * Submit a command (and optional data transfer) to the host driver,
 * emitting trace output around the call when CONFIG_MMC_TRACE is set.
 */
int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
{
	int ret;

	mmmc_trace_before_send(mmc, cmd);
	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
	mmmc_trace_after_send(mmc, cmd, ret);

	return ret;
}
208#endif
209
210
211
212
213
214
215
216
217
218
219
220static int mmc_send_cmd_retry(struct mmc *mmc, struct mmc_cmd *cmd,
221 struct mmc_data *data, uint retries)
222{
223 int ret;
224
225 do {
226 ret = mmc_send_cmd(mmc, cmd, data);
227 } while (ret && retries--);
228
229 return ret;
230}
231
232
233
234
235
236
237
238
239
240
241
242
243
244static int mmc_send_cmd_quirks(struct mmc *mmc, struct mmc_cmd *cmd,
245 struct mmc_data *data, u32 quirk, uint retries)
246{
247 if (CONFIG_IS_ENABLED(MMC_QUIRKS) && mmc->quirks & quirk)
248 return mmc_send_cmd_retry(mmc, cmd, data, retries);
249 else
250 return mmc_send_cmd(mmc, cmd, data);
251}
252
253int mmc_send_status(struct mmc *mmc, unsigned int *status)
254{
255 struct mmc_cmd cmd;
256 int ret;
257
258 cmd.cmdidx = MMC_CMD_SEND_STATUS;
259 cmd.resp_type = MMC_RSP_R1;
260 if (!mmc_host_is_spi(mmc))
261 cmd.cmdarg = mmc->rca << 16;
262
263 ret = mmc_send_cmd_retry(mmc, &cmd, NULL, 4);
264 mmc_trace_state(mmc, &cmd);
265 if (!ret)
266 *status = cmd.response[0];
267
268 return ret;
269}
270
/*
 * Wait up to timeout_ms for the card to leave the programming state and
 * report ready-for-data.  Hardware DAT0 busy detection is preferred;
 * when the driver lacks it (-ENOSYS) we fall back to polling CMD13 once
 * per millisecond.  Returns 0 when ready, -ECOMM on a status error,
 * -ETIMEDOUT on timeout.
 */
int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
{
	unsigned int status;
	int err;

	err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
	if (err != -ENOSYS)
		return err;

	while (1) {
		err = mmc_send_status(mmc, &status);
		if (err)
			return err;

		/* Done once ready-for-data is set and we left PRG state */
		if ((status & MMC_STATUS_RDY_FOR_DATA) &&
		    (status & MMC_STATUS_CURR_STATE) !=
		     MMC_STATE_PRG)
			break;

		if (status & MMC_STATUS_MASK) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("Status Error: 0x%08x\n", status);
#endif
			return -ECOMM;
		}

		if (timeout_ms-- <= 0)
			break;

		udelay(1000);
	}

	/* Distinguish "broke out ready" from "broke out on timeout" */
	if (timeout_ms <= 0) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("Timeout waiting card ready\n");
#endif
		return -ETIMEDOUT;
	}

	return 0;
}
312
313int mmc_set_blocklen(struct mmc *mmc, int len)
314{
315 struct mmc_cmd cmd;
316
317 if (mmc->ddr_mode)
318 return 0;
319
320 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
321 cmd.resp_type = MMC_RSP_R1;
322 cmd.cmdarg = len;
323
324 return mmc_send_cmd_quirks(mmc, &cmd, NULL,
325 MMC_QUIRK_RETRY_SET_BLOCKLEN, 4);
326}
327
328#ifdef MMC_SUPPORTS_TUNING
/* Standard tuning block pattern for a 4-bit bus (64 bytes, CMD19/CMD21) */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

/* Standard tuning block pattern for an 8-bit bus (128 bytes) */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
358
359int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
360{
361 struct mmc_cmd cmd;
362 struct mmc_data data;
363 const u8 *tuning_block_pattern;
364 int size, err;
365
366 if (mmc->bus_width == 8) {
367 tuning_block_pattern = tuning_blk_pattern_8bit;
368 size = sizeof(tuning_blk_pattern_8bit);
369 } else if (mmc->bus_width == 4) {
370 tuning_block_pattern = tuning_blk_pattern_4bit;
371 size = sizeof(tuning_blk_pattern_4bit);
372 } else {
373 return -EINVAL;
374 }
375
376 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
377
378 cmd.cmdidx = opcode;
379 cmd.cmdarg = 0;
380 cmd.resp_type = MMC_RSP_R1;
381
382 data.dest = (void *)data_buf;
383 data.blocks = 1;
384 data.blocksize = size;
385 data.flags = MMC_DATA_READ;
386
387 err = mmc_send_cmd(mmc, &cmd, &data);
388 if (err)
389 return err;
390
391 if (memcmp(data_buf, tuning_block_pattern, size))
392 return -EIO;
393
394 return 0;
395}
396#endif
397
/*
 * Read blkcnt blocks starting at 'start' into dst using CMD17/CMD18.
 * Non-high-capacity cards are byte addressed, so the LBA is scaled by
 * the read block length.  Returns the number of blocks read, or 0 on
 * any failure (including a failed STOP_TRANSMISSION after CMD18).
 */
static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
			   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1)
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;

	/* High-capacity cards address by block, older cards by byte */
	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd(mmc, &cmd, &data))
		return 0;

	if (blkcnt > 1) {
		/* Open-ended multi-block read needs an explicit CMD12 */
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("mmc fail to send stop cmd\n");
#endif
			return 0;
		}
	}

	return blkcnt;
}
438
439#if !CONFIG_IS_ENABLED(DM_MMC)
440static int mmc_get_b_max(struct mmc *mmc, void *dst, lbaint_t blkcnt)
441{
442 if (mmc->cfg->ops->get_b_max)
443 return mmc->cfg->ops->get_b_max(mmc, dst, blkcnt);
444 else
445 return mmc->cfg->b_max;
446}
447#endif
448
/*
 * Block-device read entry point.  Selects the hardware partition,
 * validates the range against the partition size, programs the block
 * length and reads in chunks of at most b_max blocks.  Returns the
 * number of blocks read, or 0 on any error (blk layer convention).
 */
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_plat(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;
	uint b_max;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	/* MMC_TINY has no blk uclass: switch the hw partition directly */
	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		pr_debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	b_max = mmc_get_b_max(mmc, dst, blkcnt);

	/* Split the transfer into chunks the controller can handle */
	do {
		cur = (blocks_todo > b_max) ? b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			pr_debug("%s: Failed to read blocks\n", __func__);
			return 0;
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}
507
508static int mmc_go_idle(struct mmc *mmc)
509{
510 struct mmc_cmd cmd;
511 int err;
512
513 udelay(1000);
514
515 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
516 cmd.cmdarg = 0;
517 cmd.resp_type = MMC_RSP_NONE;
518
519 err = mmc_send_cmd(mmc, &cmd, NULL);
520
521 if (err)
522 return err;
523
524 udelay(2000);
525
526 return 0;
527}
528
529#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
/*
 * Perform the SD signal-voltage switch sequence.  For 3.3V no CMD11 is
 * needed and the regulator is set directly; for 1.8V the spec sequence
 * is followed: CMD11, wait for DAT0 low, gate the clock, change the
 * signalling voltage, re-enable the clock, then wait for DAT0 high.
 */
static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
{
	struct mmc_cmd cmd;
	int err = 0;

	/* CMD11 is only required when actually switching to 1.8V */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return mmc_set_signal_voltage(mmc, signal_voltage);

	cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
		return -EIO;

	/*
	 * The card should drive DAT[0:3] low right after the CMD11
	 * response; give it up to 100 us (busy-wait fallback when the
	 * driver cannot observe DAT0).
	 */
	err = mmc_wait_dat0(mmc, 0, 100);
	if (err == -ENOSYS)
		udelay(100);
	else if (err)
		return -ETIMEDOUT;

	/* Gate the clock while the signalling voltage changes */
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);

	err = mmc_set_signal_voltage(mmc, signal_voltage);
	if (err)
		return err;

	/* Keep the clock gated for 10 ms before restarting it */
	mdelay(10);
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);

	/*
	 * Failure to switch is indicated by the card holding DAT[0:3]
	 * low; wait up to 1 ms for it to release the lines.
	 */
	err = mmc_wait_dat0(mmc, 1, 1000);
	if (err == -ENOSYS)
		udelay(1000);
	else if (err)
		return -ETIMEDOUT;

	return 0;
}
589#endif
590
/*
 * SD card initialization: poll ACMD41 until the card reports ready,
 * negotiating the voltage window, high-capacity support (HCS) and,
 * optionally, the 1.8V UHS switch request (S18R).
 */
static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		/* ACMD41 must be preceded by CMD55 (APP_CMD) */
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Advertise only the voltage bits the host supports
		 * (the 2.7-3.6V OCR window); SPI hosts send 0.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		if (uhs_en)
			cmd.cmdarg |= OCR_S18R;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		/* Card is done initializing when the busy bit reads set */
		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for SPI */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* 0x41000000 = ready (bit 31) with S18A accepted (bit 24) */
	if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
	    == 0x41000000) {
		err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
		if (err)
			return err;
	}
#endif

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}
670
/*
 * One CMD1 (SEND_OP_COND) iteration.  With use_arg == 0 the argument is
 * zero, which queries the card's OCR; later iterations echo back the
 * negotiated voltage window and access mode plus HCS.  mmc->ocr is
 * updated from the R3 response on success.
 */
static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
	cmd.resp_type = MMC_RSP_R3;
	cmd.cmdarg = 0;
	if (use_arg && !mmc_host_is_spi(mmc))
		cmd.cmdarg = OCR_HCS |
			(mmc->cfg->voltages &
			 (mmc->ocr & OCR_VOLTAGE_MASK)) |
			(mmc->ocr & OCR_ACCESS_MODE);

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;
	mmc->ocr = cmd.response[0];
	return 0;
}
691
692static int mmc_send_op_cond(struct mmc *mmc)
693{
694 int err, i;
695 int timeout = 1000;
696 uint start;
697
698
699 mmc_go_idle(mmc);
700
701 start = get_timer(0);
702
703 for (i = 0; ; i++) {
704 err = mmc_send_op_cond_iter(mmc, i != 0);
705 if (err)
706 return err;
707
708
709 if (mmc->ocr & OCR_BUSY)
710 break;
711
712 if (get_timer(start) > timeout)
713 return -ETIMEDOUT;
714 udelay(100);
715 }
716 mmc->op_cond_pending = 1;
717 return 0;
718}
719
/*
 * Finish the eMMC CMD1 handshake started by mmc_send_op_cond(): keep
 * polling until the card reports ready (if it was not already), read
 * the OCR over SPI where required, and latch high-capacity status and
 * the default RCA.
 */
static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	ulong start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for SPI */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	/* Exact version is determined later from the CSD/EXT_CSD */
	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}
765
766
767int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
768{
769 struct mmc_cmd cmd;
770 struct mmc_data data;
771 int err;
772
773
774 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
775 cmd.resp_type = MMC_RSP_R1;
776 cmd.cmdarg = 0;
777
778 data.dest = (char *)ext_csd;
779 data.blocks = 1;
780 data.blocksize = MMC_MAX_BLOCK_LEN;
781 data.flags = MMC_DATA_READ;
782
783 err = mmc_send_cmd(mmc, &cmd, &data);
784
785 return err;
786}
787
/*
 * Write one EXT_CSD byte with CMD6 (SWITCH) and wait for the card to
 * apply it.  The timeout is derived from the card's GENERIC_CMD6_TIME
 * (or PARTITION_SWITCH_TIME for partition changes) when available.
 * When send_status is false and the driver cannot poll DAT0, the full
 * timeout is simply waited out instead of polling CMD13 — needed when
 * the card may be in a state where SEND_STATUS is not reliable.
 */
static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
			bool send_status)
{
	unsigned int status, start;
	struct mmc_cmd cmd;
	int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
	bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
			      (index == EXT_CSD_PART_CONF);
	int ret;

	/* Card-provided times are in 10 ms units */
	if (mmc->gen_cmd6_time)
		timeout_ms = mmc->gen_cmd6_time * 10;

	if (is_part_switch && mmc->part_switch_time)
		timeout_ms = mmc->part_switch_time * 10;

	cmd.cmdidx = MMC_CMD_SWITCH;
	cmd.resp_type = MMC_RSP_R1b;
	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		     (index << 16) |
		     (value << 8);

	ret = mmc_send_cmd_retry(mmc, &cmd, NULL, 3);
	if (ret)
		return ret;

	start = get_timer(0);

	/* Prefer hardware DAT0 busy polling when the driver has it */
	ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
	if (ret && ret != -ENOSYS)
		return ret;

	/*
	 * Neither CMD13 polling allowed nor DAT0 polling available:
	 * rely on waiting the stated timeout to be sufficient.
	 */
	if (ret == -ENOSYS && !send_status) {
		mdelay(timeout_ms);
		return 0;
	}

	/* Finally poll CMD13 until the card is back in TRANS state */
	do {
		ret = mmc_send_status(mmc, &status);

		if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
			pr_debug("switch failed %d/%d/0x%x !\n", set, index,
				 value);
			return -EIO;
		}
		if (!ret && (status & MMC_STATUS_RDY_FOR_DATA) &&
		    (status & MMC_STATUS_CURR_STATE) == MMC_STATE_TRANS)
			return 0;
		udelay(100);
	} while (get_timer(start) < timeout_ms);

	return -ETIMEDOUT;
}
852
/* CMD6 SWITCH with CMD13 status verification (see __mmc_switch). */
int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
{
	return __mmc_switch(mmc, set, index, value, true);
}
857
/*
 * Write 1 to EXT_CSD BOOT_WP to write-protect the boot area.
 * NOTE(review): which protection flavor value 1 selects (power-on vs
 * permanent) depends on the BOOT_WP bit layout — confirm against the
 * JESD84 EXT_CSD definition.
 */
int mmc_boot_wp(struct mmc *mmc)
{
	return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
}
862
863#if !CONFIG_IS_ENABLED(MMC_TINY)
/*
 * Program the card's HS_TIMING field for the requested bus mode.  When
 * downgrading from HS200/HS400 (hsdowngrade) the CMD13 verification
 * inside __mmc_switch is skipped and the host clock is dropped to HS
 * before the follow-up EXT_CSD read, which would be unreliable at the
 * old, much faster clock.
 */
static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
			      bool hsdowngrade)
{
	int err;
	int speed_bits;

	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	switch (mode) {
	case MMC_HS:
	case MMC_HS_52:
	case MMC_DDR_52:
		speed_bits = EXT_CSD_TIMING_HS;
		break;
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	case MMC_HS_200:
		speed_bits = EXT_CSD_TIMING_HS200;
		break;
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	case MMC_HS_400:
		speed_bits = EXT_CSD_TIMING_HS400;
		break;
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	case MMC_HS_400_ES:
		speed_bits = EXT_CSD_TIMING_HS400;
		break;
#endif
	case MMC_LEGACY:
		speed_bits = EXT_CSD_TIMING_LEGACY;
		break;
	default:
		return -EINVAL;
	}

	err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			   speed_bits, !hsdowngrade);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
	CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	/*
	 * Downgrading from HS200/HS400: the clock is still running far
	 * above the HS limit, so switch the host to HS and slow the
	 * clock before touching the bus again.
	 */
	if (hsdowngrade) {
		mmc_select_mode(mmc, MMC_HS);
		mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
	}
#endif

	if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
		/* Re-read EXT_CSD to confirm the switch took effect */
		err = mmc_send_ext_csd(mmc, test_csd);
		if (err)
			return err;

		/* HS_TIMING still zero means no high-speed support */
		if (!test_csd[EXT_CSD_HS_TIMING])
			return -ENOTSUPP;
	}

	return 0;
}
932
/*
 * Populate mmc->card_caps from the cached EXT_CSD: bus widths, HS/DDR
 * and — when compiled in — HS200/HS400/HS400ES support.  SPI hosts and
 * pre-v4 cards keep only the legacy 1-bit capability.
 */
static int mmc_get_capabilities(struct mmc *mmc)
{
	u8 *ext_csd = mmc->ext_csd;
	char cardtype;

	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed modes */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!ext_csd) {
		pr_err("No ext_csd found!\n");
		return -ENOTSUPP;
	}

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	cardtype = ext_csd[EXT_CSD_CARD_TYPE];
	mmc->cardtype = cardtype;

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
			EXT_CSD_CARD_TYPE_HS200_1_8V)) {
		mmc->card_caps |= MMC_MODE_HS200;
	}
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
	CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
			EXT_CSD_CARD_TYPE_HS400_1_8V)) {
		mmc->card_caps |= MMC_MODE_HS400;
	}
#endif
	if (cardtype & EXT_CSD_CARD_TYPE_52) {
		if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
			mmc->card_caps |= MMC_MODE_DDR_52MHz;
		mmc->card_caps |= MMC_MODE_HS_52MHz;
	}
	if (cardtype & EXT_CSD_CARD_TYPE_26)
		mmc->card_caps |= MMC_MODE_HS;

#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	/* Enhanced strobe needs both strobe support and HS400 capability */
	if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
	    (mmc->card_caps & MMC_MODE_HS400)) {
		mmc->card_caps |= MMC_MODE_HS400_ES;
	}
#endif

	return 0;
}
987#endif
988
989static int mmc_set_capacity(struct mmc *mmc, int part_num)
990{
991 switch (part_num) {
992 case 0:
993 mmc->capacity = mmc->capacity_user;
994 break;
995 case 1:
996 case 2:
997 mmc->capacity = mmc->capacity_boot;
998 break;
999 case 3:
1000 mmc->capacity = mmc->capacity_rpmb;
1001 break;
1002 case 4:
1003 case 5:
1004 case 6:
1005 case 7:
1006 mmc->capacity = mmc->capacity_gp[part_num - 4];
1007 break;
1008 default:
1009 return -1;
1010 }
1011
1012 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1013
1014 return 0;
1015}
1016
/*
 * Select hardware partition part_num via EXT_CSD PART_CONF, retrying up
 * to 3 extra times.  On success — or on -ENODEV when returning to the
 * user area — refresh the capacity and the blk descriptor's hwpart.
 */
int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
{
	int ret;
	int retry = 3;

	do {
		ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONF,
				 (mmc->part_config & ~PART_ACCESS_MASK)
				 | (part_num & PART_ACCESS_MASK));
	} while (ret && retry--);

	/*
	 * Set the capacity if the switch succeeded or was intended to
	 * return to representing the raw device.
	 */
	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
		ret = mmc_set_capacity(mmc, part_num);
		mmc_get_blk_desc(mmc)->hwpart = part_num;
	}

	return ret;
}
1040
1041#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
/*
 * Configure eMMC hardware partitioning (enhanced user area, GP
 * partitions, write reliability) from 'conf'.  'mode' selects a dry run
 * (CHECK), programming only (SET), or programming plus setting
 * PARTITION_SETTING_COMPLETED (COMPLETE).  Completing partitioning is a
 * one-time, irreversible operation that takes effect after power cycle.
 */
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		pr_err("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		pr_err("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		pr_err("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* Enhanced user area: must be HC WP group size aligned */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			pr_err("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			/* byte-addressed cards store the start in bytes */
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	/* GP partitions: validate alignment, accumulate enhanced size */
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			pr_err("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		pr_err("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	/* MAX_ENH_SIZE_MULT is a 24-bit little-endian EXT_CSD field */
	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* Compute the WR_REL_SET value the configuration asks for */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	/* Changes require the card to support host-controlled reliability */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		pr_err("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

#if CONFIG_IS_ENABLED(MMC_WRITE)
		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
#endif

	}

	/* All checks passed: write the configuration, one byte per CMD6 */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/*
	 * WR_REL_SET is write-once and must be written before
	 * PARTITION_SETTING_COMPLETED is set.
	 */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/*
	 * Confirm the partition configuration; it only takes effect
	 * after the next power cycle.
	 */
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}
1237#endif
1238
1239#if !CONFIG_IS_ENABLED(DM_MMC)
1240int mmc_getcd(struct mmc *mmc)
1241{
1242 int cd;
1243
1244 cd = board_mmc_getcd(mmc);
1245
1246 if (cd < 0) {
1247 if (mmc->cfg->ops->getcd)
1248 cd = mmc->cfg->ops->getcd(mmc);
1249 else
1250 cd = 1;
1251 }
1252
1253 return cd;
1254}
1255#endif
1256
1257#if !CONFIG_IS_ENABLED(MMC_TINY)
1258static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1259{
1260 struct mmc_cmd cmd;
1261 struct mmc_data data;
1262
1263
1264 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1265 cmd.resp_type = MMC_RSP_R1;
1266 cmd.cmdarg = (mode << 31) | 0xffffff;
1267 cmd.cmdarg &= ~(0xf << (group * 4));
1268 cmd.cmdarg |= value << (group * 4);
1269
1270 data.dest = (char *)resp;
1271 data.blocksize = 64;
1272 data.blocks = 1;
1273 data.flags = MMC_DATA_READ;
1274
1275 return mmc_send_cmd(mmc, &cmd, &data);
1276}
1277
/*
 * Query SD card capabilities: read the SCR (spec version, 4-bit bus
 * support) and the CMD6 switch-function status (high-speed and, for
 * SD 3.0+, UHS bus modes), recording the results in mmc->version,
 * mmc->scr and mmc->card_caps.
 */
static int sd_get_capabilities(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
	struct mmc_data data;
	int timeout;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	u32 sd3_bus_mode;
#endif

	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* ACMD51 (SEND_SCR) must be preceded by CMD55 */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd_retry(mmc, &cmd, &data, 3);

	if (err)
		return err;

	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	/* SD_SPEC field of the SCR selects the spec version */
	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		/* SD_SPEC3 bit distinguishes 3.0 from 2.0 */
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support CMD6 switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	/* Query the switch function; retry while it reports busy */
	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
		mmc->card_caps |= MMC_CAP(SD_HS);

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* Versions before 3.0 don't support UHS modes */
	if (mmc->version < SD_VERSION_3)
		return 0;

	sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
	if (sd3_bus_mode & SD_MODE_UHS_SDR104)
		mmc->card_caps |= MMC_CAP(UHS_SDR104);
	if (sd3_bus_mode & SD_MODE_UHS_SDR50)
		mmc->card_caps |= MMC_CAP(UHS_SDR50);
	if (sd3_bus_mode & SD_MODE_UHS_SDR25)
		mmc->card_caps |= MMC_CAP(UHS_SDR25);
	if (sd3_bus_mode & SD_MODE_UHS_SDR12)
		mmc->card_caps |= MMC_CAP(UHS_SDR12);
	if (sd3_bus_mode & SD_MODE_UHS_DDR50)
		mmc->card_caps |= MMC_CAP(UHS_DDR50);
#endif

	return 0;
}
1383
/*
 * Switch the SD card to the bus-speed function matching 'mode' via
 * CMD6, then verify from the returned switch status that the card
 * actually selected it.  Returns -ENOTSUPP if the card refused.
 */
static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
{
	int err;

	ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
	int speed;

	/* SD version 1.00 and 1.01 do not support CMD6 */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	switch (mode) {
	case MMC_LEGACY:
		speed = UHS_SDR12_BUS_SPEED;
		break;
	case SD_HS:
		speed = HIGH_SPEED_BUS_SPEED;
		break;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	case UHS_SDR12:
		speed = UHS_SDR12_BUS_SPEED;
		break;
	case UHS_SDR25:
		speed = UHS_SDR25_BUS_SPEED;
		break;
	case UHS_SDR50:
		speed = UHS_SDR50_BUS_SPEED;
		break;
	case UHS_DDR50:
		speed = UHS_DDR50_BUS_SPEED;
		break;
	case UHS_SDR104:
		speed = UHS_SDR104_BUS_SPEED;
		break;
#endif
	default:
		return -EINVAL;
	}

	err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
	if (err)
		return err;

	/* The group-1 result nibble must echo the requested function */
	if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
		return -ENOTSUPP;

	return 0;
}
1432
1433static int sd_select_bus_width(struct mmc *mmc, int w)
1434{
1435 int err;
1436 struct mmc_cmd cmd;
1437
1438 if ((w != 4) && (w != 1))
1439 return -EINVAL;
1440
1441 cmd.cmdidx = MMC_CMD_APP_CMD;
1442 cmd.resp_type = MMC_RSP_R1;
1443 cmd.cmdarg = mmc->rca << 16;
1444
1445 err = mmc_send_cmd(mmc, &cmd, NULL);
1446 if (err)
1447 return err;
1448
1449 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1450 cmd.resp_type = MMC_RSP_R1;
1451 if (w == 4)
1452 cmd.cmdarg = 2;
1453 else if (w == 1)
1454 cmd.cmdarg = 0;
1455 err = mmc_send_cmd(mmc, &cmd, NULL);
1456 if (err)
1457 return err;
1458
1459 return 0;
1460}
1461#endif
1462
1463#if CONFIG_IS_ENABLED(MMC_WRITE)
/*
 * sd_read_ssr() - read the SD Status register (ACMD13) and extract the
 * allocation unit size and erase timing parameters into mmc->ssr.
 *
 * Return: 0 on success (even if the AU size field is invalid, which is
 *	   only logged), or a command/transfer error.
 */
static int sd_read_ssr(struct mmc *mmc)
{
	/* AU_SIZE code (0..15) -> allocation unit size in 512-byte sectors */
	static const unsigned int sd_au_size[] = {
		0, SZ_16K / 512, SZ_32K / 512,
		SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
		SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
		SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
		SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
		SZ_64M / 512,
	};
	int err, i;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
	struct mmc_data data;
	unsigned int au, eo, et, es;

	/* CMD55 first: ACMD13 is an application-specific command */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd_quirks(mmc, &cmd, NULL, MMC_QUIRK_RETRY_APP_CMD, 4);
	if (err)
		return err;

	/* ACMD13: read the 64-byte SD Status block */
	cmd.cmdidx = SD_CMD_APP_SD_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)ssr;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd_retry(mmc, &cmd, &data, 3);
	if (err)
		return err;

	/* SD Status is big-endian on the wire; convert in place */
	for (i = 0; i < 16; i++)
		ssr[i] = be32_to_cpu(ssr[i]);

	/* AU_SIZE: codes >9 are reserved before SD 3.0 */
	au = (ssr[2] >> 12) & 0xF;
	if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
		mmc->ssr.au = sd_au_size[au];
		/* ERASE_SIZE spans a word boundary: low byte of ssr[2],
		 * high byte in ssr[3] */
		es = (ssr[3] >> 24) & 0xFF;
		es |= (ssr[2] & 0xFF) << 8;
		et = (ssr[3] >> 18) & 0x3F;
		if (es && et) {
			/* ERASE_TIMEOUT/ERASE_OFFSET, scaled to ms */
			eo = (ssr[3] >> 16) & 0x3;
			mmc->ssr.erase_timeout = (et * 1000) / es;
			mmc->ssr.erase_offset = eo * 1000;
		}
	} else {
		pr_debug("Invalid Allocation Unit Size.\n");
	}

	return 0;
}
1521#endif
1522
1523
/* Frequency base units for the CSD TRAN_SPEED field (bits 2:0), in Hz */
static const int fbase[] = {
	10000,
	100000,
	1000000,
	10000000,
};
1530
1531
1532
1533
/*
 * Multiplier values for the CSD TRAN_SPEED field (bits 6:3), scaled by
 * 10 so they stay integral; fbase[] above is 10x smaller to compensate.
 * Index 0 is reserved.
 */
static const u8 multipliers[] = {
	0,	/* reserved */
	10,
	12,
	13,
	15,
	20,
	25,
	30,
	35,
	40,
	45,
	50,
	55,
	60,
	70,
	80,
};
1552
1553static inline int bus_width(uint cap)
1554{
1555 if (cap == MMC_MODE_8BIT)
1556 return 8;
1557 if (cap == MMC_MODE_4BIT)
1558 return 4;
1559 if (cap == MMC_MODE_1BIT)
1560 return 1;
1561 pr_warn("invalid bus witdh capability 0x%x\n", cap);
1562 return 0;
1563}
1564
1565#if !CONFIG_IS_ENABLED(DM_MMC)
#ifdef MMC_SUPPORTS_TUNING
/* Non-DM stub: legacy hosts have no tuning hook, so tuning always fails */
static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
{
	return -ENOTSUPP;
}
#endif
1572
1573static int mmc_set_ios(struct mmc *mmc)
1574{
1575 int ret = 0;
1576
1577 if (mmc->cfg->ops->set_ios)
1578 ret = mmc->cfg->ops->set_ios(mmc);
1579
1580 return ret;
1581}
1582
1583static int mmc_host_power_cycle(struct mmc *mmc)
1584{
1585 int ret = 0;
1586
1587 if (mmc->cfg->ops->host_power_cycle)
1588 ret = mmc->cfg->ops->host_power_cycle(mmc);
1589
1590 return ret;
1591}
1592#endif
1593
1594int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1595{
1596 if (!disable) {
1597 if (clock > mmc->cfg->f_max)
1598 clock = mmc->cfg->f_max;
1599
1600 if (clock < mmc->cfg->f_min)
1601 clock = mmc->cfg->f_min;
1602 }
1603
1604 mmc->clock = clock;
1605 mmc->clk_disable = disable;
1606
1607 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1608
1609 return mmc_set_ios(mmc);
1610}
1611
/* Record the new bus width and push it to the host via set_ios */
static int mmc_set_bus_width(struct mmc *mmc, uint width)
{
	mmc->bus_width = width;

	return mmc_set_ios(mmc);
}
1618
1619#if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1620
1621
1622
1623
1624
1625void mmc_dump_capabilities(const char *text, uint caps)
1626{
1627 enum bus_mode mode;
1628
1629 pr_debug("%s: widths [", text);
1630 if (caps & MMC_MODE_8BIT)
1631 pr_debug("8, ");
1632 if (caps & MMC_MODE_4BIT)
1633 pr_debug("4, ");
1634 if (caps & MMC_MODE_1BIT)
1635 pr_debug("1, ");
1636 pr_debug("\b\b] modes [");
1637 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1638 if (MMC_CAP(mode) & caps)
1639 pr_debug("%s, ", mmc_mode_name(mode));
1640 pr_debug("\b\b]\n");
1641}
1642#endif
1643
/* One preference-table entry: a bus mode, the widths it allows, and
 * (when tuning is supported) the tuning command it requires. */
struct mode_width_tuning {
	enum bus_mode mode;	/* bus timing mode to try */
	uint widths;		/* MMC_MODE_* width flags usable in @mode */
#ifdef MMC_SUPPORTS_TUNING
	uint tuning;		/* tuning command index, 0 if none needed */
#endif
};
1651
1652#if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1653int mmc_voltage_to_mv(enum mmc_voltage voltage)
1654{
1655 switch (voltage) {
1656 case MMC_SIGNAL_VOLTAGE_000: return 0;
1657 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1658 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1659 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1660 }
1661 return -EINVAL;
1662}
1663
1664static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1665{
1666 int err;
1667
1668 if (mmc->signal_voltage == signal_voltage)
1669 return 0;
1670
1671 mmc->signal_voltage = signal_voltage;
1672 err = mmc_set_ios(mmc);
1673 if (err)
1674 pr_debug("unable to set voltage (err %d)\n", err);
1675
1676 return err;
1677}
1678#else
/* No-op stub when I/O voltage switching support is compiled out */
static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
{
	return 0;
}
1683#endif
1684
1685#if !CONFIG_IS_ENABLED(MMC_TINY)
/* SD bus modes ordered from most to least preferred (fastest first) */
static const struct mode_width_tuning sd_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
#ifdef MMC_SUPPORTS_TUNING
	{
		.mode = UHS_SDR104,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK
	},
#endif
	{
		.mode = UHS_SDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_DDR50,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = UHS_SDR25,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	{
		.mode = SD_HS,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	{
		.mode = UHS_SDR12,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
#endif
	/* MMC_LEGACY last: the guaranteed fallback every card supports */
	{
		.mode = MMC_LEGACY,
		.widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};
1723
/*
 * Iterate @mwt over sd_modes_by_pref (fastest first), running the body
 * only for modes present in the @caps bitmask.
 */
#define for_each_sd_mode_by_pref(caps, mwt) \
	for (mwt = sd_modes_by_pref;\
	     mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
	     mwt++) \
		if (caps & MMC_CAP(mwt->mode))
1729
/*
 * sd_select_mode_and_width() - pick the fastest working SD bus mode/width
 *
 * Walks the preference table from fastest to slowest mode and from wide
 * to narrow bus, trying each combination supported by both card and host.
 * On any failure the card is dropped back to MMC_LEGACY and the next
 * combination is tried.
 *
 * @card_caps: capability mask reported by the card
 * Return: 0 once a mode works, -ENOTSUPP if none did.
 */
static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err;
	uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
	const struct mode_width_tuning *mwt;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* UHS is only usable if the card accepted 1.8V signalling (S18R) */
	bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
#else
	bool uhs_en = false;
#endif
	uint caps;

#ifdef DEBUG
	mmc_dump_capabilities("sd card", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* SPI hosts: fixed 1-bit legacy mode, nothing to negotiate */
	if (mmc_host_is_spi(mmc)) {
		mmc_set_bus_width(mmc, 1);
		mmc_select_mode(mmc, MMC_LEGACY);
		mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
#if CONFIG_IS_ENABLED(MMC_WRITE)
		err = sd_read_ssr(mmc);
		if (err)
			pr_warn("unable to read ssr\n");
#endif
		return 0;
	}

	/* Restrict the card's capabilities by what the host can do */
	caps = card_caps & mmc->host_caps;

	if (!uhs_en)
		caps &= ~UHS_CAPS;

	for_each_sd_mode_by_pref(caps, mwt) {
		uint *w;

		for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
			if (*w & caps & mwt->widths) {
				pr_debug("trying mode %s width %d (at %d MHz)\n",
					 mmc_mode_name(mwt->mode),
					 bus_width(*w),
					 mmc_mode2freq(mmc, mwt->mode) / 1000000);

				/* configure the bus width (card + host) */
				err = sd_select_bus_width(mmc, bus_width(*w));
				if (err)
					goto error;
				mmc_set_bus_width(mmc, bus_width(*w));

				/* configure the bus mode (card) */
				err = sd_set_card_speed(mmc, mwt->mode);
				if (err)
					goto error;

				/* configure the bus mode (host) */
				mmc_select_mode(mmc, mwt->mode);
				mmc_set_clock(mmc, mmc->tran_speed,
						MMC_CLK_ENABLE);

#ifdef MMC_SUPPORTS_TUNING
				/* execute tuning if needed */
				if (mwt->tuning && !mmc_host_is_spi(mmc)) {
					err = mmc_execute_tuning(mmc,
								 mwt->tuning);
					if (err) {
						pr_debug("tuning failed\n");
						goto error;
					}
				}
#endif

#if CONFIG_IS_ENABLED(MMC_WRITE)
				/* SSR read doubles as a data-path sanity check */
				err = sd_read_ssr(mmc);
				if (err)
					pr_warn("unable to read ssr\n");
#endif
				if (!err)
					return 0;

error:
				/* revert to a safer bus speed and retry */
				mmc_select_mode(mmc, MMC_LEGACY);
				mmc_set_clock(mmc, mmc->tran_speed,
						MMC_CLK_ENABLE);
			}
		}
	}

	pr_err("unable to select a mode\n");
	return -ENOTSUPP;
}
1823
1824
1825
1826
1827
1828
/*
 * mmc_read_and_compare_ext_csd() - verify the bus by re-reading EXT_CSD
 *
 * Performs a real data transfer on the newly configured bus and checks
 * that fields which must never change still match the cached copy.
 *
 * Return: 0 if the transfer worked and the fields match, -EBADMSG if the
 *	   data differs (i.e. the bus mode is unreliable), or a transfer
 *	   error from mmc_send_ext_csd().
 */
static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
{
	int err;
	const u8 *ext_csd = mmc->ext_csd;
	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	/* EXT_CSD only exists from MMC 4.0 onwards */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	err = mmc_send_ext_csd(mmc, test_csd);
	if (err)
		return err;

	/* Only compare read-only fields */
	if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
		== test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
	    ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
		== test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
	    ext_csd[EXT_CSD_REV]
		== test_csd[EXT_CSD_REV] &&
	    ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
		== test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
	    memcmp(&ext_csd[EXT_CSD_SEC_CNT],
		   &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
		return 0;

	return -EBADMSG;
}
1857
1858#if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
/*
 * mmc_set_lowest_voltage() - pick the lowest signal voltage usable by
 * both the card (per its EXT_CSD card type for @mode) and the host
 * (@allowed_mask), then apply it.
 *
 * Return: 0 once a voltage is applied, -ENOTSUPP if no common voltage
 *	   could be set.
 */
static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
				  uint32_t allowed_mask)
{
	u32 card_mask = 0;

	/* Build the voltages the card supports for the requested mode */
	switch (mode) {
	case MMC_HS_400_ES:
	case MMC_HS_400:
	case MMC_HS_200:
		if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
		    EXT_CSD_CARD_TYPE_HS400_1_8V))
			card_mask |= MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
		    EXT_CSD_CARD_TYPE_HS400_1_2V))
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	case MMC_DDR_52:
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
			card_mask |= MMC_SIGNAL_VOLTAGE_330 |
				     MMC_SIGNAL_VOLTAGE_180;
		if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
			card_mask |= MMC_SIGNAL_VOLTAGE_120;
		break;
	default:
		card_mask |= MMC_SIGNAL_VOLTAGE_330;
		break;
	}

	/* ffs() finds the lowest set bit, i.e. the lowest voltage first */
	while (card_mask & allowed_mask) {
		enum mmc_voltage best_match;

		best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
		if (!mmc_set_signal_voltage(mmc, best_match))
			return 0;

		/* that voltage failed on the host; try the next one up */
		allowed_mask &= ~best_match;
	}

	return -ENOTSUPP;
}
1899#else
/* No-op stub when I/O voltage switching support is compiled out */
static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
					 uint32_t allowed_mask)
{
	return 0;
}
1905#endif
1906
/* eMMC bus modes ordered from most to least preferred (fastest first) */
static const struct mode_width_tuning mmc_modes_by_pref[] = {
#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	{
		.mode = MMC_HS_400_ES,
		.widths = MMC_MODE_8BIT,
	},
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	{
		.mode = MMC_HS_400,
		.widths = MMC_MODE_8BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
	},
#endif
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	{
		.mode = MMC_HS_200,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
		.tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
	},
#endif
	{
		.mode = MMC_DDR_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
	},
	{
		.mode = MMC_HS_52,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	{
		.mode = MMC_HS,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	},
	/* MMC_LEGACY last: the guaranteed fallback every device supports */
	{
		.mode = MMC_LEGACY,
		.widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
	}
};
1945
/*
 * Iterate @mwt over mmc_modes_by_pref (fastest first), running the body
 * only for modes present in the @caps bitmask.
 */
#define for_each_mmc_mode_by_pref(caps, mwt) \
	for (mwt = mmc_modes_by_pref;\
	     mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
	     mwt++) \
		if (caps & MMC_CAP(mwt->mode))
1951
/* Map each width capability (+ DDR flag) to its EXT_CSD BUS_WIDTH value,
 * ordered widest/DDR first so the fastest option is tried first. */
static const struct ext_csd_bus_width {
	uint cap;		/* MMC_MODE_* width capability flag */
	bool is_ddr;		/* true for the DDR variants */
	uint ext_csd_bits;	/* value to write into EXT_CSD[BUS_WIDTH] */
} ext_csd_bus_width[] = {
	{MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
	{MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
	{MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
	{MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
	{MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
};
1963
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * mmc_select_hs400() - switch an eMMC device into HS400 mode
 *
 * HS400 has no tuning sequence of its own, so the device is first taken
 * to HS200 and tuned there, then dropped back to HS to set the 8-bit DDR
 * bus width (required by JEDEC before entering HS400), and finally
 * switched to HS400 timing.
 *
 * Return: 0 on success or a negative error code.
 */
static int mmc_select_hs400(struct mmc *mmc)
{
	int err;

	/* Set timing to HS200 for tuning */
	err = mmc_set_card_speed(mmc, MMC_HS_200, false);
	if (err)
		return err;

	/* configure the bus mode (host) */
	mmc_select_mode(mmc, MMC_HS_200);
	mmc_set_clock(mmc, mmc->tran_speed, false);

	/* execute tuning while hs400_tuning flags the special case */
	mmc->hs400_tuning = 1;
	err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
	mmc->hs400_tuning = 0;
	if (err) {
		debug("tuning failed\n");
		return err;
	}

	/* Set back to HS before changing the bus width to DDR */
	mmc_set_card_speed(mmc, MMC_HS, true);

	err = mmc_hs400_prepare_ddr(mmc);
	if (err)
		return err;

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
	if (err)
		return err;

	err = mmc_set_card_speed(mmc, MMC_HS_400, false);
	if (err)
		return err;

	mmc_select_mode(mmc, MMC_HS_400);
	err = mmc_set_clock(mmc, mmc->tran_speed, false);
	if (err)
		return err;

	return 0;
}
#else
/* Stub when HS400 support is compiled out */
static int mmc_select_hs400(struct mmc *mmc)
{
	return -ENOTSUPP;
}
#endif
2016
#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
#if !CONFIG_IS_ENABLED(DM_MMC)
/* Non-DM stub: legacy hosts cannot enable enhanced strobe */
static int mmc_set_enhanced_strobe(struct mmc *mmc)
{
	return -ENOTSUPP;
}
#endif
/*
 * mmc_select_hs400es() - switch an eMMC device into HS400 Enhanced Strobe
 *
 * HS400ES needs no tuning: the device is moved to HS, the 8-bit DDR bus
 * width is set with the strobe bit, then HS400 timing is selected and
 * the host's enhanced-strobe hook is invoked.
 *
 * Return: 0 on success or a negative error code.
 */
static int mmc_select_hs400es(struct mmc *mmc)
{
	int err;

	err = mmc_set_card_speed(mmc, MMC_HS, true);
	if (err)
		return err;

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
			 EXT_CSD_BUS_WIDTH_STROBE);
	if (err) {
		printf("switch to bus width for hs400 failed\n");
		return err;
	}

	err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
	if (err)
		return err;

	mmc_select_mode(mmc, MMC_HS_400_ES);
	err = mmc_set_clock(mmc, mmc->tran_speed, false);
	if (err)
		return err;

	return mmc_set_enhanced_strobe(mmc);
}
#else
/* Stub when HS400ES support is compiled out */
static int mmc_select_hs400es(struct mmc *mmc)
{
	return -ENOTSUPP;
}
#endif
2057
/*
 * Iterate @ecbv over ext_csd_bus_width (widest first), running the body
 * only for entries matching the requested DDR flag and present in @caps.
 */
#define for_each_supported_width(caps, ddr, ecbv) \
	for (ecbv = ext_csd_bus_width;\
	     ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
	     ecbv++) \
		if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
2063
/*
 * mmc_select_mode_and_width() - pick the fastest working eMMC mode/width
 *
 * Walks mmc_modes_by_pref (fastest first) and ext_csd_bus_width (widest
 * first), trying each supported combination. Every candidate is verified
 * with a real EXT_CSD read; on failure the voltage, bus width and mode
 * are rolled back and the next combination is tried.
 *
 * @card_caps: capability mask reported by the card
 * Return: 0 once a mode works, -ENOTSUPP if none did.
 */
static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
{
	int err = 0;
	const struct mode_width_tuning *mwt;
	const struct ext_csd_bus_width *ecbw;

#ifdef DEBUG
	mmc_dump_capabilities("mmc", card_caps);
	mmc_dump_capabilities("host", mmc->host_caps);
#endif

	/* SPI hosts: fixed 1-bit legacy mode, nothing to negotiate */
	if (mmc_host_is_spi(mmc)) {
		mmc_set_bus_width(mmc, 1);
		mmc_select_mode(mmc, MMC_LEGACY);
		mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
		return 0;
	}

	/* Restrict the card's capabilities by what the host can do */
	card_caps &= mmc->host_caps;

	/* Only version 4 of MMC supports wider bus widths */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd) {
		pr_debug("No ext_csd found!\n");
		return -ENOTSUPP;
	}

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	/*
	 * In case the eMMC is already in a fast mode, drop back to HS
	 * first; the CMD6 below would otherwise run at an unsafe timing.
	 */
	if (mmc->selected_mode == MMC_HS_200 ||
	    mmc->selected_mode == MMC_HS_400 ||
	    mmc->selected_mode == MMC_HS_400_ES)
		mmc_set_card_speed(mmc, MMC_HS, true);
	else
#endif
		mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);

	for_each_mmc_mode_by_pref(card_caps, mwt) {
		for_each_supported_width(card_caps & mwt->widths,
					 mmc_is_mode_ddr(mwt->mode), ecbw) {
			enum mmc_voltage old_voltage;
			pr_debug("trying mode %s width %d (at %d MHz)\n",
				 mmc_mode_name(mwt->mode),
				 bus_width(ecbw->cap),
				 mmc_mode2freq(mmc, mwt->mode) / 1000000);
			/* remember the voltage so it can be restored on error */
			old_voltage = mmc->signal_voltage;
			err = mmc_set_lowest_voltage(mmc, mwt->mode,
						     MMC_ALL_SIGNAL_VOLTAGE);
			if (err)
				continue;

			/* configure the bus width (card + host) */
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				    EXT_CSD_BUS_WIDTH,
				    ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
			if (err)
				goto error;
			mmc_set_bus_width(mmc, bus_width(ecbw->cap));

			if (mwt->mode == MMC_HS_400) {
				err = mmc_select_hs400(mmc);
				if (err) {
					printf("Select HS400 failed %d\n", err);
					goto error;
				}
			} else if (mwt->mode == MMC_HS_400_ES) {
				err = mmc_select_hs400es(mmc);
				if (err) {
					printf("Select HS400ES failed %d\n",
					       err);
					goto error;
				}
			} else {
				/* configure the bus speed (card) */
				err = mmc_set_card_speed(mmc, mwt->mode, false);
				if (err)
					goto error;

				/*
				 * configure the bus width AND the ddr mode
				 * (card); the DDR width cannot be set before
				 * the speed switch above
				 */
				if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
					err = mmc_switch(mmc,
							 EXT_CSD_CMD_SET_NORMAL,
							 EXT_CSD_BUS_WIDTH,
							 ecbw->ext_csd_bits);
					if (err)
						goto error;
				}

				/* configure the bus mode (host) */
				mmc_select_mode(mmc, mwt->mode);
				mmc_set_clock(mmc, mmc->tran_speed,
					      MMC_CLK_ENABLE);
#ifdef MMC_SUPPORTS_TUNING

				/* execute tuning if needed */
				if (mwt->tuning) {
					err = mmc_execute_tuning(mmc,
								 mwt->tuning);
					if (err) {
						pr_debug("tuning failed : %d\n", err);
						goto error;
					}
				}
#endif
			}

			/* do a transfer to check the configuration */
			err = mmc_read_and_compare_ext_csd(mmc);
			if (!err)
				return 0;
error:
			/* roll back voltage, width and mode, then retry */
			mmc_set_signal_voltage(mmc, old_voltage);

			mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
			mmc_select_mode(mmc, MMC_LEGACY);
			mmc_set_bus_width(mmc, 1);
		}
	}

	pr_err("unable to select a mode : %d\n", err);

	return -ENOTSUPP;
}
2201#endif
2202
#if CONFIG_IS_ENABLED(MMC_TINY)
/* Static EXT_CSD backing store: MMC_TINY supports a single device only */
DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
#endif
2206
/*
 * mmc_startup_v4() - EXT_CSD-based init for MMC version 4 and later
 *
 * Reads and caches the EXT_CSD, derives the exact MMC version, the
 * >2GB capacity, partition configuration/sizes and erase group size.
 * Does nothing for SD cards or pre-4.0 MMC.
 *
 * Return: 0 on success or a negative error code; on error the cached
 *	   EXT_CSD is released.
 */
static int mmc_startup_v4(struct mmc *mmc)
{
	int err, i;
	u64 capacity;
	bool has_parts = false;
	bool part_completed;
	/* EXT_CSD_REV value -> MMC spec version */
	static const u32 mmc_versions[] = {
		MMC_VERSION_4,
		MMC_VERSION_4_1,
		MMC_VERSION_4_2,
		MMC_VERSION_4_3,
		MMC_VERSION_4_4,
		MMC_VERSION_4_41,
		MMC_VERSION_4_5,
		MMC_VERSION_5_0,
		MMC_VERSION_5_1
	};

#if CONFIG_IS_ENABLED(MMC_TINY)
	u8 *ext_csd = ext_csd_bkup;

	if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
		return 0;

	if (!mmc->ext_csd)
		memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		goto error;

	/* store the ext csd for future reference */
	if (!mmc->ext_csd)
		mmc->ext_csd = ext_csd;
#else
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
		return 0;

	/* check ext_csd version and capacity */
	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		goto error;

	/* store the ext csd for future reference */
	if (!mmc->ext_csd)
		mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
	if (!mmc->ext_csd)
		return -ENOMEM;
	memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
#endif
	if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
		return -EINVAL;

	mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];

	if (mmc->version >= MMC_VERSION_4_2) {
		/*
		 * The EXT_CSD sector count is only valid for devices
		 * larger than 2GB; smaller devices keep the CSD-derived
		 * capacity computed by the caller.
		 */
		capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
				| ext_csd[EXT_CSD_SEC_CNT + 1] << 8
				| ext_csd[EXT_CSD_SEC_CNT + 2] << 16
				| ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
		capacity *= MMC_MAX_BLOCK_LEN;
		if ((capacity >> 20) > 2 * 1024)
			mmc->capacity_user = capacity;
	}

	if (mmc->version >= MMC_VERSION_4_5)
		mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];

	/*
	 * PARTITION_SETTING_COMPLETED indicates the one-time partition
	 * programming has been finalized; only then are the enhanced and
	 * GP partition size fields meaningful.
	 */
	part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
			    EXT_CSD_PARTITION_SETTING_COMPLETED);

	mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
	/* enforce a sane lower bound on the partition switch timeout */
	if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
		mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;

	/* store the partition info of emmc */
	mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
	    ext_csd[EXT_CSD_BOOT_MULT])
		mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
	if (part_completed &&
	    (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
		mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];

	/* BOOT/RPMB sizes are in units of 128KiB (<< 17) */
	mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;

	mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;

	/* General purpose partitions: 3-byte multiplier each */
	for (i = 0; i < 4; i++) {
		int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
		uint mult = (ext_csd[idx + 2] << 16) +
			(ext_csd[idx + 1] << 8) + ext_csd[idx];
		if (mult)
			has_parts = true;
		if (!part_completed)
			continue;
		mmc->capacity_gp[i] = mult;
		mmc->capacity_gp[i] *=
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
		mmc->capacity_gp[i] <<= 19;
	}

#ifndef CONFIG_SPL_BUILD
	if (part_completed) {
		mmc->enh_user_size =
			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
			(ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
			ext_csd[EXT_CSD_ENH_SIZE_MULT];
		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
		mmc->enh_user_size <<= 19;
		mmc->enh_user_start =
			(ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
			(ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
			(ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
			ext_csd[EXT_CSD_ENH_START_ADDR];
		/* high-capacity devices address in 512-byte sectors */
		if (mmc->high_capacity)
			mmc->enh_user_start <<= 9;
	}
#endif

	/*
	 * Partitioned devices must use high-capacity erase groups:
	 * enable ERASE_GROUP_DEF whenever partitions are in use.
	 */
	if (part_completed)
		has_parts = true;
	if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
	    (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
		has_parts = true;
	if (has_parts) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			goto error;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
	}

	if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
#if CONFIG_IS_ENABLED(MMC_WRITE)
		/* high-capacity erase group size in 512-byte sectors */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
#endif
		/*
		 * With ERASE_GROUP_DEF set, the EXT_CSD sector count is
		 * authoritative for high-capacity, fully-partitioned
		 * devices.
		 */
		if (mmc->high_capacity && part_completed) {
			capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
				   (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
				   (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
				   (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
			capacity *= MMC_MAX_BLOCK_LEN;
			mmc->capacity_user = capacity;
		}
	}
#if CONFIG_IS_ENABLED(MMC_WRITE)
	else {
		/* Calculate the group size from the csd value. */
		int erase_gsz, erase_gmul;

		erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
		erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
		mmc->erase_grp_size = (erase_gsz + 1)
			* (erase_gmul + 1);
	}
#endif
#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
	mmc->hc_wp_grp_size = 1024
		* ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
		* ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
#endif

	mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];

	return 0;
error:
	/* drop the cached EXT_CSD; heap-allocated only in non-TINY builds */
	if (mmc->ext_csd) {
#if !CONFIG_IS_ENABLED(MMC_TINY)
		free(mmc->ext_csd);
#endif
		mmc->ext_csd = NULL;
	}
	return err;
}
2412
/*
 * mmc_startup() - identify an attached card and bring it to working state
 *
 * Runs the identification sequence (CID, RCA, CSD), derives legacy speed
 * and block lengths/capacity from the CSD, selects the card, performs
 * version-4 EXT_CSD setup, negotiates the fastest bus mode/width, and
 * fills in the block device descriptor.
 *
 * Return: 0 on success or a negative error code.
 */
static int mmc_startup(struct mmc *mmc)
{
	int err, i;
	uint mult, freq;
	u64 cmult, csize;
	struct mmc_cmd cmd;
	struct blk_desc *bdesc;

#ifdef CONFIG_MMC_SPI_CRC_ON
	/* enable CRC checking on SPI transfers */
	if (mmc_host_is_spi(mmc)) {
		cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 1;
		err = mmc_send_cmd(mmc, &cmd, NULL);
		if (err)
			return err;
	}
#endif

	/* Put the Card in Identify Mode */
	cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
		MMC_CMD_ALL_SEND_CID;
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = 0;

	err = mmc_send_cmd_quirks(mmc, &cmd, NULL, MMC_QUIRK_RETRY_SEND_CID, 4);
	if (err)
		return err;

	memcpy(mmc->cid, cmd.response, 16);

	/*
	 * For MMC cards, set the Relative Address; for SD cards, read the
	 * card-assigned one. Either way the card ends up in Standby State.
	 */
	if (!mmc_host_is_spi(mmc)) {
		cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
		cmd.cmdarg = mmc->rca << 16;
		cmd.resp_type = MMC_RSP_R6;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (IS_SD(mmc))
			mmc->rca = (cmd.response[0] >> 16) & 0xffff;
	}

	/* Get the Card-Specific Data */
	cmd.cmdidx = MMC_CMD_SEND_CSD;
	cmd.resp_type = MMC_RSP_R2;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	mmc->csd[0] = cmd.response[0];
	mmc->csd[1] = cmd.response[1];
	mmc->csd[2] = cmd.response[2];
	mmc->csd[3] = cmd.response[3];

	/* derive the MMC spec version from CSD_STRUCTURE if still unknown */
	if (mmc->version == MMC_VERSION_UNKNOWN) {
		int version = (cmd.response[0] >> 26) & 0xf;

		switch (version) {
		case 0:
			mmc->version = MMC_VERSION_1_2;
			break;
		case 1:
			mmc->version = MMC_VERSION_1_4;
			break;
		case 2:
			mmc->version = MMC_VERSION_2_2;
			break;
		case 3:
			mmc->version = MMC_VERSION_3;
			break;
		case 4:
			mmc->version = MMC_VERSION_4;
			break;
		default:
			mmc->version = MMC_VERSION_1_2;
			break;
		}
	}

	/* TRAN_SPEED: frequency base (fbase is /10, mult is x10) */
	freq = fbase[(cmd.response[0] & 0x7)];
	mult = multipliers[((cmd.response[0] >> 3) & 0xf)];

	mmc->legacy_speed = freq * mult;
	mmc_select_mode(mmc, MMC_LEGACY);

	mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
	mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
#if CONFIG_IS_ENABLED(MMC_WRITE)
	/* SD always uses equal read/write block lengths */
	if (IS_SD(mmc))
		mmc->write_bl_len = mmc->read_bl_len;
	else
		mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
#endif

	/* CSD capacity: C_SIZE/C_SIZE_MULT layout differs for SDHC/SDXC */
	if (mmc->high_capacity) {
		csize = (mmc->csd[1] & 0x3f) << 16
			| (mmc->csd[2] & 0xffff0000) >> 16;
		cmult = 8;
	} else {
		csize = (mmc->csd[1] & 0x3ff) << 2
			| (mmc->csd[2] & 0xc0000000) >> 30;
		cmult = (mmc->csd[2] & 0x00038000) >> 15;
	}

	mmc->capacity_user = (csize + 1) << (cmult + 2);
	mmc->capacity_user *= mmc->read_bl_len;
	mmc->capacity_boot = 0;
	mmc->capacity_rpmb = 0;
	for (i = 0; i < 4; i++)
		mmc->capacity_gp[i] = 0;

	if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;

#if CONFIG_IS_ENABLED(MMC_WRITE)
	if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
#endif

	/* program the Driver Stage Register if implemented and configured */
	if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
		cmd.cmdidx = MMC_CMD_SET_DSR;
		cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
		cmd.resp_type = MMC_RSP_NONE;
		if (mmc_send_cmd(mmc, &cmd, NULL))
			pr_warn("MMC: SET_DSR failed\n");
	}

	/* Select the card, and put it into Transfer Mode */
	if (!mmc_host_is_spi(mmc)) {
		cmd.cmdidx = MMC_CMD_SELECT_CARD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = mmc->rca << 16;
		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	/*
	 * For SD, the erase group is always one sector; MMC v4 overrides
	 * this in mmc_startup_v4() below.
	 */
#if CONFIG_IS_ENABLED(MMC_WRITE)
	mmc->erase_grp_size = 1;
#endif
	mmc->part_config = MMCPART_NOAVAILABLE;

	err = mmc_startup_v4(mmc);
	if (err)
		return err;

	err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(MMC_TINY)
	/* TINY builds stay in 1-bit legacy mode; no negotiation */
	mmc_set_clock(mmc, mmc->legacy_speed, false);
	mmc_select_mode(mmc, MMC_LEGACY);
	mmc_set_bus_width(mmc, 1);
#else
	if (IS_SD(mmc)) {
		err = sd_get_capabilities(mmc);
		if (err)
			return err;
		err = sd_select_mode_and_width(mmc, mmc->card_caps);
	} else {
		err = mmc_get_capabilities(mmc);
		if (err)
			return err;
		err = mmc_select_mode_and_width(mmc, mmc->card_caps);
	}
#endif
	if (err)
		return err;

	mmc->best_mode = mmc->selected_mode;

	/* Fix the block length for DDR mode */
	if (mmc->ddr_mode) {
		mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
#if CONFIG_IS_ENABLED(MMC_WRITE)
		mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
#endif
	}

	/* fill in device description */
	bdesc = mmc_get_blk_desc(mmc);
	bdesc->lun = 0;
	bdesc->hwpart = 0;
	bdesc->type = 0;
	bdesc->blksz = mmc->read_bl_len;
	bdesc->log2blksz = LOG2(bdesc->blksz);
	bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
#if !defined(CONFIG_SPL_BUILD) || \
		(defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
		!CONFIG_IS_ENABLED(USE_TINY_PRINTF))
	/* vendor/product/revision strings decoded from the CID */
	sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
		mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
		(mmc->cid[3] >> 16) & 0xffff);
	sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
		(mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
		(mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
		(mmc->cid[2] >> 24) & 0xff);
	sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
		(mmc->cid[2] >> 16) & 0xf);
#else
	bdesc->vendor[0] = 0;
	bdesc->product[0] = 0;
	bdesc->revision[0] = 0;
#endif

#if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
	part_init(bdesc);
#endif

	return 0;
}
2642
2643static int mmc_send_if_cond(struct mmc *mmc)
2644{
2645 struct mmc_cmd cmd;
2646 int err;
2647
2648 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2649
2650 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2651 cmd.resp_type = MMC_RSP_R7;
2652
2653 err = mmc_send_cmd(mmc, &cmd, NULL);
2654
2655 if (err)
2656 return err;
2657
2658 if ((cmd.response[0] & 0xff) != 0xaa)
2659 return -EOPNOTSUPP;
2660 else
2661 mmc->version = SD_VERSION_2;
2662
2663 return 0;
2664}
2665
#if !CONFIG_IS_ENABLED(DM_MMC)
/* board hook for power initialization; boards may override this weak stub */
__weak void board_mmc_power_init(void)
{
}
#endif
2672
/*
 * mmc_power_init() - look up the card/I/O supply regulators (DM builds)
 * or call the board power hook (non-DM builds).
 *
 * Missing regulators are only logged; they are optional.
 * Return: always 0.
 */
static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if CONFIG_IS_ENABLED(DM_REGULATOR)
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &mmc->vmmc_supply);
	if (ret)
		pr_debug("%s: No vmmc supply\n", mmc->dev->name);

	ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
					  &mmc->vqmmc_supply);
	if (ret)
		pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
#endif
#else
	/*
	 * Legacy (non-DM) path: the board code knows how to power the
	 * card slot.
	 */
	board_mmc_power_init();
#endif
	return 0;
}
2698
2699
2700
2701
2702
2703
2704static void mmc_set_initial_state(struct mmc *mmc)
2705{
2706 int err;
2707
2708
2709 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2710 if (err != 0)
2711 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2712 if (err != 0)
2713 pr_warn("mmc: failed to set signal voltage\n");
2714
2715 mmc_select_mode(mmc, MMC_LEGACY);
2716 mmc_set_bus_width(mmc, 1);
2717 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
2718}
2719
/*
 * mmc_power_on() - enable the card's VMMC regulator, if one was found.
 *
 * -EACCES (regulator not controllable) is tolerated.
 * Return: 0 on success or the regulator error.
 */
static int mmc_power_on(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vmmc_supply) {
		int ret = regulator_set_enable(mmc->vmmc_supply, true);

		if (ret && ret != -EACCES) {
			printf("Error enabling VMMC supply : %d\n", ret);
			return ret;
		}
	}
#endif
	return 0;
}
2734
/*
 * Gate the bus clock and, when a VMMC regulator is bound, cut the
 * card's main supply.
 */
static int mmc_power_off(struct mmc *mmc)
{
	mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vmmc_supply) {
		int ret = regulator_set_enable(mmc->vmmc_supply, false);

		/* -EACCES: regulator presumably cannot be disabled - confirm */
		if (ret && ret != -EACCES) {
			pr_debug("Error disabling VMMC supply : %d\n", ret);
			return ret;
		}
	}
#endif
	return 0;
}
2750
/*
 * Fully power-cycle the card: cut the supply, let the host controller
 * perform any board-level cycling of its own, wait for the rail to
 * drain, then power back on.
 */
static int mmc_power_cycle(struct mmc *mmc)
{
	int ret;

	ret = mmc_power_off(mmc);
	if (ret)
		return ret;

	ret = mmc_host_power_cycle(mmc);
	if (ret)
		return ret;

	/*
	 * Wait for the supply to discharge before re-enabling it.
	 * NOTE(review): 2ms delay - the SD spec presumably requires at
	 * least 1ms of power-off time; confirm against the spec.
	 */
	udelay(2000);
	return mmc_power_on(mmc);
}
2770
/*
 * Negotiate the card's operating conditions: power-cycle (dropping UHS
 * if that fails), reset to idle, then try SD (CMD8 + ACMD41) and fall
 * back to MMC (CMD1) on timeout.
 *
 * @quiet: suppress the "did not respond to voltage select" error print.
 * Return: 0 on success, -EOPNOTSUPP when no card type responds, other
 * negative errno on failure. No-op (0) if the card is already inited.
 */
int mmc_get_op_cond(struct mmc *mmc, bool quiet)
{
	bool uhs_en = supports_uhs(mmc->cfg->host_caps);
	int err;

	if (mmc->has_init)
		return 0;

	err = mmc_power_init(mmc);
	if (err)
		return err;

#ifdef CONFIG_MMC_QUIRKS
	/* retry quirks enabled wholesale when quirk support is built in */
	mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
		      MMC_QUIRK_RETRY_SEND_CID |
		      MMC_QUIRK_RETRY_APP_CMD;
#endif

	err = mmc_power_cycle(mmc);
	if (err) {
		/*
		 * If power cycling is not supported, avoid the UHS modes:
		 * without a real power cycle we could not recover from an
		 * error during UHS initialization.
		 */
		pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
		uhs_en = false;
		mmc->host_caps &= ~UHS_CAPS;
		err = mmc_power_on(mmc);
	}
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/*
	 * Re-initialize the host to clear any stale configuration
	 * (e.g. on mmc rescan).
	 */
	err = mmc_reinit(mmc);
#else
	/* legacy path: host init via the ops table */
	err = mmc->cfg->ops->init(mmc);
#endif
	if (err)
		return err;
	mmc->ddr_mode = 0;

retry:
	mmc_set_initial_state(mmc);

	/* Reset the card with CMD0 */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition resets to the user partition (0) on CMD0 */
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/*
	 * Test for SD version 2. The result is intentionally unchecked:
	 * pre-2.0 SD cards fail CMD8, and ACMD41 is issued regardless.
	 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc, uhs_en);
	if (err && uhs_en) {
		/* retry once without UHS after a fresh power cycle */
		uhs_en = false;
		mmc_power_cycle(mmc);
		goto retry;
	}

	/* If the command timed out, check for an MMC card instead */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			if (!quiet)
				pr_err("Card did not respond to voltage select! : %d\n", err);
#endif
			return -EOPNOTSUPP;
		}
	}

	return err;
}
2856
/*
 * Begin card initialization: build the host capability mask, apply any
 * user-requested speed-mode restriction, check card presence, and run
 * the operating-condition negotiation. mmc_complete_init() (via
 * mmc_init()) finishes the job.
 *
 * Return: 0 on success, -ENOMEDIUM when no card is present, -EINVAL
 * for an unsupported user speed mode, other negative errno on failure.
 */
int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	int err = 0;

	/*
	 * Every host supports at least legacy timing on a 1-bit bus, so
	 * always advertise those on top of the configured capabilities.
	 */
	mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
			 MMC_MODE_1BIT;

	if (IS_ENABLED(CONFIG_MMC_SPEED_MODE_SET)) {
		if (mmc->user_speed_mode != MMC_MODES_END) {
			int i;

			/* Restrict to the user-selected mode, keeping the
			 * mandatory legacy/1-bit fallback caps. */
			if (mmc->host_caps & MMC_CAP(mmc->user_speed_mode)) {
				/* Remove all other speed capabilities */
				for (i = MMC_LEGACY; i < MMC_MODES_END; i++)
					mmc->host_caps &= ~MMC_CAP(i);
				mmc->host_caps |= (MMC_CAP(mmc->user_speed_mode)
						   | MMC_CAP(MMC_LEGACY) |
						   MMC_MODE_1BIT);
			} else {
				pr_err("bus_mode requested is not supported\n");
				return -EINVAL;
			}
		}
	}
#if CONFIG_IS_ENABLED(DM_MMC)
	mmc_deferred_probe(mmc);
#endif
#if !defined(CONFIG_MMC_BROKEN_CD)
	/* poll the card-detect line; 0 means no card */
	no_card = mmc_getcd(mmc) == 0;
#else
	no_card = 0;
#endif
#if !CONFIG_IS_ENABLED(DM_MMC)
	/* treat a missing init op as "no card" on the legacy path */
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	err = mmc_get_op_cond(mmc, false);

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}
2913
2914static int mmc_complete_init(struct mmc *mmc)
2915{
2916 int err = 0;
2917
2918 mmc->init_in_progress = 0;
2919 if (mmc->op_cond_pending)
2920 err = mmc_complete_op_cond(mmc);
2921
2922 if (!err)
2923 err = mmc_startup(mmc);
2924 if (err)
2925 mmc->has_init = 0;
2926 else
2927 mmc->has_init = 1;
2928 return err;
2929}
2930
/*
 * Fully initialize a card (both phases), recording the mmc on the
 * uclass under driver model. No-op (0) if already initialized; logs
 * the elapsed time on failure.
 */
int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused ulong start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	start = get_timer(0);

	/* skip phase one if mmc_start_init() already ran (preinit) */
	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}
2955
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
/*
 * Drop the card back out of the high-speed modes (UHS for SD; HS200/
 * HS400/HS400ES for eMMC) by re-running mode selection with those
 * capabilities masked out - presumably so later-stage software finds
 * the card in a mode it can handle; confirm with callers.
 */
int mmc_deinit(struct mmc *mmc)
{
	u32 caps_filtered;

	if (!mmc->has_init)
		return 0;

	if (IS_SD(mmc)) {
		/* mask out every UHS mode */
		caps_filtered = mmc->card_caps &
			~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
			  MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
			  MMC_CAP(UHS_SDR104));

		return sd_select_mode_and_width(mmc, caps_filtered);
	} else {
		/* mask out the eMMC HS200/HS400 family */
		caps_filtered = mmc->card_caps &
			~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400) | MMC_CAP(MMC_HS_400_ES));

		return mmc_select_mode_and_width(mmc, caps_filtered);
	}
}
#endif
2981
2982int mmc_set_dsr(struct mmc *mmc, u16 val)
2983{
2984 mmc->dsr = val;
2985 return 0;
2986}
2987
2988
/*
 * Weak default: no SoC-level MMC controllers to set up. Platforms
 * override this on the legacy (non-DM) probe path.
 */
__weak int cpu_mmc_init(struct bd_info *bis)
{
	return -1;
}
2993
2994
/*
 * Weak default: no board-level MMC controllers to set up. A negative
 * return makes mmc_probe() fall back to cpu_mmc_init().
 */
__weak int board_mmc_init(struct bd_info *bis)
{
	return -1;
}
2999
/*
 * Flag this device for early (pre-)initialization; the flag is checked
 * later (e.g. by mmc_init_device()) to start init before first use.
 */
void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}
3004
#if CONFIG_IS_ENABLED(DM_MMC)
/* Probe every MMC controller known to driver model. */
static int mmc_probe(struct bd_info *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * First walk the devices in sequence order (errors other than
	 * -ENODEV are ignored here); the pass below probes every device
	 * in the uclass and reports individual failures.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			pr_err("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
#else
/* Legacy probe: board hook first, CPU hook as fallback. */
static int mmc_probe(struct bd_info *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif
3043
/*
 * One-time MMC subsystem setup: probe all controllers, list the
 * devices (non-SPL) and kick off any requested pre-initialization.
 * Safe to call repeatedly; only the first call does any work.
 */
int mmc_initialize(struct bd_info *bis)
{
	static int initialized = 0;	/* guards against repeat calls */
	int ret;
	if (initialized)
		return 0;
	initialized = 1;

#if !CONFIG_IS_ENABLED(BLK)
#if !CONFIG_IS_ENABLED(MMC_TINY)
	mmc_list_init();
#endif
#endif
	ret = mmc_probe(bis);
	if (ret)
		return ret;

#ifndef CONFIG_SPL_BUILD
	print_mmc_devices(',');
#endif

	/* start (partial) init on devices flagged via mmc_set_preinit() */
	mmc_do_preinit();
	return 0;
}
3068
3069#if CONFIG_IS_ENABLED(DM_MMC)
3070int mmc_init_device(int num)
3071{
3072 struct udevice *dev;
3073 struct mmc *m;
3074 int ret;
3075
3076 if (uclass_get_device_by_seq(UCLASS_MMC, num, &dev)) {
3077 ret = uclass_get_device(UCLASS_MMC, num, &dev);
3078 if (ret)
3079 return ret;
3080 }
3081
3082 m = mmc_get_mmc_dev(dev);
3083 m->user_speed_mode = MMC_MODES_END;
3084
3085 if (!m)
3086 return 0;
3087 if (m->preinit)
3088 mmc_start_init(m);
3089
3090 return 0;
3091}
3092#endif
3093
3094#ifdef CONFIG_CMD_BKOPS_ENABLE
3095int mmc_set_bkops_enable(struct mmc *mmc)
3096{
3097 int err;
3098 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
3099
3100 err = mmc_send_ext_csd(mmc, ext_csd);
3101 if (err) {
3102 puts("Could not get ext_csd register values\n");
3103 return err;
3104 }
3105
3106 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
3107 puts("Background operations not supported on device\n");
3108 return -EMEDIUMTYPE;
3109 }
3110
3111 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
3112 puts("Background operations already enabled\n");
3113 return 0;
3114 }
3115
3116 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
3117 if (err) {
3118 puts("Failed to enable manual background operations\n");
3119 return err;
3120 }
3121
3122 puts("Enabled manual background operations\n");
3123
3124 return 0;
3125}
3126#endif
3127
/*
 * Weak default for the MMC device number holding the environment;
 * boards may override. Uses CONFIG_SYS_MMC_ENV_DEV when defined,
 * otherwise device 0.
 */
__weak int mmc_get_env_dev(void)
{
#ifdef CONFIG_SYS_MMC_ENV_DEV
	return CONFIG_SYS_MMC_ENV_DEV;
#else
	return 0;
#endif
}
3136