// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2008, Freescale Semiconductor, Inc
 * Andy Fleming
 *
 * Based vaguely on the Linux code
 */

10#include <config.h>
11#include <common.h>
12#include <blk.h>
13#include <command.h>
14#include <dm.h>
15#include <log.h>
16#include <dm/device-internal.h>
17#include <errno.h>
18#include <mmc.h>
19#include <part.h>
20#include <linux/bitops.h>
21#include <linux/delay.h>
22#include <power/regulator.h>
23#include <malloc.h>
24#include <memalign.h>
25#include <linux/list.h>
26#include <div64.h>
27#include "mmc_private.h"
28
29#define DEFAULT_CMD6_TIMEOUT_MS 500
30
31static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
32
33#if !CONFIG_IS_ENABLED(DM_MMC)
34
35static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
36{
37 if (mmc->cfg->ops->wait_dat0)
38 return mmc->cfg->ops->wait_dat0(mmc, state, timeout_us);
39
40 return -ENOSYS;
41}
42
43__weak int board_mmc_getwp(struct mmc *mmc)
44{
45 return -1;
46}
47
48int mmc_getwp(struct mmc *mmc)
49{
50 int wp;
51
52 wp = board_mmc_getwp(mmc);
53
54 if (wp < 0) {
55 if (mmc->cfg->ops->getwp)
56 wp = mmc->cfg->ops->getwp(mmc);
57 else
58 wp = 0;
59 }
60
61 return wp;
62}
63
64__weak int board_mmc_getcd(struct mmc *mmc)
65{
66 return -1;
67}
68#endif
69
70#ifdef CONFIG_MMC_TRACE
71void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
72{
73 printf("CMD_SEND:%d\n", cmd->cmdidx);
74 printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
75}
76
77void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
78{
79 int i;
80 u8 *ptr;
81
82 if (ret) {
83 printf("\t\tRET\t\t\t %d\n", ret);
84 } else {
85 switch (cmd->resp_type) {
86 case MMC_RSP_NONE:
87 printf("\t\tMMC_RSP_NONE\n");
88 break;
89 case MMC_RSP_R1:
90 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
91 cmd->response[0]);
92 break;
93 case MMC_RSP_R1b:
94 printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
95 cmd->response[0]);
96 break;
97 case MMC_RSP_R2:
98 printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
99 cmd->response[0]);
100 printf("\t\t \t\t 0x%08x \n",
101 cmd->response[1]);
102 printf("\t\t \t\t 0x%08x \n",
103 cmd->response[2]);
104 printf("\t\t \t\t 0x%08x \n",
105 cmd->response[3]);
106 printf("\n");
107 printf("\t\t\t\t\tDUMPING DATA\n");
108 for (i = 0; i < 4; i++) {
109 int j;
110 printf("\t\t\t\t\t%03d - ", i*4);
111 ptr = (u8 *)&cmd->response[i];
112 ptr += 3;
113 for (j = 0; j < 4; j++)
114 printf("%02x ", *ptr--);
115 printf("\n");
116 }
117 break;
118 case MMC_RSP_R3:
119 printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
120 cmd->response[0]);
121 break;
122 default:
123 printf("\t\tERROR MMC rsp not supported\n");
124 break;
125 }
126 }
127}
128
129void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
130{
131 int status;
132
133 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
134 printf("CURR STATE:%d\n", status);
135}
136#endif
137
138#if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG) || CONFIG_VAL(LOGLEVEL) >= LOGL_DEBUG
139const char *mmc_mode_name(enum bus_mode mode)
140{
141 static const char *const names[] = {
142 [MMC_LEGACY] = "MMC legacy",
143 [MMC_HS] = "MMC High Speed (26MHz)",
144 [SD_HS] = "SD High Speed (50MHz)",
145 [UHS_SDR12] = "UHS SDR12 (25MHz)",
146 [UHS_SDR25] = "UHS SDR25 (50MHz)",
147 [UHS_SDR50] = "UHS SDR50 (100MHz)",
148 [UHS_SDR104] = "UHS SDR104 (208MHz)",
149 [UHS_DDR50] = "UHS DDR50 (50MHz)",
150 [MMC_HS_52] = "MMC High Speed (52MHz)",
151 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
152 [MMC_HS_200] = "HS200 (200MHz)",
153 [MMC_HS_400] = "HS400 (200MHz)",
154 [MMC_HS_400_ES] = "HS400ES (200MHz)",
155 };
156
157 if (mode >= MMC_MODES_END)
158 return "Unknown mode";
159 else
160 return names[mode];
161}
162#endif
163
164static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
165{
166 static const int freqs[] = {
167 [MMC_LEGACY] = 25000000,
168 [MMC_HS] = 26000000,
169 [SD_HS] = 50000000,
170 [MMC_HS_52] = 52000000,
171 [MMC_DDR_52] = 52000000,
172 [UHS_SDR12] = 25000000,
173 [UHS_SDR25] = 50000000,
174 [UHS_SDR50] = 100000000,
175 [UHS_DDR50] = 50000000,
176 [UHS_SDR104] = 208000000,
177 [MMC_HS_200] = 200000000,
178 [MMC_HS_400] = 200000000,
179 [MMC_HS_400_ES] = 200000000,
180 };
181
182 if (mode == MMC_LEGACY)
183 return mmc->legacy_speed;
184 else if (mode >= MMC_MODES_END)
185 return 0;
186 else
187 return freqs[mode];
188}
189
190static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
191{
192 mmc->selected_mode = mode;
193 mmc->tran_speed = mmc_mode2freq(mmc, mode);
194 mmc->ddr_mode = mmc_is_mode_ddr(mode);
195 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
196 mmc->tran_speed / 1000000);
197 return 0;
198}
199
200#if !CONFIG_IS_ENABLED(DM_MMC)
201int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
202{
203 int ret;
204
205 mmmc_trace_before_send(mmc, cmd);
206 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
207 mmmc_trace_after_send(mmc, cmd, ret);
208
209 return ret;
210}
211#endif
212
/**
 * mmc_send_cmd_retry() - send a command to the mmc device, retrying on error
 *
 * @mmc:	mmc device
 * @cmd:	command to send
 * @data:	additional data to send/receive
 * @retries:	how many times to retry; mmc_send_cmd is always called at least
 *		once
 * Return: 0 if ok, -ve on error
 */
223static int mmc_send_cmd_retry(struct mmc *mmc, struct mmc_cmd *cmd,
224 struct mmc_data *data, uint retries)
225{
226 int ret;
227
228 do {
229 ret = mmc_send_cmd(mmc, cmd, data);
230 } while (ret && retries--);
231
232 return ret;
233}
234
/**
 * mmc_send_cmd_quirks() - send a command to the mmc device, retrying if a
 *			   specific quirk is enabled
 *
 * @mmc:	mmc device
 * @cmd:	command to send
 * @data:	additional data to send/receive
 * @quirk:	retry only if this quirk is enabled
 * @retries:	how many times to retry; mmc_send_cmd is always called at least
 *		once
 * Return: 0 if ok, -ve on error
 */
247static int mmc_send_cmd_quirks(struct mmc *mmc, struct mmc_cmd *cmd,
248 struct mmc_data *data, u32 quirk, uint retries)
249{
250 if (IS_ENABLED(CONFIG_MMC_QUIRKS) && mmc->quirks & quirk)
251 return mmc_send_cmd_retry(mmc, cmd, data, retries);
252 else
253 return mmc_send_cmd(mmc, cmd, data);
254}
255
256int mmc_send_status(struct mmc *mmc, unsigned int *status)
257{
258 struct mmc_cmd cmd;
259 int ret;
260
261 cmd.cmdidx = MMC_CMD_SEND_STATUS;
262 cmd.resp_type = MMC_RSP_R1;
263 if (!mmc_host_is_spi(mmc))
264 cmd.cmdarg = mmc->rca << 16;
265
266 ret = mmc_send_cmd_retry(mmc, &cmd, NULL, 4);
267 mmc_trace_state(mmc, &cmd);
268 if (!ret)
269 *status = cmd.response[0];
270
271 return ret;
272}
273
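/*
 * Wait for the card to leave its busy/programming state, either by watching
 * DAT0 through the host's wait_dat0 hook (preferred) or, failing that, by
 * issuing CMD13 (SEND_STATUS) once per millisecond until the card reports it
 * is ready for data and no longer in the programming state.
 */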
274int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
275{
276 unsigned int status;
277 int err;
278
279 err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
280 if (err != -ENOSYS)
281 return err;
282
283 while (1) {
284 err = mmc_send_status(mmc, &status);
285 if (err)
286 return err;
287
288 if ((status & MMC_STATUS_RDY_FOR_DATA) &&
289 (status & MMC_STATUS_CURR_STATE) !=
290 MMC_STATE_PRG)
291 break;
292
293 if (status & MMC_STATUS_MASK) {
294#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
295 pr_err("Status Error: 0x%08x\n", status);
296#endif
297 return -ECOMM;
298 }
299
300 if (timeout_ms-- <= 0)
301 break;
302
303 udelay(1000);
304 }
305
306 if (timeout_ms <= 0) {
307#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
308 pr_err("Timeout waiting card ready\n");
309#endif
310 return -ETIMEDOUT;
311 }
312
313 return 0;
314}
315
316int mmc_set_blocklen(struct mmc *mmc, int len)
317{
318 struct mmc_cmd cmd;
319
320 if (mmc->ddr_mode)
321 return 0;
322
323 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
324 cmd.resp_type = MMC_RSP_R1;
325 cmd.cmdarg = len;
326
327 return mmc_send_cmd_quirks(mmc, &cmd, NULL,
328 MMC_QUIRK_RETRY_SET_BLOCKLEN, 4);
329}
330
331#ifdef MMC_SUPPORTS_TUNING
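/*
 * Standard tuning block patterns defined by the SD (4-bit bus, CMD19) and
 * eMMC (8-bit bus, CMD21) specifications. During tuning the card returns this
 * fixed pattern and the host compares it against the data it actually
 * sampled to judge whether the current sampling point is valid.
 */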
332static const u8 tuning_blk_pattern_4bit[] = {
333 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
334 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
335 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
336 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
337 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
338 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
339 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
340 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
341};
342
343static const u8 tuning_blk_pattern_8bit[] = {
344 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
345 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
346 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
347 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
348 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
349 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
350 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
351 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
352 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
353 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
354 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
355 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
356 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
357 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
358 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
359 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
360};
361
362int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
363{
364 struct mmc_cmd cmd;
365 struct mmc_data data;
366 const u8 *tuning_block_pattern;
367 int size, err;
368
369 if (mmc->bus_width == 8) {
370 tuning_block_pattern = tuning_blk_pattern_8bit;
371 size = sizeof(tuning_blk_pattern_8bit);
372 } else if (mmc->bus_width == 4) {
373 tuning_block_pattern = tuning_blk_pattern_4bit;
374 size = sizeof(tuning_blk_pattern_4bit);
375 } else {
376 return -EINVAL;
377 }
378
379 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
380
381 cmd.cmdidx = opcode;
382 cmd.cmdarg = 0;
383 cmd.resp_type = MMC_RSP_R1;
384
385 data.dest = (void *)data_buf;
386 data.blocks = 1;
387 data.blocksize = size;
388 data.flags = MMC_DATA_READ;
389
390 err = mmc_send_cmd(mmc, &cmd, &data);
391 if (err)
392 return err;
393
394 if (memcmp(data_buf, tuning_block_pattern, size))
395 return -EIO;
396
397 return 0;
398}
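/*
 * mmc_send_tuning() only checks a single sample point. A host controller's
 * execute_tuning() hook would typically sweep its sample-delay taps and call
 * it once per tap, e.g. (sketch only; set_sample_tap() is a hypothetical
 * controller-specific helper):
 *
 *	for (tap = 0; tap < num_taps; tap++) {
 *		set_sample_tap(host, tap);
 *		if (!mmc_send_tuning(mmc, opcode, NULL))
 *			window |= BIT(tap);
 *	}
 *	// then select the centre of the longest run of good taps
 */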
399#endif
400
401static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
402 lbaint_t blkcnt)
403{
404 struct mmc_cmd cmd;
405 struct mmc_data data;
406
407 if (blkcnt > 1)
408 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
409 else
410 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
411
412 if (mmc->high_capacity)
413 cmd.cmdarg = start;
414 else
415 cmd.cmdarg = start * mmc->read_bl_len;
416
417 cmd.resp_type = MMC_RSP_R1;
418
419 data.dest = dst;
420 data.blocks = blkcnt;
421 data.blocksize = mmc->read_bl_len;
422 data.flags = MMC_DATA_READ;
423
424 if (mmc_send_cmd(mmc, &cmd, &data))
425 return 0;
426
427 if (blkcnt > 1) {
428 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
429 cmd.cmdarg = 0;
430 cmd.resp_type = MMC_RSP_R1b;
431 if (mmc_send_cmd(mmc, &cmd, NULL)) {
432#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
433 pr_err("mmc fail to send stop cmd\n");
434#endif
435 return 0;
436 }
437 }
438
439 return blkcnt;
440}
441
442#if !CONFIG_IS_ENABLED(DM_MMC)
443static int mmc_get_b_max(struct mmc *mmc, void *dst, lbaint_t blkcnt)
444{
445 if (mmc->cfg->ops->get_b_max)
446 return mmc->cfg->ops->get_b_max(mmc, dst, blkcnt);
447 else
448 return mmc->cfg->b_max;
449}
450#endif
451
452#if CONFIG_IS_ENABLED(BLK)
453ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
454#else
455ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
456 void *dst)
457#endif
458{
459#if CONFIG_IS_ENABLED(BLK)
460 struct blk_desc *block_dev = dev_get_uclass_plat(dev);
461#endif
462 int dev_num = block_dev->devnum;
463 int err;
464 lbaint_t cur, blocks_todo = blkcnt;
465 uint b_max;
466
467 if (blkcnt == 0)
468 return 0;
469
470 struct mmc *mmc = find_mmc_device(dev_num);
471 if (!mmc)
472 return 0;
473
474 if (CONFIG_IS_ENABLED(MMC_TINY))
475 err = mmc_switch_part(mmc, block_dev->hwpart);
476 else
477 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
478
479 if (err < 0)
480 return 0;
481
482 if ((start + blkcnt) > block_dev->lba) {
483#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
484 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
485 start + blkcnt, block_dev->lba);
486#endif
487 return 0;
488 }
489
490 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
491 pr_debug("%s: Failed to set blocklen\n", __func__);
492 return 0;
493 }
494
495 b_max = mmc_get_b_max(mmc, dst, blkcnt);
496
497 do {
498 cur = (blocks_todo > b_max) ? b_max : blocks_todo;
499 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
500 pr_debug("%s: Failed to read blocks\n", __func__);
501 return 0;
502 }
503 blocks_todo -= cur;
504 start += cur;
505 dst += cur * mmc->read_bl_len;
506 } while (blocks_todo > 0);
507
508 return blkcnt;
509}
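/*
 * mmc_bread() is normally reached through the block layer rather than being
 * called directly. A minimal sketch of a caller, assuming a 512-byte block
 * size:
 *
 *	struct blk_desc *desc = mmc_get_blk_desc(mmc);
 *	ALLOC_CACHE_ALIGN_BUFFER(u8, buf, 512);
 *
 *	if (blk_dread(desc, 0, 1, buf) != 1)
 *		return -EIO;
 */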
510
511static int mmc_go_idle(struct mmc *mmc)
512{
513 struct mmc_cmd cmd;
514 int err;
515
516 udelay(1000);
517
518 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
519 cmd.cmdarg = 0;
520 cmd.resp_type = MMC_RSP_NONE;
521
522 err = mmc_send_cmd(mmc, &cmd, NULL);
523
524 if (err)
525 return err;
526
527 udelay(2000);
528
529 return 0;
530}
531
532#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
533static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
534{
535 struct mmc_cmd cmd;
536 int err = 0;
537
	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
542 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
543 return mmc_set_signal_voltage(mmc, signal_voltage);
544
545 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
546 cmd.cmdarg = 0;
547 cmd.resp_type = MMC_RSP_R1;
548
549 err = mmc_send_cmd(mmc, &cmd, NULL);
550 if (err)
551 return err;
552
553 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
554 return -EIO;
555
	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response to CMD11, but wait 100 us to be sure.
	 */
560 err = mmc_wait_dat0(mmc, 0, 100);
561 if (err == -ENOSYS)
562 udelay(100);
563 else if (err)
564 return -ETIMEDOUT;
565
	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec.
	 */
570 mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);
571
572 err = mmc_set_signal_voltage(mmc, signal_voltage);
573 if (err)
574 return err;
575
	/* keep the clock gated for at least 10 ms, though the spec only requires 5 ms */
577 mdelay(10);
578 mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);
579
	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low. Wait for at least 1 ms according to the spec.
	 */
584 err = mmc_wait_dat0(mmc, 1, 1000);
585 if (err == -ENOSYS)
586 udelay(1000);
587 else if (err)
588 return -ETIMEDOUT;
589
590 return 0;
591}
592#endif
593
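/*
 * SD card initialisation: repeatedly issue ACMD41 (APP_SEND_OP_COND) with the
 * host's supported voltage window (plus HCS for SDHC/SDXC and S18R when UHS
 * signalling is wanted) until the card reports that power-up is complete,
 * then record the OCR and the resulting high-capacity (CCS) flag.
 */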
594static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
595{
596 int timeout = 1000;
597 int err;
598 struct mmc_cmd cmd;
599
600 while (1) {
601 cmd.cmdidx = MMC_CMD_APP_CMD;
602 cmd.resp_type = MMC_RSP_R1;
603 cmd.cmdarg = 0;
604
605 err = mmc_send_cmd(mmc, &cmd, NULL);
606
607 if (err)
608 return err;
609
610 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
611 cmd.resp_type = MMC_RSP_R3;
		/*
		 * Most cards do not answer if some reserved bits
		 * in the OCR are set. However, some controllers
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low-voltage SD cards is not yet
		 * specified.
		 */
620 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
621 (mmc->cfg->voltages & 0xff8000);
622
623 if (mmc->version == SD_VERSION_2)
624 cmd.cmdarg |= OCR_HCS;
625
626 if (uhs_en)
627 cmd.cmdarg |= OCR_S18R;
628
629 err = mmc_send_cmd(mmc, &cmd, NULL);
630
631 if (err)
632 return err;
633
634 if (cmd.response[0] & OCR_BUSY)
635 break;
636
637 if (timeout-- <= 0)
638 return -EOPNOTSUPP;
639
640 udelay(1000);
641 }
642
643 if (mmc->version != SD_VERSION_2)
644 mmc->version = SD_VERSION_1_0;
645
646 if (mmc_host_is_spi(mmc)) {
647 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
648 cmd.resp_type = MMC_RSP_R3;
649 cmd.cmdarg = 0;
650
651 err = mmc_send_cmd(mmc, &cmd, NULL);
652
653 if (err)
654 return err;
655 }
656
657 mmc->ocr = cmd.response[0];
658
659#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
660 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
661 == 0x41000000) {
662 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
663 if (err)
664 return err;
665 }
666#endif
667
668 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
669 mmc->rca = 0;
670
671 return 0;
672}
673
674static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
675{
676 struct mmc_cmd cmd;
677 int err;
678
679 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
680 cmd.resp_type = MMC_RSP_R3;
681 cmd.cmdarg = 0;
682 if (use_arg && !mmc_host_is_spi(mmc))
683 cmd.cmdarg = OCR_HCS |
684 (mmc->cfg->voltages &
685 (mmc->ocr & OCR_VOLTAGE_MASK)) |
686 (mmc->ocr & OCR_ACCESS_MODE);
687
688 err = mmc_send_cmd(mmc, &cmd, NULL);
689 if (err)
690 return err;
691 mmc->ocr = cmd.response[0];
692 return 0;
693}
694
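/*
 * Start eMMC initialisation by looping on CMD1 (SEND_OP_COND). The first
 * iteration probes the card's OCR; later iterations advertise the common
 * voltage window and access mode. The loop ends once the card sets the
 * power-up status bit in the OCR (or the timeout expires); the remaining
 * work is deferred to mmc_complete_op_cond().
 */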
695static int mmc_send_op_cond(struct mmc *mmc)
696{
697 int err, i;
698 int timeout = 1000;
699 uint start;
	/* Some cards seem to need this */
702 mmc_go_idle(mmc);
703
704 start = get_timer(0);
705
706 for (i = 0; ; i++) {
707 err = mmc_send_op_cond_iter(mmc, i != 0);
708 if (err)
709 return err;
		/* exit once the card has finished power-up (the OCR busy flag is inverted) */
712 if (mmc->ocr & OCR_BUSY)
713 break;
714
715 if (get_timer(start) > timeout)
716 return -ETIMEDOUT;
717 udelay(100);
718 }
719 mmc->op_cond_pending = 1;
720 return 0;
721}
722
723static int mmc_complete_op_cond(struct mmc *mmc)
724{
725 struct mmc_cmd cmd;
726 int timeout = 1000;
727 ulong start;
728 int err;
729
730 mmc->op_cond_pending = 0;
731 if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
733 mmc_go_idle(mmc);
734
735 start = get_timer(0);
736 while (1) {
737 err = mmc_send_op_cond_iter(mmc, 1);
738 if (err)
739 return err;
740 if (mmc->ocr & OCR_BUSY)
741 break;
742 if (get_timer(start) > timeout)
743 return -EOPNOTSUPP;
744 udelay(100);
745 }
746 }
747
748 if (mmc_host_is_spi(mmc)) {
749 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
750 cmd.resp_type = MMC_RSP_R3;
751 cmd.cmdarg = 0;
752
753 err = mmc_send_cmd(mmc, &cmd, NULL);
754
755 if (err)
756 return err;
757
758 mmc->ocr = cmd.response[0];
759 }
760
761 mmc->version = MMC_VERSION_UNKNOWN;
762
763 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
764 mmc->rca = 1;
765
766 return 0;
767}
768
769
770int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
771{
772 struct mmc_cmd cmd;
773 struct mmc_data data;
774 int err;
775
	/* Read the card's 512-byte Extended CSD register */
777 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
778 cmd.resp_type = MMC_RSP_R1;
779 cmd.cmdarg = 0;
780
781 data.dest = (char *)ext_csd;
782 data.blocks = 1;
783 data.blocksize = MMC_MAX_BLOCK_LEN;
784 data.flags = MMC_DATA_READ;
785
786 err = mmc_send_cmd(mmc, &cmd, &data);
787
788 return err;
789}
790
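/*
 * Issue CMD6 (SWITCH) to write one byte of the EXT_CSD and then wait for the
 * card to finish the internal update: preferably by watching DAT0 through the
 * host's wait_dat0 hook, otherwise by polling CMD13, or by simply sleeping
 * for the worst-case time when status polling is not allowed.
 */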
791static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
792 bool send_status)
793{
794 unsigned int status, start;
795 struct mmc_cmd cmd;
796 int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
797 bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
798 (index == EXT_CSD_PART_CONF);
799 int ret;
800
801 if (mmc->gen_cmd6_time)
802 timeout_ms = mmc->gen_cmd6_time * 10;
803
804 if (is_part_switch && mmc->part_switch_time)
805 timeout_ms = mmc->part_switch_time * 10;
806
807 cmd.cmdidx = MMC_CMD_SWITCH;
808 cmd.resp_type = MMC_RSP_R1b;
809 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
810 (index << 16) |
811 (value << 8);
812
813 ret = mmc_send_cmd_retry(mmc, &cmd, NULL, 3);
814 if (ret)
815 return ret;
816
817 start = get_timer(0);
818
	/* poll dat0 for rdy/busy status */
820 ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
821 if (ret && ret != -ENOSYS)
822 return ret;
823
	/*
	 * If the host neither allows polling via CMD13 nor is capable of
	 * polling DAT0 via mmc_wait_dat0(), rely on waiting for the stated
	 * timeout to be sufficient.
	 */
829 if (ret == -ENOSYS && !send_status) {
830 mdelay(timeout_ms);
831 return 0;
832 }
833
834 if (!send_status)
835 return 0;
836
	/*
	 * Finally wait until the card is ready or indicates a failure
	 * to switch. It doesn't hurt to use CMD13 here even if send_status
	 * is false, because by now (after 'timeout_ms' ms) the bus should be
	 * reliable.
	 */
842 do {
843 ret = mmc_send_status(mmc, &status);
844
845 if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
846 pr_debug("switch failed %d/%d/0x%x !\n", set, index,
847 value);
848 return -EIO;
849 }
850 if (!ret && (status & MMC_STATUS_RDY_FOR_DATA) &&
851 (status & MMC_STATUS_CURR_STATE) == MMC_STATE_TRANS)
852 return 0;
853 udelay(100);
854 } while (get_timer(start) < timeout_ms);
855
856 return -ETIMEDOUT;
857}
858
859int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
860{
861 return __mmc_switch(mmc, set, index, value, true);
862}
863
864int mmc_boot_wp(struct mmc *mmc)
865{
866 return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
867}
868
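/*
 * Enable power-on write protection for a single boot area by programming the
 * BOOT_WP byte of the EXT_CSD: partition 0 protects the first boot area,
 * partition 1 the second, and any other value falls back to mmc_boot_wp(),
 * which protects both.
 */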
869int mmc_boot_wp_single_partition(struct mmc *mmc, int partition)
870{
871 u8 value;
872 int ret;
873
874 value = EXT_CSD_BOOT_WP_B_PWR_WP_EN;
875
876 if (partition == 0) {
877 value |= EXT_CSD_BOOT_WP_B_SEC_WP_SEL;
878 ret = mmc_switch(mmc,
879 EXT_CSD_CMD_SET_NORMAL,
880 EXT_CSD_BOOT_WP,
881 value);
882 } else if (partition == 1) {
883 value |= EXT_CSD_BOOT_WP_B_SEC_WP_SEL;
884 value |= EXT_CSD_BOOT_WP_B_PWR_WP_SEC_SEL;
885 ret = mmc_switch(mmc,
886 EXT_CSD_CMD_SET_NORMAL,
887 EXT_CSD_BOOT_WP,
888 value);
889 } else {
890 ret = mmc_boot_wp(mmc);
891 }
892
893 return ret;
894}
895
896#if !CONFIG_IS_ENABLED(MMC_TINY)
897static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
898 bool hsdowngrade)
899{
900 int err;
901 int speed_bits;
902
903 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
904
905 switch (mode) {
906 case MMC_HS:
907 case MMC_HS_52:
908 case MMC_DDR_52:
909 speed_bits = EXT_CSD_TIMING_HS;
910 break;
911#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
912 case MMC_HS_200:
913 speed_bits = EXT_CSD_TIMING_HS200;
914 break;
915#endif
916#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
917 case MMC_HS_400:
918 speed_bits = EXT_CSD_TIMING_HS400;
919 break;
920#endif
921#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
922 case MMC_HS_400_ES:
923 speed_bits = EXT_CSD_TIMING_HS400;
924 break;
925#endif
926 case MMC_LEGACY:
927 speed_bits = EXT_CSD_TIMING_LEGACY;
928 break;
929 default:
930 return -EINVAL;
931 }
932
933 err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
934 speed_bits, !hsdowngrade);
935 if (err)
936 return err;
937
938#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
939 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	/*
	 * In case the eMMC is in HS200/HS400 mode and we are downgrading
	 * to HS mode, the card clock is still running much faster than
	 * the supported HS mode clock, so we cannot reliably read out the
	 * Extended CSD. Reconfigure the host to run at HS mode first.
	 */
946 if (hsdowngrade) {
947 mmc_select_mode(mmc, MMC_HS);
948 mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
949 }
950#endif
951
952 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
		/* Now check to see that it worked */
954 err = mmc_send_ext_csd(mmc, test_csd);
955 if (err)
956 return err;
957
		/* No high-speed support */
959 if (!test_csd[EXT_CSD_HS_TIMING])
960 return -ENOTSUPP;
961 }
962
963 return 0;
964}
965
966static int mmc_get_capabilities(struct mmc *mmc)
967{
968 u8 *ext_csd = mmc->ext_csd;
969 char cardtype;
970
971 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
972
973 if (mmc_host_is_spi(mmc))
974 return 0;
975
	/* Only version 4 and later provides the EXT_CSD and high-speed modes */
977 if (mmc->version < MMC_VERSION_4)
978 return 0;
979
980 if (!ext_csd) {
981 pr_err("No ext_csd found!\n");
982 return -ENOTSUPP;
983 }
984
985 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
986
987 cardtype = ext_csd[EXT_CSD_CARD_TYPE];
988 mmc->cardtype = cardtype;
989
990#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
991 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
992 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
993 mmc->card_caps |= MMC_MODE_HS200;
994 }
995#endif
996#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
997 CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
998 if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
999 EXT_CSD_CARD_TYPE_HS400_1_8V)) {
1000 mmc->card_caps |= MMC_MODE_HS400;
1001 }
1002#endif
1003 if (cardtype & EXT_CSD_CARD_TYPE_52) {
1004 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
1005 mmc->card_caps |= MMC_MODE_DDR_52MHz;
1006 mmc->card_caps |= MMC_MODE_HS_52MHz;
1007 }
1008 if (cardtype & EXT_CSD_CARD_TYPE_26)
1009 mmc->card_caps |= MMC_MODE_HS;
1010
1011#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1012 if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
1013 (mmc->card_caps & MMC_MODE_HS400)) {
1014 mmc->card_caps |= MMC_MODE_HS400_ES;
1015 }
1016#endif
1017
1018 return 0;
1019}
1020#endif
1021
1022static int mmc_set_capacity(struct mmc *mmc, int part_num)
1023{
1024 switch (part_num) {
1025 case 0:
1026 mmc->capacity = mmc->capacity_user;
1027 break;
1028 case 1:
1029 case 2:
1030 mmc->capacity = mmc->capacity_boot;
1031 break;
1032 case 3:
1033 mmc->capacity = mmc->capacity_rpmb;
1034 break;
1035 case 4:
1036 case 5:
1037 case 6:
1038 case 7:
1039 mmc->capacity = mmc->capacity_gp[part_num - 4];
1040 break;
1041 default:
1042 return -1;
1043 }
1044
1045 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
1046
1047 return 0;
1048}
1049
1050int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
1051{
1052 int ret;
1053 int retry = 3;
1054
1055 do {
1056 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1057 EXT_CSD_PART_CONF,
1058 (mmc->part_config & ~PART_ACCESS_MASK)
1059 | (part_num & PART_ACCESS_MASK));
1060 } while (ret && retry--);
1061
	/*
	 * Set the capacity if the switch succeeded or was intended
	 * to return to representing the raw device.
	 */
1066 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
1067 ret = mmc_set_capacity(mmc, part_num);
1068 mmc_get_blk_desc(mmc)->hwpart = part_num;
1069 }
1070
1071 return ret;
1072}
1073
1074#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
1075int mmc_hwpart_config(struct mmc *mmc,
1076 const struct mmc_hwpart_conf *conf,
1077 enum mmc_hwpart_conf_mode mode)
1078{
1079 u8 part_attrs = 0;
1080 u32 enh_size_mult;
1081 u32 enh_start_addr;
1082 u32 gp_size_mult[4];
1083 u32 max_enh_size_mult;
1084 u32 tot_enh_size_mult = 0;
1085 u8 wr_rel_set;
1086 int i, pidx, err;
1087 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1088
1089 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
1090 return -EINVAL;
1091
1092 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
1093 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
1094 return -EMEDIUMTYPE;
1095 }
1096
1097 if (!(mmc->part_support & PART_SUPPORT)) {
1098 pr_err("Card does not support partitioning\n");
1099 return -EMEDIUMTYPE;
1100 }
1101
1102 if (!mmc->hc_wp_grp_size) {
1103 pr_err("Card does not define HC WP group size\n");
1104 return -EMEDIUMTYPE;
1105 }
1106
	/* check partition alignment and total enhanced size */
1108 if (conf->user.enh_size) {
1109 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
1110 conf->user.enh_start % mmc->hc_wp_grp_size) {
1111 pr_err("User data enhanced area not HC WP group "
1112 "size aligned\n");
1113 return -EINVAL;
1114 }
1115 part_attrs |= EXT_CSD_ENH_USR;
1116 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
1117 if (mmc->high_capacity) {
1118 enh_start_addr = conf->user.enh_start;
1119 } else {
1120 enh_start_addr = (conf->user.enh_start << 9);
1121 }
1122 } else {
1123 enh_size_mult = 0;
1124 enh_start_addr = 0;
1125 }
1126 tot_enh_size_mult += enh_size_mult;
1127
1128 for (pidx = 0; pidx < 4; pidx++) {
1129 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
1130 pr_err("GP%i partition not HC WP group size "
1131 "aligned\n", pidx+1);
1132 return -EINVAL;
1133 }
1134 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1135 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1136 part_attrs |= EXT_CSD_ENH_GP(pidx);
1137 tot_enh_size_mult += gp_size_mult[pidx];
1138 }
1139 }
1140
	if (part_attrs && !(mmc->part_support & ENHNCD_SUPPORT)) {
1142 pr_err("Card does not support enhanced attribute\n");
1143 return -EMEDIUMTYPE;
1144 }
1145
1146 err = mmc_send_ext_csd(mmc, ext_csd);
1147 if (err)
1148 return err;
1149
1150 max_enh_size_mult =
1151 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1152 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1153 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1154 if (tot_enh_size_mult > max_enh_size_mult) {
1155 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1156 tot_enh_size_mult, max_enh_size_mult);
1157 return -EMEDIUMTYPE;
1158 }
1159
	/*
	 * The default value of EXT_CSD_WR_REL_SET is device
	 * dependent; the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set, and only once, before
	 * partitioning is completed.
	 */
1164 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1165 if (conf->user.wr_rel_change) {
1166 if (conf->user.wr_rel_set)
1167 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1168 else
1169 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1170 }
1171 for (pidx = 0; pidx < 4; pidx++) {
1172 if (conf->gp_part[pidx].wr_rel_change) {
1173 if (conf->gp_part[pidx].wr_rel_set)
1174 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1175 else
1176 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1177 }
1178 }
1179
1180 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1181 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1182 puts("Card does not support host controlled partition write "
1183 "reliability settings\n");
1184 return -EMEDIUMTYPE;
1185 }
1186
1187 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1188 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1189 pr_err("Card already partitioned\n");
1190 return -EPERM;
1191 }
1192
1193 if (mode == MMC_HWPART_CONF_CHECK)
1194 return 0;
1195
	/* Partitioning requires high-capacity size definitions */
1197 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1198 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1199 EXT_CSD_ERASE_GROUP_DEF, 1);
1200
1201 if (err)
1202 return err;
1203
1204 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
1205
1206#if CONFIG_IS_ENABLED(MMC_WRITE)
		/* update erase group size to be high-capacity */
1208 mmc->erase_grp_size =
1209 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
1210#endif
1211
1212 }
1213
	/* all OK, write the configuration */
1215 for (i = 0; i < 4; i++) {
1216 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1217 EXT_CSD_ENH_START_ADDR+i,
1218 (enh_start_addr >> (i*8)) & 0xFF);
1219 if (err)
1220 return err;
1221 }
1222 for (i = 0; i < 3; i++) {
1223 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1224 EXT_CSD_ENH_SIZE_MULT+i,
1225 (enh_size_mult >> (i*8)) & 0xFF);
1226 if (err)
1227 return err;
1228 }
1229 for (pidx = 0; pidx < 4; pidx++) {
1230 for (i = 0; i < 3; i++) {
1231 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1232 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1233 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1234 if (err)
1235 return err;
1236 }
1237 }
1238 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1239 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1240 if (err)
1241 return err;
1242
1243 if (mode == MMC_HWPART_CONF_SET)
1244 return 0;
1245
	/*
	 * Update WR_REL_SET now, as it must be written before setting
	 * PARTITION_SETTING_COMPLETED; being a write-once field it can
	 * only be programmed while completing the partitioning.
	 */
1250 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1251 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1252 EXT_CSD_WR_REL_SET, wr_rel_set);
1253 if (err)
1254 return err;
1255 }
1256
	/*
	 * Setting PARTITION_SETTING_COMPLETED confirms the partition
	 * configuration; the new layout only becomes effective after the
	 * card has been power cycled.
	 */
1262 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1263 EXT_CSD_PARTITION_SETTING,
1264 EXT_CSD_PARTITION_SETTING_COMPLETED);
1265 if (err)
1266 return err;
1267
1268 return 0;
1269}
1270#endif
1271
1272#if !CONFIG_IS_ENABLED(DM_MMC)
1273int mmc_getcd(struct mmc *mmc)
1274{
1275 int cd;
1276
1277 cd = board_mmc_getcd(mmc);
1278
1279 if (cd < 0) {
1280 if (mmc->cfg->ops->getcd)
1281 cd = mmc->cfg->ops->getcd(mmc);
1282 else
1283 cd = 1;
1284 }
1285
1286 return cd;
1287}
1288#endif
1289
1290#if !CONFIG_IS_ENABLED(MMC_TINY)
1291static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1292{
1293 struct mmc_cmd cmd;
1294 struct mmc_data data;
1295
1296
1297 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1298 cmd.resp_type = MMC_RSP_R1;
1299 cmd.cmdarg = (mode << 31) | 0xffffff;
1300 cmd.cmdarg &= ~(0xf << (group * 4));
1301 cmd.cmdarg |= value << (group * 4);
1302
1303 data.dest = (char *)resp;
1304 data.blocksize = 64;
1305 data.blocks = 1;
1306 data.flags = MMC_DATA_READ;
1307
1308 return mmc_send_cmd(mmc, &cmd, &data);
1309}
1310
1311static int sd_get_capabilities(struct mmc *mmc)
1312{
1313 int err;
1314 struct mmc_cmd cmd;
1315 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1316 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1317 struct mmc_data data;
1318 int timeout;
1319#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1320 u32 sd3_bus_mode;
1321#endif
1322
1323 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
1324
1325 if (mmc_host_is_spi(mmc))
1326 return 0;
1327
	/* Read the SCR to find out if this card supports higher speeds */
1329 cmd.cmdidx = MMC_CMD_APP_CMD;
1330 cmd.resp_type = MMC_RSP_R1;
1331 cmd.cmdarg = mmc->rca << 16;
1332
1333 err = mmc_send_cmd(mmc, &cmd, NULL);
1334
1335 if (err)
1336 return err;
1337
1338 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1339 cmd.resp_type = MMC_RSP_R1;
1340 cmd.cmdarg = 0;
1341
1342 data.dest = (char *)scr;
1343 data.blocksize = 8;
1344 data.blocks = 1;
1345 data.flags = MMC_DATA_READ;
1346
1347 err = mmc_send_cmd_retry(mmc, &cmd, &data, 3);
1348
1349 if (err)
1350 return err;
1351
1352 mmc->scr[0] = __be32_to_cpu(scr[0]);
1353 mmc->scr[1] = __be32_to_cpu(scr[1]);
1354
1355 switch ((mmc->scr[0] >> 24) & 0xf) {
1356 case 0:
1357 mmc->version = SD_VERSION_1_0;
1358 break;
1359 case 1:
1360 mmc->version = SD_VERSION_1_10;
1361 break;
1362 case 2:
1363 mmc->version = SD_VERSION_2;
1364 if ((mmc->scr[0] >> 15) & 0x1)
1365 mmc->version = SD_VERSION_3;
1366 break;
1367 default:
1368 mmc->version = SD_VERSION_1_0;
1369 break;
1370 }
1371
1372 if (mmc->scr[0] & SD_DATA_4BIT)
1373 mmc->card_caps |= MMC_MODE_4BIT;
1374
	/* Version 1.0 doesn't support switching */
1376 if (mmc->version == SD_VERSION_1_0)
1377 return 0;
1378
1379 timeout = 4;
1380 while (timeout--) {
1381 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1382 (u8 *)switch_status);
1383
1384 if (err)
1385 return err;
1386
		/* The high-speed function is busy. Try again */
1388 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1389 break;
1390 }
1391
	/* Mark high-speed as supported if the switch status reports it */
1393 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1394 mmc->card_caps |= MMC_CAP(SD_HS);
1395
1396#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* Versions before 3.0 don't support UHS modes */
1398 if (mmc->version < SD_VERSION_3)
1399 return 0;
1400
1401 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1402 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1403 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1404 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1405 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1406 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1407 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1408 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1409 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1410 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1411 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1412#endif
1413
1414 return 0;
1415}
1416
1417static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1418{
1419 int err;
1420
1421 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1422 int speed;
1423
	/* SD version 1.00 and 1.01 do not support CMD6 */
1425 if (mmc->version == SD_VERSION_1_0)
1426 return 0;
1427
1428 switch (mode) {
1429 case MMC_LEGACY:
1430 speed = UHS_SDR12_BUS_SPEED;
1431 break;
1432 case SD_HS:
1433 speed = HIGH_SPEED_BUS_SPEED;
1434 break;
1435#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1436 case UHS_SDR12:
1437 speed = UHS_SDR12_BUS_SPEED;
1438 break;
1439 case UHS_SDR25:
1440 speed = UHS_SDR25_BUS_SPEED;
1441 break;
1442 case UHS_SDR50:
1443 speed = UHS_SDR50_BUS_SPEED;
1444 break;
1445 case UHS_DDR50:
1446 speed = UHS_DDR50_BUS_SPEED;
1447 break;
1448 case UHS_SDR104:
1449 speed = UHS_SDR104_BUS_SPEED;
1450 break;
1451#endif
1452 default:
1453 return -EINVAL;
1454 }
1455
1456 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1457 if (err)
1458 return err;
1459
1460 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1461 return -ENOTSUPP;
1462
1463 return 0;
1464}
1465
1466static int sd_select_bus_width(struct mmc *mmc, int w)
1467{
1468 int err;
1469 struct mmc_cmd cmd;
1470
1471 if ((w != 4) && (w != 1))
1472 return -EINVAL;
1473
1474 cmd.cmdidx = MMC_CMD_APP_CMD;
1475 cmd.resp_type = MMC_RSP_R1;
1476 cmd.cmdarg = mmc->rca << 16;
1477
1478 err = mmc_send_cmd(mmc, &cmd, NULL);
1479 if (err)
1480 return err;
1481
1482 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1483 cmd.resp_type = MMC_RSP_R1;
1484 if (w == 4)
1485 cmd.cmdarg = 2;
1486 else if (w == 1)
1487 cmd.cmdarg = 0;
1488 err = mmc_send_cmd(mmc, &cmd, NULL);
1489 if (err)
1490 return err;
1491
1492 return 0;
1493}
1494#endif
1495
1496#if CONFIG_IS_ENABLED(MMC_WRITE)
1497static int sd_read_ssr(struct mmc *mmc)
1498{
1499 static const unsigned int sd_au_size[] = {
1500 0, SZ_16K / 512, SZ_32K / 512,
1501 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1502 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1503 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1504 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1505 SZ_64M / 512,
1506 };
1507 int err, i;
1508 struct mmc_cmd cmd;
1509 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1510 struct mmc_data data;
1511 unsigned int au, eo, et, es;
1512
1513 cmd.cmdidx = MMC_CMD_APP_CMD;
1514 cmd.resp_type = MMC_RSP_R1;
1515 cmd.cmdarg = mmc->rca << 16;
1516
1517 err = mmc_send_cmd_quirks(mmc, &cmd, NULL, MMC_QUIRK_RETRY_APP_CMD, 4);
1518 if (err)
1519 return err;
1520
1521 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1522 cmd.resp_type = MMC_RSP_R1;
1523 cmd.cmdarg = 0;
1524
1525 data.dest = (char *)ssr;
1526 data.blocksize = 64;
1527 data.blocks = 1;
1528 data.flags = MMC_DATA_READ;
1529
1530 err = mmc_send_cmd_retry(mmc, &cmd, &data, 3);
1531 if (err)
1532 return err;
1533
1534 for (i = 0; i < 16; i++)
1535 ssr[i] = be32_to_cpu(ssr[i]);
1536
1537 au = (ssr[2] >> 12) & 0xF;
1538 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1539 mmc->ssr.au = sd_au_size[au];
1540 es = (ssr[3] >> 24) & 0xFF;
1541 es |= (ssr[2] & 0xFF) << 8;
1542 et = (ssr[3] >> 18) & 0x3F;
1543 if (es && et) {
1544 eo = (ssr[3] >> 16) & 0x3;
1545 mmc->ssr.erase_timeout = (et * 1000) / es;
1546 mmc->ssr.erase_offset = eo * 1000;
1547 }
1548 } else {
1549 pr_debug("Invalid Allocation Unit Size.\n");
1550 }
1551
1552 return 0;
1553}
1554#endif

/* frequency bases */
/* divided by 10 to be nice to platforms without floating point */
1557static const int fbase[] = {
1558 10000,
1559 100000,
1560 1000000,
1561 10000000,
1562};
1563
/* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
 * to platforms without floating point.
 */
1567static const u8 multipliers[] = {
1568 0,
1569 10,
1570 12,
1571 13,
1572 15,
1573 20,
1574 25,
1575 30,
1576 35,
1577 40,
1578 45,
1579 50,
1580 55,
1581 60,
1582 70,
1583 80,
1584};
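
/*
 * Worked example: a CSD TRAN_SPEED byte of 0x32 (the common 25 MHz value)
 * has frequency-unit field 0x2 and multiplier field 0x6, so the legacy speed
 * computed in mmc_startup() is fbase[2] * multipliers[6]
 * = 1000000 * 25 = 25 MHz.
 */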
1585
1586static inline int bus_width(uint cap)
1587{
1588 if (cap == MMC_MODE_8BIT)
1589 return 8;
1590 if (cap == MMC_MODE_4BIT)
1591 return 4;
1592 if (cap == MMC_MODE_1BIT)
1593 return 1;
	pr_warn("invalid bus width capability 0x%x\n", cap);
1595 return 0;
1596}
1597
1598#if !CONFIG_IS_ENABLED(DM_MMC)
1599#ifdef MMC_SUPPORTS_TUNING
1600static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1601{
1602 return -ENOTSUPP;
1603}
1604#endif
1605
1606static int mmc_set_ios(struct mmc *mmc)
1607{
1608 int ret = 0;
1609
1610 if (mmc->cfg->ops->set_ios)
1611 ret = mmc->cfg->ops->set_ios(mmc);
1612
1613 return ret;
1614}
1615
1616static int mmc_host_power_cycle(struct mmc *mmc)
1617{
1618 int ret = 0;
1619
1620 if (mmc->cfg->ops->host_power_cycle)
1621 ret = mmc->cfg->ops->host_power_cycle(mmc);
1622
1623 return ret;
1624}
1625#endif
1626
1627int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1628{
1629 if (!disable) {
1630 if (clock > mmc->cfg->f_max)
1631 clock = mmc->cfg->f_max;
1632
1633 if (clock < mmc->cfg->f_min)
1634 clock = mmc->cfg->f_min;
1635 }
1636
1637 mmc->clock = clock;
1638 mmc->clk_disable = disable;
1639
1640 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1641
1642 return mmc_set_ios(mmc);
1643}
1644
1645static int mmc_set_bus_width(struct mmc *mmc, uint width)
1646{
1647 mmc->bus_width = width;
1648
1649 return mmc_set_ios(mmc);
1650}
1651
1652#if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * Helper function to display the capabilities in a human-readable
 * manner. The capabilities include bus width and supported modes.
 */
1658void mmc_dump_capabilities(const char *text, uint caps)
1659{
1660 enum bus_mode mode;
1661
1662 pr_debug("%s: widths [", text);
1663 if (caps & MMC_MODE_8BIT)
1664 pr_debug("8, ");
1665 if (caps & MMC_MODE_4BIT)
1666 pr_debug("4, ");
1667 if (caps & MMC_MODE_1BIT)
1668 pr_debug("1, ");
1669 pr_debug("\b\b] modes [");
1670 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1671 if (MMC_CAP(mode) & caps)
1672 pr_debug("%s, ", mmc_mode_name(mode));
1673 pr_debug("\b\b]\n");
1674}
1675#endif
1676
1677struct mode_width_tuning {
1678 enum bus_mode mode;
1679 uint widths;
1680#ifdef MMC_SUPPORTS_TUNING
1681 uint tuning;
1682#endif
1683};
1684
1685#if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1686int mmc_voltage_to_mv(enum mmc_voltage voltage)
1687{
1688 switch (voltage) {
1689 case MMC_SIGNAL_VOLTAGE_000: return 0;
1690 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1691 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1692 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1693 }
1694 return -EINVAL;
1695}
1696
1697static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1698{
1699 int err;
1700
1701 if (mmc->signal_voltage == signal_voltage)
1702 return 0;
1703
1704 mmc->signal_voltage = signal_voltage;
1705 err = mmc_set_ios(mmc);
1706 if (err)
1707 pr_debug("unable to set voltage (err %d)\n", err);
1708
1709 return err;
1710}
1711#else
1712static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1713{
1714 return 0;
1715}
1716#endif
1717
1718#if !CONFIG_IS_ENABLED(MMC_TINY)
1719static const struct mode_width_tuning sd_modes_by_pref[] = {
1720#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1721#ifdef MMC_SUPPORTS_TUNING
1722 {
1723 .mode = UHS_SDR104,
1724 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1725 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1726 },
1727#endif
1728 {
1729 .mode = UHS_SDR50,
1730 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1731 },
1732 {
1733 .mode = UHS_DDR50,
1734 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1735 },
1736 {
1737 .mode = UHS_SDR25,
1738 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1739 },
1740#endif
1741 {
1742 .mode = SD_HS,
1743 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1744 },
1745#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1746 {
1747 .mode = UHS_SDR12,
1748 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1749 },
1750#endif
1751 {
1752 .mode = MMC_LEGACY,
1753 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1754 }
1755};
1756
1757#define for_each_sd_mode_by_pref(caps, mwt) \
1758 for (mwt = sd_modes_by_pref;\
1759 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1760 mwt++) \
1761 if (caps & MMC_CAP(mwt->mode))
1762
1763static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1764{
1765 int err;
1766 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1767 const struct mode_width_tuning *mwt;
1768#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1769 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1770#else
1771 bool uhs_en = false;
1772#endif
1773 uint caps;
1774
1775#ifdef DEBUG
1776 mmc_dump_capabilities("sd card", card_caps);
1777 mmc_dump_capabilities("host", mmc->host_caps);
1778#endif
1779
1780 if (mmc_host_is_spi(mmc)) {
1781 mmc_set_bus_width(mmc, 1);
1782 mmc_select_mode(mmc, MMC_LEGACY);
1783 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
1784#if CONFIG_IS_ENABLED(MMC_WRITE)
1785 err = sd_read_ssr(mmc);
1786 if (err)
1787 pr_warn("unable to read ssr\n");
1788#endif
1789 return 0;
1790 }
1791
	/* Restrict card's capabilities by what the host can do */
1793 caps = card_caps & mmc->host_caps;
1794
1795 if (!uhs_en)
1796 caps &= ~UHS_CAPS;
1797
1798 for_each_sd_mode_by_pref(caps, mwt) {
1799 uint *w;
1800
1801 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1802 if (*w & caps & mwt->widths) {
1803 pr_debug("trying mode %s width %d (at %d MHz)\n",
1804 mmc_mode_name(mwt->mode),
1805 bus_width(*w),
1806 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1807
				/* configure the bus width (card + host) */
1809 err = sd_select_bus_width(mmc, bus_width(*w));
1810 if (err)
1811 goto error;
1812 mmc_set_bus_width(mmc, bus_width(*w));
1813
				/* configure the bus mode (card) */
1815 err = sd_set_card_speed(mmc, mwt->mode);
1816 if (err)
1817 goto error;
1818
				/* configure the bus mode (host) */
1820 mmc_select_mode(mmc, mwt->mode);
1821 mmc_set_clock(mmc, mmc->tran_speed,
1822 MMC_CLK_ENABLE);
1823
1824#ifdef MMC_SUPPORTS_TUNING
				/* execute tuning if needed */
1826 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1827 err = mmc_execute_tuning(mmc,
1828 mwt->tuning);
1829 if (err) {
1830 pr_debug("tuning failed\n");
1831 goto error;
1832 }
1833 }
1834#endif
1835
1836#if CONFIG_IS_ENABLED(MMC_WRITE)
1837 err = sd_read_ssr(mmc);
1838 if (err)
1839 pr_warn("unable to read ssr\n");
1840#endif
1841 if (!err)
1842 return 0;
1843
1844error:
				/* revert to a safer bus speed */
1846 mmc_select_mode(mmc, MMC_LEGACY);
1847 mmc_set_clock(mmc, mmc->tran_speed,
1848 MMC_CLK_ENABLE);
1849 }
1850 }
1851 }
1852
1853 pr_err("unable to select a mode\n");
1854 return -ENOTSUPP;
1855}
1856
/*
 * Read and compare the part of the EXT_CSD that is constant. This can be
 * used to check that a data transfer with the new bus settings is working
 * as expected.
 */
1862static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1863{
1864 int err;
1865 const u8 *ext_csd = mmc->ext_csd;
1866 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1867
1868 if (mmc->version < MMC_VERSION_4)
1869 return 0;
1870
1871 err = mmc_send_ext_csd(mmc, test_csd);
1872 if (err)
1873 return err;
1874
	/* only compare read-only fields */
1876 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1877 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1878 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1879 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1880 ext_csd[EXT_CSD_REV]
1881 == test_csd[EXT_CSD_REV] &&
1882 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1883 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1884 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1885 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1886 return 0;
1887
1888 return -EBADMSG;
1889}
1890
1891#if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1892static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1893 uint32_t allowed_mask)
1894{
1895 u32 card_mask = 0;
1896
1897 switch (mode) {
1898 case MMC_HS_400_ES:
1899 case MMC_HS_400:
1900 case MMC_HS_200:
1901 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1902 EXT_CSD_CARD_TYPE_HS400_1_8V))
1903 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1904 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1905 EXT_CSD_CARD_TYPE_HS400_1_2V))
1906 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1907 break;
1908 case MMC_DDR_52:
1909 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1910 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1911 MMC_SIGNAL_VOLTAGE_180;
1912 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1913 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1914 break;
1915 default:
1916 card_mask |= MMC_SIGNAL_VOLTAGE_330;
1917 break;
1918 }
1919
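	/*
	 * The MMC_SIGNAL_VOLTAGE_* flags are single bits with lower voltages
	 * at lower bit positions, so ffs() on the intersection of card and
	 * host masks picks the lowest voltage both sides support; on failure
	 * that bit is cleared and the next higher voltage is tried.
	 */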
1920 while (card_mask & allowed_mask) {
1921 enum mmc_voltage best_match;
1922
1923 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1924 if (!mmc_set_signal_voltage(mmc, best_match))
1925 return 0;
1926
1927 allowed_mask &= ~best_match;
1928 }
1929
1930 return -ENOTSUPP;
1931}
1932#else
1933static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1934 uint32_t allowed_mask)
1935{
1936 return 0;
1937}
1938#endif
1939
1940static const struct mode_width_tuning mmc_modes_by_pref[] = {
1941#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1942 {
1943 .mode = MMC_HS_400_ES,
1944 .widths = MMC_MODE_8BIT,
1945 },
1946#endif
1947#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1948 {
1949 .mode = MMC_HS_400,
1950 .widths = MMC_MODE_8BIT,
1951 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1952 },
1953#endif
1954#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1955 {
1956 .mode = MMC_HS_200,
1957 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1958 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1959 },
1960#endif
1961 {
1962 .mode = MMC_DDR_52,
1963 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1964 },
1965 {
1966 .mode = MMC_HS_52,
1967 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1968 },
1969 {
1970 .mode = MMC_HS,
1971 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1972 },
1973 {
1974 .mode = MMC_LEGACY,
1975 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1976 }
1977};
1978
1979#define for_each_mmc_mode_by_pref(caps, mwt) \
1980 for (mwt = mmc_modes_by_pref;\
1981 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1982 mwt++) \
1983 if (caps & MMC_CAP(mwt->mode))
1984
1985static const struct ext_csd_bus_width {
1986 uint cap;
1987 bool is_ddr;
1988 uint ext_csd_bits;
1989} ext_csd_bus_width[] = {
1990 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1991 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1992 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1993 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1994 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1995};
1996
1997#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1998static int mmc_select_hs400(struct mmc *mmc)
1999{
2000 int err;
2001
	/* Set timing to HS200 for tuning */
2003 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
2004 if (err)
2005 return err;
2006
	/* configure the bus mode (host) */
2008 mmc_select_mode(mmc, MMC_HS_200);
2009 mmc_set_clock(mmc, mmc->tran_speed, false);
2010
	/* execute tuning if needed */
2012 mmc->hs400_tuning = 1;
2013 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
2014 mmc->hs400_tuning = 0;
2015 if (err) {
2016 debug("tuning failed\n");
2017 return err;
2018 }
2019
	/* Set back to HS */
2021 mmc_set_card_speed(mmc, MMC_HS, true);
2022
2023 err = mmc_hs400_prepare_ddr(mmc);
2024 if (err)
2025 return err;
2026
2027 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
2028 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
2029 if (err)
2030 return err;
2031
2032 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
2033 if (err)
2034 return err;
2035
2036 mmc_select_mode(mmc, MMC_HS_400);
2037 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2038 if (err)
2039 return err;
2040
2041 return 0;
2042}
2043#else
2044static int mmc_select_hs400(struct mmc *mmc)
2045{
2046 return -ENOTSUPP;
2047}
2048#endif
2049
2050#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
2051#if !CONFIG_IS_ENABLED(DM_MMC)
2052static int mmc_set_enhanced_strobe(struct mmc *mmc)
2053{
2054 return -ENOTSUPP;
2055}
2056#endif
2057static int mmc_select_hs400es(struct mmc *mmc)
2058{
2059 int err;
2060
2061 err = mmc_set_card_speed(mmc, MMC_HS, true);
2062 if (err)
2063 return err;
2064
2065 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
2066 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
2067 EXT_CSD_BUS_WIDTH_STROBE);
2068 if (err) {
2069 printf("switch to bus width for hs400 failed\n");
2070 return err;
2071 }
2072
2073 err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
2074 if (err)
2075 return err;
2076
2077 mmc_select_mode(mmc, MMC_HS_400_ES);
2078 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2079 if (err)
2080 return err;
2081
2082 return mmc_set_enhanced_strobe(mmc);
2083}
2084#else
2085static int mmc_select_hs400es(struct mmc *mmc)
2086{
2087 return -ENOTSUPP;
2088}
2089#endif
2090
2091#define for_each_supported_width(caps, ddr, ecbv) \
2092 for (ecbv = ext_csd_bus_width;\
2093 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
2094 ecbv++) \
2095 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
2096
2097static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
2098{
2099 int err = 0;
2100 const struct mode_width_tuning *mwt;
2101 const struct ext_csd_bus_width *ecbw;
2102
2103#ifdef DEBUG
2104 mmc_dump_capabilities("mmc", card_caps);
2105 mmc_dump_capabilities("host", mmc->host_caps);
2106#endif
2107
2108 if (mmc_host_is_spi(mmc)) {
2109 mmc_set_bus_width(mmc, 1);
2110 mmc_select_mode(mmc, MMC_LEGACY);
2111 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
2112 return 0;
2113 }
2114
	/* Restrict card's capabilities by what the host can do */
2116 card_caps &= mmc->host_caps;
2117
	/* Only version 4 of MMC supports wider bus widths */
2119 if (mmc->version < MMC_VERSION_4)
2120 return 0;
2121
2122 if (!mmc->ext_csd) {
2123 pr_debug("No ext_csd found!\n");
2124 return -ENOTSUPP;
2125 }
2126
2127#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2128 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
2129 CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	/*
	 * If the card is currently in HS200/HS400 mode, drop it back to HS
	 * first: the bus-width and timing switches below must start from a
	 * mode in which the EXT_CSD can be read reliably.
	 */
2135 if (mmc->selected_mode == MMC_HS_200 ||
2136 mmc->selected_mode == MMC_HS_400 ||
2137 mmc->selected_mode == MMC_HS_400_ES)
2138 mmc_set_card_speed(mmc, MMC_HS, true);
2139 else
2140#endif
2141 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
2142
2143 for_each_mmc_mode_by_pref(card_caps, mwt) {
2144 for_each_supported_width(card_caps & mwt->widths,
2145 mmc_is_mode_ddr(mwt->mode), ecbw) {
2146 enum mmc_voltage old_voltage;
2147 pr_debug("trying mode %s width %d (at %d MHz)\n",
2148 mmc_mode_name(mwt->mode),
2149 bus_width(ecbw->cap),
2150 mmc_mode2freq(mmc, mwt->mode) / 1000000);
2151 old_voltage = mmc->signal_voltage;
2152 err = mmc_set_lowest_voltage(mmc, mwt->mode,
2153 MMC_ALL_SIGNAL_VOLTAGE);
2154 if (err)
2155 continue;
2156
			/* configure the bus width (card + host) */
2158 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2159 EXT_CSD_BUS_WIDTH,
2160 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2161 if (err)
2162 goto error;
2163 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
2164
2165 if (mwt->mode == MMC_HS_400) {
2166 err = mmc_select_hs400(mmc);
2167 if (err) {
2168 printf("Select HS400 failed %d\n", err);
2169 goto error;
2170 }
2171 } else if (mwt->mode == MMC_HS_400_ES) {
2172 err = mmc_select_hs400es(mmc);
2173 if (err) {
2174 printf("Select HS400ES failed %d\n",
2175 err);
2176 goto error;
2177 }
2178 } else {
				/* configure the bus speed (card) */
2180 err = mmc_set_card_speed(mmc, mwt->mode, false);
2181 if (err)
2182 goto error;
2183
				/*
				 * configure the bus width AND the ddr mode
				 * (card). The host side will be taken care
				 * of in the next step.
				 */
2189 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2190 err = mmc_switch(mmc,
2191 EXT_CSD_CMD_SET_NORMAL,
2192 EXT_CSD_BUS_WIDTH,
2193 ecbw->ext_csd_bits);
2194 if (err)
2195 goto error;
2196 }
2197
				/* configure the bus mode (host) */
2199 mmc_select_mode(mmc, mwt->mode);
2200 mmc_set_clock(mmc, mmc->tran_speed,
2201 MMC_CLK_ENABLE);
2202#ifdef MMC_SUPPORTS_TUNING
2203
				/* execute tuning if needed */
2205 if (mwt->tuning) {
2206 err = mmc_execute_tuning(mmc,
2207 mwt->tuning);
2208 if (err) {
2209 pr_debug("tuning failed : %d\n", err);
2210 goto error;
2211 }
2212 }
2213#endif
2214 }
2215
			/* do a transfer to check the configuration */
2217 err = mmc_read_and_compare_ext_csd(mmc);
2218 if (!err)
2219 return 0;
2220error:
2221 mmc_set_signal_voltage(mmc, old_voltage);
2222
2223 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2224 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2225 mmc_select_mode(mmc, MMC_LEGACY);
2226 mmc_set_bus_width(mmc, 1);
2227 }
2228 }
2229
2230 pr_err("unable to select a mode : %d\n", err);
2231
2232 return -ENOTSUPP;
2233}
2234#endif
2235
2236#if CONFIG_IS_ENABLED(MMC_TINY)
2237DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
2238#endif
2239
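/*
 * Read the EXT_CSD of an eMMC (version 4 and later) and derive the exact
 * spec revision, the sector-count based capacity, the CMD6 and partition
 * switch timings, the boot/RPMB/general-purpose partition sizes and the
 * erase/write-protect group geometry from it.
 */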
2240static int mmc_startup_v4(struct mmc *mmc)
2241{
2242 int err, i;
2243 u64 capacity;
2244 bool has_parts = false;
2245 bool part_completed;
2246 static const u32 mmc_versions[] = {
2247 MMC_VERSION_4,
2248 MMC_VERSION_4_1,
2249 MMC_VERSION_4_2,
2250 MMC_VERSION_4_3,
2251 MMC_VERSION_4_4,
2252 MMC_VERSION_4_41,
2253 MMC_VERSION_4_5,
2254 MMC_VERSION_5_0,
2255 MMC_VERSION_5_1
2256 };
2257
2258#if CONFIG_IS_ENABLED(MMC_TINY)
2259 u8 *ext_csd = ext_csd_bkup;
2260
2261 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2262 return 0;
2263
2264 if (!mmc->ext_csd)
2265 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2266
2267 err = mmc_send_ext_csd(mmc, ext_csd);
2268 if (err)
2269 goto error;
2270
	/* store the ext csd for future reference */
2272 if (!mmc->ext_csd)
2273 mmc->ext_csd = ext_csd;
2274#else
2275 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2276
2277 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2278 return 0;
2279
2280
2281 err = mmc_send_ext_csd(mmc, ext_csd);
2282 if (err)
2283 goto error;
2284
2285
2286 if (!mmc->ext_csd)
2287 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2288 if (!mmc->ext_csd)
2289 return -ENOMEM;
2290 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
2291#endif
2292 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
2293 return -EINVAL;
2294
2295 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2296
2297 if (mmc->version >= MMC_VERSION_4_2) {
		/*
		 * According to the JEDEC Standard, the value of
		 * the ext_csd sector count is only valid if it reports
		 * more than 2 GiB.
		 */
2303 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2304 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2305 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2306 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2307 capacity *= MMC_MAX_BLOCK_LEN;
2308 if ((capacity >> 20) > 2 * 1024)
2309 mmc->capacity_user = capacity;
2310 }
2311
2312 if (mmc->version >= MMC_VERSION_4_5)
2313 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2314
	/*
	 * The partition data may be non-zero but it is only
	 * effective if PARTITION_SETTING_COMPLETED is set in
	 * EXT_CSD, so ignore any data if this bit is not set,
	 * except for enabling the high-capacity group size
	 * definition (see below).
	 */
2321 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2322 EXT_CSD_PARTITION_SETTING_COMPLETED);
2323
2324 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2325
2326 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2327 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2328
2329
2330 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2331 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2332 ext_csd[EXT_CSD_BOOT_MULT])
2333 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2334 if (part_completed &&
2335 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2336 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
2337
2338 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2339
2340 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2341
2342 for (i = 0; i < 4; i++) {
2343 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2344 uint mult = (ext_csd[idx + 2] << 16) +
2345 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2346 if (mult)
2347 has_parts = true;
2348 if (!part_completed)
2349 continue;
2350 mmc->capacity_gp[i] = mult;
2351 mmc->capacity_gp[i] *=
2352 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2353 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2354 mmc->capacity_gp[i] <<= 19;
2355 }
2356
2357#ifndef CONFIG_SPL_BUILD
2358 if (part_completed) {
2359 mmc->enh_user_size =
2360 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2361 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2362 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2363 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2364 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2365 mmc->enh_user_size <<= 19;
2366 mmc->enh_user_start =
2367 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2368 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2369 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2370 ext_csd[EXT_CSD_ENH_START_ADDR];
2371 if (mmc->high_capacity)
2372 mmc->enh_user_start <<= 9;
2373 }
2374#endif
2375
	/*
	 * The host needs to enable the ERASE_GRP_DEF bit if the device is
	 * partitioned. This bit is lost after every reset or power cycle
	 * and it affects the erase group size.
	 */
2381 if (part_completed)
2382 has_parts = true;
2383 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2384 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2385 has_parts = true;
2386 if (has_parts) {
2387 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2388 EXT_CSD_ERASE_GROUP_DEF, 1);
2389
2390 if (err)
2391 goto error;
2392
2393 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2394 }
2395
2396 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2397#if CONFIG_IS_ENABLED(MMC_WRITE)
2398
2399 mmc->erase_grp_size =
2400 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2401#endif
2402
		/*
		 * If high capacity and partition setting completed,
		 * SEC_COUNT is valid even if it is smaller than 2 GiB
		 * (JEDEC Standard JESD84-B45, 6.2.4).
		 */
2407 if (mmc->high_capacity && part_completed) {
2408 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2409 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2410 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2411 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2412 capacity *= MMC_MAX_BLOCK_LEN;
2413 mmc->capacity_user = capacity;
2414 }
2415 }
2416#if CONFIG_IS_ENABLED(MMC_WRITE)
2417 else {
		/* Calculate the group size from the csd value. */
2419 int erase_gsz, erase_gmul;
2420
2421 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2422 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2423 mmc->erase_grp_size = (erase_gsz + 1)
2424 * (erase_gmul + 1);
2425 }
2426#endif
2427#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2428 mmc->hc_wp_grp_size = 1024
2429 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2430 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2431#endif
2432
2433 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2434
2435 return 0;
2436error:
2437 if (mmc->ext_csd) {
2438#if !CONFIG_IS_ENABLED(MMC_TINY)
2439 free(mmc->ext_csd);
2440#endif
2441 mmc->ext_csd = NULL;
2442 }
2443 return err;
2444}
2445
2446static int mmc_startup(struct mmc *mmc)
2447{
2448 int err, i;
2449 uint mult, freq;
2450 u64 cmult, csize;
2451 struct mmc_cmd cmd;
2452 struct blk_desc *bdesc;
2453
2454#ifdef CONFIG_MMC_SPI_CRC_ON
2455 if (mmc_host_is_spi(mmc)) {
2456 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2457 cmd.resp_type = MMC_RSP_R1;
2458 cmd.cmdarg = 1;
2459 err = mmc_send_cmd(mmc, &cmd, NULL);
2460 if (err)
2461 return err;
2462 }
2463#endif
2464
	/* Put the card into Identification Mode */
2466 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2467 MMC_CMD_ALL_SEND_CID;
2468 cmd.resp_type = MMC_RSP_R2;
2469 cmd.cmdarg = 0;
2470
2471 err = mmc_send_cmd_quirks(mmc, &cmd, NULL, MMC_QUIRK_RETRY_SEND_CID, 4);
2472 if (err)
2473 return err;
2474
2475 memcpy(mmc->cid, cmd.response, 16);
2476
	/*
	 * For MMC cards, set the Relative Address.
	 * For SD cards, get the Relative Address.
	 * This also puts the cards into Standby State.
	 */
2482 if (!mmc_host_is_spi(mmc)) {
2483 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2484 cmd.cmdarg = mmc->rca << 16;
2485 cmd.resp_type = MMC_RSP_R6;
2486
2487 err = mmc_send_cmd(mmc, &cmd, NULL);
2488
2489 if (err)
2490 return err;
2491
2492 if (IS_SD(mmc))
2493 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2494 }
2495
	/* Get the Card-Specific Data (CSD) */
2497 cmd.cmdidx = MMC_CMD_SEND_CSD;
2498 cmd.resp_type = MMC_RSP_R2;
2499 cmd.cmdarg = mmc->rca << 16;
2500
2501 err = mmc_send_cmd(mmc, &cmd, NULL);
2502
2503 if (err)
2504 return err;
2505
2506 mmc->csd[0] = cmd.response[0];
2507 mmc->csd[1] = cmd.response[1];
2508 mmc->csd[2] = cmd.response[2];
2509 mmc->csd[3] = cmd.response[3];
2510
2511 if (mmc->version == MMC_VERSION_UNKNOWN) {
2512 int version = (cmd.response[0] >> 26) & 0xf;
2513
2514 switch (version) {
2515 case 0:
2516 mmc->version = MMC_VERSION_1_2;
2517 break;
2518 case 1:
2519 mmc->version = MMC_VERSION_1_4;
2520 break;
2521 case 2:
2522 mmc->version = MMC_VERSION_2_2;
2523 break;
2524 case 3:
2525 mmc->version = MMC_VERSION_3;
2526 break;
2527 case 4:
2528 mmc->version = MMC_VERSION_4;
2529 break;
2530 default:
2531 mmc->version = MMC_VERSION_1_2;
2532 break;
2533 }
2534 }
2535
	/* Divide the frequency by 10, since the multipliers are 10x bigger */
2537 freq = fbase[(cmd.response[0] & 0x7)];
2538 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2539
2540 mmc->legacy_speed = freq * mult;
2541 mmc_select_mode(mmc, MMC_LEGACY);
2542
2543 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2544 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2545#if CONFIG_IS_ENABLED(MMC_WRITE)
2546
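	/*
	 * SD cards use the read block length for writes; (e)MMC reports the
	 * write block length separately in the CSD.
	 */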
2547 if (IS_SD(mmc))
2548 mmc->write_bl_len = mmc->read_bl_len;
2549 else
2550 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2551#endif
2552
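	/* Work out the user capacity from the CSD */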
2553 if (mmc->high_capacity) {
2554 csize = (mmc->csd[1] & 0x3f) << 16
2555 | (mmc->csd[2] & 0xffff0000) >> 16;
2556 cmult = 8;
2557 } else {
2558 csize = (mmc->csd[1] & 0x3ff) << 2
2559 | (mmc->csd[2] & 0xc0000000) >> 30;
2560 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2561 }
2562
2563 mmc->capacity_user = (csize + 1) << (cmult + 2);
2564 mmc->capacity_user *= mmc->read_bl_len;
2565 mmc->capacity_boot = 0;
2566 mmc->capacity_rpmb = 0;
2567 for (i = 0; i < 4; i++)
2568 mmc->capacity_gp[i] = 0;
2569
2570 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2571 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2572
2573#if CONFIG_IS_ENABLED(MMC_WRITE)
2574 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2575 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2576#endif
2577
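	/* Program the DSR if the card supports it and a value has been set */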
2578 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2579 cmd.cmdidx = MMC_CMD_SET_DSR;
2580 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2581 cmd.resp_type = MMC_RSP_NONE;
2582 if (mmc_send_cmd(mmc, &cmd, NULL))
2583 pr_warn("MMC: SET_DSR failed\n");
2584 }
2585
	/* Select the card and put it into Transfer State */
2587 if (!mmc_host_is_spi(mmc)) {
2588 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2589 cmd.resp_type = MMC_RSP_R1;
2590 cmd.cmdarg = mmc->rca << 16;
2591 err = mmc_send_cmd(mmc, &cmd, NULL);
2592
2593 if (err)
2594 return err;
2595 }
2596
	/*
	 * For SD, the erase group is always one sector
	 */
2600#if CONFIG_IS_ENABLED(MMC_WRITE)
2601 mmc->erase_grp_size = 1;
2602#endif
2603 mmc->part_config = MMCPART_NOAVAILABLE;
2604
2605 err = mmc_startup_v4(mmc);
2606 if (err)
2607 return err;
2608
2609 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2610 if (err)
2611 return err;
2612
2613#if CONFIG_IS_ENABLED(MMC_TINY)
2614 mmc_set_clock(mmc, mmc->legacy_speed, false);
2615 mmc_select_mode(mmc, MMC_LEGACY);
2616 mmc_set_bus_width(mmc, 1);
2617#else
2618 if (IS_SD(mmc)) {
2619 err = sd_get_capabilities(mmc);
2620 if (err)
2621 return err;
2622 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2623 } else {
2624 err = mmc_get_capabilities(mmc);
2625 if (err)
2626 return err;
2627 err = mmc_select_mode_and_width(mmc, mmc->card_caps);
2628 }
2629#endif
2630 if (err)
2631 return err;
2632
2633 mmc->best_mode = mmc->selected_mode;
2634
	/* Fix the block length for DDR mode */
2636 if (mmc->ddr_mode) {
2637 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2638#if CONFIG_IS_ENABLED(MMC_WRITE)
2639 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2640#endif
2641 }
2642
	/* Fill in the block device description */
2644 bdesc = mmc_get_blk_desc(mmc);
2645 bdesc->lun = 0;
2646 bdesc->hwpart = 0;
2647 bdesc->type = 0;
2648 bdesc->blksz = mmc->read_bl_len;
2649 bdesc->log2blksz = LOG2(bdesc->blksz);
2650 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2651#if !defined(CONFIG_SPL_BUILD) || \
2652 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2653 !CONFIG_IS_ENABLED(USE_TINY_PRINTF))
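	/* Build the vendor, product and revision strings from the CID */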
2654 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2655 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2656 (mmc->cid[3] >> 16) & 0xffff);
2657 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2658 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2659 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2660 (mmc->cid[2] >> 24) & 0xff);
2661 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2662 (mmc->cid[2] >> 16) & 0xf);
2663#else
2664 bdesc->vendor[0] = 0;
2665 bdesc->product[0] = 0;
2666 bdesc->revision[0] = 0;
2667#endif
2668
2669#if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
2670 part_init(bdesc);
2671#endif
2672
2673 return 0;
2674}
2675
2676static int mmc_send_if_cond(struct mmc *mmc)
2677{
2678 struct mmc_cmd cmd;
2679 int err;
2680
2681 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2682
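	/* Set VHS (2.7-3.6 V) if the host supports it, plus the 0xAA check pattern */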
2683 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2684 cmd.resp_type = MMC_RSP_R7;
2685
2686 err = mmc_send_cmd(mmc, &cmd, NULL);
2687
2688 if (err)
2689 return err;
2690
2691 if ((cmd.response[0] & 0xff) != 0xaa)
2692 return -EOPNOTSUPP;
2693 else
2694 mmc->version = SD_VERSION_2;
2695
2696 return 0;
2697}
2698
2699#if !CONFIG_IS_ENABLED(DM_MMC)
2700
2701__weak void board_mmc_power_init(void)
2702{
2703}
2704#endif
2705
2706static int mmc_power_init(struct mmc *mmc)
2707{
2708#if CONFIG_IS_ENABLED(DM_MMC)
2709#if CONFIG_IS_ENABLED(DM_REGULATOR)
2710 int ret;
2711
2712 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2713 &mmc->vmmc_supply);
2714 if (ret)
2715 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2716
2717 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2718 &mmc->vqmmc_supply);
2719 if (ret)
2720 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2721#endif
2722#else
	/*
	 * Without driver model, fall back to board code; with driver model a
	 * regulator should be used instead (see above).
	 */
2727 board_mmc_power_init();
2728#endif
2729 return 0;
2730}
2731
/*
 * Put the host in its initial state:
 * - turn on Vdd (card power supply)
 * - configure the bus width and clock to default values
 */
2737static void mmc_set_initial_state(struct mmc *mmc)
2738{
2739 int err;
2740
	/* First try 3.3 V; if that fails, fall back to 1.8 V */
2742 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2743 if (err != 0)
2744 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2745 if (err != 0)
2746 pr_warn("mmc: failed to set signal voltage\n");
2747
2748 mmc_select_mode(mmc, MMC_LEGACY);
2749 mmc_set_bus_width(mmc, 1);
2750 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
2751}
2752
2753static int mmc_power_on(struct mmc *mmc)
2754{
2755#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2756 if (mmc->vmmc_supply) {
2757 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2758
2759 if (ret && ret != -EACCES) {
2760 printf("Error enabling VMMC supply : %d\n", ret);
2761 return ret;
2762 }
2763 }
2764#endif
2765 return 0;
2766}
2767
2768static int mmc_power_off(struct mmc *mmc)
2769{
2770 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
2771#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2772 if (mmc->vmmc_supply) {
2773 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2774
2775 if (ret && ret != -EACCES) {
2776 pr_debug("Error disabling VMMC supply : %d\n", ret);
2777 return ret;
2778 }
2779 }
2780#endif
2781 return 0;
2782}
2783
2784static int mmc_power_cycle(struct mmc *mmc)
2785{
2786 int ret;
2787
2788 ret = mmc_power_off(mmc);
2789 if (ret)
2790 return ret;
2791
2792 ret = mmc_host_power_cycle(mmc);
2793 if (ret)
2794 return ret;
2795
	/*
	 * The SD spec recommends at least 1 ms of delay; wait 2 ms to be on
	 * the safe side.
	 */
2800 udelay(2000);
2801 return mmc_power_on(mmc);
2802}
2803
2804int mmc_get_op_cond(struct mmc *mmc, bool quiet)
2805{
2806 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2807 int err;
2808
2809 if (mmc->has_init)
2810 return 0;
2811
2812 err = mmc_power_init(mmc);
2813 if (err)
2814 return err;
2815
2816#ifdef CONFIG_MMC_QUIRKS
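	/* Enable the known-safe command retry quirks by default */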
2817 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2818 MMC_QUIRK_RETRY_SEND_CID |
2819 MMC_QUIRK_RETRY_APP_CMD;
2820#endif
2821
2822 err = mmc_power_cycle(mmc);
2823 if (err) {
		/*
		 * If power cycling is not supported, do not try to use the
		 * UHS modes, because we would not be able to recover from an
		 * error during UHS initialization.
		 */
2829 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2830 uhs_en = false;
2831 mmc->host_caps &= ~UHS_CAPS;
2832 err = mmc_power_on(mmc);
2833 }
2834 if (err)
2835 return err;
2836
2837#if CONFIG_IS_ENABLED(DM_MMC)
	/*
	 * Re-initialization is needed to clear the old configuration when
	 * rescanning the card.
	 */
2842 err = mmc_reinit(mmc);
2843#else
	/* mmc_start_init() made sure this is not NULL */
2845 err = mmc->cfg->ops->init(mmc);
2846#endif
2847 if (err)
2848 return err;
2849 mmc->ddr_mode = 0;
2850
2851retry:
2852 mmc_set_initial_state(mmc);
2853
	/* Reset the card */
2855 err = mmc_go_idle(mmc);
2856
2857 if (err)
2858 return err;
2859
	/* The internal partition is reset to the user partition (0) by every CMD0 */
2861 mmc_get_blk_desc(mmc)->hwpart = 0;
2862
	/* Test for SD version 2 */
2864 err = mmc_send_if_cond(mmc);
2865
	/* Now try to get the SD card's operating condition */
2867 err = sd_send_op_cond(mmc, uhs_en);
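	/*
	 * If the SD init failed with UHS enabled, power cycle the card and
	 * retry without UHS.
	 */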
2868 if (err && uhs_en) {
2869 uhs_en = false;
2870 mmc_power_cycle(mmc);
2871 goto retry;
2872 }
2873
	/* If the command timed out, check for an (e)MMC card instead */
2875 if (err == -ETIMEDOUT) {
2876 err = mmc_send_op_cond(mmc);
2877
2878 if (err) {
2879#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2880 if (!quiet)
2881 pr_err("Card did not respond to voltage select! : %d\n", err);
2882#endif
2883 return -EOPNOTSUPP;
2884 }
2885 }
2886
2887 return err;
2888}
2889
2890int mmc_start_init(struct mmc *mmc)
2891{
2892 bool no_card;
2893 int err = 0;
2894
	/*
	 * All hosts are capable of 1-bit bus width and of using the legacy
	 * timings.
	 */
2899 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
2900 MMC_MODE_1BIT;
2901
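	/* If the user requested a specific speed mode, restrict the host to it */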
2902 if (IS_ENABLED(CONFIG_MMC_SPEED_MODE_SET)) {
2903 if (mmc->user_speed_mode != MMC_MODES_END) {
2904 int i;
2905
2906 if (mmc->host_caps & MMC_CAP(mmc->user_speed_mode)) {
				/* Remove all other speed capabilities */
2908 for (i = MMC_LEGACY; i < MMC_MODES_END; i++)
2909 mmc->host_caps &= ~MMC_CAP(i);
2910 mmc->host_caps |= (MMC_CAP(mmc->user_speed_mode)
2911 | MMC_CAP(MMC_LEGACY) |
2912 MMC_MODE_1BIT);
2913 } else {
2914 pr_err("bus_mode requested is not supported\n");
2915 return -EINVAL;
2916 }
2917 }
2918 }
2919#if CONFIG_IS_ENABLED(DM_MMC)
2920 mmc_deferred_probe(mmc);
2921#endif
2922#if !defined(CONFIG_MMC_BROKEN_CD)
2923 no_card = mmc_getcd(mmc) == 0;
2924#else
2925 no_card = 0;
2926#endif
2927#if !CONFIG_IS_ENABLED(DM_MMC)
	/* We pretend there is no card when init is NULL */
2929 no_card = no_card || (mmc->cfg->ops->init == NULL);
2930#endif
2931 if (no_card) {
2932 mmc->has_init = 0;
2933#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2934 pr_err("MMC: no card present\n");
2935#endif
2936 return -ENOMEDIUM;
2937 }
2938
2939 err = mmc_get_op_cond(mmc, false);
2940
2941 if (!err)
2942 mmc->init_in_progress = 1;
2943
2944 return err;
2945}
2946
2947static int mmc_complete_init(struct mmc *mmc)
2948{
2949 int err = 0;
2950
2951 mmc->init_in_progress = 0;
2952 if (mmc->op_cond_pending)
2953 err = mmc_complete_op_cond(mmc);
2954
2955 if (!err)
2956 err = mmc_startup(mmc);
2957 if (err)
2958 mmc->has_init = 0;
2959 else
2960 mmc->has_init = 1;
2961 return err;
2962}
2963
2964int mmc_init(struct mmc *mmc)
2965{
2966 int err = 0;
2967 __maybe_unused ulong start;
2968#if CONFIG_IS_ENABLED(DM_MMC)
2969 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2970
2971 upriv->mmc = mmc;
2972#endif
2973 if (mmc->has_init)
2974 return 0;
2975
2976 start = get_timer(0);
2977
2978 if (!mmc->init_in_progress)
2979 err = mmc_start_init(mmc);
2980
2981 if (!err)
2982 err = mmc_complete_init(mmc);
2983 if (err)
2984 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2985
2986 return err;
2987}
2988
2989#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2990 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2991 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
2992int mmc_deinit(struct mmc *mmc)
2993{
2994 u32 caps_filtered;
2995
2996 if (!mmc->has_init)
2997 return 0;
2998
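	/* Switch the card down from UHS/HS200/HS400 before handing it over */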
2999 if (IS_SD(mmc)) {
3000 caps_filtered = mmc->card_caps &
3001 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
3002 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
3003 MMC_CAP(UHS_SDR104));
3004
3005 return sd_select_mode_and_width(mmc, caps_filtered);
3006 } else {
3007 caps_filtered = mmc->card_caps &
3008 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400) | MMC_CAP(MMC_HS_400_ES));
3009
3010 return mmc_select_mode_and_width(mmc, caps_filtered);
3011 }
3012}
3013#endif
3014
3015int mmc_set_dsr(struct mmc *mmc, u16 val)
3016{
3017 mmc->dsr = val;
3018 return 0;
3019}
3020
/* CPU-specific MMC initializations */
3022__weak int cpu_mmc_init(struct bd_info *bis)
3023{
3024 return -1;
3025}
3026
/* Board-specific MMC initializations */
3028__weak int board_mmc_init(struct bd_info *bis)
3029{
3030 return -1;
3031}
3032
3033void mmc_set_preinit(struct mmc *mmc, int preinit)
3034{
3035 mmc->preinit = preinit;
3036}
3037
3038#if CONFIG_IS_ENABLED(DM_MMC)
3039static int mmc_probe(struct bd_info *bis)
3040{
3041 int ret, i;
3042 struct uclass *uc;
3043 struct udevice *dev;
3044
3045 ret = uclass_get(UCLASS_MMC, &uc);
3046 if (ret)
3047 return ret;
3048
	/*
	 * Try to add the devices in sequence order. Driver model should
	 * really allow holes, but the current MMC list does not, so
	 * requesting 0, 1, 3 yields 0, 1, 2.
	 */
3054 for (i = 0; ; i++) {
3055 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
3056 if (ret == -ENODEV)
3057 break;
3058 }
3059 uclass_foreach_dev(dev, uc) {
3060 ret = device_probe(dev);
3061 if (ret)
3062 pr_err("%s - probe failed: %d\n", dev->name, ret);
3063 }
3064
3065 return 0;
3066}
3067#else
3068static int mmc_probe(struct bd_info *bis)
3069{
3070 if (board_mmc_init(bis) < 0)
3071 cpu_mmc_init(bis);
3072
3073 return 0;
3074}
3075#endif
3076
3077int mmc_initialize(struct bd_info *bis)
3078{
3079 static int initialized = 0;
	int ret;

	if (initialized)	/* Avoid initializing mmc multiple times */
3082 return 0;
3083 initialized = 1;
3084
3085#if !CONFIG_IS_ENABLED(BLK)
3086#if !CONFIG_IS_ENABLED(MMC_TINY)
3087 mmc_list_init();
3088#endif
3089#endif
3090 ret = mmc_probe(bis);
3091 if (ret)
3092 return ret;
3093
3094#ifndef CONFIG_SPL_BUILD
3095 print_mmc_devices(',');
3096#endif
3097
3098 mmc_do_preinit();
3099 return 0;
3100}
3101
3102#if CONFIG_IS_ENABLED(DM_MMC)
3103int mmc_init_device(int num)
3104{
3105 struct udevice *dev;
3106 struct mmc *m;
3107 int ret;
3108
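	/* Find the MMC device by sequence number, probing it if necessary */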
3109 if (uclass_get_device_by_seq(UCLASS_MMC, num, &dev)) {
3110 ret = uclass_get_device(UCLASS_MMC, num, &dev);
3111 if (ret)
3112 return ret;
3113 }
3114
3115 m = mmc_get_mmc_dev(dev);
3116 if (!m)
3117 return 0;
3118
	/* Initialise the user-selected speed mode to its unset value */
3120 m->user_speed_mode = MMC_MODES_END;
3121
3122 if (m->preinit)
3123 mmc_start_init(m);
3124
3125 return 0;
3126}
3127#endif
3128
3129#ifdef CONFIG_CMD_BKOPS_ENABLE
3130int mmc_set_bkops_enable(struct mmc *mmc, bool autobkops, bool enable)
3131{
3132 int err;
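	/* BKOPS_EN bit 0 enables manual, bit 1 enables auto background ops */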
3133 u32 bit = autobkops ? BIT(1) : BIT(0);
3134 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
3135
3136 err = mmc_send_ext_csd(mmc, ext_csd);
3137 if (err) {
3138 puts("Could not get ext_csd register values\n");
3139 return err;
3140 }
3141
3142 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
3143 puts("Background operations not supported on device\n");
3144 return -EMEDIUMTYPE;
3145 }
3146
3147 if (enable && (ext_csd[EXT_CSD_BKOPS_EN] & bit)) {
3148 puts("Background operations already enabled\n");
3149 return 0;
3150 }
3151
3152 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN,
3153 enable ? bit : 0);
3154 if (err) {
3155 printf("Failed to %sable manual background operations\n",
3156 enable ? "en" : "dis");
3157 return err;
3158 }
3159
3160 printf("%sabled %s background operations\n",
3161 enable ? "En" : "Dis", autobkops ? "auto" : "manual");
3162
3163 return 0;
3164}
3165#endif
3166
3167__weak int mmc_get_env_dev(void)
3168{
3169#ifdef CONFIG_SYS_MMC_ENV_DEV
3170 return CONFIG_SYS_MMC_ENV_DEV;
3171#else
3172 return 0;
3173#endif
3174}
3175