#include <config.h>
#include <common.h>
#include <command.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <errno.h>
#include <mmc.h>
#include <part.h>
#include <power/regulator.h>
#include <malloc.h>
#include <memalign.h>
#include <linux/list.h>
#include <div64.h>
#include "mmc_private.h"

static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);
static int mmc_power_cycle(struct mmc *mmc);
static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps);
27
28#if CONFIG_IS_ENABLED(MMC_TINY)
29static struct mmc mmc_static;
30struct mmc *find_mmc_device(int dev_num)
31{
32 return &mmc_static;
33}
34
35void mmc_do_preinit(void)
36{
37 struct mmc *m = &mmc_static;
38#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
39 mmc_set_preinit(m, 1);
40#endif
41 if (m->preinit)
42 mmc_start_init(m);
43}
44
45struct blk_desc *mmc_get_blk_desc(struct mmc *mmc)
46{
47 return &mmc->block_dev;
48}
49#endif
50
51#if !CONFIG_IS_ENABLED(DM_MMC)
52
53#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
54static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout)
55{
56 return -ENOSYS;
57}
58#endif
59
60__weak int board_mmc_getwp(struct mmc *mmc)
61{
62 return -1;
63}
64
65int mmc_getwp(struct mmc *mmc)
66{
67 int wp;
68
69 wp = board_mmc_getwp(mmc);
70
71 if (wp < 0) {
72 if (mmc->cfg->ops->getwp)
73 wp = mmc->cfg->ops->getwp(mmc);
74 else
75 wp = 0;
76 }
77
78 return wp;
79}
80
81__weak int board_mmc_getcd(struct mmc *mmc)
82{
83 return -1;
84}
85#endif
86
87#ifdef CONFIG_MMC_TRACE
88void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
89{
90 printf("CMD_SEND:%d\n", cmd->cmdidx);
91 printf("\t\tARG\t\t\t 0x%08X\n", cmd->cmdarg);
92}
93
94void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
95{
96 int i;
97 u8 *ptr;
98
99 if (ret) {
100 printf("\t\tRET\t\t\t %d\n", ret);
101 } else {
102 switch (cmd->resp_type) {
103 case MMC_RSP_NONE:
104 printf("\t\tMMC_RSP_NONE\n");
105 break;
106 case MMC_RSP_R1:
107 printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08X \n",
108 cmd->response[0]);
109 break;
110 case MMC_RSP_R1b:
111 printf("\t\tMMC_RSP_R1b\t\t 0x%08X \n",
112 cmd->response[0]);
113 break;
114 case MMC_RSP_R2:
115 printf("\t\tMMC_RSP_R2\t\t 0x%08X \n",
116 cmd->response[0]);
117 printf("\t\t \t\t 0x%08X \n",
118 cmd->response[1]);
119 printf("\t\t \t\t 0x%08X \n",
120 cmd->response[2]);
121 printf("\t\t \t\t 0x%08X \n",
122 cmd->response[3]);
123 printf("\n");
124 printf("\t\t\t\t\tDUMPING DATA\n");
125 for (i = 0; i < 4; i++) {
126 int j;
127 printf("\t\t\t\t\t%03d - ", i*4);
128 ptr = (u8 *)&cmd->response[i];
129 ptr += 3;
130 for (j = 0; j < 4; j++)
131 printf("%02X ", *ptr--);
132 printf("\n");
133 }
134 break;
135 case MMC_RSP_R3:
136 printf("\t\tMMC_RSP_R3,4\t\t 0x%08X \n",
137 cmd->response[0]);
138 break;
139 default:
140 printf("\t\tERROR MMC rsp not supported\n");
141 break;
142 }
143 }
144}
145
146void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
147{
148 int status;
149
150 status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
151 printf("CURR STATE:%d\n", status);
152}
153#endif
154
155#if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
156const char *mmc_mode_name(enum bus_mode mode)
157{
158 static const char *const names[] = {
159 [MMC_LEGACY] = "MMC legacy",
160 [SD_LEGACY] = "SD Legacy",
161 [MMC_HS] = "MMC High Speed (26MHz)",
162 [SD_HS] = "SD High Speed (50MHz)",
163 [UHS_SDR12] = "UHS SDR12 (25MHz)",
164 [UHS_SDR25] = "UHS SDR25 (50MHz)",
165 [UHS_SDR50] = "UHS SDR50 (100MHz)",
166 [UHS_SDR104] = "UHS SDR104 (208MHz)",
167 [UHS_DDR50] = "UHS DDR50 (50MHz)",
168 [MMC_HS_52] = "MMC High Speed (52MHz)",
169 [MMC_DDR_52] = "MMC DDR52 (52MHz)",
170 [MMC_HS_200] = "HS200 (200MHz)",
171 };
172
173 if (mode >= MMC_MODES_END)
174 return "Unknown mode";
175 else
176 return names[mode];
177}
178#endif
179
180static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
181{
182 static const int freqs[] = {
183 [MMC_LEGACY] = 25000000,
184 [SD_LEGACY] = 25000000,
185 [MMC_HS] = 26000000,
186 [SD_HS] = 50000000,
187 [MMC_HS_52] = 52000000,
188 [MMC_DDR_52] = 52000000,
189 [UHS_SDR12] = 25000000,
190 [UHS_SDR25] = 50000000,
191 [UHS_SDR50] = 100000000,
192 [UHS_DDR50] = 50000000,
193 [UHS_SDR104] = 208000000,
194 [MMC_HS_200] = 200000000,
195 };
196
197 if (mode == MMC_LEGACY)
198 return mmc->legacy_speed;
199 else if (mode >= MMC_MODES_END)
200 return 0;
201 else
202 return freqs[mode];
203}
204
205static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
206{
207 mmc->selected_mode = mode;
208 mmc->tran_speed = mmc_mode2freq(mmc, mode);
209 mmc->ddr_mode = mmc_is_mode_ddr(mode);
210 pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
211 mmc->tran_speed / 1000000);
212 return 0;
213}
214
215#if !CONFIG_IS_ENABLED(DM_MMC)
216int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
217{
218 int ret;
219
220 mmmc_trace_before_send(mmc, cmd);
221 ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
222 mmmc_trace_after_send(mmc, cmd, ret);
223
224 return ret;
225}
226#endif
227
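/*
 * Poll the card with CMD13 (SEND_STATUS) until it reports ready-for-data
 * and has left the programming state, or until roughly @timeout
 * milliseconds of polling (1 ms per loop) have elapsed.
 */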
228int mmc_send_status(struct mmc *mmc, int timeout)
229{
230 struct mmc_cmd cmd;
231 int err, retries = 5;
232
233 cmd.cmdidx = MMC_CMD_SEND_STATUS;
234 cmd.resp_type = MMC_RSP_R1;
235 if (!mmc_host_is_spi(mmc))
236 cmd.cmdarg = mmc->rca << 16;
237
238 while (1) {
239 err = mmc_send_cmd(mmc, &cmd, NULL);
240 if (!err) {
241 if ((cmd.response[0] & MMC_STATUS_RDY_FOR_DATA) &&
242 (cmd.response[0] & MMC_STATUS_CURR_STATE) !=
243 MMC_STATE_PRG)
244 break;
245
246 if (cmd.response[0] & MMC_STATUS_MASK) {
247#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
248 pr_err("Status Error: 0x%08X\n",
249 cmd.response[0]);
250#endif
251 return -ECOMM;
252 }
253 } else if (--retries < 0)
254 return err;
255
256 if (timeout-- <= 0)
257 break;
258
259 udelay(1000);
260 }
261
262 mmc_trace_state(mmc, &cmd);
263 if (timeout <= 0) {
264#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
265 pr_err("Timeout waiting card ready\n");
266#endif
267 return -ETIMEDOUT;
268 }
269
270 return 0;
271}
272
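/*
 * Set the block length for subsequent transfers with CMD16. The command is
 * skipped in DDR mode, where the block length is fixed.
 */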
273int mmc_set_blocklen(struct mmc *mmc, int len)
274{
275 struct mmc_cmd cmd;
276 int err;
277
278 if (mmc->ddr_mode)
279 return 0;
280
281 cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
282 cmd.resp_type = MMC_RSP_R1;
283 cmd.cmdarg = len;
284
285 err = mmc_send_cmd(mmc, &cmd, NULL);
286
287#ifdef CONFIG_MMC_QUIRKS
288 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SET_BLOCKLEN)) {
289 int retries = 4;

		/*
		 * It has been seen that SET_BLOCKLEN may fail on the first
		 * attempt, so retry a few times before giving up.
		 */
294 do {
295 err = mmc_send_cmd(mmc, &cmd, NULL);
296 if (!err)
297 break;
298 } while (retries--);
299 }
300#endif
301
302 return err;
303}
304
305#ifdef MMC_SUPPORTS_TUNING
306static const u8 tuning_blk_pattern_4bit[] = {
307 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
308 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
309 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
310 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
311 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
312 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
313 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
314 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
315};
316
317static const u8 tuning_blk_pattern_8bit[] = {
318 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
319 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
320 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
321 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
322 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
323 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
324 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
325 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
326 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
327 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
328 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
329 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
330 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
331 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
332 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
333 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
334};
335
336int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
337{
338 struct mmc_cmd cmd;
339 struct mmc_data data;
340 const u8 *tuning_block_pattern;
341 int size, err;
342
343 if (mmc->bus_width == 8) {
344 tuning_block_pattern = tuning_blk_pattern_8bit;
345 size = sizeof(tuning_blk_pattern_8bit);
346 } else if (mmc->bus_width == 4) {
347 tuning_block_pattern = tuning_blk_pattern_4bit;
348 size = sizeof(tuning_blk_pattern_4bit);
349 } else {
350 return -EINVAL;
351 }
352
353 ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);
354
355 cmd.cmdidx = opcode;
356 cmd.cmdarg = 0;
357 cmd.resp_type = MMC_RSP_R1;
358
359 data.dest = (void *)data_buf;
360 data.blocks = 1;
361 data.blocksize = size;
362 data.flags = MMC_DATA_READ;
363
364 err = mmc_send_cmd(mmc, &cmd, &data);
365 if (err)
366 return err;
367
368 if (memcmp(data_buf, tuning_block_pattern, size))
369 return -EIO;
370
371 return 0;
372}
373#endif
374
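/*
 * Read @blkcnt blocks starting at @start into @dst using CMD17/CMD18.
 * Byte addressing is used for standard-capacity cards, block addressing
 * for high-capacity cards. Returns the number of blocks read, or 0 on
 * error.
 */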
375static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
376 lbaint_t blkcnt)
377{
378 struct mmc_cmd cmd;
379 struct mmc_data data;
380
381 if (blkcnt > 1)
382 cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
383 else
384 cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;
385
386 if (mmc->high_capacity)
387 cmd.cmdarg = start;
388 else
389 cmd.cmdarg = start * mmc->read_bl_len;
390
391 cmd.resp_type = MMC_RSP_R1;
392
393 data.dest = dst;
394 data.blocks = blkcnt;
395 data.blocksize = mmc->read_bl_len;
396 data.flags = MMC_DATA_READ;
397
398 if (mmc_send_cmd(mmc, &cmd, &data))
399 return 0;
400
401 if (blkcnt > 1) {
402 cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
403 cmd.cmdarg = 0;
404 cmd.resp_type = MMC_RSP_R1b;
405 if (mmc_send_cmd(mmc, &cmd, NULL)) {
406#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
407 pr_err("mmc fail to send stop cmd\n");
408#endif
409 return 0;
410 }
411 }
412
413 return blkcnt;
414}
415
416#if CONFIG_IS_ENABLED(BLK)
417ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
418#else
419ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
420 void *dst)
421#endif
422{
423#if CONFIG_IS_ENABLED(BLK)
424 struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
425#endif
426 int dev_num = block_dev->devnum;
427 int err;
428 lbaint_t cur, blocks_todo = blkcnt;
429
430 if (blkcnt == 0)
431 return 0;
432
433 struct mmc *mmc = find_mmc_device(dev_num);
434 if (!mmc)
435 return 0;
436
437 if (CONFIG_IS_ENABLED(MMC_TINY))
438 err = mmc_switch_part(mmc, block_dev->hwpart);
439 else
440 err = blk_dselect_hwpart(block_dev, block_dev->hwpart);
441
442 if (err < 0)
443 return 0;
444
445 if ((start + blkcnt) > block_dev->lba) {
446#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
447 pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
448 start + blkcnt, block_dev->lba);
449#endif
450 return 0;
451 }
452
453 if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
454 pr_debug("%s: Failed to set blocklen\n", __func__);
455 return 0;
456 }
457
458 do {
459 cur = (blocks_todo > mmc->cfg->b_max) ?
460 mmc->cfg->b_max : blocks_todo;
461 if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
462 pr_debug("%s: Failed to read blocks\n", __func__);
463 return 0;
464 }
465 blocks_todo -= cur;
466 start += cur;
467 dst += cur * mmc->read_bl_len;
468 } while (blocks_todo > 0);
469
470 return blkcnt;
471}
472
473static int mmc_go_idle(struct mmc *mmc)
474{
475 struct mmc_cmd cmd;
476 int err;
477
478 udelay(1000);
479
480 cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
481 cmd.cmdarg = 0;
482 cmd.resp_type = MMC_RSP_NONE;
483
484 err = mmc_send_cmd(mmc, &cmd, NULL);
485
486 if (err)
487 return err;
488
489 udelay(2000);
490
491 return 0;
492}
493
494#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
495static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
496{
497 struct mmc_cmd cmd;
498 int err = 0;

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
504 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
505 return mmc_set_signal_voltage(mmc, signal_voltage);
506
507 cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
508 cmd.cmdarg = 0;
509 cmd.resp_type = MMC_RSP_R1;
510
511 err = mmc_send_cmd(mmc, &cmd, NULL);
512 if (err)
513 return err;
514
515 if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
516 return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait up to 100us to be sure.
	 */
522 err = mmc_wait_dat0(mmc, 0, 100);
523 if (err == -ENOSYS)
524 udelay(100);
525 else if (err)
526 return -ETIMEDOUT;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec.
	 */
532 mmc_set_clock(mmc, mmc->clock, true);
533
534 err = mmc_set_signal_voltage(mmc, signal_voltage);
535 if (err)
536 return err;

	/* keep the clock gated for at least 10 ms, though the spec only asks for 5 ms */
539 mdelay(10);
540 mmc_set_clock(mmc, mmc->clock, false);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low. Wait for at least 1 ms according to spec.
	 */
546 err = mmc_wait_dat0(mmc, 1, 1000);
547 if (err == -ENOSYS)
548 udelay(1000);
549 else if (err)
550 return -ETIMEDOUT;
551
552 return 0;
553}
554#endif
555
556static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
557{
558 int timeout = 1000;
559 int err;
560 struct mmc_cmd cmd;
561
562 while (1) {
563 cmd.cmdidx = MMC_CMD_APP_CMD;
564 cmd.resp_type = MMC_RSP_R1;
565 cmd.cmdarg = 0;
566
567 err = mmc_send_cmd(mmc, &cmd, NULL);
568
569 if (err)
570 return err;
571
572 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
573 cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, some controllers
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low-voltage SD cards is not yet
		 * specified.
		 */
582 cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
583 (mmc->cfg->voltages & 0xff8000);
584
585 if (mmc->version == SD_VERSION_2)
586 cmd.cmdarg |= OCR_HCS;
587
588 if (uhs_en)
589 cmd.cmdarg |= OCR_S18R;
590
591 err = mmc_send_cmd(mmc, &cmd, NULL);
592
593 if (err)
594 return err;
595
596 if (cmd.response[0] & OCR_BUSY)
597 break;
598
599 if (timeout-- <= 0)
600 return -EOPNOTSUPP;
601
602 udelay(1000);
603 }
604
605 if (mmc->version != SD_VERSION_2)
606 mmc->version = SD_VERSION_1_0;
607
608 if (mmc_host_is_spi(mmc)) {
609 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
610 cmd.resp_type = MMC_RSP_R3;
611 cmd.cmdarg = 0;
612
613 err = mmc_send_cmd(mmc, &cmd, NULL);
614
615 if (err)
616 return err;
617 }
618
619 mmc->ocr = cmd.response[0];
620
621#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
622 if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
623 == 0x41000000) {
624 err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
625 if (err)
626 return err;
627 }
628#endif
629
630 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
631 mmc->rca = 0;
632
633 return 0;
634}
635
636static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
637{
638 struct mmc_cmd cmd;
639 int err;
640
641 cmd.cmdidx = MMC_CMD_SEND_OP_COND;
642 cmd.resp_type = MMC_RSP_R3;
643 cmd.cmdarg = 0;
644 if (use_arg && !mmc_host_is_spi(mmc))
645 cmd.cmdarg = OCR_HCS |
646 (mmc->cfg->voltages &
647 (mmc->ocr & OCR_VOLTAGE_MASK)) |
648 (mmc->ocr & OCR_ACCESS_MODE);
649
650 err = mmc_send_cmd(mmc, &cmd, NULL);
651 if (err)
652 return err;
653 mmc->ocr = cmd.response[0];
654 return 0;
655}
656
657static int mmc_send_op_cond(struct mmc *mmc)
658{
659 int err, i;

	/* Some cards seem to need this */
	mmc_go_idle(mmc);

	/* Ask the card for its capabilities */
665 for (i = 0; i < 2; i++) {
666 err = mmc_send_op_cond_iter(mmc, i != 0);
667 if (err)
668 return err;

		/* exit if not busy (flag seems to be inverted) */
671 if (mmc->ocr & OCR_BUSY)
672 break;
673 }
674 mmc->op_cond_pending = 1;
675 return 0;
676}
677
678static int mmc_complete_op_cond(struct mmc *mmc)
679{
680 struct mmc_cmd cmd;
681 int timeout = 1000;
682 uint start;
683 int err;
684
685 mmc->op_cond_pending = 0;
686 if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
688 mmc_go_idle(mmc);
689
690 start = get_timer(0);
691 while (1) {
692 err = mmc_send_op_cond_iter(mmc, 1);
693 if (err)
694 return err;
695 if (mmc->ocr & OCR_BUSY)
696 break;
697 if (get_timer(start) > timeout)
698 return -EOPNOTSUPP;
699 udelay(100);
700 }
701 }
702
703 if (mmc_host_is_spi(mmc)) {
704 cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
705 cmd.resp_type = MMC_RSP_R3;
706 cmd.cmdarg = 0;
707
708 err = mmc_send_cmd(mmc, &cmd, NULL);
709
710 if (err)
711 return err;
712
713 mmc->ocr = cmd.response[0];
714 }
715
716 mmc->version = MMC_VERSION_UNKNOWN;
717
718 mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
719 mmc->rca = 1;
720
721 return 0;
722}
723
724
725static int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
726{
727 struct mmc_cmd cmd;
728 struct mmc_data data;
729 int err;

	/* Read the 512-byte EXT_CSD register from the card */
732 cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
733 cmd.resp_type = MMC_RSP_R1;
734 cmd.cmdarg = 0;
735
736 data.dest = (char *)ext_csd;
737 data.blocks = 1;
738 data.blocksize = MMC_MAX_BLOCK_LEN;
739 data.flags = MMC_DATA_READ;
740
741 err = mmc_send_cmd(mmc, &cmd, &data);
742
743 return err;
744}
745
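/*
 * Write one byte of the EXT_CSD register with CMD6 (SWITCH) and wait for
 * the card to become ready again. The command is retried a few times
 * because some cards fail the first attempt.
 */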
746int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
747{
748 struct mmc_cmd cmd;
749 int timeout = 1000;
750 int retries = 3;
751 int ret;
752
753 cmd.cmdidx = MMC_CMD_SWITCH;
754 cmd.resp_type = MMC_RSP_R1b;
755 cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
756 (index << 16) |
757 (value << 8);
758
759 while (retries > 0) {
760 ret = mmc_send_cmd(mmc, &cmd, NULL);

		/* Wait for the card to finish the switch and be ready again */
763 if (!ret) {
764 ret = mmc_send_status(mmc, timeout);
765 return ret;
766 }
767
768 retries--;
769 }
770
	return ret;
}
774
775static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode)
776{
777 int err;
778 int speed_bits;
779
780 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
781
782 switch (mode) {
783 case MMC_HS:
784 case MMC_HS_52:
785 case MMC_DDR_52:
786 speed_bits = EXT_CSD_TIMING_HS;
787 break;
788#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
789 case MMC_HS_200:
790 speed_bits = EXT_CSD_TIMING_HS200;
791 break;
792#endif
793 case MMC_LEGACY:
794 speed_bits = EXT_CSD_TIMING_LEGACY;
795 break;
796 default:
797 return -EINVAL;
798 }
799 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
800 speed_bits);
801 if (err)
802 return err;
803
804 if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
		/* Now check to see that the switch worked */
806 err = mmc_send_ext_csd(mmc, test_csd);
807 if (err)
808 return err;

		/* No high-speed support */
811 if (!test_csd[EXT_CSD_HS_TIMING])
812 return -ENOTSUPP;
813 }
814
815 return 0;
816}
817
818static int mmc_get_capabilities(struct mmc *mmc)
819{
820 u8 *ext_csd = mmc->ext_csd;
821 char cardtype;
822
823 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);
824
825 if (mmc_host_is_spi(mmc))
826 return 0;

	/* Only version 4 supports high speed */
829 if (mmc->version < MMC_VERSION_4)
830 return 0;
831
832 if (!ext_csd) {
833 pr_err("No ext_csd found!\n");
834 return -ENOTSUPP;
835 }
836
837 mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;
838
839 cardtype = ext_csd[EXT_CSD_CARD_TYPE] & 0x3f;
840 mmc->cardtype = cardtype;
841
842#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
843 if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
844 EXT_CSD_CARD_TYPE_HS200_1_8V)) {
845 mmc->card_caps |= MMC_MODE_HS200;
846 }
847#endif
848 if (cardtype & EXT_CSD_CARD_TYPE_52) {
849 if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
850 mmc->card_caps |= MMC_MODE_DDR_52MHz;
851 mmc->card_caps |= MMC_MODE_HS_52MHz;
852 }
853 if (cardtype & EXT_CSD_CARD_TYPE_26)
854 mmc->card_caps |= MMC_MODE_HS;
855
856 return 0;
857}
858
859static int mmc_set_capacity(struct mmc *mmc, int part_num)
860{
861 switch (part_num) {
862 case 0:
863 mmc->capacity = mmc->capacity_user;
864 break;
865 case 1:
866 case 2:
867 mmc->capacity = mmc->capacity_boot;
868 break;
869 case 3:
870 mmc->capacity = mmc->capacity_rpmb;
871 break;
872 case 4:
873 case 5:
874 case 6:
875 case 7:
876 mmc->capacity = mmc->capacity_gp[part_num - 4];
877 break;
878 default:
879 return -1;
880 }
881
882 mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);
883
884 return 0;
885}
886
887#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
888static int mmc_boot_part_access_chk(struct mmc *mmc, unsigned int part_num)
889{
890 int forbidden = 0;
891 bool change = false;
892
893 if (part_num & PART_ACCESS_MASK)
894 forbidden = MMC_CAP(MMC_HS_200);
895
896 if (MMC_CAP(mmc->selected_mode) & forbidden) {
897 pr_debug("selected mode (%s) is forbidden for part %d\n",
898 mmc_mode_name(mmc->selected_mode), part_num);
899 change = true;
900 } else if (mmc->selected_mode != mmc->best_mode) {
901 pr_debug("selected mode is not optimal\n");
902 change = true;
903 }
904
905 if (change)
906 return mmc_select_mode_and_width(mmc,
907 mmc->card_caps & ~forbidden);
908
909 return 0;
910}
911#else
912static inline int mmc_boot_part_access_chk(struct mmc *mmc,
913 unsigned int part_num)
914{
915 return 0;
916}
917#endif
918
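/*
 * Select a hardware partition (user, boot, RPMB or GP) by updating the
 * PARTITION_ACCESS bits of PART_CONF, then refresh the block device
 * capacity to match the newly selected partition.
 */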
919int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
920{
921 int ret;
922
923 ret = mmc_boot_part_access_chk(mmc, part_num);
924 if (ret)
925 return ret;
926
927 ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
928 (mmc->part_config & ~PART_ACCESS_MASK)
929 | (part_num & PART_ACCESS_MASK));

	/*
	 * Set the capacity if the switch succeeded or was intended
	 * to return to representing the raw device.
	 */
935 if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
936 ret = mmc_set_capacity(mmc, part_num);
937 mmc_get_blk_desc(mmc)->hwpart = part_num;
938 }
939
940 return ret;
941}
942
943#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
944int mmc_hwpart_config(struct mmc *mmc,
945 const struct mmc_hwpart_conf *conf,
946 enum mmc_hwpart_conf_mode mode)
947{
948 u8 part_attrs = 0;
949 u32 enh_size_mult;
950 u32 enh_start_addr;
951 u32 gp_size_mult[4];
952 u32 max_enh_size_mult;
953 u32 tot_enh_size_mult = 0;
954 u8 wr_rel_set;
955 int i, pidx, err;
956 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
957
958 if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
959 return -EINVAL;
960
961 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
962 pr_err("eMMC >= 4.4 required for enhanced user data area\n");
963 return -EMEDIUMTYPE;
964 }
965
966 if (!(mmc->part_support & PART_SUPPORT)) {
967 pr_err("Card does not support partitioning\n");
968 return -EMEDIUMTYPE;
969 }
970
971 if (!mmc->hc_wp_grp_size) {
972 pr_err("Card does not define HC WP group size\n");
973 return -EMEDIUMTYPE;
974 }

	/* check partition alignment and total enhanced size */
977 if (conf->user.enh_size) {
978 if (conf->user.enh_size % mmc->hc_wp_grp_size ||
979 conf->user.enh_start % mmc->hc_wp_grp_size) {
980 pr_err("User data enhanced area not HC WP group "
981 "size aligned\n");
982 return -EINVAL;
983 }
984 part_attrs |= EXT_CSD_ENH_USR;
985 enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
986 if (mmc->high_capacity) {
987 enh_start_addr = conf->user.enh_start;
988 } else {
989 enh_start_addr = (conf->user.enh_start << 9);
990 }
991 } else {
992 enh_size_mult = 0;
993 enh_start_addr = 0;
994 }
995 tot_enh_size_mult += enh_size_mult;
996
997 for (pidx = 0; pidx < 4; pidx++) {
998 if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
999 pr_err("GP%i partition not HC WP group size "
1000 "aligned\n", pidx+1);
1001 return -EINVAL;
1002 }
1003 gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
1004 if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
1005 part_attrs |= EXT_CSD_ENH_GP(pidx);
1006 tot_enh_size_mult += gp_size_mult[pidx];
1007 }
1008 }
1009
1010 if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
1011 pr_err("Card does not support enhanced attribute\n");
1012 return -EMEDIUMTYPE;
1013 }
1014
1015 err = mmc_send_ext_csd(mmc, ext_csd);
1016 if (err)
1017 return err;
1018
1019 max_enh_size_mult =
1020 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
1021 (ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
1022 ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
1023 if (tot_enh_size_mult > max_enh_size_mult) {
1024 pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
1025 tot_enh_size_mult, max_enh_size_mult);
1026 return -EMEDIUMTYPE;
1027 }

	/*
	 * The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed.
	 */
1033 wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
1034 if (conf->user.wr_rel_change) {
1035 if (conf->user.wr_rel_set)
1036 wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
1037 else
1038 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
1039 }
1040 for (pidx = 0; pidx < 4; pidx++) {
1041 if (conf->gp_part[pidx].wr_rel_change) {
1042 if (conf->gp_part[pidx].wr_rel_set)
1043 wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
1044 else
1045 wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
1046 }
1047 }
1048
1049 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
1050 !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
1051 puts("Card does not support host controlled partition write "
1052 "reliability settings\n");
1053 return -EMEDIUMTYPE;
1054 }
1055
1056 if (ext_csd[EXT_CSD_PARTITION_SETTING] &
1057 EXT_CSD_PARTITION_SETTING_COMPLETED) {
1058 pr_err("Card already partitioned\n");
1059 return -EPERM;
1060 }
1061
1062 if (mode == MMC_HWPART_CONF_CHECK)
1063 return 0;

	/* Partitioning requires high-capacity size definitions */
1066 if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
1067 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1068 EXT_CSD_ERASE_GROUP_DEF, 1);
1069
1070 if (err)
1071 return err;
1072
1073 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;

1079 }

	/* all OK, write the configuration */
1082 for (i = 0; i < 4; i++) {
1083 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1084 EXT_CSD_ENH_START_ADDR+i,
1085 (enh_start_addr >> (i*8)) & 0xFF);
1086 if (err)
1087 return err;
1088 }
1089 for (i = 0; i < 3; i++) {
1090 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1091 EXT_CSD_ENH_SIZE_MULT+i,
1092 (enh_size_mult >> (i*8)) & 0xFF);
1093 if (err)
1094 return err;
1095 }
1096 for (pidx = 0; pidx < 4; pidx++) {
1097 for (i = 0; i < 3; i++) {
1098 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1099 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
1100 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
1101 if (err)
1102 return err;
1103 }
1104 }
1105 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1106 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
1107 if (err)
1108 return err;
1109
1110 if (mode == MMC_HWPART_CONF_SET)
1111 return 0;

	/*
	 * The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning.
	 */
1117 if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
1118 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1119 EXT_CSD_WR_REL_SET, wr_rel_set);
1120 if (err)
1121 return err;
1122 }

	/*
	 * Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the struct mmc.
	 */
1129 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1130 EXT_CSD_PARTITION_SETTING,
1131 EXT_CSD_PARTITION_SETTING_COMPLETED);
1132 if (err)
1133 return err;
1134
1135 return 0;
1136}
1137#endif
1138
1139#if !CONFIG_IS_ENABLED(DM_MMC)
1140int mmc_getcd(struct mmc *mmc)
1141{
1142 int cd;
1143
1144 cd = board_mmc_getcd(mmc);
1145
1146 if (cd < 0) {
1147 if (mmc->cfg->ops->getcd)
1148 cd = mmc->cfg->ops->getcd(mmc);
1149 else
1150 cd = 1;
1151 }
1152
1153 return cd;
1154}
1155#endif
1156
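/*
 * Issue CMD6 (SWITCH_FUNC) for a single function group: @mode selects
 * check (0) or set (1), @group the function group, @value the function,
 * and the 64-byte switch status is returned in @resp.
 */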
1157static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
1158{
1159 struct mmc_cmd cmd;
1160 struct mmc_data data;

	/* Switch the function in the requested group, leave the others unchanged */
1163 cmd.cmdidx = SD_CMD_SWITCH_FUNC;
1164 cmd.resp_type = MMC_RSP_R1;
1165 cmd.cmdarg = (mode << 31) | 0xffffff;
1166 cmd.cmdarg &= ~(0xf << (group * 4));
1167 cmd.cmdarg |= value << (group * 4);
1168
1169 data.dest = (char *)resp;
1170 data.blocksize = 64;
1171 data.blocks = 1;
1172 data.flags = MMC_DATA_READ;
1173
1174 return mmc_send_cmd(mmc, &cmd, &data);
1175}
1176
1177
1178static int sd_get_capabilities(struct mmc *mmc)
1179{
1180 int err;
1181 struct mmc_cmd cmd;
1182 ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
1183 ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
1184 struct mmc_data data;
1185 int timeout;
1186#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1187 u32 sd3_bus_mode;
1188#endif
1189
1190 mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(SD_LEGACY);
1191
1192 if (mmc_host_is_spi(mmc))
1193 return 0;

	/* Read the SCR to find out if this card supports higher speeds */
1196 cmd.cmdidx = MMC_CMD_APP_CMD;
1197 cmd.resp_type = MMC_RSP_R1;
1198 cmd.cmdarg = mmc->rca << 16;
1199
1200 err = mmc_send_cmd(mmc, &cmd, NULL);
1201
1202 if (err)
1203 return err;
1204
1205 cmd.cmdidx = SD_CMD_APP_SEND_SCR;
1206 cmd.resp_type = MMC_RSP_R1;
1207 cmd.cmdarg = 0;
1208
1209 timeout = 3;
1210
1211retry_scr:
1212 data.dest = (char *)scr;
1213 data.blocksize = 8;
1214 data.blocks = 1;
1215 data.flags = MMC_DATA_READ;
1216
1217 err = mmc_send_cmd(mmc, &cmd, &data);
1218
1219 if (err) {
1220 if (timeout--)
1221 goto retry_scr;
1222
1223 return err;
1224 }
1225
1226 mmc->scr[0] = __be32_to_cpu(scr[0]);
1227 mmc->scr[1] = __be32_to_cpu(scr[1]);
1228
1229 switch ((mmc->scr[0] >> 24) & 0xf) {
1230 case 0:
1231 mmc->version = SD_VERSION_1_0;
1232 break;
1233 case 1:
1234 mmc->version = SD_VERSION_1_10;
1235 break;
1236 case 2:
1237 mmc->version = SD_VERSION_2;
1238 if ((mmc->scr[0] >> 15) & 0x1)
1239 mmc->version = SD_VERSION_3;
1240 break;
1241 default:
1242 mmc->version = SD_VERSION_1_0;
1243 break;
1244 }
1245
1246 if (mmc->scr[0] & SD_DATA_4BIT)
1247 mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
1250 if (mmc->version == SD_VERSION_1_0)
1251 return 0;
1252
1253 timeout = 4;
1254 while (timeout--) {
1255 err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
1256 (u8 *)switch_status);
1257
1258 if (err)
1259 return err;

		/* The high-speed function is busy. Try again. */
1262 if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
1263 break;
1264 }

	/* Mark the card as high-speed capable if the switch status reports support */
1267 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1268 mmc->card_caps |= MMC_CAP(SD_HS);
1269
1270#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	/* Versions before 3.0 don't support UHS modes */
1272 if (mmc->version < SD_VERSION_3)
1273 return 0;
1274
1275 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1276 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1277 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1278 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1279 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1280 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1281 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1282 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1283 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1284 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1285 mmc->card_caps |= MMC_CAP(UHS_DDR50);
1286#endif
1287
1288 return 0;
1289}
1290
1291static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1292{
1293 int err;
1294
1295 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
1296 int speed;
1297
1298 switch (mode) {
1299 case SD_LEGACY:
1300 speed = UHS_SDR12_BUS_SPEED;
1301 break;
1302 case SD_HS:
1303 speed = HIGH_SPEED_BUS_SPEED;
1304 break;
1305#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1306 case UHS_SDR12:
1307 speed = UHS_SDR12_BUS_SPEED;
1308 break;
1309 case UHS_SDR25:
1310 speed = UHS_SDR25_BUS_SPEED;
1311 break;
1312 case UHS_SDR50:
1313 speed = UHS_SDR50_BUS_SPEED;
1314 break;
1315 case UHS_DDR50:
1316 speed = UHS_DDR50_BUS_SPEED;
1317 break;
1318 case UHS_SDR104:
1319 speed = UHS_SDR104_BUS_SPEED;
1320 break;
1321#endif
1322 default:
1323 return -EINVAL;
1324 }
1325
1326 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
1327 if (err)
1328 return err;
1329
1330 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
1331 return -ENOTSUPP;
1332
1333 return 0;
1334}
1335
1336static int sd_select_bus_width(struct mmc *mmc, int w)
1337{
1338 int err;
1339 struct mmc_cmd cmd;
1340
1341 if ((w != 4) && (w != 1))
1342 return -EINVAL;
1343
1344 cmd.cmdidx = MMC_CMD_APP_CMD;
1345 cmd.resp_type = MMC_RSP_R1;
1346 cmd.cmdarg = mmc->rca << 16;
1347
1348 err = mmc_send_cmd(mmc, &cmd, NULL);
1349 if (err)
1350 return err;
1351
1352 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1353 cmd.resp_type = MMC_RSP_R1;
1354 if (w == 4)
1355 cmd.cmdarg = 2;
1356 else if (w == 1)
1357 cmd.cmdarg = 0;
1358 err = mmc_send_cmd(mmc, &cmd, NULL);
1359 if (err)
1360 return err;
1361
1362 return 0;
1363}
1364
1365#if CONFIG_IS_ENABLED(MMC_WRITE)
1366static int sd_read_ssr(struct mmc *mmc)
1367{
1368 static const unsigned int sd_au_size[] = {
1369 0, SZ_16K / 512, SZ_32K / 512,
1370 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1371 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1372 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1373 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1374 SZ_64M / 512,
1375 };
1376 int err, i;
1377 struct mmc_cmd cmd;
1378 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1379 struct mmc_data data;
1380 int timeout = 3;
1381 unsigned int au, eo, et, es;
1382
1383 cmd.cmdidx = MMC_CMD_APP_CMD;
1384 cmd.resp_type = MMC_RSP_R1;
1385 cmd.cmdarg = mmc->rca << 16;
1386
1387 err = mmc_send_cmd(mmc, &cmd, NULL);
1388 if (err)
1389 return err;
1390
1391 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1392 cmd.resp_type = MMC_RSP_R1;
1393 cmd.cmdarg = 0;
1394
1395retry_ssr:
1396 data.dest = (char *)ssr;
1397 data.blocksize = 64;
1398 data.blocks = 1;
1399 data.flags = MMC_DATA_READ;
1400
1401 err = mmc_send_cmd(mmc, &cmd, &data);
1402 if (err) {
1403 if (timeout--)
1404 goto retry_ssr;
1405
1406 return err;
1407 }
1408
1409 for (i = 0; i < 16; i++)
1410 ssr[i] = be32_to_cpu(ssr[i]);
1411
1412 au = (ssr[2] >> 12) & 0xF;
1413 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1414 mmc->ssr.au = sd_au_size[au];
1415 es = (ssr[3] >> 24) & 0xFF;
1416 es |= (ssr[2] & 0xFF) << 8;
1417 et = (ssr[3] >> 18) & 0x3F;
1418 if (es && et) {
1419 eo = (ssr[3] >> 16) & 0x3;
1420 mmc->ssr.erase_timeout = (et * 1000) / es;
1421 mmc->ssr.erase_offset = eo * 1000;
1422 }
1423 } else {
1424 pr_debug("Invalid Allocation Unit Size.\n");
1425 }
1426
1427 return 0;
1428}
1429#endif

/* frequency bases, divided by 10 to be nice to platforms without floating point */
1432static const int fbase[] = {
1433 10000,
1434 100000,
1435 1000000,
1436 10000000,
1437};

/*
 * Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
 * to platforms without floating point.
 */
1442static const u8 multipliers[] = {
1443 0,
1444 10,
1445 12,
1446 13,
1447 15,
1448 20,
1449 25,
1450 30,
1451 35,
1452 40,
1453 45,
1454 50,
1455 55,
1456 60,
1457 70,
1458 80,
1459};
1460
1461static inline int bus_width(uint cap)
1462{
1463 if (cap == MMC_MODE_8BIT)
1464 return 8;
1465 if (cap == MMC_MODE_4BIT)
1466 return 4;
1467 if (cap == MMC_MODE_1BIT)
1468 return 1;
	pr_warn("invalid bus width capability 0x%x\n", cap);
1470 return 0;
1471}
1472
1473#if !CONFIG_IS_ENABLED(DM_MMC)
1474#ifdef MMC_SUPPORTS_TUNING
1475static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1476{
1477 return -ENOTSUPP;
1478}
1479#endif
1480
1481static void mmc_send_init_stream(struct mmc *mmc)
1482{
1483}
1484
1485static int mmc_set_ios(struct mmc *mmc)
1486{
1487 int ret = 0;
1488
1489 if (mmc->cfg->ops->set_ios)
1490 ret = mmc->cfg->ops->set_ios(mmc);
1491
1492 return ret;
1493}
1494#endif
1495
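/*
 * Request a new bus clock from the host driver. The frequency is clamped
 * to the host's f_min/f_max range unless @disable is set, in which case
 * the clock is simply gated.
 */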
1496int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
1497{
1498 if (!disable) {
1499 if (clock > mmc->cfg->f_max)
1500 clock = mmc->cfg->f_max;
1501
1502 if (clock < mmc->cfg->f_min)
1503 clock = mmc->cfg->f_min;
1504 }
1505
1506 mmc->clock = clock;
1507 mmc->clk_disable = disable;
1508
1509 return mmc_set_ios(mmc);
1510}
1511
1512static int mmc_set_bus_width(struct mmc *mmc, uint width)
1513{
1514 mmc->bus_width = width;
1515
1516 return mmc_set_ios(mmc);
1517}
1518
1519#if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
/*
 * helper function to display the capabilities in a human
 * friendly manner. The capabilities include bus width and
 * supported modes.
 */
1525void mmc_dump_capabilities(const char *text, uint caps)
1526{
1527 enum bus_mode mode;
1528
1529 pr_debug("%s: widths [", text);
1530 if (caps & MMC_MODE_8BIT)
1531 pr_debug("8, ");
1532 if (caps & MMC_MODE_4BIT)
1533 pr_debug("4, ");
1534 if (caps & MMC_MODE_1BIT)
1535 pr_debug("1, ");
1536 pr_debug("\b\b] modes [");
1537 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1538 if (MMC_CAP(mode) & caps)
1539 pr_debug("%s, ", mmc_mode_name(mode));
1540 pr_debug("\b\b]\n");
1541}
1542#endif
1543
1544struct mode_width_tuning {
1545 enum bus_mode mode;
1546 uint widths;
1547#ifdef MMC_SUPPORTS_TUNING
1548 uint tuning;
1549#endif
1550};
1551
1552#if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1553int mmc_voltage_to_mv(enum mmc_voltage voltage)
1554{
1555 switch (voltage) {
1556 case MMC_SIGNAL_VOLTAGE_000: return 0;
1557 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1558 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1559 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1560 }
1561 return -EINVAL;
1562}
1563
1564static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1565{
1566 int err;
1567
1568 if (mmc->signal_voltage == signal_voltage)
1569 return 0;
1570
1571 mmc->signal_voltage = signal_voltage;
1572 err = mmc_set_ios(mmc);
1573 if (err)
1574 pr_debug("unable to set voltage (err %d)\n", err);
1575
1576 return err;
1577}
1578#else
1579static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1580{
1581 return 0;
1582}
1583#endif
1584
1585static const struct mode_width_tuning sd_modes_by_pref[] = {
1586#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1587#ifdef MMC_SUPPORTS_TUNING
1588 {
1589 .mode = UHS_SDR104,
1590 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1591 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1592 },
1593#endif
1594 {
1595 .mode = UHS_SDR50,
1596 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1597 },
1598 {
1599 .mode = UHS_DDR50,
1600 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1601 },
1602 {
1603 .mode = UHS_SDR25,
1604 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1605 },
1606#endif
1607 {
1608 .mode = SD_HS,
1609 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1610 },
1611#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1612 {
1613 .mode = UHS_SDR12,
1614 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1615 },
1616#endif
1617 {
1618 .mode = SD_LEGACY,
1619 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1620 }
1621};
1622
1623#define for_each_sd_mode_by_pref(caps, mwt) \
1624 for (mwt = sd_modes_by_pref;\
1625 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1626 mwt++) \
1627 if (caps & MMC_CAP(mwt->mode))
1628
1629static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
1630{
1631 int err;
1632 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1633 const struct mode_width_tuning *mwt;
1634#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1635 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
1636#else
1637 bool uhs_en = false;
1638#endif
1639 uint caps;
1640
1641#ifdef DEBUG
1642 mmc_dump_capabilities("sd card", card_caps);
1643 mmc_dump_capabilities("host", mmc->host_caps);
1644#endif

	/* Restrict card's capabilities by what the host can do */
1647 caps = card_caps & mmc->host_caps;
1648
1649 if (!uhs_en)
1650 caps &= ~UHS_CAPS;
1651
1652 for_each_sd_mode_by_pref(caps, mwt) {
1653 uint *w;
1654
1655 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
1656 if (*w & caps & mwt->widths) {
1657 pr_debug("trying mode %s width %d (at %d MHz)\n",
1658 mmc_mode_name(mwt->mode),
1659 bus_width(*w),
1660 mmc_mode2freq(mmc, mwt->mode) / 1000000);

				/* configure the bus width (card + host) */
1663 err = sd_select_bus_width(mmc, bus_width(*w));
1664 if (err)
1665 goto error;
1666 mmc_set_bus_width(mmc, bus_width(*w));

				/* configure the bus mode (card side) */
1669 err = sd_set_card_speed(mmc, mwt->mode);
1670 if (err)
1671 goto error;

				/* configure the bus mode (host side) */
1674 mmc_select_mode(mmc, mwt->mode);
1675 mmc_set_clock(mmc, mmc->tran_speed, false);
1676
1677#ifdef MMC_SUPPORTS_TUNING
				/* execute tuning if needed */
1679 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1680 err = mmc_execute_tuning(mmc,
1681 mwt->tuning);
1682 if (err) {
1683 pr_debug("tuning failed\n");
1684 goto error;
1685 }
1686 }
1687#endif
1688
1689#if CONFIG_IS_ENABLED(MMC_WRITE)
1690 err = sd_read_ssr(mmc);
1691 if (err)
1692 pr_warn("unable to read ssr\n");
1693#endif
1694 if (!err)
1695 return 0;
1696
1697error:
				/* revert to a safer bus speed */
1699 mmc_select_mode(mmc, SD_LEGACY);
1700 mmc_set_clock(mmc, mmc->tran_speed, false);
1701 }
1702 }
1703 }
1704
1705 pr_err("unable to select a mode\n");
1706 return -ENOTSUPP;
1707}

/*
 * Read and compare the part of the ext_csd that is constant.
 * This can be used to check that the transfer is working
 * as expected.
 */
1714static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
1715{
1716 int err;
1717 const u8 *ext_csd = mmc->ext_csd;
1718 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1719
1720 if (mmc->version < MMC_VERSION_4)
1721 return 0;
1722
1723 err = mmc_send_ext_csd(mmc, test_csd);
1724 if (err)
1725 return err;

	/* Only compare read-only fields */
1728 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1729 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1730 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1731 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1732 ext_csd[EXT_CSD_REV]
1733 == test_csd[EXT_CSD_REV] &&
1734 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1735 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1736 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1737 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1738 return 0;
1739
1740 return -EBADMSG;
1741}
1742
1743#if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
1744static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1745 uint32_t allowed_mask)
1746{
1747 u32 card_mask = 0;
1748
1749 switch (mode) {
1750 case MMC_HS_200:
1751 if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_8V)
1752 card_mask |= MMC_SIGNAL_VOLTAGE_180;
1753 if (mmc->cardtype & EXT_CSD_CARD_TYPE_HS200_1_2V)
1754 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1755 break;
1756 case MMC_DDR_52:
1757 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1758 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1759 MMC_SIGNAL_VOLTAGE_180;
1760 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1761 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1762 break;
1763 default:
1764 card_mask |= MMC_SIGNAL_VOLTAGE_330;
1765 break;
1766 }
1767
1768 while (card_mask & allowed_mask) {
1769 enum mmc_voltage best_match;
1770
1771 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1772 if (!mmc_set_signal_voltage(mmc, best_match))
1773 return 0;
1774
1775 allowed_mask &= ~best_match;
1776 }
1777
1778 return -ENOTSUPP;
1779}
1780#else
1781static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1782 uint32_t allowed_mask)
1783{
1784 return 0;
1785}
1786#endif
1787
1788static const struct mode_width_tuning mmc_modes_by_pref[] = {
1789#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
1790 {
1791 .mode = MMC_HS_200,
1792 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1793 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1794 },
1795#endif
1796 {
1797 .mode = MMC_DDR_52,
1798 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1799 },
1800 {
1801 .mode = MMC_HS_52,
1802 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1803 },
1804 {
1805 .mode = MMC_HS,
1806 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1807 },
1808 {
1809 .mode = MMC_LEGACY,
1810 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1811 }
1812};
1813
1814#define for_each_mmc_mode_by_pref(caps, mwt) \
1815 for (mwt = mmc_modes_by_pref;\
1816 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1817 mwt++) \
1818 if (caps & MMC_CAP(mwt->mode))
1819
1820static const struct ext_csd_bus_width {
1821 uint cap;
1822 bool is_ddr;
1823 uint ext_csd_bits;
1824} ext_csd_bus_width[] = {
1825 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1826 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1827 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1828 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1829 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1830};
1831
1832#define for_each_supported_width(caps, ddr, ecbv) \
1833 for (ecbv = ext_csd_bus_width;\
1834 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
1835 ecbv++) \
1836 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
1837
1838static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
1839{
1840 int err;
1841 const struct mode_width_tuning *mwt;
1842 const struct ext_csd_bus_width *ecbw;
1843
1844#ifdef DEBUG
1845 mmc_dump_capabilities("mmc", card_caps);
1846 mmc_dump_capabilities("host", mmc->host_caps);
1847#endif

	/* Restrict card's capabilities by what the host can do */
	card_caps &= mmc->host_caps;

	/* Only version 4 of MMC supports wider bus widths */
1853 if (mmc->version < MMC_VERSION_4)
1854 return 0;
1855
1856 if (!mmc->ext_csd) {
1857 pr_debug("No ext_csd found!\n");
1858 return -ENOTSUPP;
1859 }
1860
1861 mmc_set_clock(mmc, mmc->legacy_speed, false);
1862
1863 for_each_mmc_mode_by_pref(card_caps, mwt) {
1864 for_each_supported_width(card_caps & mwt->widths,
1865 mmc_is_mode_ddr(mwt->mode), ecbw) {
1866 enum mmc_voltage old_voltage;
1867 pr_debug("trying mode %s width %d (at %d MHz)\n",
1868 mmc_mode_name(mwt->mode),
1869 bus_width(ecbw->cap),
1870 mmc_mode2freq(mmc, mwt->mode) / 1000000);
1871 old_voltage = mmc->signal_voltage;
1872 err = mmc_set_lowest_voltage(mmc, mwt->mode,
1873 MMC_ALL_SIGNAL_VOLTAGE);
1874 if (err)
1875 continue;

			/* configure the bus width (card + host) */
1878 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1879 EXT_CSD_BUS_WIDTH,
1880 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
1881 if (err)
1882 goto error;
1883 mmc_set_bus_width(mmc, bus_width(ecbw->cap));

			/* configure the bus speed (card side) */
1886 err = mmc_set_card_speed(mmc, mwt->mode);
1887 if (err)
1888 goto error;

			/*
			 * configure the bus width AND the ddr mode
			 * (card side)
			 */
1894 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
1895 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1896 EXT_CSD_BUS_WIDTH,
1897 ecbw->ext_csd_bits);
1898 if (err)
1899 goto error;
1900 }

			/* configure the bus mode (host side) */
1903 mmc_select_mode(mmc, mwt->mode);
1904 mmc_set_clock(mmc, mmc->tran_speed, false);
1905#ifdef MMC_SUPPORTS_TUNING

			/* execute tuning if needed */
1908 if (mwt->tuning) {
1909 err = mmc_execute_tuning(mmc, mwt->tuning);
1910 if (err) {
1911 pr_debug("tuning failed\n");
1912 goto error;
1913 }
1914 }
1915#endif

			/* do a transfer to check the configuration */
1918 err = mmc_read_and_compare_ext_csd(mmc);
1919 if (!err)
1920 return 0;
1921error:
1922 mmc_set_signal_voltage(mmc, old_voltage);
			/* if an error occurred, revert to a safer bus mode */
1924 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
1925 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
1926 mmc_select_mode(mmc, MMC_LEGACY);
1927 mmc_set_bus_width(mmc, 1);
1928 }
1929 }
1930
1931 pr_err("unable to select a mode\n");
1932
1933 return -ENOTSUPP;
1934}
1935
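/*
 * Version 4+ (eMMC) specific part of the startup sequence: read the
 * EXT_CSD, derive the device version and capacities, and set up the
 * hardware partition information.
 */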
1936static int mmc_startup_v4(struct mmc *mmc)
1937{
1938 int err, i;
1939 u64 capacity;
1940 bool has_parts = false;
1941 bool part_completed;
1942 static const u32 mmc_versions[] = {
1943 MMC_VERSION_4,
1944 MMC_VERSION_4_1,
1945 MMC_VERSION_4_2,
1946 MMC_VERSION_4_3,
1947 MMC_VERSION_4_4,
1948 MMC_VERSION_4_41,
1949 MMC_VERSION_4_5,
1950 MMC_VERSION_5_0,
1951 MMC_VERSION_5_1
1952 };
1953
1954 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
1955
1956 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
1957 return 0;

	/* check ext_csd version and capacity */
1960 err = mmc_send_ext_csd(mmc, ext_csd);
1961 if (err)
1962 goto error;

	/* store the ext_csd for future reference */
1965 if (!mmc->ext_csd)
1966 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
1967 if (!mmc->ext_csd)
1968 return -ENOMEM;
1969 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
1970
1971 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
1972 return -EINVAL;
1973
1974 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
1975
1976 if (mmc->version >= MMC_VERSION_4_2) {
		/*
		 * According to the JEDEC Standard, the value of
		 * ext_csd's capacity is valid if the value is more
		 * than 2GB
		 */
1982 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
1983 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
1984 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
1985 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
1986 capacity *= MMC_MAX_BLOCK_LEN;
1987 if ((capacity >> 20) > 2 * 1024)
1988 mmc->capacity_user = capacity;
1989 }

	/*
	 * The partition data may be non-zero but it is only
	 * effective if PARTITION_SETTING_COMPLETED is set in
	 * EXT_CSD, so ignore any data if this bit is not set,
	 * except for enabling the high-capacity group size
	 * definition (see below).
	 */
1997 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
1998 EXT_CSD_PARTITION_SETTING_COMPLETED);

	/* store the partition info of emmc */
2001 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2002 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2003 ext_csd[EXT_CSD_BOOT_MULT])
2004 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2005 if (part_completed &&
2006 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2007 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
2008
2009 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2010
2011 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2012
2013 for (i = 0; i < 4; i++) {
2014 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2015 uint mult = (ext_csd[idx + 2] << 16) +
2016 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2017 if (mult)
2018 has_parts = true;
2019 if (!part_completed)
2020 continue;
2021 mmc->capacity_gp[i] = mult;
2022 mmc->capacity_gp[i] *=
2023 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2024 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2025 mmc->capacity_gp[i] <<= 19;
2026 }
2027
2028#ifndef CONFIG_SPL_BUILD
2029 if (part_completed) {
2030 mmc->enh_user_size =
2031 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2032 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2033 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2034 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2035 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2036 mmc->enh_user_size <<= 19;
2037 mmc->enh_user_start =
2038 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2039 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2040 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2041 ext_csd[EXT_CSD_ENH_START_ADDR];
2042 if (mmc->high_capacity)
2043 mmc->enh_user_start <<= 9;
2044 }
2045#endif

	/*
	 * Host needs to enable ERASE_GRP_DEF bit if device is
	 * partitioned. This bit will be lost every time after a reset
	 * or power off. This will affect erase size.
	 */
2052 if (part_completed)
2053 has_parts = true;
2054 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2055 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2056 has_parts = true;
2057 if (has_parts) {
2058 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2059 EXT_CSD_ERASE_GROUP_DEF, 1);
2060
2061 if (err)
2062 goto error;
2063
2064 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2065 }
2066
2067 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
2068#if CONFIG_IS_ENABLED(MMC_WRITE)
		/* Read out group size from ext_csd */
2070 mmc->erase_grp_size =
2071 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
2072#endif

		/*
		 * if high capacity and partition setting completed
		 * SEC_COUNT is valid even if it is smaller than 2 GiB
		 * JEDEC Standard JESD84-B45, 6.2.4
		 */
2078 if (mmc->high_capacity && part_completed) {
2079 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2080 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2081 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2082 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2083 capacity *= MMC_MAX_BLOCK_LEN;
2084 mmc->capacity_user = capacity;
2085 }
2086 }
2087#if CONFIG_IS_ENABLED(MMC_WRITE)
2088 else {
		/* Calculate the group size from the csd value. */
2090 int erase_gsz, erase_gmul;
2091
2092 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2093 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2094 mmc->erase_grp_size = (erase_gsz + 1)
2095 * (erase_gmul + 1);
2096 }
2097#endif
2098#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
2099 mmc->hc_wp_grp_size = 1024
2100 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2101 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2102#endif
2103
2104 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2105
2106 return 0;
2107error:
2108 if (mmc->ext_csd) {
2109 free(mmc->ext_csd);
2110 mmc->ext_csd = NULL;
2111 }
2112 return err;
2113}
2114
2115static int mmc_startup(struct mmc *mmc)
2116{
2117 int err, i;
2118 uint mult, freq;
2119 u64 cmult, csize;
2120 struct mmc_cmd cmd;
2121 struct blk_desc *bdesc;
2122
2123#ifdef CONFIG_MMC_SPI_CRC_ON
2124 if (mmc_host_is_spi(mmc)) {
2125 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2126 cmd.resp_type = MMC_RSP_R1;
2127 cmd.cmdarg = 1;
2128 err = mmc_send_cmd(mmc, &cmd, NULL);
2129 if (err)
2130 return err;
2131 }
2132#endif

	/* Put the Card in Identify Mode */
2135 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2136 MMC_CMD_ALL_SEND_CID;
2137 cmd.resp_type = MMC_RSP_R2;
2138 cmd.cmdarg = 0;
2139
2140 err = mmc_send_cmd(mmc, &cmd, NULL);
2141
2142#ifdef CONFIG_MMC_QUIRKS
2143 if (err && (mmc->quirks & MMC_QUIRK_RETRY_SEND_CID)) {
2144 int retries = 4;

		/*
		 * It has been seen that SEND_CID may fail on the first
		 * attempt, so retry a few times before giving up.
		 */
2149 do {
2150 err = mmc_send_cmd(mmc, &cmd, NULL);
2151 if (!err)
2152 break;
2153 } while (retries--);
2154 }
2155#endif
2156
2157 if (err)
2158 return err;
2159
2160 memcpy(mmc->cid, cmd.response, 16);

	/*
	 * For MMC cards, set the Relative Address.
	 * For SD cards, get the Relative Address.
	 * This also puts the cards into Standby State.
	 */
2167 if (!mmc_host_is_spi(mmc)) {
2168 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2169 cmd.cmdarg = mmc->rca << 16;
2170 cmd.resp_type = MMC_RSP_R6;
2171
2172 err = mmc_send_cmd(mmc, &cmd, NULL);
2173
2174 if (err)
2175 return err;
2176
2177 if (IS_SD(mmc))
2178 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2179 }

	/* Get the Card-Specific Data */
2182 cmd.cmdidx = MMC_CMD_SEND_CSD;
2183 cmd.resp_type = MMC_RSP_R2;
2184 cmd.cmdarg = mmc->rca << 16;
2185
2186 err = mmc_send_cmd(mmc, &cmd, NULL);
2187
2188 if (err)
2189 return err;
2190
2191 mmc->csd[0] = cmd.response[0];
2192 mmc->csd[1] = cmd.response[1];
2193 mmc->csd[2] = cmd.response[2];
2194 mmc->csd[3] = cmd.response[3];
2195
2196 if (mmc->version == MMC_VERSION_UNKNOWN) {
2197 int version = (cmd.response[0] >> 26) & 0xf;
2198
2199 switch (version) {
2200 case 0:
2201 mmc->version = MMC_VERSION_1_2;
2202 break;
2203 case 1:
2204 mmc->version = MMC_VERSION_1_4;
2205 break;
2206 case 2:
2207 mmc->version = MMC_VERSION_2_2;
2208 break;
2209 case 3:
2210 mmc->version = MMC_VERSION_3;
2211 break;
2212 case 4:
2213 mmc->version = MMC_VERSION_4;
2214 break;
2215 default:
2216 mmc->version = MMC_VERSION_1_2;
2217 break;
2218 }
2219 }

	/* divide frequency by 10, since the mults are 10x bigger */
2222 freq = fbase[(cmd.response[0] & 0x7)];
2223 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
2224
2225 mmc->legacy_speed = freq * mult;
2226 mmc_select_mode(mmc, MMC_LEGACY);
2227
2228 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
2229 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
2230#if CONFIG_IS_ENABLED(MMC_WRITE)
	/* SD reuses the read block length for writes; MMC reports it in the CSD */
2232 if (IS_SD(mmc))
2233 mmc->write_bl_len = mmc->read_bl_len;
2234 else
2235 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
2236#endif
2237
2238 if (mmc->high_capacity) {
2239 csize = (mmc->csd[1] & 0x3f) << 16
2240 | (mmc->csd[2] & 0xffff0000) >> 16;
2241 cmult = 8;
2242 } else {
2243 csize = (mmc->csd[1] & 0x3ff) << 2
2244 | (mmc->csd[2] & 0xc0000000) >> 30;
2245 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2246 }
2247
2248 mmc->capacity_user = (csize + 1) << (cmult + 2);
2249 mmc->capacity_user *= mmc->read_bl_len;
2250 mmc->capacity_boot = 0;
2251 mmc->capacity_rpmb = 0;
2252 for (i = 0; i < 4; i++)
2253 mmc->capacity_gp[i] = 0;
2254
2255 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2256 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2257
2258#if CONFIG_IS_ENABLED(MMC_WRITE)
2259 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2260 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2261#endif
2262
2263 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2264 cmd.cmdidx = MMC_CMD_SET_DSR;
2265 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2266 cmd.resp_type = MMC_RSP_NONE;
2267 if (mmc_send_cmd(mmc, &cmd, NULL))
2268 pr_warn("MMC: SET_DSR failed\n");
2269 }

	/* Select the card, and put it into Transfer Mode */
2272 if (!mmc_host_is_spi(mmc)) {
2273 cmd.cmdidx = MMC_CMD_SELECT_CARD;
2274 cmd.resp_type = MMC_RSP_R1;
2275 cmd.cmdarg = mmc->rca << 16;
2276 err = mmc_send_cmd(mmc, &cmd, NULL);
2277
2278 if (err)
2279 return err;
2280 }

	/*
	 * For SD, the erase group is always one sector
	 */
2285#if CONFIG_IS_ENABLED(MMC_WRITE)
2286 mmc->erase_grp_size = 1;
2287#endif
2288 mmc->part_config = MMCPART_NOAVAILABLE;
2289
2290 err = mmc_startup_v4(mmc);
2291 if (err)
2292 return err;
2293
2294 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
2295 if (err)
2296 return err;
2297
2298 if (IS_SD(mmc)) {
2299 err = sd_get_capabilities(mmc);
2300 if (err)
2301 return err;
2302 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2303 } else {
2304 err = mmc_get_capabilities(mmc);
2305 if (err)
2306 return err;
2307 mmc_select_mode_and_width(mmc, mmc->card_caps);
2308 }
2309
2310 if (err)
2311 return err;
2312
2313 mmc->best_mode = mmc->selected_mode;

	/* Fix the block length for DDR mode */
2316 if (mmc->ddr_mode) {
2317 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
2318#if CONFIG_IS_ENABLED(MMC_WRITE)
2319 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
2320#endif
2321 }

	/* fill in device description */
2324 bdesc = mmc_get_blk_desc(mmc);
2325 bdesc->lun = 0;
2326 bdesc->hwpart = 0;
2327 bdesc->type = 0;
2328 bdesc->blksz = mmc->read_bl_len;
2329 bdesc->log2blksz = LOG2(bdesc->blksz);
2330 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
2331#if !defined(CONFIG_SPL_BUILD) || \
2332 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
2333 !defined(CONFIG_USE_TINY_PRINTF))
2334 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
2335 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2336 (mmc->cid[3] >> 16) & 0xffff);
2337 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
2338 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2339 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2340 (mmc->cid[2] >> 24) & 0xff);
2341 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
2342 (mmc->cid[2] >> 16) & 0xf);
2343#else
2344 bdesc->vendor[0] = 0;
2345 bdesc->product[0] = 0;
2346 bdesc->revision[0] = 0;
2347#endif
2348#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT)
2349 part_init(bdesc);
2350#endif
2351
2352 return 0;
2353}
2354
2355static int mmc_send_if_cond(struct mmc *mmc)
2356{
2357 struct mmc_cmd cmd;
2358 int err;
2359
2360 cmd.cmdidx = SD_CMD_SEND_IF_COND;
	/* Set the check-pattern byte and the bit for 2.7-3.6 V host support */
2362 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
2363 cmd.resp_type = MMC_RSP_R7;
2364
2365 err = mmc_send_cmd(mmc, &cmd, NULL);
2366
2367 if (err)
2368 return err;
2369
2370 if ((cmd.response[0] & 0xff) != 0xaa)
2371 return -EOPNOTSUPP;
2372 else
2373 mmc->version = SD_VERSION_2;
2374
2375 return 0;
2376}
2377
2378#if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations */
2380__weak void board_mmc_power_init(void)
2381{
2382}
2383#endif
2384
2385static int mmc_power_init(struct mmc *mmc)
2386{
2387#if CONFIG_IS_ENABLED(DM_MMC)
2388#if CONFIG_IS_ENABLED(DM_REGULATOR)
2389 int ret;
2390
2391 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
2392 &mmc->vmmc_supply);
2393 if (ret)
2394 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
2395
2396 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2397 &mmc->vqmmc_supply);
2398 if (ret)
2399 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
2400#endif
2401#else
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
2406 board_mmc_power_init();
2407#endif
2408 return 0;
2409}

/*
 * put the host in the initial state:
 * - turn on Vdd (card power supply)
 * - configure the bus width and clock to minimal values
 */
2416static void mmc_set_initial_state(struct mmc *mmc)
2417{
2418 int err;

	/* First try to set 3.3V. If it fails, fall back to 1.8V */
2421 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2422 if (err != 0)
2423 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2424 if (err != 0)
2425 pr_warn("mmc: failed to set signal voltage\n");
2426
2427 mmc_select_mode(mmc, MMC_LEGACY);
2428 mmc_set_bus_width(mmc, 1);
2429 mmc_set_clock(mmc, 0, false);
2430}
2431
2432static int mmc_power_on(struct mmc *mmc)
2433{
2434#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2435 if (mmc->vmmc_supply) {
2436 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2437
2438 if (ret) {
2439 puts("Error enabling VMMC supply\n");
2440 return ret;
2441 }
2442 }
2443#endif
2444 return 0;
2445}
2446
2447static int mmc_power_off(struct mmc *mmc)
2448{
2449 mmc_set_clock(mmc, 0, true);
2450#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2451 if (mmc->vmmc_supply) {
2452 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2453
2454 if (ret) {
2455 pr_debug("Error disabling VMMC supply\n");
2456 return ret;
2457 }
2458 }
2459#endif
2460 return 0;
2461}
2462
2463static int mmc_power_cycle(struct mmc *mmc)
2464{
2465 int ret;
2466
2467 ret = mmc_power_off(mmc);
2468 if (ret)
2469 return ret;

	/*
	 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
	 * to be on the safer side.
	 */
2474 udelay(2000);
2475 return mmc_power_on(mmc);
2476}
2477
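/*
 * First half of the init sequence: power up the card, reset it with CMD0
 * and start the (possibly deferred) operating-condition negotiation.
 * mmc_complete_init(), via mmc_init(), finishes the job.
 */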
2478int mmc_start_init(struct mmc *mmc)
2479{
2480 bool no_card;
2481 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
2482 int err;

	/*
	 * all hosts are capable of 1-bit bus width and able to use the legacy
	 * timings.
	 */
2488 mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(SD_LEGACY) |
2489 MMC_CAP(MMC_LEGACY) | MMC_MODE_1BIT;
2490
2491#if !defined(CONFIG_MMC_BROKEN_CD)
2492
2493 no_card = mmc_getcd(mmc) == 0;
2494#else
2495 no_card = 0;
2496#endif
2497#if !CONFIG_IS_ENABLED(DM_MMC)
2498 no_card = no_card || (mmc->cfg->ops->init == NULL);
2499#endif
2500 if (no_card) {
2501 mmc->has_init = 0;
2502#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2503 pr_err("MMC: no card present\n");
2504#endif
2505 return -ENOMEDIUM;
2506 }
2507
2508 if (mmc->has_init)
2509 return 0;
2510
2511#ifdef CONFIG_FSL_ESDHC_ADAPTER_IDENT
2512 mmc_adapter_card_type_ident();
2513#endif
2514 err = mmc_power_init(mmc);
2515 if (err)
2516 return err;
2517
2518#ifdef CONFIG_MMC_QUIRKS
2519 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
2520 MMC_QUIRK_RETRY_SEND_CID;
2521#endif
2522
2523 err = mmc_power_cycle(mmc);
2524 if (err) {
		/*
		 * if power cycling is not supported, we should not try
		 * to use the UHS modes, because we wouldn't be able to
		 * recover from an error during the UHS initialization.
		 */
2530 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
2531 uhs_en = false;
2532 mmc->host_caps &= ~UHS_CAPS;
2533 err = mmc_power_on(mmc);
2534 }
2535 if (err)
2536 return err;
2537
2538#if CONFIG_IS_ENABLED(DM_MMC)
	/* The device has already been probed, ready for use */
#else
	/* made sure it's not NULL earlier */
2542 err = mmc->cfg->ops->init(mmc);
2543 if (err)
2544 return err;
2545#endif
2546 mmc->ddr_mode = 0;
2547
2548retry:
2549 mmc_set_initial_state(mmc);
2550 mmc_send_init_stream(mmc);

	/* Reset the Card */
2553 err = mmc_go_idle(mmc);
2554
2555 if (err)
2556 return err;

	/* The internal partition resets to the user partition (0) at every CMD0 */
2559 mmc_get_blk_desc(mmc)->hwpart = 0;

	/* Test for SD version 2 */
2562 err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
2565 err = sd_send_op_cond(mmc, uhs_en);
2566 if (err && uhs_en) {
2567 uhs_en = false;
2568 mmc_power_cycle(mmc);
2569 goto retry;
2570 }

	/* If the command timed out, we check for an MMC card */
2573 if (err == -ETIMEDOUT) {
2574 err = mmc_send_op_cond(mmc);
2575
2576 if (err) {
2577#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2578 pr_err("Card did not respond to voltage select!\n");
2579#endif
2580 return -EOPNOTSUPP;
2581 }
2582 }
2583
2584 if (!err)
2585 mmc->init_in_progress = 1;
2586
2587 return err;
2588}
2589
2590static int mmc_complete_init(struct mmc *mmc)
2591{
2592 int err = 0;
2593
2594 mmc->init_in_progress = 0;
2595 if (mmc->op_cond_pending)
2596 err = mmc_complete_op_cond(mmc);
2597
2598 if (!err)
2599 err = mmc_startup(mmc);
2600 if (err)
2601 mmc->has_init = 0;
2602 else
2603 mmc->has_init = 1;
2604 return err;
2605}
2606
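/*
 * Complete the card initialization, running mmc_start_init() first if it
 * has not been started yet. A minimal, illustrative call sequence from
 * board or command code (the device number 0 is just an example):
 *
 *	struct mmc *mmc = find_mmc_device(0);
 *
 *	if (mmc && !mmc_init(mmc))
 *		printf("capacity: %llu bytes\n", mmc->capacity);
 */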
2607int mmc_init(struct mmc *mmc)
2608{
2609 int err = 0;
2610 __maybe_unused unsigned start;
2611#if CONFIG_IS_ENABLED(DM_MMC)
2612 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
2613
2614 upriv->mmc = mmc;
2615#endif
2616 if (mmc->has_init)
2617 return 0;
2618
2619 start = get_timer(0);
2620
2621 if (!mmc->init_in_progress)
2622 err = mmc_start_init(mmc);
2623
2624 if (!err)
2625 err = mmc_complete_init(mmc);
2626 if (err)
2627 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
2628
2629 return err;
2630}
2631
2632int mmc_set_dsr(struct mmc *mmc, u16 val)
2633{
2634 mmc->dsr = val;
2635 return 0;
2636}

/* CPU-specific MMC initializations */
2639__weak int cpu_mmc_init(bd_t *bis)
2640{
2641 return -1;
2642}

/* board-specific MMC initializations */
2645__weak int board_mmc_init(bd_t *bis)
2646{
2647 return -1;
2648}
2649
2650void mmc_set_preinit(struct mmc *mmc, int preinit)
2651{
2652 mmc->preinit = preinit;
2653}
2654
2655#if CONFIG_IS_ENABLED(DM_MMC)
2656static int mmc_probe(bd_t *bis)
2657{
2658 int ret, i;
2659 struct uclass *uc;
2660 struct udevice *dev;
2661
2662 ret = uclass_get(UCLASS_MMC, &uc);
2663 if (ret)
2664 return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
2671 for (i = 0; ; i++) {
2672 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
2673 if (ret == -ENODEV)
2674 break;
2675 }
2676 uclass_foreach_dev(dev, uc) {
2677 ret = device_probe(dev);
2678 if (ret)
2679 pr_err("%s - probe failed: %d\n", dev->name, ret);
2680 }
2681
2682 return 0;
2683}
2684#else
2685static int mmc_probe(bd_t *bis)
2686{
2687 if (board_mmc_init(bis) < 0)
2688 cpu_mmc_init(bis);
2689
2690 return 0;
2691}
2692#endif
2693
2694int mmc_initialize(bd_t *bis)
2695{
2696 static int initialized = 0;
2697 int ret;
2698 if (initialized)
2699 return 0;
2700 initialized = 1;
2701
2702#if !CONFIG_IS_ENABLED(BLK)
2703#if !CONFIG_IS_ENABLED(MMC_TINY)
2704 mmc_list_init();
2705#endif
2706#endif
2707 ret = mmc_probe(bis);
2708 if (ret)
2709 return ret;
2710
2711#ifndef CONFIG_SPL_BUILD
2712 print_mmc_devices(',');
2713#endif
2714
2715 mmc_do_preinit();
2716 return 0;
2717}
2718
2719#ifdef CONFIG_CMD_BKOPS_ENABLE
2720int mmc_set_bkops_enable(struct mmc *mmc)
2721{
2722 int err;
2723 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
2724
2725 err = mmc_send_ext_csd(mmc, ext_csd);
2726 if (err) {
2727 puts("Could not get ext_csd register values\n");
2728 return err;
2729 }
2730
2731 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
2732 puts("Background operations not supported on device\n");
2733 return -EMEDIUMTYPE;
2734 }
2735
2736 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
2737 puts("Background operations already enabled\n");
2738 return 0;
2739 }
2740
2741 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
2742 if (err) {
2743 puts("Failed to enable manual background operations\n");
2744 return err;
2745 }
2746
2747 puts("Enabled manual background operations\n");
2748
2749 return 0;
2750}
2751#endif
2752