/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

static struct workqueue_struct *workqueue;
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

/*
 * We normally treat cards as removed during suspend if they are not
 * known to be on a non-removable bus, to avoid the risk of writing
 * back data to a different card after resume.  Allow this to be
 * overridden if necessary.
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
bool mmc_assume_removable;
#else
bool mmc_assume_removable = 1;
#endif
EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
	removable,
	"MMC/SD cards are removable and may be removed during suspend");
71
/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}
88
89#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
95static void mmc_should_fail_request(struct mmc_host *host,
96 struct mmc_request *mrq)
97{
98 struct mmc_command *cmd = mrq->cmd;
99 struct mmc_data *data = mrq->data;
100 static const int data_errors[] = {
101 -ETIMEDOUT,
102 -EILSEQ,
103 -EIO,
104 };
105
106 if (!data)
107 return;
108
109 if (cmd->error || data->error ||
110 !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
111 return;
112
113 data->error = data_errors[random32() % ARRAY_SIZE(data_errors)];
114 data->bytes_xfered = (random32() % (data->bytes_xfered >> 9)) << 9;
115}
116
117#else
118
119static inline void mmc_should_fail_request(struct mmc_host *host,
120 struct mmc_request *mrq)
121{
122}
123
124#endif
125
/**
 *	mmc_request_done - finish processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	MMC drivers should call this function when they have completed
 *	their processing of a request.
 */
134void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
135{
136 struct mmc_command *cmd = mrq->cmd;
137 int err = cmd->error;
138
139 if (err && cmd->retries && mmc_host_is_spi(host)) {
140 if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
141 cmd->retries = 0;
142 }
143
144 if (err && cmd->retries && !mmc_card_removed(host->card)) {
		/*
		 * Request starter must handle retries - see
		 * mmc_wait_for_req_done().
		 */
149 if (mrq->done)
150 mrq->done(mrq);
151 } else {
152 mmc_should_fail_request(host, mrq);
153
154 led_trigger_event(host->led, LED_OFF);
155
156 pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
157 mmc_hostname(host), cmd->opcode, err,
158 cmd->resp[0], cmd->resp[1],
159 cmd->resp[2], cmd->resp[3]);
160
161 if (mrq->data) {
162 pr_debug("%s: %d bytes transferred: %d\n",
163 mmc_hostname(host),
164 mrq->data->bytes_xfered, mrq->data->error);
165 }
166
167 if (mrq->stop) {
168 pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
169 mmc_hostname(host), mrq->stop->opcode,
170 mrq->stop->error,
171 mrq->stop->resp[0], mrq->stop->resp[1],
172 mrq->stop->resp[2], mrq->stop->resp[3]);
173 }
174
175 if (mrq->done)
176 mrq->done(mrq);
177
178 mmc_host_clk_release(host);
179 }
180}
181
182EXPORT_SYMBOL(mmc_request_done);
183
184static void
185mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
186{
187#ifdef CONFIG_MMC_DEBUG
188 unsigned int i, sz;
189 struct scatterlist *sg;
190#endif
191
192 if (mrq->sbc) {
193 pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
194 mmc_hostname(host), mrq->sbc->opcode,
195 mrq->sbc->arg, mrq->sbc->flags);
196 }
197
198 pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
199 mmc_hostname(host), mrq->cmd->opcode,
200 mrq->cmd->arg, mrq->cmd->flags);
201
202 if (mrq->data) {
203 pr_debug("%s: blksz %d blocks %d flags %08x "
204 "tsac %d ms nsac %d\n",
205 mmc_hostname(host), mrq->data->blksz,
206 mrq->data->blocks, mrq->data->flags,
207 mrq->data->timeout_ns / 1000000,
208 mrq->data->timeout_clks);
209 }
210
211 if (mrq->stop) {
212 pr_debug("%s: CMD%u arg %08x flags %08x\n",
213 mmc_hostname(host), mrq->stop->opcode,
214 mrq->stop->arg, mrq->stop->flags);
215 }
216
217 WARN_ON(!host->claimed);
218
219 mrq->cmd->error = 0;
220 mrq->cmd->mrq = mrq;
221 if (mrq->data) {
222 BUG_ON(mrq->data->blksz > host->max_blk_size);
223 BUG_ON(mrq->data->blocks > host->max_blk_count);
224 BUG_ON(mrq->data->blocks * mrq->data->blksz >
225 host->max_req_size);
226
227#ifdef CONFIG_MMC_DEBUG
228 sz = 0;
229 for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
230 sz += sg->length;
231 BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
232#endif
233
234 mrq->cmd->data = mrq->data;
235 mrq->data->error = 0;
236 mrq->data->mrq = mrq;
237 if (mrq->stop) {
238 mrq->data->stop = mrq->stop;
239 mrq->stop->error = 0;
240 mrq->stop->mrq = mrq;
241 }
242 }
243 mmc_host_clk_hold(host);
244 led_trigger_event(host->led, LED_FULL);
245 host->ops->request(host, mrq);
246}
247
248static void mmc_wait_done(struct mmc_request *mrq)
249{
250 complete(&mrq->completion);
251}
252
253static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
254{
255 init_completion(&mrq->completion);
256 mrq->done = mmc_wait_done;
257 if (mmc_card_removed(host->card)) {
258 mrq->cmd->error = -ENOMEDIUM;
259 complete(&mrq->completion);
260 return -ENOMEDIUM;
261 }
262 mmc_start_request(host, mrq);
263 return 0;
264}
265
266static void mmc_wait_for_req_done(struct mmc_host *host,
267 struct mmc_request *mrq)
268{
269 struct mmc_command *cmd;
270
271 while (1) {
272 wait_for_completion(&mrq->completion);
273
274 cmd = mrq->cmd;
275 if (!cmd->error || !cmd->retries ||
276 mmc_card_removed(host->card))
277 break;
278
279 pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
280 mmc_hostname(host), cmd->opcode, cmd->error);
281 cmd->retries--;
282 cmd->error = 0;
283 host->ops->request(host, mrq);
284 }
285}
286
/**
 *	mmc_pre_req - Prepare for a new request
 *	@host: MMC host to prepare command
 *	@mrq: MMC request to prepare for
 *	@is_first_req: true if there is no previously started request
 *		       that may run in parallel to this call, otherwise false
 *
 *	mmc_pre_req() is called prior to mmc_start_req() to let the host
 *	prepare for the new request.  Preparation of a request may be
 *	performed while another request is running on the host.
 */
298static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
299 bool is_first_req)
300{
301 if (host->ops->pre_req) {
302 mmc_host_clk_hold(host);
303 host->ops->pre_req(host, mrq, is_first_req);
304 mmc_host_clk_release(host);
305 }
306}
307
/**
 *	mmc_post_req - Post process a completed request
 *	@host: MMC host to post process command
 *	@mrq: MMC request to post process for
 *	@err: error, if non zero, clean up any resources made in pre_req
 *
 *	Let the host post process a completed request.  Post processing of
 *	a request may be performed while another request is running.
 */
317static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
318 int err)
319{
320 if (host->ops->post_req) {
321 mmc_host_clk_hold(host);
322 host->ops->post_req(host, mrq, err);
323 mmc_host_clk_release(host);
324 }
325}
326
/**
 *	mmc_start_req - start a non-blocking request
 *	@host: MMC host to start command
 *	@areq: async request to start
 *	@error: out parameter; returns 0 for success, otherwise non zero
 *
 *	Start a new MMC custom command request for a host.
 *	If there is an ongoing async request, wait for completion
 *	of that request before starting the new one, then return.
 *	Does not wait for the new request to complete.
 *
 *	Returns the completed request, or NULL if none completed.  If
 *	there is no ongoing request, NULL is returned without waiting;
 *	NULL is not an error condition.
 */
343struct mmc_async_req *mmc_start_req(struct mmc_host *host,
344 struct mmc_async_req *areq, int *error)
345{
346 int err = 0;
347 int start_err = 0;
348 struct mmc_async_req *data = host->areq;
349
350
351 if (areq)
352 mmc_pre_req(host, areq->mrq, !host->areq);
353
354 if (host->areq) {
355 mmc_wait_for_req_done(host, host->areq->mrq);
356 err = host->areq->err_check(host->card, host->areq);
357 }
358
359 if (!err && areq)
360 start_err = __mmc_start_req(host, areq->mrq);
361
362 if (host->areq)
363 mmc_post_req(host, host->areq->mrq, 0);
364
365
366 if ((err || start_err) && areq)
367 mmc_post_req(host, areq->mrq, -EINVAL);
368
369 if (err)
370 host->areq = NULL;
371 else
372 host->areq = areq;
373
374 if (error)
375 *error = err;
376 return data;
377}
378EXPORT_SYMBOL(mmc_start_req);
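
/*
 * Example (illustrative sketch, not part of the core): a block driver can
 * keep the controller busy by handing mmc_start_req() the next prepared
 * request while the previous one is still completing.  The "next" container
 * and the my_err_check()/handle_error() helpers below are hypothetical and
 * only show the calling pattern:
 *
 *	struct mmc_async_req *prev;
 *	int error;
 *
 *	next->areq.mrq = &next->mrq;
 *	next->areq.err_check = my_err_check;
 *	prev = mmc_start_req(card->host, &next->areq, &error);
 *	if (error)
 *		handle_error(prev, error);
 *	else
 *		complete_previous(prev);	// may be NULL on the first call
 */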
379
/**
 *	mmc_wait_for_req - start a request and wait for completion
 *	@host: MMC host to start command
 *	@mrq: MMC request to start
 *
 *	Start a new MMC custom command request for a host, and wait
 *	for the command to complete.  Does not attempt to parse the
 *	response.
 */
389void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
390{
391 __mmc_start_req(host, mrq);
392 mmc_wait_for_req_done(host, mrq);
393}
394EXPORT_SYMBOL(mmc_wait_for_req);
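
/*
 * Example (sketch only, modelled on mmc_test-style callers): a synchronous
 * single-block read built by hand.  The caller must already have claimed the
 * host; "buf" and "blk_addr" (a block address, for a block-addressed card)
 * are assumed to exist.
 *
 *	struct mmc_request mrq = {NULL};
 *	struct mmc_command cmd = {0};
 *	struct mmc_data data = {0};
 *	struct scatterlist sg;
 *
 *	cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *	cmd.arg = blk_addr;
 *	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 *
 *	data.blksz = 512;
 *	data.blocks = 1;
 *	data.flags = MMC_DATA_READ;
 *	data.sg = &sg;
 *	data.sg_len = 1;
 *	sg_init_one(&sg, buf, 512);
 *	mmc_set_data_timeout(&data, card);
 *
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *	mmc_wait_for_req(card->host, &mrq);
 *	err = cmd.error ? cmd.error : data.error;
 */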
395
/**
 *	mmc_interrupt_hpi - issue a High Priority Interrupt (HPI) command
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issue a High Priority Interrupt and poll the card status
 *	until it leaves the programming state.
 */
403int mmc_interrupt_hpi(struct mmc_card *card)
404{
405 int err;
406 u32 status;
407 unsigned long prg_wait;
408
409 BUG_ON(!card);
410
411 if (!card->ext_csd.hpi_en) {
412 pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
413 return 1;
414 }
415
416 mmc_claim_host(card->host);
417 err = mmc_send_status(card, &status);
418 if (err) {
419 pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
420 goto out;
421 }
422
423 switch (R1_CURRENT_STATE(status)) {
424 case R1_STATE_IDLE:
425 case R1_STATE_READY:
426 case R1_STATE_STBY:
427
428
429
430
431 goto out;
432 case R1_STATE_PRG:
433 break;
434 default:
435
436 pr_debug("%s: HPI cannot be sent. Card state=%d\n",
437 mmc_hostname(card->host), R1_CURRENT_STATE(status));
438 err = -EINVAL;
439 goto out;
440 }
441
442 err = mmc_send_hpi_cmd(card, &status);
443 if (err)
444 goto out;
445
446 prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
447 do {
448 err = mmc_send_status(card, &status);
449
450 if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
451 break;
452 if (time_after(jiffies, prg_wait))
453 err = -ETIMEDOUT;
454 } while (!err);
455
456out:
457 mmc_release_host(card->host);
458 return err;
459}
460EXPORT_SYMBOL(mmc_interrupt_hpi);
461
/**
 *	mmc_wait_for_cmd - start a command and wait for completion
 *	@host: MMC host to start command
 *	@cmd: MMC command to start
 *	@retries: maximum number of retries
 *
 *	Start a new MMC command for a host, and wait for the command
 *	to complete.  Return any error that occurred while the command
 *	was executing.  Do not attempt to parse the response.
 */
472int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
473{
474 struct mmc_request mrq = {NULL};
475
476 WARN_ON(!host->claimed);
477
478 memset(cmd->resp, 0, sizeof(cmd->resp));
479 cmd->retries = retries;
480
481 mrq.cmd = cmd;
482 cmd->data = NULL;
483
484 mmc_wait_for_req(host, &mrq);
485
486 return cmd->error;
487}
488
489EXPORT_SYMBOL(mmc_wait_for_cmd);
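
/*
 * Example (sketch only): reading the card status register with a bare
 * command.  The caller is assumed to have claimed the host already.
 *
 *	struct mmc_command cmd = {0};
 *
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
 *	err = mmc_wait_for_cmd(card->host, &cmd, 3);
 *	if (!err)
 *		status = cmd.resp[0];
 */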
490
/**
 *	mmc_set_data_timeout - set the timeout for a data command
 *	@data: data phase for command
 *	@card: the MMC card associated with the data transfer
 *
 *	Computes the data timeout parameters according to the
 *	correct algorithm given the card type.
 */
499void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
500{
501 unsigned int mult;
502
503
504
505
506 if (mmc_card_sdio(card)) {
507 data->timeout_ns = 1000000000;
508 data->timeout_clks = 0;
509 return;
510 }
511
512
513
514
515 mult = mmc_card_sd(card) ? 100 : 10;
516
517
518
519
520
521 if (data->flags & MMC_DATA_WRITE)
522 mult <<= card->csd.r2w_factor;
523
524 data->timeout_ns = card->csd.tacc_ns * mult;
525 data->timeout_clks = card->csd.tacc_clks * mult;
526
527
528
529
530 if (mmc_card_sd(card)) {
531 unsigned int timeout_us, limit_us;
532
533 timeout_us = data->timeout_ns / 1000;
534 if (mmc_host_clk_rate(card->host))
535 timeout_us += data->timeout_clks * 1000 /
536 (mmc_host_clk_rate(card->host) / 1000);
537
538 if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length."  Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
547 limit_us = 3000000;
548 else
549 limit_us = 100000;
550
551
552
553
554 if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
555 data->timeout_ns = limit_us * 1000;
556 data->timeout_clks = 0;
557 }
558 }
559
	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value.  For the cards tested, 300ms has proven enough.  If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
566 if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
567 data->timeout_ns = 300000000;
568 data->timeout_clks = 0;
569 }
570
571
572
573
574
575
576
577 if (mmc_host_is_spi(card->host)) {
578 if (data->flags & MMC_DATA_WRITE) {
579 if (data->timeout_ns < 1000000000)
580 data->timeout_ns = 1000000000;
581 } else {
582 if (data->timeout_ns < 100000000)
583 data->timeout_ns = 100000000;
584 }
585 }
586}
587EXPORT_SYMBOL(mmc_set_data_timeout);
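
/*
 * Worked example of the calculation above, with illustrative CSD values: a
 * standard-capacity SD card reporting tacc_ns = 1500000, tacc_clks = 0 and
 * r2w_factor = 2 gets mult = 100 << 2 = 400 for writes, i.e. a 600 ms
 * timeout, which is below the 3 s write limit and therefore kept.  A read on
 * the same card uses mult = 100, i.e. 150 ms, which exceeds the 100 ms read
 * limit and is clamped to 100 ms.  Block-addressed (SDHC) cards always take
 * the fixed limit values.
 */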
588
/**
 *	mmc_align_data_size - pads a transfer size to a more optimal value
 *	@card: the MMC card associated with the data transfer
 *	@sz: original transfer size
 *
 *	Pads the original data size with a number of extra bytes in
 *	order to avoid controller bugs and/or performance hits
 *	(e.g. some controllers revert to PIO for certain sizes).
 *
 *	Returns the improved size, which might be unmodified.
 *
 *	Note that this function is only relevant when issuing a
 *	single scatter gather entry.
 */
603unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
604{
605
606
607
608
609
610 sz = ((sz + 3) / 4) * 4;
611
612 return sz;
613}
614EXPORT_SYMBOL(mmc_align_data_size);
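
/*
 * Example: mmc_align_data_size(card, 61) returns 64, while a size that is
 * already a multiple of 4, such as 512, is returned unchanged.
 */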
615
/**
 *	__mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@abort: whether or not the operation should be aborted
 *
 *	Claim a host for a set of operations.  If @abort is non null and
 *	dereferenced value is non zero, the claim is aborted.  If the
 *	claim is already made, wait for it to finish.  If an abort occurs
 *	while waiting, this function will return non zero.
 */
626int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
627{
628 DECLARE_WAITQUEUE(wait, current);
629 unsigned long flags;
630 int stop;
631
632 might_sleep();
633
634 add_wait_queue(&host->wq, &wait);
635 spin_lock_irqsave(&host->lock, flags);
636 while (1) {
637 set_current_state(TASK_UNINTERRUPTIBLE);
638 stop = abort ? atomic_read(abort) : 0;
639 if (stop || !host->claimed || host->claimer == current)
640 break;
641 spin_unlock_irqrestore(&host->lock, flags);
642 schedule();
643 spin_lock_irqsave(&host->lock, flags);
644 }
645 set_current_state(TASK_RUNNING);
646 if (!stop) {
647 host->claimed = 1;
648 host->claimer = current;
649 host->claim_cnt += 1;
650 } else
651 wake_up(&host->wq);
652 spin_unlock_irqrestore(&host->lock, flags);
653 remove_wait_queue(&host->wq, &wait);
654 if (host->ops->enable && !stop && host->claim_cnt == 1)
655 host->ops->enable(host);
656 return stop;
657}
658
659EXPORT_SYMBOL(__mmc_claim_host);
660
/**
 *	mmc_try_claim_host - try exclusively to claim a host
 *	@host: mmc host to claim
 *
 *	Returns %1 if the host is claimed, %0 otherwise.
 */
667int mmc_try_claim_host(struct mmc_host *host)
668{
669 int claimed_host = 0;
670 unsigned long flags;
671
672 spin_lock_irqsave(&host->lock, flags);
673 if (!host->claimed || host->claimer == current) {
674 host->claimed = 1;
675 host->claimer = current;
676 host->claim_cnt += 1;
677 claimed_host = 1;
678 }
679 spin_unlock_irqrestore(&host->lock, flags);
680 if (host->ops->enable && claimed_host && host->claim_cnt == 1)
681 host->ops->enable(host);
682 return claimed_host;
683}
684EXPORT_SYMBOL(mmc_try_claim_host);
685
/**
 *	mmc_release_host - release a host
 *	@host: mmc host to release
 *
 *	Release a MMC host, allowing others to claim the host
 *	for their operations.
 */
693void mmc_release_host(struct mmc_host *host)
694{
695 unsigned long flags;
696
697 WARN_ON(!host->claimed);
698
699 if (host->ops->disable && host->claim_cnt == 1)
700 host->ops->disable(host);
701
702 spin_lock_irqsave(&host->lock, flags);
703 if (--host->claim_cnt) {
704
705 spin_unlock_irqrestore(&host->lock, flags);
706 } else {
707 host->claimed = 0;
708 host->claimer = NULL;
709 spin_unlock_irqrestore(&host->lock, flags);
710 wake_up(&host->wq);
711 }
712}
713EXPORT_SYMBOL(mmc_release_host);
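
/*
 * Example (sketch only): the usual claim/operate/release pattern expected of
 * callers; do_card_io() is a hypothetical helper.
 *
 *	mmc_claim_host(card->host);
 *	err = do_card_io(card);
 *	mmc_release_host(card->host);
 */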
714
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
719static inline void mmc_set_ios(struct mmc_host *host)
720{
721 struct mmc_ios *ios = &host->ios;
722
723 pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
724 "width %u timing %u\n",
725 mmc_hostname(host), ios->clock, ios->bus_mode,
726 ios->power_mode, ios->chip_select, ios->vdd,
727 ios->bus_width, ios->timing);
728
729 if (ios->clock > 0)
730 mmc_set_ungated(host);
731 host->ops->set_ios(host, ios);
732}
733
734
735
736
737void mmc_set_chip_select(struct mmc_host *host, int mode)
738{
739 mmc_host_clk_hold(host);
740 host->ios.chip_select = mode;
741 mmc_set_ios(host);
742 mmc_host_clk_release(host);
743}
744
745
746
747
748
749static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
750{
751 WARN_ON(hz < host->f_min);
752
753 if (hz > host->f_max)
754 hz = host->f_max;
755
756 host->ios.clock = hz;
757 mmc_set_ios(host);
758}
759
760void mmc_set_clock(struct mmc_host *host, unsigned int hz)
761{
762 mmc_host_clk_hold(host);
763 __mmc_set_clock(host, hz);
764 mmc_host_clk_release(host);
765}
766
767#ifdef CONFIG_MMC_CLKGATE
768
769
770
771void mmc_gate_clock(struct mmc_host *host)
772{
773 unsigned long flags;
774
775 spin_lock_irqsave(&host->clk_lock, flags);
776 host->clk_old = host->ios.clock;
777 host->ios.clock = 0;
778 host->clk_gated = true;
779 spin_unlock_irqrestore(&host->clk_lock, flags);
780 mmc_set_ios(host);
781}
782
783
784
785
786
787void mmc_ungate_clock(struct mmc_host *host)
788{
789
790
791
792
793
794
795
796 if (host->clk_old) {
797 BUG_ON(host->ios.clock);
798
799 __mmc_set_clock(host, host->clk_old);
800 }
801}
802
803void mmc_set_ungated(struct mmc_host *host)
804{
805 unsigned long flags;
806
807
808
809
810
811 spin_lock_irqsave(&host->clk_lock, flags);
812 host->clk_gated = false;
813 spin_unlock_irqrestore(&host->clk_lock, flags);
814}
815
816#else
817void mmc_set_ungated(struct mmc_host *host)
818{
819}
820#endif
821
822
823
824
825void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
826{
827 mmc_host_clk_hold(host);
828 host->ios.bus_mode = mode;
829 mmc_set_ios(host);
830 mmc_host_clk_release(host);
831}
832
833
834
835
836void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
837{
838 mmc_host_clk_hold(host);
839 host->ios.bus_width = width;
840 mmc_set_ios(host);
841 mmc_host_clk_release(host);
842}
843
/**
 * mmc_vdd_to_ocrbitnum - convert a voltage to the OCR bit number
 * @vdd:	voltage (mV)
 * @low_bits:	prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value.  If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages.  For example, with the @low_bits flag, 3300 mV
 * translates to bit number 20 (3.2-3.3 V); without it, 3300 mV translates
 * to bit number 21 (3.3-3.4 V).
 */
859static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
860{
861 const int max_bit = ilog2(MMC_VDD_35_36);
862 int bit;
863
864 if (vdd < 1650 || vdd > 3600)
865 return -EINVAL;
866
867 if (vdd >= 1650 && vdd <= 1950)
868 return ilog2(MMC_VDD_165_195);
869
870 if (low_bits)
871 vdd -= 1;
872
873
874 bit = (vdd - 2000) / 100 + 8;
875 if (bit > max_bit)
876 return max_bit;
877 return bit;
878}
879
/**
 * mmc_vddrange_to_ocrmask - convert a voltage range to the OCR mask
 * @vdd_min:	minimum voltage value (mV)
 * @vdd_max:	maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values.  If conversion is not possible the function returns 0.
 *
 * Boundary voltages are treated inclusively: a bit is set for every 100 mV
 * OCR range that the [@vdd_min, @vdd_max] interval touches.
 */
893u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
894{
895 u32 mask = 0;
896
897 if (vdd_max < vdd_min)
898 return 0;
899
900
901 vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
902 if (vdd_max < 0)
903 return 0;
904
905
906 vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
907 if (vdd_min < 0)
908 return 0;
909
910
911 while (vdd_max >= vdd_min)
912 mask |= 1 << vdd_max--;
913
914 return mask;
915}
916EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
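
/*
 * Example: a single supply voltage expands to both adjacent ranges, so
 * mmc_vddrange_to_ocrmask(3300, 3300) returns MMC_VDD_32_33 | MMC_VDD_33_34,
 * while mmc_vddrange_to_ocrmask(1650, 1950) returns MMC_VDD_165_195.
 */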
917
918#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator.  This would normally be called before registering the
 * MMC host adapter.
 */
929int mmc_regulator_get_ocrmask(struct regulator *supply)
930{
931 int result = 0;
932 int count;
933 int i;
934
935 count = regulator_count_voltages(supply);
936 if (count < 0)
937 return count;
938
939 for (i = 0; i < count; i++) {
940 int vdd_uV;
941 int vdd_mV;
942
943 vdd_uV = regulator_list_voltage(supply, i);
944 if (vdd_uV <= 0)
945 continue;
946
947 vdd_mV = vdd_uV / 1000;
948 result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
949 }
950
951 return result;
952}
953EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
954
/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage.  This would normally be called from the
 * set_ios() method.
 */
967int mmc_regulator_set_ocr(struct mmc_host *mmc,
968 struct regulator *supply,
969 unsigned short vdd_bit)
970{
971 int result = 0;
972 int min_uV, max_uV;
973
974 if (vdd_bit) {
975 int tmp;
976 int voltage;
977
978
979
980
981
982
983 tmp = vdd_bit - ilog2(MMC_VDD_165_195);
984 if (tmp == 0) {
985 min_uV = 1650 * 1000;
986 max_uV = 1950 * 1000;
987 } else {
988 min_uV = 1900 * 1000 + tmp * 100 * 1000;
989 max_uV = min_uV + 100 * 1000;
990 }
991
992
993
994
995 voltage = regulator_get_voltage(supply);
996
997 if (mmc->caps2 & MMC_CAP2_BROKEN_VOLTAGE)
998 min_uV = max_uV = voltage;
999
1000 if (voltage < 0)
1001 result = voltage;
1002 else if (voltage < min_uV || voltage > max_uV)
1003 result = regulator_set_voltage(supply, min_uV, max_uV);
1004 else
1005 result = 0;
1006
1007 if (result == 0 && !mmc->regulator_enabled) {
1008 result = regulator_enable(supply);
1009 if (!result)
1010 mmc->regulator_enabled = true;
1011 }
1012 } else if (mmc->regulator_enabled) {
1013 result = regulator_disable(supply);
1014 if (result == 0)
1015 mmc->regulator_enabled = false;
1016 }
1017
1018 if (result)
1019 dev_err(mmc_dev(mmc),
1020 "could not set regulator OCR (%d)\n", result);
1021 return result;
1022}
1023EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
1024
1025int mmc_regulator_get_supply(struct mmc_host *mmc)
1026{
1027 struct device *dev = mmc_dev(mmc);
1028 struct regulator *supply;
1029 int ret;
1030
1031 supply = devm_regulator_get(dev, "vmmc");
1032 mmc->supply.vmmc = supply;
1033 mmc->supply.vqmmc = devm_regulator_get(dev, "vqmmc");
1034
1035 if (IS_ERR(supply))
1036 return PTR_ERR(supply);
1037
1038 ret = mmc_regulator_get_ocrmask(supply);
1039 if (ret > 0)
1040 mmc->ocr_avail = ret;
1041 else
1042 dev_warn(mmc_dev(mmc), "Failed getting OCR mask: %d\n", ret);
1043
1044 return 0;
1045}
1046EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
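
/*
 * Example (sketch only): a host driver probe can use the helper above to pick
 * up its "vmmc"/"vqmmc" supplies and derive ocr_avail, falling back to a
 * fixed mask when no regulator describes the slot:
 *
 *	ret = mmc_regulator_get_supply(mmc);
 *	if (ret)
 *		return ret;
 *	if (!mmc->ocr_avail)
 *		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 */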
1047
1048#endif
1049
/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
1054u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1055{
1056 int bit;
1057
1058 ocr &= host->ocr_avail;
1059
1060 bit = ffs(ocr);
1061 if (bit) {
1062 bit -= 1;
1063
1064 ocr &= 3 << bit;
1065
1066 mmc_host_clk_hold(host);
1067 host->ios.vdd = bit;
1068 mmc_set_ios(host);
1069 mmc_host_clk_release(host);
1070 } else {
1071 pr_warning("%s: host doesn't support card's voltages\n",
1072 mmc_hostname(host));
1073 ocr = 0;
1074 }
1075
1076 return ocr;
1077}
1078
1079int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
1080{
1081 struct mmc_command cmd = {0};
1082 int err = 0;
1083
1084 BUG_ON(!host);
1085
1086
1087
1088
1089
1090 if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
1091 cmd.opcode = SD_SWITCH_VOLTAGE;
1092 cmd.arg = 0;
1093 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1094
1095 err = mmc_wait_for_cmd(host, &cmd, 0);
1096 if (err)
1097 return err;
1098
1099 if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
1100 return -EIO;
1101 }
1102
1103 host->ios.signal_voltage = signal_voltage;
1104
1105 if (host->ops->start_signal_voltage_switch) {
1106 mmc_host_clk_hold(host);
1107 err = host->ops->start_signal_voltage_switch(host, &host->ios);
1108 mmc_host_clk_release(host);
1109 }
1110
1111 return err;
1112}
1113
1114
1115
1116
1117void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1118{
1119 mmc_host_clk_hold(host);
1120 host->ios.timing = timing;
1121 mmc_set_ios(host);
1122 mmc_host_clk_release(host);
1123}
1124
1125
1126
1127
1128void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1129{
1130 mmc_host_clk_hold(host);
1131 host->ios.drv_type = drv_type;
1132 mmc_set_ios(host);
1133 mmc_host_clk_release(host);
1134}
1135
1136static void mmc_poweroff_notify(struct mmc_host *host)
1137{
1138 struct mmc_card *card;
1139 unsigned int timeout;
1140 unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
1141 int err = 0;
1142
1143 card = host->card;
1144 mmc_claim_host(host);
1145
1146
1147
1148
1149
1150 if (card && mmc_card_mmc(card) &&
1151 (card->poweroff_notify_state == MMC_POWERED_ON)) {
1152
1153 if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
1154 notify_type = EXT_CSD_POWER_OFF_SHORT;
1155 timeout = card->ext_csd.generic_cmd6_time;
1156 card->poweroff_notify_state = MMC_POWEROFF_SHORT;
1157 } else {
1158 notify_type = EXT_CSD_POWER_OFF_LONG;
1159 timeout = card->ext_csd.power_off_longtime;
1160 card->poweroff_notify_state = MMC_POWEROFF_LONG;
1161 }
1162
1163 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1164 EXT_CSD_POWER_OFF_NOTIFICATION,
1165 notify_type, timeout);
1166
1167 if (err && err != -EBADMSG)
1168 pr_err("Device failed to respond within %d poweroff "
1169 "time. Forcefully powering down the device\n",
1170 timeout);
1171
1172
1173 card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
1174 }
1175 mmc_release_host(host);
1176}
1177
/*
 * Apply power to the MMC stack.  This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise.  Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
1189static void mmc_power_up(struct mmc_host *host)
1190{
1191 int bit;
1192
1193 if (host->ios.power_mode == MMC_POWER_ON)
1194 return;
1195
1196 mmc_host_clk_hold(host);
1197
1198
1199 if (host->ocr)
1200 bit = ffs(host->ocr) - 1;
1201 else
1202 bit = fls(host->ocr_avail) - 1;
1203
1204 host->ios.vdd = bit;
1205 if (mmc_host_is_spi(host))
1206 host->ios.chip_select = MMC_CS_HIGH;
1207 else
1208 host->ios.chip_select = MMC_CS_DONTCARE;
1209 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1210 host->ios.power_mode = MMC_POWER_UP;
1211 host->ios.bus_width = MMC_BUS_WIDTH_1;
1212 host->ios.timing = MMC_TIMING_LEGACY;
1213 mmc_set_ios(host);
1214
1215
1216 mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, false);
1217
1218
1219
1220
1221
1222 mmc_delay(10);
1223
1224 host->ios.clock = host->f_init;
1225
1226 host->ios.power_mode = MMC_POWER_ON;
1227 mmc_set_ios(host);
1228
1229
1230
1231
1232
1233 mmc_delay(10);
1234
1235 mmc_host_clk_release(host);
1236}
1237
1238void mmc_power_off(struct mmc_host *host)
1239{
1240 int err = 0;
1241
1242 if (host->ios.power_mode == MMC_POWER_OFF)
1243 return;
1244
1245 mmc_host_clk_hold(host);
1246
1247 host->ios.clock = 0;
1248 host->ios.vdd = 0;
1249
1250
1251
1252
1253
1254
1255 if (host->card && mmc_card_is_sleep(host->card) &&
1256 host->bus_ops->resume) {
1257 err = host->bus_ops->resume(host);
1258
1259 if (!err)
1260 mmc_poweroff_notify(host);
1261 else
1262 pr_warning("%s: error %d during resume "
1263 "(continue with poweroff sequence)\n",
1264 mmc_hostname(host), err);
1265 }
1266
	/*
	 * Reset ocr mask to be the highest possible voltage supported for
	 * this mmc host.  This value will be used at next power up.
	 */
1271 host->ocr = 1 << (fls(host->ocr_avail) - 1);
1272
1273 if (!mmc_host_is_spi(host)) {
1274 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
1275 host->ios.chip_select = MMC_CS_DONTCARE;
1276 }
1277 host->ios.power_mode = MMC_POWER_OFF;
1278 host->ios.bus_width = MMC_BUS_WIDTH_1;
1279 host->ios.timing = MMC_TIMING_LEGACY;
1280 mmc_set_ios(host);
1281
1282
1283
1284
1285
1286
1287 mmc_delay(1);
1288
1289 mmc_host_clk_release(host);
1290}
1291
1292
1293
1294
1295static void __mmc_release_bus(struct mmc_host *host)
1296{
1297 BUG_ON(!host);
1298 BUG_ON(host->bus_refs);
1299 BUG_ON(!host->bus_dead);
1300
1301 host->bus_ops = NULL;
1302}
1303
1304
1305
1306
1307static inline void mmc_bus_get(struct mmc_host *host)
1308{
1309 unsigned long flags;
1310
1311 spin_lock_irqsave(&host->lock, flags);
1312 host->bus_refs++;
1313 spin_unlock_irqrestore(&host->lock, flags);
1314}
1315
1316
1317
1318
1319
1320static inline void mmc_bus_put(struct mmc_host *host)
1321{
1322 unsigned long flags;
1323
1324 spin_lock_irqsave(&host->lock, flags);
1325 host->bus_refs--;
1326 if ((host->bus_refs == 0) && host->bus_ops)
1327 __mmc_release_bus(host);
1328 spin_unlock_irqrestore(&host->lock, flags);
1329}
1330
1331
1332
1333
1334
1335void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1336{
1337 unsigned long flags;
1338
1339 BUG_ON(!host);
1340 BUG_ON(!ops);
1341
1342 WARN_ON(!host->claimed);
1343
1344 spin_lock_irqsave(&host->lock, flags);
1345
1346 BUG_ON(host->bus_ops);
1347 BUG_ON(host->bus_refs);
1348
1349 host->bus_ops = ops;
1350 host->bus_refs = 1;
1351 host->bus_dead = 0;
1352
1353 spin_unlock_irqrestore(&host->lock, flags);
1354}
1355
1356
1357
1358
1359void mmc_detach_bus(struct mmc_host *host)
1360{
1361 unsigned long flags;
1362
1363 BUG_ON(!host);
1364
1365 WARN_ON(!host->claimed);
1366 WARN_ON(!host->bus_ops);
1367
1368 spin_lock_irqsave(&host->lock, flags);
1369
1370 host->bus_dead = 1;
1371
1372 spin_unlock_irqrestore(&host->lock, flags);
1373
1374 mmc_bus_put(host);
1375}
1376
/**
 *	mmc_detect_change - process change of state on a MMC socket
 *	@host: host which changed state.
 *	@delay: optional delay to wait before detection (jiffies)
 *
 *	MMC drivers should call this when they detect a card has been
 *	inserted or removed.  The MMC layer will confirm that any
 *	present card is still functional, and initialize any newly
 *	inserted.
 */
1387void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1388{
1389#ifdef CONFIG_MMC_DEBUG
1390 unsigned long flags;
1391 spin_lock_irqsave(&host->lock, flags);
1392 WARN_ON(host->removed);
1393 spin_unlock_irqrestore(&host->lock, flags);
1394#endif
1395 host->detect_change = 1;
1396 mmc_schedule_delayed_work(&host->detect, delay);
1397}
1398
1399EXPORT_SYMBOL(mmc_detect_change);
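
/*
 * Example (sketch only): a card-detect GPIO interrupt handler normally
 * debounces by passing a small delay; my_cd_irq() and the 200 ms value are
 * illustrative.
 *
 *	static irqreturn_t my_cd_irq(int irq, void *dev_id)
 *	{
 *		struct mmc_host *mmc = dev_id;
 *
 *		mmc_detect_change(mmc, msecs_to_jiffies(200));
 *		return IRQ_HANDLED;
 *	}
 */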
1400
1401void mmc_init_erase(struct mmc_card *card)
1402{
1403 unsigned int sz;
1404
1405 if (is_power_of_2(card->erase_size))
1406 card->erase_shift = ffs(card->erase_size) - 1;
1407 else
1408 card->erase_shift = 0;
1409
	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card.  That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly hugely over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time.  For MMC cards that define High Capacity
	 * Erase Size, whether it is switched on or not, limit to that size.
	 * Otherwise just have a stab at a good value.  For modern cards it
	 * will end up being 4MiB.  Note that if the value is too small, it can
	 * end up being calculated as 0, in which case something else will be
	 * used.
	 */
1425 if (mmc_card_sd(card) && card->ssr.au) {
1426 card->pref_erase = card->ssr.au;
1427 card->erase_shift = ffs(card->ssr.au) - 1;
1428 } else if (card->ext_csd.hc_erase_size) {
1429 card->pref_erase = card->ext_csd.hc_erase_size;
1430 } else {
1431 sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
1432 if (sz < 128)
1433 card->pref_erase = 512 * 1024 / 512;
1434 else if (sz < 512)
1435 card->pref_erase = 1024 * 1024 / 512;
1436 else if (sz < 1024)
1437 card->pref_erase = 2 * 1024 * 1024 / 512;
1438 else
1439 card->pref_erase = 4 * 1024 * 1024 / 512;
1440 if (card->pref_erase < card->erase_size)
1441 card->pref_erase = card->erase_size;
1442 else {
1443 sz = card->pref_erase % card->erase_size;
1444 if (sz)
1445 card->pref_erase += card->erase_size - sz;
1446 }
1447 }
1448}
1449
1450static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1451 unsigned int arg, unsigned int qty)
1452{
1453 unsigned int erase_timeout;
1454
1455 if (arg == MMC_DISCARD_ARG ||
1456 (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
1457 erase_timeout = card->ext_csd.trim_timeout;
1458 } else if (card->ext_csd.erase_group_def & 1) {
1459
1460 if (arg == MMC_TRIM_ARG)
1461 erase_timeout = card->ext_csd.trim_timeout;
1462 else
1463 erase_timeout = card->ext_csd.hc_erase_timeout;
1464 } else {
1465
1466 unsigned int mult = (10 << card->csd.r2w_factor);
1467 unsigned int timeout_clks = card->csd.tacc_clks * mult;
1468 unsigned int timeout_us;
1469
1470
1471 if (card->csd.tacc_ns < 1000000)
1472 timeout_us = (card->csd.tacc_ns * mult) / 1000;
1473 else
1474 timeout_us = (card->csd.tacc_ns / 1000) * mult;
1475
1476
1477
1478
1479
1480 timeout_clks <<= 1;
1481 timeout_us += (timeout_clks * 1000) /
1482 (mmc_host_clk_rate(card->host) / 1000);
1483
1484 erase_timeout = timeout_us / 1000;
1485
1486
1487
1488
1489
1490 if (!erase_timeout)
1491 erase_timeout = 1;
1492 }
1493
1494
1495 if (arg & MMC_SECURE_ARGS) {
1496 if (arg == MMC_SECURE_ERASE_ARG)
1497 erase_timeout *= card->ext_csd.sec_erase_mult;
1498 else
1499 erase_timeout *= card->ext_csd.sec_trim_mult;
1500 }
1501
1502 erase_timeout *= qty;
1503
1504
1505
1506
1507
1508 if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
1509 erase_timeout = 1000;
1510
1511 return erase_timeout;
1512}
1513
1514static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
1515 unsigned int arg,
1516 unsigned int qty)
1517{
1518 unsigned int erase_timeout;
1519
1520 if (card->ssr.erase_timeout) {
1521
1522 erase_timeout = card->ssr.erase_timeout * qty +
1523 card->ssr.erase_offset;
1524 } else {
1525
1526
1527
1528
1529 erase_timeout = 250 * qty;
1530 }
1531
1532
1533 if (erase_timeout < 1000)
1534 erase_timeout = 1000;
1535
1536 return erase_timeout;
1537}
1538
1539static unsigned int mmc_erase_timeout(struct mmc_card *card,
1540 unsigned int arg,
1541 unsigned int qty)
1542{
1543 if (mmc_card_sd(card))
1544 return mmc_sd_erase_timeout(card, arg, qty);
1545 else
1546 return mmc_mmc_erase_timeout(card, arg, qty);
1547}
1548
1549static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1550 unsigned int to, unsigned int arg)
1551{
1552 struct mmc_command cmd = {0};
1553 unsigned int qty = 0;
1554 int err;
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572 if (card->erase_shift)
1573 qty += ((to >> card->erase_shift) -
1574 (from >> card->erase_shift)) + 1;
1575 else if (mmc_card_sd(card))
1576 qty += to - from + 1;
1577 else
1578 qty += ((to / card->erase_size) -
1579 (from / card->erase_size)) + 1;
1580
1581 if (!mmc_card_blockaddr(card)) {
1582 from <<= 9;
1583 to <<= 9;
1584 }
1585
1586 if (mmc_card_sd(card))
1587 cmd.opcode = SD_ERASE_WR_BLK_START;
1588 else
1589 cmd.opcode = MMC_ERASE_GROUP_START;
1590 cmd.arg = from;
1591 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1592 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1593 if (err) {
1594 pr_err("mmc_erase: group start error %d, "
1595 "status %#x\n", err, cmd.resp[0]);
1596 err = -EIO;
1597 goto out;
1598 }
1599
1600 memset(&cmd, 0, sizeof(struct mmc_command));
1601 if (mmc_card_sd(card))
1602 cmd.opcode = SD_ERASE_WR_BLK_END;
1603 else
1604 cmd.opcode = MMC_ERASE_GROUP_END;
1605 cmd.arg = to;
1606 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1607 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1608 if (err) {
1609 pr_err("mmc_erase: group end error %d, status %#x\n",
1610 err, cmd.resp[0]);
1611 err = -EIO;
1612 goto out;
1613 }
1614
1615 memset(&cmd, 0, sizeof(struct mmc_command));
1616 cmd.opcode = MMC_ERASE;
1617 cmd.arg = arg;
1618 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1619 cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
1620 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1621 if (err) {
1622 pr_err("mmc_erase: erase error %d, status %#x\n",
1623 err, cmd.resp[0]);
1624 err = -EIO;
1625 goto out;
1626 }
1627
1628 if (mmc_host_is_spi(card->host))
1629 goto out;
1630
1631 do {
1632 memset(&cmd, 0, sizeof(struct mmc_command));
1633 cmd.opcode = MMC_SEND_STATUS;
1634 cmd.arg = card->rca << 16;
1635 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1636
1637 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1638 if (err || (cmd.resp[0] & 0xFDF92000)) {
1639 pr_err("error %d requesting status %#x\n",
1640 err, cmd.resp[0]);
1641 err = -EIO;
1642 goto out;
1643 }
1644 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
1645 R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG);
1646out:
1647 return err;
1648}
1649
/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
1659int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
1660 unsigned int arg)
1661{
1662 unsigned int rem, to = from + nr;
1663
1664 if (!(card->host->caps & MMC_CAP_ERASE) ||
1665 !(card->csd.cmdclass & CCC_ERASE))
1666 return -EOPNOTSUPP;
1667
1668 if (!card->erase_size)
1669 return -EOPNOTSUPP;
1670
1671 if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
1672 return -EOPNOTSUPP;
1673
1674 if ((arg & MMC_SECURE_ARGS) &&
1675 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
1676 return -EOPNOTSUPP;
1677
1678 if ((arg & MMC_TRIM_ARGS) &&
1679 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
1680 return -EOPNOTSUPP;
1681
1682 if (arg == MMC_SECURE_ERASE_ARG) {
1683 if (from % card->erase_size || nr % card->erase_size)
1684 return -EINVAL;
1685 }
1686
1687 if (arg == MMC_ERASE_ARG) {
1688 rem = from % card->erase_size;
1689 if (rem) {
1690 rem = card->erase_size - rem;
1691 from += rem;
1692 if (nr > rem)
1693 nr -= rem;
1694 else
1695 return 0;
1696 }
1697 rem = nr % card->erase_size;
1698 if (rem)
1699 nr -= rem;
1700 }
1701
1702 if (nr == 0)
1703 return 0;
1704
1705 to = from + nr;
1706
1707 if (to <= from)
1708 return -EINVAL;
1709
1710
1711 to -= 1;
1712
1713 return mmc_do_erase(card, from, to, arg);
1714}
1715EXPORT_SYMBOL(mmc_erase);
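
/*
 * Example (sketch only): discarding 8 MiB (16384 sectors) starting at
 * "from"; the caller must already hold the host claim.
 *
 *	if (mmc_can_trim(card))
 *		err = mmc_erase(card, from, 16384, MMC_TRIM_ARG);
 *	else if (mmc_can_erase(card))
 *		err = mmc_erase(card, from, 16384, MMC_ERASE_ARG);
 */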
1716
1717int mmc_can_erase(struct mmc_card *card)
1718{
1719 if ((card->host->caps & MMC_CAP_ERASE) &&
1720 (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
1721 return 1;
1722 return 0;
1723}
1724EXPORT_SYMBOL(mmc_can_erase);
1725
1726int mmc_can_trim(struct mmc_card *card)
1727{
1728 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
1729 return 1;
1730 return 0;
1731}
1732EXPORT_SYMBOL(mmc_can_trim);
1733
1734int mmc_can_discard(struct mmc_card *card)
1735{
1736
1737
1738
1739
1740 if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
1741 return 1;
1742 return 0;
1743}
1744EXPORT_SYMBOL(mmc_can_discard);
1745
1746int mmc_can_sanitize(struct mmc_card *card)
1747{
1748 if (!mmc_can_trim(card) && !mmc_can_erase(card))
1749 return 0;
1750 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
1751 return 1;
1752 return 0;
1753}
1754EXPORT_SYMBOL(mmc_can_sanitize);
1755
1756int mmc_can_secure_erase_trim(struct mmc_card *card)
1757{
1758 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
1759 return 1;
1760 return 0;
1761}
1762EXPORT_SYMBOL(mmc_can_secure_erase_trim);
1763
1764int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
1765 unsigned int nr)
1766{
1767 if (!card->erase_size)
1768 return 0;
1769 if (from % card->erase_size || nr % card->erase_size)
1770 return 0;
1771 return 1;
1772}
1773EXPORT_SYMBOL(mmc_erase_group_aligned);
1774
1775static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
1776 unsigned int arg)
1777{
1778 struct mmc_host *host = card->host;
1779 unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
1780 unsigned int last_timeout = 0;
1781
1782 if (card->erase_shift)
1783 max_qty = UINT_MAX >> card->erase_shift;
1784 else if (mmc_card_sd(card))
1785 max_qty = UINT_MAX;
1786 else
1787 max_qty = UINT_MAX / card->erase_size;
1788
1789
1790 do {
1791 y = 0;
1792 for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
1793 timeout = mmc_erase_timeout(card, arg, qty + x);
1794 if (timeout > host->max_discard_to)
1795 break;
1796 if (timeout < last_timeout)
1797 break;
1798 last_timeout = timeout;
1799 y = x;
1800 }
1801 qty += y;
1802 } while (y);
1803
1804 if (!qty)
1805 return 0;
1806
1807 if (qty == 1)
1808 return 1;
1809
1810
1811 if (card->erase_shift)
1812 max_discard = --qty << card->erase_shift;
1813 else if (mmc_card_sd(card))
1814 max_discard = qty;
1815 else
1816 max_discard = --qty * card->erase_size;
1817
1818 return max_discard;
1819}
1820
1821unsigned int mmc_calc_max_discard(struct mmc_card *card)
1822{
1823 struct mmc_host *host = card->host;
1824 unsigned int max_discard, max_trim;
1825
1826 if (!host->max_discard_to)
1827 return UINT_MAX;
1828
1829
1830
1831
1832
1833
1834 if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
1835 return card->pref_erase;
1836
1837 max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
1838 if (mmc_can_trim(card)) {
1839 max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
1840 if (max_trim < max_discard)
1841 max_discard = max_trim;
1842 } else if (max_discard < card->erase_size) {
1843 max_discard = 0;
1844 }
1845 pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
1846 mmc_hostname(host), max_discard, host->max_discard_to);
1847 return max_discard;
1848}
1849EXPORT_SYMBOL(mmc_calc_max_discard);
1850
1851int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
1852{
1853 struct mmc_command cmd = {0};
1854
1855 if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
1856 return 0;
1857
1858 cmd.opcode = MMC_SET_BLOCKLEN;
1859 cmd.arg = blocklen;
1860 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1861 return mmc_wait_for_cmd(card->host, &cmd, 5);
1862}
1863EXPORT_SYMBOL(mmc_set_blocklen);
1864
1865static void mmc_hw_reset_for_init(struct mmc_host *host)
1866{
1867 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
1868 return;
1869 mmc_host_clk_hold(host);
1870 host->ops->hw_reset(host);
1871 mmc_host_clk_release(host);
1872}
1873
1874int mmc_can_reset(struct mmc_card *card)
1875{
1876 u8 rst_n_function;
1877
1878 if (!mmc_card_mmc(card))
1879 return 0;
1880 rst_n_function = card->ext_csd.rst_n_function;
1881 if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
1882 return 0;
1883 return 1;
1884}
1885EXPORT_SYMBOL(mmc_can_reset);
1886
1887static int mmc_do_hw_reset(struct mmc_host *host, int check)
1888{
1889 struct mmc_card *card = host->card;
1890
1891 if (!host->bus_ops->power_restore)
1892 return -EOPNOTSUPP;
1893
1894 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
1895 return -EOPNOTSUPP;
1896
1897 if (!card)
1898 return -EINVAL;
1899
1900 if (!mmc_can_reset(card))
1901 return -EOPNOTSUPP;
1902
1903 mmc_host_clk_hold(host);
1904 mmc_set_clock(host, host->f_init);
1905
1906 host->ops->hw_reset(host);
1907
1908
1909 if (check) {
1910 struct mmc_command cmd = {0};
1911 int err;
1912
1913 cmd.opcode = MMC_SEND_STATUS;
1914 if (!mmc_host_is_spi(card->host))
1915 cmd.arg = card->rca << 16;
1916 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
1917 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1918 if (!err) {
1919 mmc_host_clk_release(host);
1920 return -ENOSYS;
1921 }
1922 }
1923
1924 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
1925 if (mmc_host_is_spi(host)) {
1926 host->ios.chip_select = MMC_CS_HIGH;
1927 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1928 } else {
1929 host->ios.chip_select = MMC_CS_DONTCARE;
1930 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
1931 }
1932 host->ios.bus_width = MMC_BUS_WIDTH_1;
1933 host->ios.timing = MMC_TIMING_LEGACY;
1934 mmc_set_ios(host);
1935
1936 mmc_host_clk_release(host);
1937
1938 return host->bus_ops->power_restore(host);
1939}
1940
1941int mmc_hw_reset(struct mmc_host *host)
1942{
1943 return mmc_do_hw_reset(host, 0);
1944}
1945EXPORT_SYMBOL(mmc_hw_reset);
1946
1947int mmc_hw_reset_check(struct mmc_host *host)
1948{
1949 return mmc_do_hw_reset(host, 1);
1950}
1951EXPORT_SYMBOL(mmc_hw_reset_check);
1952
1953static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
1954{
1955 host->f_init = freq;
1956
1957#ifdef CONFIG_MMC_DEBUG
1958 pr_info("%s: %s: trying to init card at %u Hz\n",
1959 mmc_hostname(host), __func__, host->f_init);
1960#endif
1961 mmc_power_up(host);
1962
1963
1964
1965
1966
1967 mmc_hw_reset_for_init(host);
1968
1969
1970
1971
1972
1973
1974 sdio_reset(host);
1975 mmc_go_idle(host);
1976
1977 mmc_send_if_cond(host, host->ocr_avail);
1978
1979
1980 if (!mmc_attach_sdio(host))
1981 return 0;
1982 if (!mmc_attach_sd(host))
1983 return 0;
1984 if (!mmc_attach_mmc(host))
1985 return 0;
1986
1987 mmc_power_off(host);
1988 return -EIO;
1989}
1990
1991int _mmc_detect_card_removed(struct mmc_host *host)
1992{
1993 int ret;
1994
1995 if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive)
1996 return 0;
1997
1998 if (!host->card || mmc_card_removed(host->card))
1999 return 1;
2000
2001 ret = host->bus_ops->alive(host);
2002 if (ret) {
2003 mmc_card_set_removed(host->card);
2004 pr_debug("%s: card remove detected\n", mmc_hostname(host));
2005 }
2006
2007 return ret;
2008}
2009
2010int mmc_detect_card_removed(struct mmc_host *host)
2011{
2012 struct mmc_card *card = host->card;
2013 int ret;
2014
2015 WARN_ON(!host->claimed);
2016
2017 if (!card)
2018 return 1;
2019
2020 ret = mmc_card_removed(card);
2021
2022
2023
2024
2025 if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL) &&
2026 !(host->caps2 & MMC_CAP2_DETECT_ON_ERR))
2027 return ret;
2028
2029 host->detect_change = 0;
2030 if (!ret) {
2031 ret = _mmc_detect_card_removed(host);
2032 if (ret && (host->caps2 & MMC_CAP2_DETECT_ON_ERR)) {
2033
2034
2035
2036
2037 cancel_delayed_work(&host->detect);
2038 mmc_detect_change(host, 0);
2039 }
2040 }
2041
2042 return ret;
2043}
2044EXPORT_SYMBOL(mmc_detect_card_removed);
2045
2046void mmc_rescan(struct work_struct *work)
2047{
2048 struct mmc_host *host =
2049 container_of(work, struct mmc_host, detect.work);
2050 int i;
2051
2052 if (host->rescan_disable)
2053 return;
2054
2055 mmc_bus_get(host);
2056
2057
2058
2059
2060
2061 if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
2062 && !(host->caps & MMC_CAP_NONREMOVABLE))
2063 host->bus_ops->detect(host);
2064
2065 host->detect_change = 0;
2066
2067
2068
2069
2070
2071 mmc_bus_put(host);
2072 mmc_bus_get(host);
2073
2074
2075 if (host->bus_ops != NULL) {
2076 mmc_bus_put(host);
2077 goto out;
2078 }
2079
2080
2081
2082
2083
2084 mmc_bus_put(host);
2085
2086 if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
2087 mmc_claim_host(host);
2088 mmc_power_off(host);
2089 mmc_release_host(host);
2090 goto out;
2091 }
2092
2093 mmc_claim_host(host);
2094 for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2095 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
2096 break;
2097 if (freqs[i] <= host->f_min)
2098 break;
2099 }
2100 mmc_release_host(host);
2101
2102 out:
2103 if (host->caps & MMC_CAP_NEEDS_POLL)
2104 mmc_schedule_delayed_work(&host->detect, HZ);
2105}
2106
2107void mmc_start_host(struct mmc_host *host)
2108{
2109 host->f_init = max(freqs[0], host->f_min);
2110 host->rescan_disable = 0;
2111 mmc_power_up(host);
2112 mmc_detect_change(host, 0);
2113}
2114
2115void mmc_stop_host(struct mmc_host *host)
2116{
2117#ifdef CONFIG_MMC_DEBUG
2118 unsigned long flags;
2119 spin_lock_irqsave(&host->lock, flags);
2120 host->removed = 1;
2121 spin_unlock_irqrestore(&host->lock, flags);
2122#endif
2123
2124 host->rescan_disable = 1;
2125 cancel_delayed_work_sync(&host->detect);
2126 mmc_flush_scheduled_work();
2127
2128
2129 host->pm_flags = 0;
2130
2131 mmc_bus_get(host);
2132 if (host->bus_ops && !host->bus_dead) {
2133
2134 if (host->bus_ops->remove)
2135 host->bus_ops->remove(host);
2136
2137 mmc_claim_host(host);
2138 mmc_detach_bus(host);
2139 mmc_power_off(host);
2140 mmc_release_host(host);
2141 mmc_bus_put(host);
2142 return;
2143 }
2144 mmc_bus_put(host);
2145
2146 BUG_ON(host->card);
2147
2148 mmc_power_off(host);
2149}
2150
2151int mmc_power_save_host(struct mmc_host *host)
2152{
2153 int ret = 0;
2154
2155#ifdef CONFIG_MMC_DEBUG
2156 pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
2157#endif
2158
2159 mmc_bus_get(host);
2160
2161 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
2162 mmc_bus_put(host);
2163 return -EINVAL;
2164 }
2165
2166 if (host->bus_ops->power_save)
2167 ret = host->bus_ops->power_save(host);
2168
2169 mmc_bus_put(host);
2170
2171 mmc_power_off(host);
2172
2173 return ret;
2174}
2175EXPORT_SYMBOL(mmc_power_save_host);
2176
2177int mmc_power_restore_host(struct mmc_host *host)
2178{
2179 int ret;
2180
2181#ifdef CONFIG_MMC_DEBUG
2182 pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
2183#endif
2184
2185 mmc_bus_get(host);
2186
2187 if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
2188 mmc_bus_put(host);
2189 return -EINVAL;
2190 }
2191
2192 mmc_power_up(host);
2193 ret = host->bus_ops->power_restore(host);
2194
2195 mmc_bus_put(host);
2196
2197 return ret;
2198}
2199EXPORT_SYMBOL(mmc_power_restore_host);
2200
2201int mmc_card_awake(struct mmc_host *host)
2202{
2203 int err = -ENOSYS;
2204
2205 if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
2206 return 0;
2207
2208 mmc_bus_get(host);
2209
2210 if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
2211 err = host->bus_ops->awake(host);
2212
2213 mmc_bus_put(host);
2214
2215 return err;
2216}
2217EXPORT_SYMBOL(mmc_card_awake);
2218
2219int mmc_card_sleep(struct mmc_host *host)
2220{
2221 int err = -ENOSYS;
2222
2223 if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
2224 return 0;
2225
2226 mmc_bus_get(host);
2227
2228 if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
2229 err = host->bus_ops->sleep(host);
2230
2231 mmc_bus_put(host);
2232
2233 return err;
2234}
2235EXPORT_SYMBOL(mmc_card_sleep);
2236
2237int mmc_card_can_sleep(struct mmc_host *host)
2238{
2239 struct mmc_card *card = host->card;
2240
2241 if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
2242 return 1;
2243 return 0;
2244}
2245EXPORT_SYMBOL(mmc_card_can_sleep);
2246
/*
 * Flush the cache to the non-volatile storage.
 */
2250int mmc_flush_cache(struct mmc_card *card)
2251{
2252 struct mmc_host *host = card->host;
2253 int err = 0;
2254
2255 if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
2256 return err;
2257
2258 if (mmc_card_mmc(card) &&
2259 (card->ext_csd.cache_size > 0) &&
2260 (card->ext_csd.cache_ctrl & 1)) {
2261 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2262 EXT_CSD_FLUSH_CACHE, 1, 0);
2263 if (err)
2264 pr_err("%s: cache flush error %d\n",
2265 mmc_hostname(card->host), err);
2266 }
2267
2268 return err;
2269}
2270EXPORT_SYMBOL(mmc_flush_cache);
2271
/*
 * Turn the cache ON/OFF.
 * Turning the cache OFF shall trigger flushing of the data
 * to the non-volatile storage.
 */
2277int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
2278{
2279 struct mmc_card *card = host->card;
2280 unsigned int timeout;
2281 int err = 0;
2282
2283 if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
2284 mmc_card_is_removable(host))
2285 return err;
2286
2287 mmc_claim_host(host);
2288 if (card && mmc_card_mmc(card) &&
2289 (card->ext_csd.cache_size > 0)) {
2290 enable = !!enable;
2291
2292 if (card->ext_csd.cache_ctrl ^ enable) {
2293 timeout = enable ? card->ext_csd.generic_cmd6_time : 0;
2294 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
2295 EXT_CSD_CACHE_CTRL, enable, timeout);
2296 if (err)
2297 pr_err("%s: cache %s error %d\n",
2298 mmc_hostname(card->host),
2299 enable ? "on" : "off",
2300 err);
2301 else
2302 card->ext_csd.cache_ctrl = enable;
2303 }
2304 }
2305 mmc_release_host(host);
2306
2307 return err;
2308}
2309EXPORT_SYMBOL(mmc_cache_ctrl);
2310
2311#ifdef CONFIG_PM

/**
 *	mmc_suspend_host - suspend a host
 *	@host: mmc host
 */
2317int mmc_suspend_host(struct mmc_host *host)
2318{
2319 int err = 0;
2320
2321 cancel_delayed_work(&host->detect);
2322 mmc_flush_scheduled_work();
2323
2324 err = mmc_cache_ctrl(host, 0);
2325 if (err)
2326 goto out;
2327
2328 mmc_bus_get(host);
2329 if (host->bus_ops && !host->bus_dead) {
2330
2331 if (host->bus_ops->suspend)
2332 err = host->bus_ops->suspend(host);
2333
2334 if (err == -ENOSYS || !host->bus_ops->resume) {
2335
2336
2337
2338
2339
2340
2341 if (host->bus_ops->remove)
2342 host->bus_ops->remove(host);
2343 mmc_claim_host(host);
2344 mmc_detach_bus(host);
2345 mmc_power_off(host);
2346 mmc_release_host(host);
2347 host->pm_flags = 0;
2348 err = 0;
2349 }
2350 }
2351 mmc_bus_put(host);
2352
2353 if (!err && !mmc_card_keep_power(host))
2354 mmc_power_off(host);
2355
2356out:
2357 return err;
2358}
2359
2360EXPORT_SYMBOL(mmc_suspend_host);
2361
/**
 *	mmc_resume_host - resume a previously suspended host
 *	@host: mmc host
 */
2366int mmc_resume_host(struct mmc_host *host)
2367{
2368 int err = 0;
2369
2370 mmc_bus_get(host);
2371 if (host->bus_ops && !host->bus_dead) {
2372 if (!mmc_card_keep_power(host)) {
2373 mmc_power_up(host);
2374 mmc_select_voltage(host, host->ocr);
2375
2376
2377
2378
2379
2380
2381 if (mmc_card_sdio(host->card) &&
2382 (host->caps & MMC_CAP_POWER_OFF_CARD)) {
2383 pm_runtime_disable(&host->card->dev);
2384 pm_runtime_set_active(&host->card->dev);
2385 pm_runtime_enable(&host->card->dev);
2386 }
2387 }
2388 BUG_ON(!host->bus_ops->resume);
2389 err = host->bus_ops->resume(host);
2390 if (err) {
2391 pr_warning("%s: error %d during resume "
2392 "(card was removed?)\n",
2393 mmc_hostname(host), err);
2394 err = 0;
2395 }
2396 }
2397 host->pm_flags &= ~MMC_PM_KEEP_POWER;
2398 mmc_bus_put(host);
2399
2400 return err;
2401}
2402EXPORT_SYMBOL(mmc_resume_host);
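
/*
 * Example (sketch only): a platform host driver's system PM callbacks
 * typically just forward to the two helpers above; "struct my_host" and its
 * drvdata layout are hypothetical.
 *
 *	static int my_host_suspend(struct device *dev)
 *	{
 *		struct my_host *h = dev_get_drvdata(dev);
 *
 *		return mmc_suspend_host(h->mmc);
 *	}
 *
 *	static int my_host_resume(struct device *dev)
 *	{
 *		struct my_host *h = dev_get_drvdata(dev);
 *
 *		return mmc_resume_host(h->mmc);
 *	}
 */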
2403
2404
2405
2406
2407
2408int mmc_pm_notify(struct notifier_block *notify_block,
2409 unsigned long mode, void *unused)
2410{
2411 struct mmc_host *host = container_of(
2412 notify_block, struct mmc_host, pm_notify);
2413 unsigned long flags;
2414
2415
2416 switch (mode) {
2417 case PM_HIBERNATION_PREPARE:
2418 case PM_SUSPEND_PREPARE:
2419
2420 spin_lock_irqsave(&host->lock, flags);
2421 host->rescan_disable = 1;
2422 host->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
2423 spin_unlock_irqrestore(&host->lock, flags);
2424 cancel_delayed_work_sync(&host->detect);
2425
2426 if (!host->bus_ops || host->bus_ops->suspend)
2427 break;
2428
2429
2430 if (host->bus_ops->remove)
2431 host->bus_ops->remove(host);
2432
2433 mmc_claim_host(host);
2434 mmc_detach_bus(host);
2435 mmc_power_off(host);
2436 mmc_release_host(host);
2437 host->pm_flags = 0;
2438 break;
2439
2440 case PM_POST_SUSPEND:
2441 case PM_POST_HIBERNATION:
2442 case PM_POST_RESTORE:
2443
2444 spin_lock_irqsave(&host->lock, flags);
2445 host->rescan_disable = 0;
2446 host->power_notify_type = MMC_HOST_PW_NOTIFY_LONG;
2447 spin_unlock_irqrestore(&host->lock, flags);
2448 mmc_detect_change(host, 0);
2449
2450 }
2451
2452 return 0;
2453}
2454#endif
2455
2456static int __init mmc_init(void)
2457{
2458 int ret;
2459
2460 workqueue = alloc_ordered_workqueue("kmmcd", 0);
2461 if (!workqueue)
2462 return -ENOMEM;
2463
2464 ret = mmc_register_bus();
2465 if (ret)
2466 goto destroy_workqueue;
2467
2468 ret = mmc_register_host_class();
2469 if (ret)
2470 goto unregister_bus;
2471
2472 ret = sdio_register_bus();
2473 if (ret)
2474 goto unregister_host_class;
2475
2476 return 0;
2477
2478unregister_host_class:
2479 mmc_unregister_host_class();
2480unregister_bus:
2481 mmc_unregister_bus();
2482destroy_workqueue:
2483 destroy_workqueue(workqueue);
2484
2485 return ret;
2486}
2487
2488static void __exit mmc_exit(void)
2489{
2490 sdio_unregister_bus();
2491 mmc_unregister_host_class();
2492 mmc_unregister_bus();
2493 destroy_workqueue(workqueue);
2494}
2495
2496subsys_initcall(mmc_init);
2497module_exit(mmc_exit);
2498
2499MODULE_LICENSE("GPL");
2500