1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/interrupt.h>
16#include <linux/completion.h>
17#include <linux/device.h>
18#include <linux/delay.h>
19#include <linux/pagemap.h>
20#include <linux/err.h>
21#include <linux/leds.h>
22#include <linux/scatterlist.h>
23#include <linux/log2.h>
24#include <linux/regulator/consumer.h>
25#include <linux/pm_runtime.h>
26#include <linux/pm_wakeup.h>
27#include <linux/suspend.h>
28#include <linux/fault-inject.h>
29#include <linux/random.h>
30#include <linux/slab.h>
31#include <linux/of.h>
32
33#include <linux/mmc/card.h>
34#include <linux/mmc/host.h>
35#include <linux/mmc/mmc.h>
36#include <linux/mmc/sd.h>
37#include <linux/mmc/slot-gpio.h>
38
39#define CREATE_TRACE_POINTS
40#include <trace/events/mmc.h>
41
42#include "core.h"
43#include "card.h"
44#include "bus.h"
45#include "host.h"
46#include "sdio_bus.h"
47
48#include "mmc_ops.h"
49#include "sd_ops.h"
50#include "sdio_ops.h"
51
52
53#define MMC_CORE_TIMEOUT_MS (10 * 60 * 1000)
54
55
56#define MMC_ERASE_TIMEOUT_MS (60 * 1000)
57
58static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
59
60
61
62
63
64
65bool use_spi_crc = 1;
66module_param(use_spi_crc, bool, 0);
67
/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	/*
	 * We queue on system_freezable_wq so that the work is frozen
	 * together with userspace during system suspend, avoiding card
	 * detection churn while the system is transitioning PM states.
	 */
	return queue_delayed_work(system_freezable_wq, work, delay);
}
79
80#ifdef CONFIG_FAIL_MMC_REQUEST
81
82
83
84
85
86static void mmc_should_fail_request(struct mmc_host *host,
87 struct mmc_request *mrq)
88{
89 struct mmc_command *cmd = mrq->cmd;
90 struct mmc_data *data = mrq->data;
91 static const int data_errors[] = {
92 -ETIMEDOUT,
93 -EILSEQ,
94 -EIO,
95 };
96
97 if (!data)
98 return;
99
100 if (cmd->error || data->error ||
101 !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
102 return;
103
104 data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
105 data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
106}
107
108#else
109
/* Fault injection disabled: no-op stub. */
static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}
114
115#endif
116
117static inline void mmc_complete_cmd(struct mmc_request *mrq)
118{
119 if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
120 complete_all(&mrq->cmd_completion);
121}
122
/**
 *	mmc_command_done - called by host drivers when the command phase
 *	of a request has completed but the data transfer is still ongoing
 *	@host: MMC host
 *	@mrq: the request whose command has completed
 *
 *	Only meaningful for requests flagged cap_cmd_during_tfr; wakes
 *	any waiter on cmd_completion so the next command can be issued.
 */
void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
{
	if (!mrq->cap_cmd_during_tfr)
		return;

	mmc_complete_cmd(mrq);

	pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
		 mmc_hostname(host), mrq->cmd->opcode);
}
EXPORT_SYMBOL(mmc_command_done);
134
135
136
137
138
139
140
141
142
/**
 *	mmc_request_done - finish processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	MMC drivers should call this function when they have completed
 *	their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	/*
	 * Flag re-tuning on CRC (-EILSEQ) errors in any part of the
	 * request, unless this was itself a tuning command (CRC errors
	 * are expected while tuning).
	 */
	if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
	     cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
	    (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
	    (mrq->data && mrq->data->error == -EILSEQ) ||
	    (mrq->stop && mrq->stop->error == -EILSEQ)))
		mmc_retune_needed(host);

	/* SPI: an illegal-command response cannot be fixed by retrying */
	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (host->ongoing_mrq == mrq)
		host->ongoing_mrq = NULL;

	mmc_complete_cmd(mrq);

	trace_mmc_request_done(host, mrq);

	/*
	 * Log the final outcome only when no retry will follow; the
	 * request starter handles retries (see mmc_wait_for_req_done()).
	 */
	if (!err || !cmd->retries || mmc_card_removed(host->card)) {
		mmc_should_fail_request(host, mrq);

		/* Keep the LED on while another request is in flight */
		if (!host->ongoing_mrq)
			led_trigger_event(host->led, LED_OFF);

		if (mrq->sbc) {
			pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->sbc->opcode,
				mrq->sbc->error,
				mrq->sbc->resp[0], mrq->sbc->resp[1],
				mrq->sbc->resp[2], mrq->sbc->resp[3]);
		}

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s:     %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}
	}

	/* Notify the issuer; mmc_wait_for_req() uses this to wake up */
	if (mrq->done)
		mrq->done(mrq);
}

EXPORT_SYMBOL(mmc_request_done);
219
/*
 * Internal function. Hand @mrq to the host driver, after performing any
 * pending re-tune and waiting out SDIO card-busy where required. On
 * failure the request is completed immediately via mmc_request_done().
 */
static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	/* Re-tune if needed before issuing anything */
	err = mmc_retune(host);
	if (err) {
		mrq->cmd->error = err;
		mmc_request_done(host, mrq);
		return;
	}

	/*
	 * For SDIO R/W commands, poll the host's card_busy callback
	 * (when available) until the card reports ready, up to roughly
	 * 500 ms; time out with -EBUSY otherwise.
	 */
	if (sdio_is_io_busy(mrq->cmd->opcode, mrq->cmd->arg) &&
	    host->ops->card_busy) {
		int tries = 500;

		while (host->ops->card_busy(host) && --tries)
			mmc_delay(1);

		if (tries == 0) {
			mrq->cmd->error = -EBUSY;
			mmc_request_done(host, mrq);
			return;
		}
	}

	if (mrq->cap_cmd_during_tfr) {
		host->ongoing_mrq = mrq;
		/*
		 * Retry path re-uses the request, so re-arm the
		 * command-phase completion each time it is started.
		 */
		reinit_completion(&mrq->cmd_completion);
	}

	trace_mmc_request_start(host, mrq);

	/* CQE must be switched off before legacy requests are issued */
	if (host->cqe_on)
		host->cqe_ops->cqe_off(host);

	host->ops->request(host, mrq);
}
267
/*
 * Internal function. Emit pr_debug lines describing every part of @mrq
 * (sbc, cmd, data, stop) about to be started; @cqe marks CQE issue.
 */
static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq,
			     bool cqe)
{
	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	if (mrq->cmd) {
		pr_debug("%s: starting %sCMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), cqe ? "CQE direct " : "",
			 mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
	} else if (cqe) {
		/* CQE data transfers have no command, only a tag */
		pr_debug("%s: starting CQE transfer for tag %d blkaddr %u\n",
			 mmc_hostname(host), mrq->tag, mrq->data->blk_addr);
	}

	if (mrq->data) {
		pr_debug("%s:     blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}
}
301
302static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
303{
304 unsigned int i, sz = 0;
305 struct scatterlist *sg;
306
307 if (mrq->cmd) {
308 mrq->cmd->error = 0;
309 mrq->cmd->mrq = mrq;
310 mrq->cmd->data = mrq->data;
311 }
312 if (mrq->sbc) {
313 mrq->sbc->error = 0;
314 mrq->sbc->mrq = mrq;
315 }
316 if (mrq->data) {
317 if (mrq->data->blksz > host->max_blk_size ||
318 mrq->data->blocks > host->max_blk_count ||
319 mrq->data->blocks * mrq->data->blksz > host->max_req_size)
320 return -EINVAL;
321
322 for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
323 sz += sg->length;
324 if (sz != mrq->data->blocks * mrq->data->blksz)
325 return -EINVAL;
326
327 mrq->data->error = 0;
328 mrq->data->mrq = mrq;
329 if (mrq->stop) {
330 mrq->data->stop = mrq->stop;
331 mrq->stop->error = 0;
332 mrq->stop->mrq = mrq;
333 }
334 }
335
336 return 0;
337}
338
/*
 * Start @mrq on @host without waiting for completion. The host must be
 * claimed by the caller. Returns 0 on successful dispatch, -ENOMEDIUM
 * if the card is gone, or the error from request preparation.
 */
int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	init_completion(&mrq->cmd_completion);

	/* Hold off re-tuning while the request is in flight */
	mmc_retune_hold(host);

	if (mmc_card_removed(host->card))
		return -ENOMEDIUM;

	mmc_mrq_pr_debug(host, mrq, false);

	WARN_ON(!host->claimed);

	err = mmc_mrq_prep(host, mrq);
	if (err)
		return err;

	led_trigger_event(host->led, LED_FULL);
	__mmc_start_request(host, mrq);

	return 0;
}
EXPORT_SYMBOL(mmc_start_request);
364
/* Request-done callback: wake the waiter in mmc_wait_for_req_done(). */
static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}
369
/*
 * If there is an ongoing cap_cmd_during_tfr request, wait for its
 * command phase to complete before a new request may be started.
 */
static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
{
	struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);

	/*
	 * The ongoing_mrq may already have been cleared or completed by
	 * the time we look; only wait when a command is still pending.
	 */
	if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
		wait_for_completion(&ongoing_mrq->cmd_completion);
}
381
/*
 * Internal function. Arm @mrq with the wait-done callback and start it.
 * On failure, both completions are signalled so that waiters (including
 * mmc_wait_for_req_done()) do not block forever.
 */
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	mmc_wait_ongoing_tfr_cmd(host);

	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;

	err = mmc_start_request(host, mrq);
	if (err) {
		mrq->cmd->error = err;
		mmc_complete_cmd(mrq);
		complete(&mrq->completion);
	}

	return err;
}
400
/*
 * Wait until @mrq completes, transparently restarting it while retries
 * remain. Also handles interrupting a sanitize operation that timed out
 * (via HPI) so the loop does not retry it endlessly.
 */
void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;

		/*
		 * A timed-out sanitize is aborted with HPI; treat a
		 * successful interruption as request success so we
		 * break out instead of retrying the sanitize.
		 */
		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
			if (!mmc_interrupt_hpi(host->card)) {
				pr_warn("%s: %s: Interrupted sanitize\n",
					mmc_hostname(host), __func__);
				cmd->error = 0;
				break;
			} else {
				pr_err("%s: %s: Failed to interrupt sanitize\n",
				       mmc_hostname(host), __func__);
			}
		}
		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		/* The error may be tuning-related; check before retry */
		mmc_retune_recheck(host);

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		__mmc_start_request(host, mrq);
	}

	/* Balances the mmc_retune_hold() in mmc_start_request() */
	mmc_retune_release(host);
}
EXPORT_SYMBOL(mmc_wait_for_req_done);
443
444
445
446
447
448
449
450
451
/**
 *	mmc_cqe_start_req - start a CQE (Command Queue Engine) request
 *	@host: MMC host to start the request on
 *	@mrq: request to start
 *
 *	Returns 0 on success, otherwise a negative error; failures are
 *	also reported via pr_debug with the CMD opcode or queue tag.
 */
int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	/* Re-tune if needed before handing the request to the CQE */
	err = mmc_retune(host);
	if (err)
		goto out_err;

	mrq->host = host;

	mmc_mrq_pr_debug(host, mrq, true);

	err = mmc_mrq_prep(host, mrq);
	if (err)
		goto out_err;

	err = host->cqe_ops->cqe_request(host, mrq);
	if (err)
		goto out_err;

	trace_mmc_request_start(host, mrq);

	return 0;

out_err:
	if (mrq->cmd) {
		pr_debug("%s: failed to start CQE direct CMD%u, error %d\n",
			 mmc_hostname(host), mrq->cmd->opcode, err);
	} else {
		pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n",
			 mmc_hostname(host), mrq->tag, err);
	}
	return err;
}
EXPORT_SYMBOL(mmc_cqe_start_req);
493
494
495
496
497
498
499
500
501
/**
 *	mmc_cqe_request_done - CQE has finished processing an MMC request
 *	@host: MMC host which completed the request
 *	@mrq: MMC request which completed
 *
 *	CQE host drivers call this when a queued request has completed.
 */
void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	mmc_should_fail_request(host, mrq);

	/* Flag re-tuning needed on CRC errors */
	if ((mrq->cmd && mrq->cmd->error == -EILSEQ) ||
	    (mrq->data && mrq->data->error == -EILSEQ))
		mmc_retune_needed(host);

	trace_mmc_request_done(host, mrq);

	if (mrq->cmd) {
		pr_debug("%s: CQE req done (direct CMD%u): %d\n",
			 mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error);
	} else {
		pr_debug("%s: CQE transfer done tag %d\n",
			 mmc_hostname(host), mrq->tag);
	}

	if (mrq->data) {
		pr_debug("%s:     %d bytes transferred: %d\n",
			 mmc_hostname(host),
			 mrq->data->bytes_xfered, mrq->data->error);
	}

	mrq->done(mrq);
}
EXPORT_SYMBOL(mmc_cqe_request_done);
530
531
532
533
534
535
/**
 *	mmc_cqe_post_req - CQE post-process of a completed MMC request
 *	@host: MMC host
 *	@mrq: MMC request to be processed
 *
 *	Forwards to the host's optional cqe_post_req callback.
 */
void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
	if (host->cqe_ops->cqe_post_req)
		host->cqe_ops->cqe_post_req(host, mrq);
}
EXPORT_SYMBOL(mmc_cqe_post_req);
542
543
544#define MMC_CQE_RECOVERY_TIMEOUT 1000
545
546
547
548
549
550
551
552
553
554
555int mmc_cqe_recovery(struct mmc_host *host)
556{
557 struct mmc_command cmd;
558 int err;
559
560 mmc_retune_hold_now(host);
561
562
563
564
565
566 pr_warn("%s: running CQE recovery\n", mmc_hostname(host));
567
568 host->cqe_ops->cqe_recovery_start(host);
569
570 memset(&cmd, 0, sizeof(cmd));
571 cmd.opcode = MMC_STOP_TRANSMISSION,
572 cmd.flags = MMC_RSP_R1B | MMC_CMD_AC,
573 cmd.flags &= ~MMC_RSP_CRC;
574 cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
575 mmc_wait_for_cmd(host, &cmd, 0);
576
577 memset(&cmd, 0, sizeof(cmd));
578 cmd.opcode = MMC_CMDQ_TASK_MGMT;
579 cmd.arg = 1;
580 cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
581 cmd.flags &= ~MMC_RSP_CRC;
582 cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
583 err = mmc_wait_for_cmd(host, &cmd, 0);
584
585 host->cqe_ops->cqe_recovery_finish(host);
586
587 mmc_retune_release(host);
588
589 return err;
590}
591EXPORT_SYMBOL(mmc_cqe_recovery);
592
593
594
595
596
597
598
599
600
601
602
603
604
/**
 *	mmc_is_req_done - poll whether a request has completed
 *	@host: MMC host
 *	@mrq: MMC request started with mmc_wait_for_req()
 *
 *	Non-blocking; returns true once the request completion has fired.
 */
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
	return completion_done(&mrq->completion);
}
EXPORT_SYMBOL(mmc_is_req_done);
610
611
612
613
614
615
616
617
618
619
620
621
622
/**
 *	mmc_wait_for_req - start a request and wait for completion
 *	@host: MMC host to start command
 *	@mrq: MMC request to start
 *
 *	Start a new MMC custom command request for a host, and wait
 *	for the command to complete. For cap_cmd_during_tfr requests,
 *	returns after the command phase; the caller must then wait with
 *	mmc_wait_for_req_done() (or poll with mmc_is_req_done()).
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);

	if (!mrq->cap_cmd_during_tfr)
		mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);
631
632
633
634
635
636
637
638
639
640
641
/**
 *	mmc_wait_for_cmd - start a command and wait for completion
 *	@host: MMC host to start command
 *	@cmd: MMC command to start
 *	@retries: maximum number of retries
 *
 *	Start a new MMC command for a host, and wait for the command
 *	to complete. Return any error that occurred while the command
 *	was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {};

	WARN_ON(!host->claimed);

	/* Clear any stale response before reuse */
	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);
660
661
662
663
664
665
666
667
668
/**
 *	mmc_set_data_timeout - set the timeout for a data command
 *	@data: data phase for command
 *	@card: the MMC card associated with the data transfer
 *
 *	Computes the data timeout (in ns plus clock cycles) from the
 *	card's CSD, then clamps/overrides it for SDIO, SD, long-read-time
 *	cards and SPI hosts.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/* SDIO cards only define an upper 1 s limit on access */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/* SD cards use a 100x multiplier rather than 10x */
	mult = mmc_card_sd(card) ? 100 : 10;

	/* Scale up the multiplier (and therefore the timeout) for writes */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.taac_ns * mult;
	data->timeout_clks = card->csd.taac_clks * mult;

	/* SD cards also have an upper limit on the timeout */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (card->host->ios.clock)
			timeout_us += data->timeout_clks * 1000 /
				(card->host->ios.clock / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * 3 s write limit; some cards need more than
			 * the spec's nominal maximum busy length
			 * (NOTE(review): value inherited — confirm
			 * against the SD spec's recommendation).
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/* SDHC cards always use fixed timeouts (taac is 0) */
		if (timeout_us > limit_us) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}

		/* Ensure a non-zero timeout */
		if (timeout_us == 0)
			data->timeout_ns = limit_us * 1000;
	}

	/*
	 * Some cards require longer read times (flagged via quirk);
	 * give them 600 ms.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 600000000;
		data->timeout_clks = 0;
	}

	/*
	 * SPI hosts: enforce generous minimums (1 s writes, 100 ms
	 * reads) since busy signalling differs from native mode.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
/**
 *	mmc_align_data_size - pads a transfer size to a more optimal value
 *	@card: the MMC card associated with the data transfer
 *	@sz: original transfer size
 *
 *	Rounds @sz up to the next multiple of 4 bytes, since unaligned
 *	sizes are rarely handled well by controllers/cards. Returns the
 *	aligned size. (@card is currently unused but kept for the API.)
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	return (sz + 3) & ~3u;
}
EXPORT_SYMBOL(mmc_align_data_size);
789
790
791
792
793
/*
 * True if @host is claimed by the given context, or - for the legacy
 * task-based claim (@ctx == NULL) - by the given task.
 * NOTE(review): dereferences host->claimer; callers only evaluate this
 * while host->claimed is set, which implies a claimer exists.
 */
static inline bool mmc_ctx_matches(struct mmc_host *host, struct mmc_ctx *ctx,
				   struct task_struct *task)
{
	return host->claimer == ctx ||
	       (!ctx && task && host->claimer->task == task);
}
800
801static inline void mmc_ctx_set_claimer(struct mmc_host *host,
802 struct mmc_ctx *ctx,
803 struct task_struct *task)
804{
805 if (!host->claimer) {
806 if (ctx)
807 host->claimer = ctx;
808 else
809 host->claimer = &host->default_ctx;
810 }
811 if (task)
812 host->claimer->task = task;
813}
814
815
816
817
818
819
820
821
822
823
824
825
826
/**
 *	__mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@ctx: context that claims the host or NULL in which case the
 *	default (task-based) context is used
 *	@abort: whether or not the operation should be aborted
 *
 *	Claim a host for a set of operations. If @abort is non null and
 *	dereference a non-zero value then this will return prematurely with
 *	that non-zero value without acquiring the lock. Returns zero
 *	with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
		     atomic_t *abort)
{
	struct task_struct *task = ctx ? NULL : current;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;
	bool pm = false;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		/* Sleep until unclaimed, re-claimable by us, or aborted */
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task))
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		mmc_ctx_set_claimer(host, ctx, task);
		host->claim_cnt += 1;
		/* Only resume the device on the first (outermost) claim */
		if (host->claim_cnt == 1)
			pm = true;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);

	/* Runtime-resume outside the spinlock; it may sleep */
	if (pm)
		pm_runtime_get_sync(mmc_dev(host));

	return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);
867
868
869
870
871
872
873
874
/**
 *	mmc_release_host - release a host
 *	@host: mmc host to release
 *
 *	Release a MMC host, allowing others to claim the host
 *	for their operations. Claims nest; only the final release
 *	wakes waiters and allows the device to runtime-suspend.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Nested claim: just drop one reference */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer->task = NULL;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
		pm_runtime_mark_last_busy(mmc_dev(host));
		pm_runtime_put_autosuspend(mmc_dev(host));
	}
}
EXPORT_SYMBOL(mmc_release_host);
896
897
898
899
900
/*
 * This is a helper function, which fetches a runtime pm reference for the
 * card device and also claims the host.
 */
void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
	pm_runtime_get_sync(&card->dev);
	__mmc_claim_host(card->host, ctx, NULL);
}
EXPORT_SYMBOL(mmc_get_card);
907
908
909
910
911
/*
 * This is a helper function, which releases the host and drops the runtime
 * pm reference for the card device.
 */
void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
	struct mmc_host *host = card->host;

	/* Releasing under a different context than the claim is a bug */
	WARN_ON(ctx && host->claimer != ctx);

	mmc_release_host(host);
	pm_runtime_mark_last_busy(&card->dev);
	pm_runtime_put_autosuspend(&card->dev);
}
EXPORT_SYMBOL(mmc_put_card);
923
924
925
926
927
/*
 * Internal function that pushes the current host->ios settings down to
 * the host driver via its set_ios callback.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 1 << ios->bus_width, ios->timing);

	host->ops->set_ios(host, ios);
}
940
941
942
943
/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	host->ios.chip_select = mode;
	mmc_set_ios(host);
}
949
950
951
952
953
/*
 * Set the bus clock, clamped to the host's supported maximum; a rate
 * below f_min (other than 0, which gates the clock) triggers a WARN.
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz && hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}
964
/*
 * Run the host's tuning procedure for @card, choosing the tuning
 * command by card type (CMD21 for eMMC HS200, CMD19 otherwise).
 * Enables periodic re-tuning on success. Returns 0 if the host has no
 * tuning support, otherwise the tuning result.
 */
int mmc_execute_tuning(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u32 opcode;
	int err;

	if (!host->ops->execute_tuning)
		return 0;

	/* The CQE must be idle while tuning commands are issued */
	if (host->cqe_on)
		host->cqe_ops->cqe_off(host);

	if (mmc_card_mmc(card))
		opcode = MMC_SEND_TUNING_BLOCK_HS200;
	else
		opcode = MMC_SEND_TUNING_BLOCK;

	err = host->ops->execute_tuning(host, opcode);

	if (err)
		pr_err("%s: tuning execution failed: %d\n",
			mmc_hostname(host), err);
	else
		mmc_retune_enable(host);

	return err;
}
992
993
994
995
/*
 * Change the bus mode (open drain/push pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
}
1001
1002
1003
1004
/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	host->ios.bus_width = width;
	mmc_set_ios(host);
}
1010
1011
1012
1013
/*
 * Set initial state after a power cycle or a hw_reset.
 */
void mmc_set_initial_state(struct mmc_host *host)
{
	if (host->cqe_on)
		host->cqe_ops->cqe_off(host);

	mmc_retune_disable(host);

	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	host->ios.drv_type = 0;
	host->ios.enhanced_strobe = false;

	/*
	 * Make sure we are in non-enhanced strobe mode before we
	 * actually enable it in ext_csd.
	 */
	if ((host->caps2 & MMC_CAP2_HS400_ES) &&
	     host->ops->hs400_enhanced_strobe)
		host->ops->hs400_enhanced_strobe(host, &host->ios);

	mmc_set_ios(host);
}
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd:	voltage (mV)
 * @low_bits:	prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33),
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 * Any value in the [1951:1999] range is rejected.
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8 */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min:	minimum voltage value (mV)
 * @vdd_max:	maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes with respect to boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
1115
1116#ifdef CONFIG_OF
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
1128{
1129 const u32 *voltage_ranges;
1130 int num_ranges, i;
1131
1132 voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
1133 num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
1134 if (!voltage_ranges) {
1135 pr_debug("%pOF: voltage-ranges unspecified\n", np);
1136 return 0;
1137 }
1138 if (!num_ranges) {
1139 pr_err("%pOF: voltage-ranges empty\n", np);
1140 return -EINVAL;
1141 }
1142
1143 for (i = 0; i < num_ranges; i++) {
1144 const int j = i * 2;
1145 u32 ocr_mask;
1146
1147 ocr_mask = mmc_vddrange_to_ocrmask(
1148 be32_to_cpu(voltage_ranges[j]),
1149 be32_to_cpu(voltage_ranges[j + 1]));
1150 if (!ocr_mask) {
1151 pr_err("%pOF: voltage-range #%d is invalid\n",
1152 np, i);
1153 return -EINVAL;
1154 }
1155 *mask |= ocr_mask;
1156 }
1157
1158 return 1;
1159}
1160EXPORT_SYMBOL(mmc_of_parse_voltage);
1161
1162#endif
1163
1164static int mmc_of_get_func_num(struct device_node *node)
1165{
1166 u32 reg;
1167 int ret;
1168
1169 ret = of_property_read_u32(node, "reg", ®);
1170 if (ret < 0)
1171 return ret;
1172
1173 return reg;
1174}
1175
/*
 * Find the DT child node of the host's parent whose "reg" property
 * equals @func_num, or NULL if the host has no DT node or no match.
 * NOTE(review): the returned node carries the reference taken by the
 * iterator - caller is presumably expected to of_node_put() it.
 */
struct device_node *mmc_of_find_child_device(struct mmc_host *host,
		unsigned func_num)
{
	struct device_node *node;

	if (!host->parent || !host->parent->of_node)
		return NULL;

	for_each_child_of_node(host->parent->of_node, node) {
		if (mmc_of_get_func_num(node) == func_num)
			return node;
	}

	return NULL;
}
1191
1192#ifdef CONFIG_REGULATOR
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
/**
 * mmc_ocrbitnum_to_vdd - Convert a OCR bit number to its voltage
 * @vdd_bit:	OCR bit number
 * @min_uV:	minimum voltage value (mV)
 * @max_uV:	maximum voltage value (mV)
 *
 * This function returns the voltage range according to the provided OCR
 * bit number. If conversion is not possible a negative errno value returned.
 */
static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
{
	int		tmp;

	if (!vdd_bit)
		return -EINVAL;

	/*
	 * REVISIT mmc_vddrange_to_ocrmask() may have set some
	 * bits this regulator doesn't quite support ... don't
	 * be too picky, most cards and regulators are OK with
	 * a 0.1V range goof (it's a small error percentage).
	 */
	tmp = vdd_bit - ilog2(MMC_VDD_165_195);
	if (tmp == 0) {
		*min_uV = 1650 * 1000;
		*max_uV = 1950 * 1000;
	} else {
		/* Each further bit is a 100 mV window starting at 2.0 V */
		*min_uV = 1900 * 1000 + tmp * 100 * 1000;
		*max_uV = *min_uV + 100 * 1000;
	}

	return 0;
}
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator. This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int			result = 0;
	int			count;
	int			i;
	int			vdd_uV;
	int			vdd_mV;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	/* Collect OCR bits for every discrete voltage the supply lists */
	for (i = 0; i < count; i++) {
		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	/* Fall back to the regulator's current (fixed) voltage */
	if (!result) {
		vdd_uV = regulator_get_voltage(supply);
		if (vdd_uV <= 0)
			return vdd_uV;

		vdd_mV = vdd_uV / 1000;
		result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage. This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int			result = 0;
	int			min_uV, max_uV;

	if (vdd_bit) {
		mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);

		result = regulator_set_voltage(supply, min_uV, max_uV);
		/* Enable only once; track state to keep enable/disable balanced */
		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
1311
/*
 * Set the regulator to the target voltage (within [min_uV, max_uV]),
 * but only if the regulator can actually provide some voltage in that
 * window; otherwise return -EINVAL without touching it.
 */
static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
						  int min_uV, int target_uV,
						  int max_uV)
{
	/*
	 * Check if supported first to avoid errors since we may try several
	 * signal levels during power up and don't want to show errors.
	 */
	if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
		return -EINVAL;

	return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
					     max_uV);
}
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
/**
 * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
 * @mmc: the host
 * @ios: the io settings whose signal_voltage selects the target level
 *
 * For 3.3V signaling, we try to match VQMMC to VMMC as closely as
 * possible; for 1.2V and 1.8V signaling we use fixed windows. Returns
 * 0 on success, or a negative errno (including -EINVAL when there is
 * no vqmmc regulator or the voltage is unsupported).
 */
int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct device *dev = mmc_dev(mmc);
	int ret, volt, min_uV, max_uV;

	/* If no vqmmc supply then we can't change the voltage */
	if (IS_ERR(mmc->supply.vqmmc))
		return -EINVAL;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_120:
		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						1100000, 1200000, 1300000);
	case MMC_SIGNAL_VOLTAGE_180:
		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						1700000, 1800000, 1950000);
	case MMC_SIGNAL_VOLTAGE_330:
		/* Derive the 3.3V window from the current vmmc (ios.vdd) */
		ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
		if (ret < 0)
			return ret;

		dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
			__func__, volt, max_uV);

		min_uV = max(volt - 300000, 2700000);
		max_uV = min(max_uV + 200000, 3600000);

		/*
		 * Prefer a window near vmmc; if the regulator cannot do
		 * that, fall back to the full 2.7-3.6V range.
		 */
		if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						min_uV, volt, max_uV))
			return 0;

		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						2700000, volt, 3600000);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
1389
1390#endif
1391
/*
 * Look up the optional vmmc/vqmmc regulators for @mmc and, when vmmc is
 * present, derive ocr_avail from its supported voltages. Missing
 * regulators are tolerated (logged at info level); -EPROBE_DEFER is
 * propagated so probing can be retried.
 */
int mmc_regulator_get_supply(struct mmc_host *mmc)
{
	struct device *dev = mmc_dev(mmc);
	int ret;

	mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
	mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");

	if (IS_ERR(mmc->supply.vmmc)) {
		if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_info(dev, "No vmmc regulator found\n");
	} else {
		ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
		if (ret > 0)
			mmc->ocr_avail = ret;
		else
			dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
	}

	if (IS_ERR(mmc->supply.vqmmc)) {
		if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_info(dev, "No vqmmc regulator found\n");
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
1421
1422
1423
1424
1425
/*
 * Mask off all voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		dev_warn(mmc_dev(host),
		"card claims to support voltages below defined range\n");
		ocr &= ~0x7F;
	}

	ocr &= host->ocr_avail;
	if (!ocr) {
		dev_warn(mmc_dev(host), "no support for card's volts\n");
		return 0;
	}

	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
		/* Full power cycle possible: pick the lowest voltage */
		bit = ffs(ocr) - 1;
		ocr &= 3 << bit;
		mmc_power_cycle(host, ocr);
	} else {
		/* Otherwise keep the highest to match the current vdd */
		bit = fls(ocr) - 1;
		ocr &= 3 << bit;
		if (bit != host->ios.vdd)
			dev_warn(mmc_dev(host), "exceeding card's volts\n");
	}

	return ocr;
}
1459
1460int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
1461{
1462 int err = 0;
1463 int old_signal_voltage = host->ios.signal_voltage;
1464
1465 host->ios.signal_voltage = signal_voltage;
1466 if (host->ops->start_signal_voltage_switch)
1467 err = host->ops->start_signal_voltage_switch(host, &host->ios);
1468
1469 if (err)
1470 host->ios.signal_voltage = old_signal_voltage;
1471
1472 return err;
1473
1474}
1475
/*
 * Switch the host to 1.8V signaling with the clock gated during the
 * transition, then restore the clock. Returns -EAGAIN if the voltage
 * switch fails (caller typically power-cycles the card).
 */
int mmc_host_set_uhs_voltage(struct mmc_host *host)
{
	u32 clock;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for a certain period of time according to the SD spec
	 */
	clock = host->ios.clock;
	host->ios.clock = 0;
	mmc_set_ios(host);

	if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
		return -EAGAIN;

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mmc_delay(10);
	host->ios.clock = clock;
	mmc_set_ios(host);

	return 0;
}
1498
/*
 * Perform the full SD voltage-switch sequence: issue CMD11
 * (SD_SWITCH_VOLTAGE), verify the card pulls its lines low, switch the
 * host to 1.8V, then verify the card releases busy. Any failure power
 * cycles the card with @ocr and returns -EAGAIN/-EIO.
 */
int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
{
	struct mmc_command cmd = {};
	int err = 0;

	/*
	 * If we cannot switch voltages, return failure so the caller
	 * can continue without UHS mode
	 */
	if (!host->ops->start_signal_voltage_switch)
		return -EPERM;
	if (!host->ops->card_busy)
		pr_warn("%s: cannot verify signal voltage switch\n",
			mmc_hostname(host));

	cmd.opcode = SD_SWITCH_VOLTAGE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		return err;

	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 1 ms to be sure
	 */
	mmc_delay(1);
	if (host->ops->card_busy && !host->ops->card_busy(host)) {
		err = -EAGAIN;
		goto power_cycle;
	}

	if (mmc_host_set_uhs_voltage(host)) {
		/*
		 * Voltages may not have been switched, but we've already
		 * sent CMD11, so a power cycle is required anyway
		 */
		err = -EAGAIN;
		goto power_cycle;
	}

	/* Wait for at least 1 ms according to spec */
	mmc_delay(1);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low
	 */
	if (host->ops->card_busy && host->ops->card_busy(host))
		err = -EAGAIN;

power_cycle:
	if (err) {
		pr_debug("%s: Signal voltage switch failed, "
			"power cycling card\n", mmc_hostname(host));
		mmc_power_cycle(host, ocr);
	}

	return err;
}
1563
1564
1565
1566
/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	host->ios.timing = timing;
	mmc_set_ios(host);
}
1572
1573
1574
1575
/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
}
1581
/*
 * Let the host driver pick a drive strength given the intersection of
 * host-supported types (from caps; type B is always supported) and the
 * card's advertised types. Returns the selected max_dtr-style value
 * from the callback, or 0 when the host has no selector; *drv_type is
 * the chosen drive strength.
 */
int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
			      int card_drv_type, int *drv_type)
{
	struct mmc_host *host = card->host;
	int host_drv_type = SD_DRIVER_TYPE_B;

	*drv_type = 0;

	if (!host->ops->select_drive_strength)
		return 0;

	/* Use SD definition of driver strength for hosts */
	if (host->caps & MMC_CAP_DRIVER_TYPE_A)
		host_drv_type |= SD_DRIVER_TYPE_A;

	if (host->caps & MMC_CAP_DRIVER_TYPE_C)
		host_drv_type |= SD_DRIVER_TYPE_C;

	if (host->caps & MMC_CAP_DRIVER_TYPE_D)
		host_drv_type |= SD_DRIVER_TYPE_D;

	/*
	 * The drive strength that the hardware can support
	 * depends on the board design.  Pass the appropriate
	 * information and let the hardware specific code
	 * return what is possible given the options
	 */
	return host->ops->select_drive_strength(card, max_dtr,
						host_drv_type,
						card_drv_type,
						drv_type);
}
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
/*
 * Apply power to the MMC stack.  This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise.  Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 */
void mmc_power_up(struct mmc_host *host, u32 ocr)
{
	if (host->ios.power_mode == MMC_POWER_ON)
		return;

	host->ios.vdd = fls(ocr) - 1;
	host->ios.power_mode = MMC_POWER_UP;

	mmc_set_initial_state(host);

	/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
	if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock sizes, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);
}
1661
/* Remove power from the card; no-op if it is already off. */
void mmc_power_off(struct mmc_host *host)
{
	if (host->ios.power_mode == MMC_POWER_OFF)
		return;

	host->ios.clock = 0;
	host->ios.vdd = 0;

	host->ios.power_mode = MMC_POWER_OFF;

	/* Set initial state and call mmc_set_ios */
	mmc_set_initial_state(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be accessed again.
	 */
	mmc_delay(1);
}
1681
/* Full power cycle: off, a short settle delay, then back up with @ocr. */
void mmc_power_cycle(struct mmc_host *host, u32 ocr)
{
	mmc_power_off(host);
	/* Wait at least 1 ms according to SD spec */
	mmc_delay(1);
	mmc_power_up(host, ocr);
}
1689
1690
1691
1692
/*
 * Cleanup when the last reference to the bus operator is dropped.
 * Caller holds host->lock; the bus must already be marked dead.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	WARN_ON(!host->bus_dead);

	host->bus_ops = NULL;
}
1699
1700
1701
1702
/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}
1711
1712
1713
1714
1715
/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}
1726
1727
1728
1729
1730
/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.  Caller must hold the host claim.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	/* A previous bus handler must have been fully detached first. */
	WARN_ON(host->bus_ops);
	WARN_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}
1748
1749
1750
1751
/*
 * Remove the current bus handler from a host.  Marks the bus dead so the
 * final mmc_bus_put() releases the ops pointer.  Caller must hold the
 * host claim.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_put(host);
}
1767
/* Schedule a (possibly delayed) card-detect rescan on the host. */
static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
				bool cd_irq)
{
	/*
	 * If the device is configured as wakeup, we prevent a new sleep for
	 * 5 s to give provision for user space to consume the event.
	 */
	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
		device_can_wakeup(mmc_dev(host)))
		pm_wakeup_event(mmc_dev(host), 5000);

	host->detect_change = 1;
	mmc_schedule_delayed_work(&host->detect, delay);
}
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
/**
 *	mmc_detect_change - process change of state on a MMC socket
 *	@host: host which changed state.
 *	@delay: optional delay to wait before detection (jiffies)
 *
 *	MMC drivers should call this when they detect a card has been
 *	inserted or removed. The MMC layer will confirm that any
 *	present card is still functional, and initialize any newly
 *	inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
	_mmc_detect_change(host, delay, true);
}
EXPORT_SYMBOL(mmc_detect_change);
1798
1799void mmc_init_erase(struct mmc_card *card)
1800{
1801 unsigned int sz;
1802
1803 if (is_power_of_2(card->erase_size))
1804 card->erase_shift = ffs(card->erase_size) - 1;
1805 else
1806 card->erase_shift = 0;
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823 if (mmc_card_sd(card) && card->ssr.au) {
1824 card->pref_erase = card->ssr.au;
1825 card->erase_shift = ffs(card->ssr.au) - 1;
1826 } else if (card->erase_size) {
1827 sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
1828 if (sz < 128)
1829 card->pref_erase = 512 * 1024 / 512;
1830 else if (sz < 512)
1831 card->pref_erase = 1024 * 1024 / 512;
1832 else if (sz < 1024)
1833 card->pref_erase = 2 * 1024 * 1024 / 512;
1834 else
1835 card->pref_erase = 4 * 1024 * 1024 / 512;
1836 if (card->pref_erase < card->erase_size) {
1837 gmb();
1838 card->pref_erase = card->erase_size;
1839 } else {
1840 gmb();
1841 sz = card->pref_erase % card->erase_size;
1842 if (sz)
1843 card->pref_erase += card->erase_size - sz;
1844 }
1845 } else
1846 card->pref_erase = 0;
1847}
1848
/*
 * Estimate the erase timeout in milliseconds for an eMMC erase/trim/
 * discard of @qty erase-quantity units with erase argument @arg.
 */
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
					  unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (arg == MMC_DISCARD_ARG ||
	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
		erase_timeout = card->ext_csd.trim_timeout;
	} else if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.taac_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. taac_ns=80000000 mult=1280 */
		if (card->csd.taac_ns < 1000000)
			timeout_us = (card->csd.taac_ns * mult) / 1000;
		else
			timeout_us = (card->csd.taac_ns / 1000) * mult;

		/*
		 * ios.clock is only a target.  The real clock can be
		 * less but not that much less, so fudge it by multiplying
		 * by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (card->host->ios.clock / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so
		 * set the minimum timeout to 1ms.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}
1912
1913static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
1914 unsigned int arg,
1915 unsigned int qty)
1916{
1917 unsigned int erase_timeout;
1918
1919 if (card->ssr.erase_timeout) {
1920
1921 erase_timeout = card->ssr.erase_timeout * qty +
1922 card->ssr.erase_offset;
1923 } else {
1924
1925
1926
1927
1928 erase_timeout = 250 * qty;
1929 }
1930
1931
1932 if (erase_timeout < 1000)
1933 erase_timeout = 1000;
1934
1935 return erase_timeout;
1936}
1937
/* Dispatch to the SD or eMMC erase-timeout estimator. */
static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg,
				      unsigned int qty)
{
	return mmc_card_sd(card) ?
		mmc_sd_erase_timeout(card, arg, qty) :
		mmc_mmc_erase_timeout(card, arg, qty);
}
1947
/*
 * Issue one erase sequence (group start, group end, erase) over the
 * inclusive sector range [from, to], then wait for the card to leave the
 * programming state.  @arg selects erase/trim/discard/secure variants.
 */
static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {};
	unsigned int qty = 0, busy_timeout = 0;
	bool use_r1b_resp = false;
	unsigned long timeout;
	int err;

	/* Re-tuning would interrupt the busy period; hold it off. */
	mmc_retune_hold(card->host);

	/*
	 * qty is used to calculate the erase timeout which depends on how
	 * many erase groups (or allocation units in SD terminology) are
	 * affected.  We count erasing part of an erase group as one erase
	 * group.  For SD, the allocation units are always a power of 2.  For
	 * MMC, the erase group size is almost certainly also power of 2, but
	 * it does not seem to insist on that in the JEDEC standard, so we
	 * fall back to division in that case.  SD may not specify an
	 * allocation unit size, in which case the timeout is based on the
	 * number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands.  Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	/* Byte-addressed cards take byte offsets, not sector numbers. */
	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	busy_timeout = mmc_erase_timeout(card, arg, qty);
	/*
	 * If the host controller supports busy signalling and the timeout for
	 * the erase operation does not exceed the max_busy_timeout, we should
	 * use R1B response. Or we need to prevent the host from doing hw busy
	 * detection, which is done by converting to a R1 response instead.
	 */
	if (card->host->max_busy_timeout &&
	    busy_timeout > card->host->max_busy_timeout) {
		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = busy_timeout;
		use_r1b_resp = true;
	}

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	/*
	 * In case of when R1B + MMC_CAP_WAIT_WHILE_BUSY is used, the polling
	 * shall be avoided.
	 */
	if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
		goto out;

	timeout = jiffies + msecs_to_jiffies(busy_timeout);
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
				err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}

		/* Timeout if the device never becomes ready for data and
		 * never leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s\n",
				mmc_hostname(card->host), __func__);
			err =  -EIO;
			goto out;
		}

	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
out:
	mmc_retune_release(card->host);
	return err;
}
2086
2087static unsigned int mmc_align_erase_size(struct mmc_card *card,
2088 unsigned int *from,
2089 unsigned int *to,
2090 unsigned int nr)
2091{
2092 unsigned int from_new = *from, nr_new = nr, rem;
2093
2094
2095
2096
2097
2098 if (is_power_of_2(card->erase_size)) {
2099 unsigned int temp = from_new;
2100
2101 from_new = round_up(temp, card->erase_size);
2102 rem = from_new - temp;
2103
2104 if (nr_new > rem)
2105 nr_new -= rem;
2106 else
2107 return 0;
2108
2109 nr_new = round_down(nr_new, card->erase_size);
2110 } else {
2111 rem = from_new % card->erase_size;
2112 if (rem) {
2113 rem = card->erase_size - rem;
2114 from_new += rem;
2115 if (nr_new > rem)
2116 nr_new -= rem;
2117 else
2118 return 0;
2119 }
2120
2121 rem = nr_new % card->erase_size;
2122 if (rem)
2123 nr_new -= rem;
2124 }
2125
2126 if (nr_new == 0)
2127 return 0;
2128
2129 *to = from_new + nr_new;
2130 *from = from_new;
2131
2132 return nr_new;
2133}
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;
	int err;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	/* Secure erase must cover whole erase groups exactly. */
	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	/* A plain erase may be shrunk to the aligned sub-range. */
	if (arg == MMC_ERASE_ARG)
		nr = mmc_align_erase_size(card, &from, &to, nr);

	if (nr == 0)
		return 0;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	/*
	 * Special case where only one erase-group fits in the timeout budget:
	 * If the start address of the erase range is within the erase group
	 * boundary, the erase command is split in two.  The first erase
	 * handles the partial group up to the boundary, the second the rest.
	 */
	rem = card->erase_size - (from % card->erase_size);
	if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
		err = mmc_do_erase(card, from, from + rem - 1, arg);
		from += rem;
		if ((err) || (to <= from))
			return err;
	}

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
2204
2205int mmc_can_erase(struct mmc_card *card)
2206{
2207 if ((card->host->caps & MMC_CAP_ERASE) &&
2208 (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
2209 return 1;
2210 return 0;
2211}
2212EXPORT_SYMBOL(mmc_can_erase);
2213
2214int mmc_can_trim(struct mmc_card *card)
2215{
2216 if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
2217 (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
2218 return 1;
2219 return 0;
2220}
2221EXPORT_SYMBOL(mmc_can_trim);
2222
2223int mmc_can_discard(struct mmc_card *card)
2224{
2225
2226
2227
2228
2229 if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
2230 return 1;
2231 return 0;
2232}
2233EXPORT_SYMBOL(mmc_can_discard);
2234
2235int mmc_can_sanitize(struct mmc_card *card)
2236{
2237 if (!mmc_can_trim(card) && !mmc_can_erase(card))
2238 return 0;
2239 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
2240 return 1;
2241 return 0;
2242}
2243EXPORT_SYMBOL(mmc_can_sanitize);
2244
2245int mmc_can_secure_erase_trim(struct mmc_card *card)
2246{
2247 if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
2248 !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
2249 return 1;
2250 return 0;
2251}
2252EXPORT_SYMBOL(mmc_can_secure_erase_trim);
2253
2254int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
2255 unsigned int nr)
2256{
2257 if (!card->erase_size)
2258 return 0;
2259 if (from % card->erase_size || nr % card->erase_size)
2260 return 0;
2261 return 1;
2262}
2263EXPORT_SYMBOL(mmc_erase_group_aligned);
2264
/*
 * Compute the largest discard (in sectors) for @arg whose estimated erase
 * timeout still fits within the host's maximum busy timeout.
 */
static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
	unsigned int last_timeout = 0;
	unsigned int max_busy_timeout = host->max_busy_timeout ?
			host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;

	/* max_qty: absolute ceiling to avoid overflow; min_qty: see below. */
	if (card->erase_shift) {
		max_qty = UINT_MAX >> card->erase_shift;
		min_qty = card->pref_erase >> card->erase_shift;
	} else if (mmc_card_sd(card)) {
		max_qty = UINT_MAX;
		min_qty = card->pref_erase;
	} else {
		max_qty = UINT_MAX / card->erase_size;
		min_qty = card->pref_erase / card->erase_size;
	}

	/*
	 * We should not only use 'host->max_busy_timeout' as the limitation
	 * when deciding the max discard sectors. We should set a balance value
	 * to improve the erase speed, and it can not get too long timeout at
	 * the same time.
	 *
	 * Here we set 'card->pref_erase' as the minimal discard sectors no
	 * matter what size of 'host->max_busy_timeout', but if the
	 * 'host->max_busy_timeout' is large enough for more discard sectors,
	 * then we can continue to increase the max discard sectors until we
	 * get a balance value.  In cases when the 'host->max_busy_timeout'
	 * isn't specified, use the default max erase timeout.
	 */
	/*
	 * Find the largest qty by doubling a probe value x and accepting it
	 * while the timeout stays within budget and keeps growing.
	 */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);

			if (qty + x > min_qty && timeout > max_busy_timeout)
				break;

			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	/*
	 * When specifying a sector range to trim, chances are we might cross
	 * an erase-group boundary even if the amount of sectors is less than
	 * one erase-group.
	 * If we can only fit one erase-group in the controller timeout budget,
	 * we have to care that erase-group boundaries are not crossed by a
	 * single trim operation. We flag that special case with "eg_boundary".
	 * In all other cases we can just decrement qty and pretend that we
	 * always touch (qty + 1) erase-groups as a simple optimization.
	 */
	if (qty == 1)
		card->eg_boundary = 1;
	else
		qty--;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty + 1;
	else
		max_discard = qty * card->erase_size;

	return max_discard;
}
2342
/*
 * Work out the maximum number of sectors a single discard request should
 * cover, bounded by the host's busy-timeout capability.
 */
unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequence which can change.  In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		/* Discards may be issued as trims; take the smaller bound. */
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}
	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		mmc_hostname(host), max_discard, host->max_busy_timeout ?
		host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);
2370
2371bool mmc_card_is_blockaddr(struct mmc_card *card)
2372{
2373 return card ? mmc_card_blockaddr(card) : false;
2374}
2375EXPORT_SYMBOL(mmc_card_is_blockaddr);
2376
/*
 * Send CMD16 (SET_BLOCKLEN) to the card.  Skipped (returns 0) for
 * block-addressed cards and DDR52/HS400/HS400ES modes, where the block
 * length is fixed at 512 bytes.
 */
int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {};

	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
	    mmc_card_hs400(card) || mmc_card_hs400es(card))
		return 0;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);
2391
/*
 * Send CMD23 (SET_BLOCK_COUNT) for the following multi-block transfer.
 * Bit 31 of the argument marks the transfer as a reliable write.
 */
int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
			bool is_rel_write)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_BLOCK_COUNT;
	cmd.arg = blockcount & 0x0000FFFF;
	if (is_rel_write)
		cmd.arg |= 1 << 31;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blockcount);
2405
2406static void mmc_hw_reset_for_init(struct mmc_host *host)
2407{
2408 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2409 return;
2410 host->ops->hw_reset(host);
2411}
2412
/*
 * mmc_hw_reset - reset the card in hardware
 * @host: MMC host to which the card is attached
 *
 * Hard reset the card. This function is only for upper layers, like the
 * block layer or card drivers. You cannot use it in host drivers (struct
 * mmc_card might be gone then).
 *
 * Return: 0 on success, -errno on failure
 */
int mmc_hw_reset(struct mmc_host *host)
{
	int ret;

	if (!host->card)
		return -EINVAL;

	mmc_bus_get(host);
	if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
		mmc_bus_put(host);
		return -EOPNOTSUPP;
	}

	ret = host->bus_ops->reset(host);
	mmc_bus_put(host);

	if (ret)
		pr_warn("%s: tried to reset card, got error %d\n",
			mmc_hostname(host), ret);

	return ret;
}
EXPORT_SYMBOL(mmc_hw_reset);
2436
/*
 * Try to initialize an inserted card at @freq Hz, probing protocols in
 * the mandated order: SDIO first, then SD, then (e)MMC.  Powers the host
 * off again on failure and returns -EIO.
 */
static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

	pr_debug("%s: %s: trying to init card at %u Hz\n",
		mmc_hostname(host), __func__, host->f_init);

	mmc_power_up(host, host->ocr_avail);

	/*
	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
	 * do a hardware reset if possible.
	 */
	mmc_hw_reset_for_init(host);

	/*
	 * sdio_reset sends CMD52 to reset card.  Since we do not know
	 * if the card is being re-initialized, just send it.  CMD52
	 * should be ignored by SD/eMMC cards.
	 * Skip it if we already know that we do not support SDIO commands
	 */
	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
		sdio_reset(host);

	mmc_go_idle(host);

	if (!(host->caps2 & MMC_CAP2_NO_SD))
		mmc_send_if_cond(host, host->ocr_avail);

	/* Order's important: probe SDIO, then SD, then MMC */
	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
		if (!mmc_attach_sdio(host))
			return 0;

	if (!(host->caps2 & MMC_CAP2_NO_SD))
		if (!mmc_attach_sd(host))
			return 0;

	if (!(host->caps2 & MMC_CAP2_NO_MMC))
		if (!mmc_attach_mmc(host))
			return 0;

	mmc_power_off(host);
	return -EIO;
}
2482
/*
 * Poll the card via the bus ops 'alive' check and latch the removed state.
 * Returns non-zero when the card is (or is now marked) removed.
 */
int _mmc_detect_card_removed(struct mmc_host *host)
{
	int ret;

	if (!host->card || mmc_card_removed(host->card))
		return 1;

	ret = host->bus_ops->alive(host);

	/*
	 * Card detect status and alive check may be out of sync if card is
	 * removed slowly, when card detect switch changes while card/slot
	 * pads are still contacted in hardware (all commands succeed but the
	 * CD line says the card is gone).  Schedule another detect run so the
	 * removal is eventually noticed.
	 */
	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
		mmc_detect_change(host, msecs_to_jiffies(200));
		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
	}

	if (ret) {
		mmc_card_set_removed(host->card);
		pr_debug("%s: card remove detected\n", mmc_hostname(host));
	}

	return ret;
}
2511
/*
 * Public wrapper: report whether the card has been removed.  Avoids
 * polling the hardware unless a detect event is pending or the host
 * requires polling.  Caller must have claimed the host.
 */
int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	if (!card)
		return 1;

	/* Non-removable slots can never lose their card. */
	if (!mmc_card_is_removable(host))
		return 0;

	ret = mmc_card_removed(card);
	/*
	 * The card will be considered unchanged unless we have been asked to
	 * detect a change or host requires polling to provide card detection.
	 */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
		return ret;

	host->detect_change = 0;
	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
			/*
			 * Schedule a detect work as soon as possible to let a
			 * rescan handle the card removal.
			 */
			cancel_delayed_work(&host->detect);
			_mmc_detect_change(host, 0, false);
		}
	}

	return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);
2549
/*
 * Card-detect worker: checks for removal of an attached card and probes
 * for a newly inserted one, retrying at progressively lower frequencies.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;

	if (host->rescan_disable)
		return;

	/* If there is a non-removable card registered, only scan once */
	if (!mmc_card_is_removable(host) && host->rescan_entered)
		return;
	host->rescan_entered = 1;

	if (host->trigger_card_event && host->ops->card_event) {
		mmc_claim_host(host);
		host->ops->card_event(host);
		mmc_release_host(host);
		host->trigger_card_event = false;
	}

	mmc_bus_get(host);

	/*
	 * if there is a _removable_ card registered, check whether it is
	 * still present
	 */
	if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
		host->bus_ops->detect(host);

	host->detect_change = 0;

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	mmc_claim_host(host);
	if (mmc_card_is_removable(host) && host->ops->get_cd &&
			host->ops->get_cd(host) == 0) {
		/* Slot is empty: power down and stop probing. */
		mmc_power_off(host);
		mmc_release_host(host);
		goto out;
	}

	/* Probe at each supported frequency, highest first. */
	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
			break;
		if (freqs[i] <= host->f_min)
			break;
	}
	mmc_release_host(host);

 out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
2621
/* Begin managing a host: power it up (unless deferred) and kick off detect. */
void mmc_start_host(struct mmc_host *host)
{
	host->f_init = max(freqs[0], host->f_min);
	host->rescan_disable = 0;
	host->ios.power_mode = MMC_POWER_UNDEFINED;

	if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
		mmc_claim_host(host);
		mmc_power_up(host, host->ocr_avail);
		mmc_release_host(host);
	}

	mmc_gpiod_request_cd_irq(host);
	_mmc_detect_change(host, 0, false);
}
2637
/*
 * Stop managing a host: quiesce card-detect, remove any attached card,
 * and power the host down.
 */
void mmc_stop_host(struct mmc_host *host)
{
	if (host->slot.cd_irq >= 0) {
		if (host->slot.cd_wake_enabled)
			disable_irq_wake(host->slot.cd_irq);
		disable_irq(host->slot.cd_irq);
	}

	host->rescan_disable = 1;
	cancel_delayed_work_sync(&host->detect);

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	mmc_claim_host(host);
	mmc_power_off(host);
	mmc_release_host(host);
}
2669
/*
 * Put the attached card into a low-power state (via the bus ops, when
 * provided) and power the host off.  Returns -EINVAL when no live bus
 * handler is attached.
 */
int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

	pr_debug("%s: %s: powering down\n", mmc_hostname(host), __func__);

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);
2693
/*
 * Re-apply power and let the bus handler restore the card state after a
 * mmc_power_save_host().  Returns -EINVAL when no live bus handler is
 * attached.
 */
int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

	pr_debug("%s: %s: powering up\n", mmc_hostname(host), __func__);

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host, host->card->ocr);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);
2715
2716#ifdef CONFIG_PM_SLEEP
2717
2718
2719
2720
/*
 * System PM notifier: disable rescans and suspend/remove the card before
 * suspend/hibernation, and re-enable detection afterwards.
 */
static int mmc_pm_notify(struct notifier_block *notify_block,
			 unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;
	int err = 0;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops)
			break;

		/* Validate prerequisites for suspend */
		if (host->bus_ops->pre_suspend)
			err = host->bus_ops->pre_suspend(host);
		if (!err)
			break;

		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		_mmc_detect_change(host, 0, false);

	}

	return 0;
}
2769
/* Hook the host into the system suspend/resume notification chain. */
void mmc_register_pm_notifier(struct mmc_host *host)
{
	host->pm_notify.notifier_call = mmc_pm_notify;
	register_pm_notifier(&host->pm_notify);
}
2775
/* Remove the host from the system suspend/resume notification chain. */
void mmc_unregister_pm_notifier(struct mmc_host *host)
{
	unregister_pm_notifier(&host->pm_notify);
}
2780#endif
2781
/*
 * Subsystem init: register the mmc bus, host class and sdio bus, undoing
 * earlier registrations if a later one fails.
 */
static int __init mmc_init(void)
{
	int ret;

	ret = mmc_register_bus();
	if (ret)
		return ret;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
	return ret;
}
2806
/* Subsystem teardown: unregister in reverse order of mmc_init(). */
static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
}
2813
2814subsys_initcall(mmc_init);
2815module_exit(mmc_exit);
2816
2817MODULE_LICENSE("GPL");
2818