1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/interrupt.h>
16#include <linux/completion.h>
17#include <linux/device.h>
18#include <linux/delay.h>
19#include <linux/pagemap.h>
20#include <linux/err.h>
21#include <linux/leds.h>
22#include <linux/scatterlist.h>
23#include <linux/log2.h>
24#include <linux/regulator/consumer.h>
25#include <linux/pm_runtime.h>
26#include <linux/pm_wakeup.h>
27#include <linux/suspend.h>
28#include <linux/fault-inject.h>
29#include <linux/random.h>
30#include <linux/slab.h>
31#include <linux/of.h>
32
33#include <linux/mmc/card.h>
34#include <linux/mmc/host.h>
35#include <linux/mmc/mmc.h>
36#include <linux/mmc/sd.h>
37#include <linux/mmc/slot-gpio.h>
38
39#define CREATE_TRACE_POINTS
40#include <trace/events/mmc.h>
41
42#include "core.h"
43#include "card.h"
44#include "bus.h"
45#include "host.h"
46#include "sdio_bus.h"
47#include "pwrseq.h"
48
49#include "mmc_ops.h"
50#include "sd_ops.h"
51#include "sdio_ops.h"
52
53
54#define MMC_ERASE_TIMEOUT_MS (60 * 1000)
55
56static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
57
58
59
60
61
62
63bool use_spi_crc = 1;
64module_param(use_spi_crc, bool, 0);
65
/*
 * Schedule delayed work on the freezable system workqueue so that the
 * work is automatically suspended/resumed with the system (important
 * for e.g. card detection during suspend).
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(system_freezable_wq, work, delay);
}
77
#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Fault injection: randomly corrupt a completed data request with one
 * of the errors below and pretend fewer bytes were transferred.
 * No-op when the request carries no data or already failed.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if (cmd->error || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
	/*
	 * Guard against a division by zero: if fewer than one 512-byte
	 * sector was transferred, report zero bytes transferred.
	 */
	if (data->bytes_xfered >> 9)
		data->bytes_xfered =
			(prandom_u32() % (data->bytes_xfered >> 9)) << 9;
	else
		data->bytes_xfered = 0;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */
114
115static inline void mmc_complete_cmd(struct mmc_request *mrq)
116{
117 if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
118 complete_all(&mrq->cmd_completion);
119}
120
/*
 * mmc_command_done - called by a host driver when the command phase of
 * a cap_cmd_during_tfr request is finished while the data transfer is
 * still ongoing.  No-op for other requests.
 */
void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
{
	if (!mrq->cap_cmd_during_tfr)
		return;

	mmc_complete_cmd(mrq);

	pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
		 mmc_hostname(host), mrq->cmd->opcode);
}
EXPORT_SYMBOL(mmc_command_done);
132
133
134
135
136
137
138
139
140
/**
 *	mmc_request_done - finish processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	MMC drivers should call this function when they have completed
 *	their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	/*
	 * A CRC error (-EILSEQ) on any part of the request suggests the
	 * tuning is stale; schedule a retune (unless this *is* a tuning
	 * command, or the host opted out of CRC-triggered retuning).
	 */
	if (cmd->opcode != MMC_SEND_TUNING_BLOCK &&
	    cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200 &&
	    !host->retune_crc_disable &&
	    (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
	     (mrq->data && mrq->data->error == -EILSEQ) ||
	     (mrq->stop && mrq->stop->error == -EILSEQ)))
		mmc_retune_needed(host);

	/* SPI mode: an illegal command is not worth retrying. */
	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (host->ongoing_mrq == mrq)
		host->ongoing_mrq = NULL;

	mmc_complete_cmd(mrq);

	trace_mmc_request_done(host, mrq);

	/*
	 * We list various conditions for the command to be considered
	 * properly done:
	 *
	 * - There was no error, OK fine then
	 * - We are not doing some kind of retry
	 * - The card was removed (and so we avoid retries)
	 *
	 * Fault injection and the LED/debug reporting only happen on the
	 * final (non-retried) completion.
	 */
	if (!err || !cmd->retries || mmc_card_removed(host->card)) {
		mmc_should_fail_request(host, mrq);

		if (!host->ongoing_mrq)
			led_trigger_event(host->led, LED_OFF);

		if (mrq->sbc) {
			pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
				 mmc_hostname(host), mrq->sbc->opcode,
				 mrq->sbc->error,
				 mrq->sbc->resp[0], mrq->sbc->resp[1],
				 mrq->sbc->resp[2], mrq->sbc->resp[3]);
		}

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			 mmc_hostname(host), cmd->opcode, err,
			 cmd->resp[0], cmd->resp[1],
			 cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s: %d bytes transferred: %d\n",
				 mmc_hostname(host),
				 mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
				 mmc_hostname(host), mrq->stop->opcode,
				 mrq->stop->error,
				 mrq->stop->resp[0], mrq->stop->resp[1],
				 mrq->stop->resp[2], mrq->stop->resp[3]);
		}
	}
	/*
	 * Request starter must handle retries - see
	 * mmc_wait_for_req_done().
	 */
	if (mrq->done)
		mrq->done(mrq);
}

EXPORT_SYMBOL(mmc_request_done);
218
/*
 * Hand a prepared request to the host driver.  Performs a retune if one
 * is pending, optionally waits for the card to go non-busy for SDIO I/O
 * commands, and tracks cmd-during-transfer requests.  Any failure is
 * reported through mmc_request_done().
 */
static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	/* Assumes host takes care of card power during a retune. */
	err = mmc_retune(host);
	if (err) {
		mrq->cmd->error = err;
		mmc_request_done(host, mrq);
		return;
	}

	/*
	 * For sdio rw commands we must wait for card busy otherwise some
	 * sdio devices won't work properly.  Poll for up to ~500ms.
	 */
	if (sdio_is_io_busy(mrq->cmd->opcode, mrq->cmd->arg) &&
	    host->ops->card_busy) {
		int tries = 500; /* Wait approx 500ms at maximum */

		while (host->ops->card_busy(host) && --tries)
			mmc_delay(1);

		if (tries == 0) {
			mrq->cmd->error = -EBUSY;
			mmc_request_done(host, mrq);
			return;
		}
	}

	if (mrq->cap_cmd_during_tfr) {
		host->ongoing_mrq = mrq;
		/*
		 * Retry path could come through here without having
		 * waited for the completion, so re-arm it.
		 */
		reinit_completion(&mrq->cmd_completion);
	}

	trace_mmc_request_start(host, mrq);

	/* Command queue engine and normal requests are mutually exclusive. */
	if (host->cqe_on)
		host->cqe_ops->cqe_off(host);

	host->ops->request(host, mrq);
}
266
/*
 * Emit pr_debug tracing for each part (sbc/cmd/data/stop) of a request
 * that is about to be started.  @cqe distinguishes CQE submissions.
 */
static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq,
			     bool cqe)
{
	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	if (mrq->cmd) {
		pr_debug("%s: starting %sCMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), cqe ? "CQE direct " : "",
			 mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
	} else if (cqe) {
		/* CQE data-only transfer: identified by tag, not opcode. */
		pr_debug("%s: starting CQE transfer for tag %d blkaddr %u\n",
			 mmc_hostname(host), mrq->tag, mrq->data->blk_addr);
	}

	if (mrq->data) {
		pr_debug("%s: blksz %d blocks %d flags %08x "
			 "tsac %d ms nsac %d\n",
			 mmc_hostname(host), mrq->data->blksz,
			 mrq->data->blocks, mrq->data->flags,
			 mrq->data->timeout_ns / 1000000,
			 mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s: CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}
}
300
/*
 * Validate and initialize a request before submission: clear stale
 * error fields, link sub-commands back to @mrq, and sanity-check the
 * data segment against host limits and the scatterlist length.
 *
 * Returns 0 on success or -EINVAL if the data does not fit the host's
 * constraints or the sg list does not cover blocks * blksz bytes.
 */
static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
{
	unsigned int i, sz = 0;
	struct scatterlist *sg;

	if (mrq->cmd) {
		mrq->cmd->error = 0;
		mrq->cmd->mrq = mrq;
		mrq->cmd->data = mrq->data;
	}
	if (mrq->sbc) {
		mrq->sbc->error = 0;
		mrq->sbc->mrq = mrq;
	}
	if (mrq->data) {
		if (mrq->data->blksz > host->max_blk_size ||
		    mrq->data->blocks > host->max_blk_count ||
		    mrq->data->blocks * mrq->data->blksz > host->max_req_size)
			return -EINVAL;

		/* The sg list must describe exactly the advertised length. */
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		if (sz != mrq->data->blocks * mrq->data->blksz)
			return -EINVAL;

		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}

	return 0;
}
337
/*
 * Start a (non-CQE) request on a claimed host.  Holds retune for the
 * duration of the request; the caller's done() callback eventually
 * fires via mmc_request_done().  Returns 0 or a negative errno if the
 * request could not be started.
 */
int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	init_completion(&mrq->cmd_completion);

	mmc_retune_hold(host);

	if (mmc_card_removed(host->card))
		return -ENOMEDIUM;

	mmc_mrq_pr_debug(host, mrq, false);

	WARN_ON(!host->claimed);

	err = mmc_mrq_prep(host, mrq);
	if (err)
		return err;

	led_trigger_event(host->led, LED_FULL);
	__mmc_start_request(host, mrq);

	return 0;
}
EXPORT_SYMBOL(mmc_start_request);
363
/* Completion callback used by the synchronous request helpers. */
static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}
368
369static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
370{
371 struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);
372
373
374
375
376
377 if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
378 wait_for_completion(&ongoing_mrq->cmd_completion);
379}
380
/*
 * Start a request that will be waited for synchronously.  On a start
 * failure the error is recorded in the command and both completions
 * are fired so that waiters are released immediately.
 */
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	mmc_wait_ongoing_tfr_cmd(host);

	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;

	err = mmc_start_request(host, mrq);
	if (err) {
		mrq->cmd->error = err;
		/* Order matters: set the error before releasing waiters. */
		mmc_complete_cmd(mrq);
		complete(&mrq->completion);
	}

	return err;
}
399
/*
 * Wait until @mrq completes, transparently handling retries and the
 * special case of interrupting a timed-out sanitize operation via HPI.
 * Releases the retune hold taken by mmc_start_request().
 */
void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;

		/*
		 * If host has timed out waiting for the sanitize
		 * to complete, card might be still in programming state
		 * so let's try to bring the card out of programming
		 * state.
		 */
		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
			if (!mmc_interrupt_hpi(host->card)) {
				pr_warn("%s: %s: Interrupted sanitize\n",
					mmc_hostname(host), __func__);
				cmd->error = 0;
				break;
			} else {
				pr_err("%s: %s: Failed to interrupt sanitize\n",
				       mmc_hostname(host), __func__);
			}
		}
		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		mmc_retune_recheck(host);

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		__mmc_start_request(host, mrq);
	}

	mmc_retune_release(host);
}
EXPORT_SYMBOL(mmc_wait_for_req_done);
442
443
444
445
446
447
448
449
450
/**
 *	mmc_cqe_start_req - Start a CQE request
 *	@host: MMC host to start the request on
 *	@mrq: the request to start
 *
 *	Returns 0 on success, or a negative errno if the request could
 *	not be handed to the CQE.  Completion is signalled through
 *	mmc_cqe_request_done().
 */
int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	/*
	 * CQE cannot process re-tuning commands.  Caller must hold retune
	 * while CQE is in use.  Re-tuning can happen here only when CQE has
	 * no requests in flight, so it is dealt with before starting.
	 */
	err = mmc_retune(host);
	if (err)
		goto out_err;

	mrq->host = host;

	mmc_mrq_pr_debug(host, mrq, true);

	err = mmc_mrq_prep(host, mrq);
	if (err)
		goto out_err;

	err = host->cqe_ops->cqe_request(host, mrq);
	if (err)
		goto out_err;

	trace_mmc_request_start(host, mrq);

	return 0;

out_err:
	if (mrq->cmd) {
		pr_debug("%s: failed to start CQE direct CMD%u, error %d\n",
			 mmc_hostname(host), mrq->cmd->opcode, err);
	} else {
		pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n",
			 mmc_hostname(host), mrq->tag, err);
	}
	return err;
}
EXPORT_SYMBOL(mmc_cqe_start_req);
492
493
494
495
496
497
498
499
500
/**
 *	mmc_cqe_request_done - CQE has finished processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	CQE hosts should call this function when they have completed
 *	their processing of a request.
 */
void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	mmc_should_fail_request(host, mrq);

	/* Flag re-tuning needed on CRC errors */
	if ((mrq->cmd && mrq->cmd->error == -EILSEQ) ||
	    (mrq->data && mrq->data->error == -EILSEQ))
		mmc_retune_needed(host);

	trace_mmc_request_done(host, mrq);

	if (mrq->cmd) {
		pr_debug("%s: CQE req done (direct CMD%u): %d\n",
			 mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error);
	} else {
		pr_debug("%s: CQE transfer done tag %d\n",
			 mmc_hostname(host), mrq->tag);
	}

	if (mrq->data) {
		pr_debug("%s: %d bytes transferred: %d\n",
			 mmc_hostname(host),
			 mrq->data->bytes_xfered, mrq->data->error);
	}

	mrq->done(mrq);
}
EXPORT_SYMBOL(mmc_cqe_request_done);
529
530
531
532
533
534
535void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq)
536{
537 if (host->cqe_ops->cqe_post_req)
538 host->cqe_ops->cqe_post_req(host, mrq);
539}
540EXPORT_SYMBOL(mmc_cqe_post_req);
541
542
543#define MMC_CQE_RECOVERY_TIMEOUT 1000
544
545
546
547
548
549
550
551
552
553
554int mmc_cqe_recovery(struct mmc_host *host)
555{
556 struct mmc_command cmd;
557 int err;
558
559 mmc_retune_hold_now(host);
560
561
562
563
564
565 pr_warn("%s: running CQE recovery\n", mmc_hostname(host));
566
567 host->cqe_ops->cqe_recovery_start(host);
568
569 memset(&cmd, 0, sizeof(cmd));
570 cmd.opcode = MMC_STOP_TRANSMISSION,
571 cmd.flags = MMC_RSP_R1B | MMC_CMD_AC,
572 cmd.flags &= ~MMC_RSP_CRC;
573 cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
574 mmc_wait_for_cmd(host, &cmd, 0);
575
576 memset(&cmd, 0, sizeof(cmd));
577 cmd.opcode = MMC_CMDQ_TASK_MGMT;
578 cmd.arg = 1;
579 cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
580 cmd.flags &= ~MMC_RSP_CRC;
581 cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
582 err = mmc_wait_for_cmd(host, &cmd, 0);
583
584 host->cqe_ops->cqe_recovery_finish(host);
585
586 mmc_retune_release(host);
587
588 return err;
589}
590EXPORT_SYMBOL(mmc_cqe_recovery);
591
592
593
594
595
596
597
598
599
600
601
602
603
/**
 *	mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
 *	@host: MMC host
 *	@mrq: MMC request
 *
 *	mmc_is_req_done() is used with requests that have
 *	mrq->cap_cmd_during_tfr = true.
 */
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
	return completion_done(&mrq->completion);
}
EXPORT_SYMBOL(mmc_is_req_done);
609
610
611
612
613
614
615
616
617
618
619
620
621
622void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
623{
624 __mmc_start_req(host, mrq);
625
626 if (!mrq->cap_cmd_during_tfr)
627 mmc_wait_for_req_done(host, mrq);
628}
629EXPORT_SYMBOL(mmc_wait_for_req);
630
631
632
633
634
635
636
637
638
639
640
641int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
642{
643 struct mmc_request mrq = {};
644
645 WARN_ON(!host->claimed);
646
647 memset(cmd->resp, 0, sizeof(cmd->resp));
648 cmd->retries = retries;
649
650 mrq.cmd = cmd;
651 cmd->data = NULL;
652
653 mmc_wait_for_req(host, &mrq);
654
655 return cmd->error;
656}
657
658EXPORT_SYMBOL(mmc_wait_for_cmd);
659
660
661
662
663
664
665
666
667
/**
 *	mmc_set_data_timeout - set the timeout for a data command
 *	@data: data phase for command
 *	@card: the MMC card associated with the data transfer
 *
 *	Computes the data timeout parameters according to the
 *	correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.taac_ns * mult;
	data->timeout_clks = card->csd.taac_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (card->host->ios.clock)
			timeout_us += data->timeout_clks * 1000 /
				(card->host->ios.clock / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length."  Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}

		/* assign limit value if invalid */
		if (timeout_us == 0)
			data->timeout_ns = limit_us * 1000;
	}

	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value.  For the cards tested, 600ms has proven enough.  If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 600000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns =  100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
/**
 *	mmc_align_data_size - pads a transfer size to a more optimal value
 *	@card: the MMC card associated with the data transfer
 *	@sz: original transfer size
 *
 *	Pads the original data size with a number of extra bytes in
 *	order to avoid controller bugs and/or performance hits
 *	(e.g. some controllers revert to PIO for certain sizes).
 *
 *	Returns the improved size, which might be unmodified.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell us
	 * its hidden preferences.  Round up to a multiple of 4 bytes.
	 */
	return (sz + 3) & ~3u;
}
787EXPORT_SYMBOL(mmc_align_data_size);
788
789
790
791
792
793static inline bool mmc_ctx_matches(struct mmc_host *host, struct mmc_ctx *ctx,
794 struct task_struct *task)
795{
796 return host->claimer == ctx ||
797 (!ctx && task && host->claimer->task == task);
798}
799
800static inline void mmc_ctx_set_claimer(struct mmc_host *host,
801 struct mmc_ctx *ctx,
802 struct task_struct *task)
803{
804 if (!host->claimer) {
805 if (ctx)
806 host->claimer = ctx;
807 else
808 host->claimer = &host->default_ctx;
809 }
810 if (task)
811 host->claimer->task = task;
812}
813
814
815
816
817
818
819
820
821
822
823
824
825
/**
 *	__mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@ctx: context that claims the host or NULL in which case the default
 *	context will be used
 *	@abort: whether or not the operation should be aborted
 *
 *	Claim a host for a set of operations.  If @abort is non-NULL and
 *	becomes non-zero, the wait is interrupted.  Returns zero with the
 *	host claimed, or non-zero with no claim if the wait was aborted.
 *	Claims are re-entrant for the same context/task.
 */
int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
		     atomic_t *abort)
{
	struct task_struct *task = ctx ? NULL : current;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;
	bool pm = false;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task))
			break;
		/* Drop the lock while sleeping so others can release. */
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		mmc_ctx_set_claimer(host, ctx, task);
		host->claim_cnt += 1;
		/* First claim takes a runtime PM reference (after unlock). */
		if (host->claim_cnt == 1)
			pm = true;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);

	if (pm)
		pm_runtime_get_sync(mmc_dev(host));

	return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);
866
867
868
869
870
871
872
873
/**
 *	mmc_release_host - release a host
 *	@host: mmc host to release
 *
 *	Release a MMC host, allowing others to claim the host
 *	for their operations.  Drops the runtime PM reference taken by
 *	the first claim when the last nested claim is released.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer->task = NULL;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
		pm_runtime_mark_last_busy(mmc_dev(host));
		pm_runtime_put_autosuspend(mmc_dev(host));
	}
}
EXPORT_SYMBOL(mmc_release_host);
895
896
897
898
899
/*
 * This is a helper function, which fetches a runtime pm reference for the
 * card device and also claims the host.
 */
void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
	pm_runtime_get_sync(&card->dev);
	__mmc_claim_host(card->host, ctx, NULL);
}
EXPORT_SYMBOL(mmc_get_card);
906
907
908
909
910
/*
 * This is a helper function, which releases the host and drops the runtime
 * pm reference for the card device.  Counterpart of mmc_get_card().
 */
void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
	struct mmc_host *host = card->host;

	WARN_ON(ctx && host->claimer != ctx);

	mmc_release_host(host);
	pm_runtime_mark_last_busy(&card->dev);
	pm_runtime_put_autosuspend(&card->dev);
}
EXPORT_SYMBOL(mmc_put_card);
922
923
924
925
926
/*
 * Internal function that pushes the current host->ios settings down to
 * the host controller driver.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 1 << ios->bus_width, ios->timing);

	host->ops->set_ios(host, ios);
}
939
940
941
942
/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	host->ios.chip_select = mode;
	mmc_set_ios(host);
}
948
949
950
951
952
953void mmc_set_clock(struct mmc_host *host, unsigned int hz)
954{
955 WARN_ON(hz && hz < host->f_min);
956
957 if (hz > host->f_max)
958 hz = host->f_max;
959
960 host->ios.clock = hz;
961 mmc_set_ios(host);
962}
963
964int mmc_execute_tuning(struct mmc_card *card)
965{
966 struct mmc_host *host = card->host;
967 u32 opcode;
968 int err;
969
970 if (!host->ops->execute_tuning)
971 return 0;
972
973 if (host->cqe_on)
974 host->cqe_ops->cqe_off(host);
975
976 if (mmc_card_mmc(card))
977 opcode = MMC_SEND_TUNING_BLOCK_HS200;
978 else
979 opcode = MMC_SEND_TUNING_BLOCK;
980
981 err = host->ops->execute_tuning(host, opcode);
982
983 if (err)
984 pr_err("%s: tuning execution failed: %d\n",
985 mmc_hostname(host), err);
986 else
987 mmc_retune_enable(host);
988
989 return err;
990}
991
992
993
994
/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
}
1000
1001
1002
1003
/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	host->ios.bus_width = width;
	mmc_set_ios(host);
}
1009
1010
1011
1012
/*
 * Set initial state after a power cycle or a hw_reset.
 */
void mmc_set_initial_state(struct mmc_host *host)
{
	if (host->cqe_on)
		host->cqe_ops->cqe_off(host);

	mmc_retune_disable(host);

	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	host->ios.drv_type = 0;
	host->ios.enhanced_strobe = false;

	/*
	 * Make sure we are in non-enhanced strobe mode before we
	 * actually enable it in ext_csd.
	 */
	if ((host->caps2 & MMC_CAP2_HS400_ES) &&
	     host->ops->hs400_enhanced_strobe)
		host->ops->hs400_enhanced_strobe(host, &host->ios);

	mmc_set_ios(host);
}
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
1057{
1058 const int max_bit = ilog2(MMC_VDD_35_36);
1059 int bit;
1060
1061 if (vdd < 1650 || vdd > 3600)
1062 return -EINVAL;
1063
1064 if (vdd >= 1650 && vdd <= 1950)
1065 return ilog2(MMC_VDD_165_195);
1066
1067 if (low_bits)
1068 vdd -= 1;
1069
1070
1071 bit = (vdd - 2000) / 100 + 8;
1072 if (bit > max_bit)
1073 return max_bit;
1074 return bit;
1075}
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
1091{
1092 u32 mask = 0;
1093
1094 if (vdd_max < vdd_min)
1095 return 0;
1096
1097
1098 vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
1099 if (vdd_max < 0)
1100 return 0;
1101
1102
1103 vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
1104 if (vdd_min < 0)
1105 return 0;
1106
1107
1108 while (vdd_max >= vdd_min)
1109 mask |= 1 << vdd_max--;
1110
1111 return mask;
1112}
1113EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
1114
1115#ifdef CONFIG_OF
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
1127{
1128 const u32 *voltage_ranges;
1129 int num_ranges, i;
1130
1131 voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
1132 num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
1133 if (!voltage_ranges) {
1134 pr_debug("%pOF: voltage-ranges unspecified\n", np);
1135 return 0;
1136 }
1137 if (!num_ranges) {
1138 pr_err("%pOF: voltage-ranges empty\n", np);
1139 return -EINVAL;
1140 }
1141
1142 for (i = 0; i < num_ranges; i++) {
1143 const int j = i * 2;
1144 u32 ocr_mask;
1145
1146 ocr_mask = mmc_vddrange_to_ocrmask(
1147 be32_to_cpu(voltage_ranges[j]),
1148 be32_to_cpu(voltage_ranges[j + 1]));
1149 if (!ocr_mask) {
1150 pr_err("%pOF: voltage-range #%d is invalid\n",
1151 np, i);
1152 return -EINVAL;
1153 }
1154 *mask |= ocr_mask;
1155 }
1156
1157 return 1;
1158}
1159EXPORT_SYMBOL(mmc_of_parse_voltage);
1160
1161#endif
1162
1163static int mmc_of_get_func_num(struct device_node *node)
1164{
1165 u32 reg;
1166 int ret;
1167
1168 ret = of_property_read_u32(node, "reg", ®);
1169 if (ret < 0)
1170 return ret;
1171
1172 return reg;
1173}
1174
/*
 * Find the device-tree child node of the host's parent whose "reg"
 * property matches @func_num.  Returns the node (with a reference
 * held by the iterator) or NULL if there is no OF node or no match.
 */
struct device_node *mmc_of_find_child_device(struct mmc_host *host,
		unsigned func_num)
{
	struct device_node *node;

	if (!host->parent || !host->parent->of_node)
		return NULL;

	for_each_child_of_node(host->parent->of_node, node) {
		if (mmc_of_get_func_num(node) == func_num)
			return node;
	}

	return NULL;
}
1190
1191#ifdef CONFIG_REGULATOR
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
1203{
1204 int tmp;
1205
1206 if (!vdd_bit)
1207 return -EINVAL;
1208
1209
1210
1211
1212
1213
1214
1215 tmp = vdd_bit - ilog2(MMC_VDD_165_195);
1216 if (tmp == 0) {
1217 *min_uV = 1650 * 1000;
1218 *max_uV = 1950 * 1000;
1219 } else {
1220 *min_uV = 1900 * 1000 + tmp * 100 * 1000;
1221 *max_uV = *min_uV + 100 * 1000;
1222 }
1223
1224 return 0;
1225}
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator.  This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;
	int vdd_uV;
	int vdd_mV;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	if (result)
		return result;

	/* Fixed regulator: fall back to its current voltage. */
	vdd_uV = regulator_get_voltage(supply);
	if (vdd_uV <= 0)
		return vdd_uV;

	vdd_mV = vdd_uV / 1000;
	return mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage.  This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int			result = 0;
	int			min_uV, max_uV;

	if (vdd_bit) {
		mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);

		result = regulator_set_voltage(supply, min_uV, max_uV);
		/* Enable only once; track state in regulator_enabled. */
		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
1310
1311static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
1312 int min_uV, int target_uV,
1313 int max_uV)
1314{
1315
1316
1317
1318
1319 if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
1320 return -EINVAL;
1321
1322 return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
1323 max_uV);
1324}
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
/**
 * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
 * @mmc: the host
 * @ios: the io settings whose signal_voltage selects the target level
 *
 * For 3.3V signaling, we try to match VQMMC to VMMC as closely as
 * possible.  For other signal levels, try to match an appropriate
 * fixed window.  Returns 0 on success, negative errno on failure.
 */
int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct device *dev = mmc_dev(mmc);
	int ret, volt, min_uV, max_uV;

	/* If no vqmmc supply then we can't change the voltage */
	if (IS_ERR(mmc->supply.vqmmc))
		return -EINVAL;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_120:
		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						1100000, 1200000, 1300000);
	case MMC_SIGNAL_VOLTAGE_180:
		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						1700000, 1800000, 1950000);
	case MMC_SIGNAL_VOLTAGE_330:
		/* Derive the target window from the current VMMC level. */
		ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
		if (ret < 0)
			return ret;

		dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
			__func__, volt, max_uV);

		min_uV = max(volt - 300000, 2700000);
		max_uV = min(max_uV + 200000, 3600000);

		/*
		 * Due to a limitation in the current implementation of
		 * regulator_set_voltage_triplet() which is taking the lowest
		 * voltage possible if below the target, search for a suitable
		 * voltage in two steps and try to stay close to vmmc
		 * with a 0.3V tolerance at first.
		 */
		if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						min_uV, volt, max_uV))
			return 0;

		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						2700000, volt, 3600000);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
1388
1389#endif
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
/*
 * Look up the optional vmmc and vqmmc regulators for the host and, when
 * vmmc is present, derive ocr_avail from its supported voltages.
 * Returns -EPROBE_DEFER if either regulator is not ready yet, else 0
 * (missing regulators are not an error).
 */
int mmc_regulator_get_supply(struct mmc_host *mmc)
{
	struct device *dev = mmc_dev(mmc);
	int ret;

	mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
	mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");

	if (IS_ERR(mmc->supply.vmmc)) {
		if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_dbg(dev, "No vmmc regulator found\n");
	} else {
		ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
		if (ret > 0)
			mmc->ocr_avail = ret;
		else
			dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
	}

	if (IS_ERR(mmc->supply.vqmmc)) {
		if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_dbg(dev, "No vqmmc regulator found\n");
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
1430
1431
1432
1433
1434
/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		dev_warn(mmc_dev(host),
		"card claims to support voltages below defined range\n");
		ocr &= ~0x7F;
	}

	ocr &= host->ocr_avail;
	if (!ocr) {
		dev_warn(mmc_dev(host), "no support for card's volts\n");
		return 0;
	}

	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
		/* Lowest supported voltage, after a full power cycle. */
		bit = ffs(ocr) - 1;
		ocr &= 3 << bit;
		mmc_power_cycle(host, ocr);
	} else {
		/* Keep the current (highest) voltage to avoid a power cycle. */
		bit = fls(ocr) - 1;
		ocr &= 3 << bit;
		if (bit != host->ios.vdd)
			dev_warn(mmc_dev(host), "exceeding card's volts\n");
	}

	return ocr;
}
1468
1469int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
1470{
1471 int err = 0;
1472 int old_signal_voltage = host->ios.signal_voltage;
1473
1474 host->ios.signal_voltage = signal_voltage;
1475 if (host->ops->start_signal_voltage_switch)
1476 err = host->ops->start_signal_voltage_switch(host, &host->ios);
1477
1478 if (err)
1479 host->ios.signal_voltage = old_signal_voltage;
1480
1481 return err;
1482
1483}
1484
/*
 * Try the highest signal voltage first, stepping down until one works.
 */
void mmc_set_initial_signal_voltage(struct mmc_host *host)
{
	/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
	if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
}
1495
/*
 * Switch the host to 1.8V signaling with the clock gated, as the SD
 * voltage-switch sequence requires.  Returns -EAGAIN if the host could
 * not switch, in which case the caller should power cycle the card.
 */
int mmc_host_set_uhs_voltage(struct mmc_host *host)
{
	u32 clock;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	clock = host->ios.clock;
	host->ios.clock = 0;
	mmc_set_ios(host);

	if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
		return -EAGAIN;

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mmc_delay(10);
	host->ios.clock = clock;
	mmc_set_ios(host);

	return 0;
}
1518
/*
 * Perform the full CMD11 voltage-switch sequence to 1.8V signaling.
 * On any failure the card is power cycled (per spec the card may be in
 * an unknown state), and -EAGAIN or another negative errno is returned.
 */
int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
{
	struct mmc_command cmd = {};
	int err = 0;

	/*
	 * If we cannot switch voltages, return failure so the caller
	 * can continue without UHS mode
	 */
	if (!host->ops->start_signal_voltage_switch)
		return -EPERM;
	if (!host->ops->card_busy)
		pr_warn("%s: cannot verify signal voltage switch\n",
			mmc_hostname(host));

	cmd.opcode = SD_SWITCH_VOLTAGE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		return err;

	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 1 ms to be sure
	 */
	mmc_delay(1);
	if (host->ops->card_busy && !host->ops->card_busy(host)) {
		err = -EAGAIN;
		goto power_cycle;
	}

	if (mmc_host_set_uhs_voltage(host)) {
		/*
		 * Voltages may not have been switched, but we've already
		 * sent CMD11, so a power cycle is required anyway
		 */
		err = -EAGAIN;
		goto power_cycle;
	}

	/* Wait for at least 1 ms according to spec */
	mmc_delay(1);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low
	 */
	if (host->ops->card_busy && host->ops->card_busy(host))
		err = -EAGAIN;

power_cycle:
	if (err) {
		pr_debug("%s: Signal voltage switch failed, "
			"power cycling card\n", mmc_hostname(host));
		mmc_power_cycle(host, ocr);
	}

	return err;
}
1583
1584
1585
1586
/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	host->ios.timing = timing;
	mmc_set_ios(host);
}
1592
1593
1594
1595
/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
}
1601
1602int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
1603 int card_drv_type, int *drv_type)
1604{
1605 struct mmc_host *host = card->host;
1606 int host_drv_type = SD_DRIVER_TYPE_B;
1607
1608 *drv_type = 0;
1609
1610 if (!host->ops->select_drive_strength)
1611 return 0;
1612
1613
1614 if (host->caps & MMC_CAP_DRIVER_TYPE_A)
1615 host_drv_type |= SD_DRIVER_TYPE_A;
1616
1617 if (host->caps & MMC_CAP_DRIVER_TYPE_C)
1618 host_drv_type |= SD_DRIVER_TYPE_C;
1619
1620 if (host->caps & MMC_CAP_DRIVER_TYPE_D)
1621 host_drv_type |= SD_DRIVER_TYPE_D;
1622
1623
1624
1625
1626
1627
1628
1629 return host->ops->select_drive_strength(card, max_dtr,
1630 host_drv_type,
1631 card_drv_type,
1632 drv_type);
1633}
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
/*
 * mmc_power_up - apply power to a card and start the bus clock
 * @host: host to power up
 * @ocr: OCR mask; the highest set bit selects the VDD voltage
 *
 * Runs the full power-up sequence: pre-power-on hook, VDD selection,
 * initial bus state and signal voltage, a settling delay, then clock
 * enable and the final MMC_POWER_ON state.  Does nothing if the host
 * is already powered on.
 */
void mmc_power_up(struct mmc_host *host, u32 ocr)
{
	if (host->ios.power_mode == MMC_POWER_ON)
		return;

	mmc_pwrseq_pre_power_on(host);

	/* Pick the highest voltage bit set in the OCR mask */
	host->ios.vdd = fls(ocr) - 1;
	host->ios.power_mode = MMC_POWER_UP;

	/* Reset bus state (width, timing, chip-select) for identification */
	mmc_set_initial_state(host);

	mmc_set_initial_signal_voltage(host);

	/*
	 * Give the power supply time to reach a stable voltage before
	 * starting the clock (delay length is host-configurable).
	 */
	mmc_delay(host->ios.power_delay_ms);

	mmc_pwrseq_post_power_on(host);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * Wait again with the clock running so the card gets the
	 * start-up clock cycles the specs require before the first
	 * command is sent.
	 */
	mmc_delay(host->ios.power_delay_ms);
}
1679
/*
 * mmc_power_off - remove power from a card
 * @host: host to power down
 *
 * Runs the power-off sequence (pwrseq hook, clock and VDD to zero,
 * MMC_POWER_OFF state) and resets the bus to its initial state.
 * Does nothing if the host is already off.
 */
void mmc_power_off(struct mmc_host *host)
{
	if (host->ios.power_mode == MMC_POWER_OFF)
		return;

	mmc_pwrseq_power_off(host);

	host->ios.clock = 0;
	host->ios.vdd = 0;

	host->ios.power_mode = MMC_POWER_OFF;

	/* Reset bus width/timing so a later power-up starts clean */
	mmc_set_initial_state(host);

	/*
	 * Short delay after power-off; some configurations reportedly
	 * need it before the card can be powered up again.
	 */
	mmc_delay(1);
}
1701
/*
 * mmc_power_cycle - power a card off and back on
 * @host: host to cycle
 * @ocr: OCR mask to use for the subsequent power-up
 *
 * Used e.g. after a failed signal voltage switch to return the card
 * to a known state.
 */
void mmc_power_cycle(struct mmc_host *host, u32 ocr)
{
	mmc_power_off(host);
	/* Brief pause so the supply actually drains before re-powering */
	mmc_delay(1);
	mmc_power_up(host, ocr);
}
1709
1710
1711
1712
/*
 * Release the bus ops after the last reference is dropped.  Only legal
 * once the bus has been marked dead; called with host->lock held by
 * mmc_bus_put().
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	WARN_ON(!host->bus_dead);

	host->bus_ops = NULL;
}
1719
1720
1721
1722
/*
 * Increment the reference count on the host's bus operations,
 * protecting against host->bus_ops being torn down concurrently.
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}
1731
1732
1733
1734
1735
/*
 * Drop a reference on the host's bus operations.  When the last
 * reference goes away (and ops are still attached), the bus ops are
 * released under host->lock.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}
1746
1747
1748
1749
1750
/*
 * mmc_attach_bus - bind a bus handler (SD/MMC/SDIO) to the host
 * @host: host to attach to; must be claimed by the caller
 * @ops: bus operations implementing the protocol
 *
 * Installs @ops with an initial reference and marks the bus alive.
 * Warns if a bus is already attached or stale references exist.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->bus_ops);
	WARN_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}
1768
1769
1770
1771
/*
 * mmc_detach_bus - detach the current bus handler from the host
 * @host: host to detach from; must be claimed by the caller
 *
 * Marks the bus dead and drops the reference taken by
 * mmc_attach_bus(); the ops are actually released once the last
 * reference disappears in mmc_bus_put().
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_put(host);
}
1787
/*
 * Schedule a (possibly delayed) card-detect pass.
 *
 * @cd_irq is true when triggered by a card-detect interrupt; in that
 * case, on wakeup-capable non-polling hosts, a 5 s wakeup event is
 * raised so the system does not suspend before detection completes.
 */
static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
				bool cd_irq)
{
	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
		device_can_wakeup(mmc_dev(host)))
		pm_wakeup_event(mmc_dev(host), 5000);

	host->detect_change = 1;
	mmc_schedule_delayed_work(&host->detect, delay);
}
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
/*
 * mmc_detect_change - process a change of state on a MMC socket
 * @host: host which changed state
 * @delay: optional delay (in jiffies) before the detect work runs
 *
 * Public entry point for host drivers reporting card insertion or
 * removal (treated as a card-detect IRQ for wakeup purposes).
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
	_mmc_detect_change(host, delay, true);
}
EXPORT_SYMBOL(mmc_detect_change);
1818
/*
 * mmc_init_erase - initialize erase geometry for a newly probed card
 * @card: card whose erase_shift and pref_erase fields are set up
 *
 * erase_shift is the log2 of the erase size when it is a power of two
 * (0 otherwise), letting later code use shifts instead of divisions.
 *
 * pref_erase is a preferred erase granularity (in 512-byte sectors)
 * used to keep individual erase commands reasonably sized.  SD cards
 * that report an allocation unit (AU) use that; otherwise a heuristic
 * based on the card capacity is applied.
 */
void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	if (mmc_card_sd(card) && card->ssr.au) {
		/* SD with a reported AU: prefer one AU per erase */
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->erase_size) {
		/* sz = capacity in MiB (capacity in sectors >> 11) */
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;	/* 512 KiB */
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;	/* 1 MiB */
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512; /* 2 MiB */
		else
			card->pref_erase = 4 * 1024 * 1024 / 512; /* 4 MiB */
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			/* round pref_erase up to a multiple of erase_size */
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	} else
		card->pref_erase = 0;
}
1866
1867static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1868 unsigned int arg, unsigned int qty)
1869{
1870 unsigned int erase_timeout;
1871
1872 if (arg == MMC_DISCARD_ARG ||
1873 (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
1874 erase_timeout = card->ext_csd.trim_timeout;
1875 } else if (card->ext_csd.erase_group_def & 1) {
1876
1877 if (arg == MMC_TRIM_ARG)
1878 erase_timeout = card->ext_csd.trim_timeout;
1879 else
1880 erase_timeout = card->ext_csd.hc_erase_timeout;
1881 } else {
1882
1883 unsigned int mult = (10 << card->csd.r2w_factor);
1884 unsigned int timeout_clks = card->csd.taac_clks * mult;
1885 unsigned int timeout_us;
1886
1887
1888 if (card->csd.taac_ns < 1000000)
1889 timeout_us = (card->csd.taac_ns * mult) / 1000;
1890 else
1891 timeout_us = (card->csd.taac_ns / 1000) * mult;
1892
1893
1894
1895
1896
1897 timeout_clks <<= 1;
1898 timeout_us += (timeout_clks * 1000) /
1899 (card->host->ios.clock / 1000);
1900
1901 erase_timeout = timeout_us / 1000;
1902
1903
1904
1905
1906
1907 if (!erase_timeout)
1908 erase_timeout = 1;
1909 }
1910
1911
1912 if (arg & MMC_SECURE_ARGS) {
1913 if (arg == MMC_SECURE_ERASE_ARG)
1914 erase_timeout *= card->ext_csd.sec_erase_mult;
1915 else
1916 erase_timeout *= card->ext_csd.sec_trim_mult;
1917 }
1918
1919 erase_timeout *= qty;
1920
1921
1922
1923
1924
1925 if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
1926 erase_timeout = 1000;
1927
1928 return erase_timeout;
1929}
1930
1931static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
1932 unsigned int arg,
1933 unsigned int qty)
1934{
1935 unsigned int erase_timeout;
1936
1937 if (card->ssr.erase_timeout) {
1938
1939 erase_timeout = card->ssr.erase_timeout * qty +
1940 card->ssr.erase_offset;
1941 } else {
1942
1943
1944
1945
1946 erase_timeout = 250 * qty;
1947 }
1948
1949
1950 if (erase_timeout < 1000)
1951 erase_timeout = 1000;
1952
1953 return erase_timeout;
1954}
1955
/*
 * Dispatch the erase timeout calculation to the SD or eMMC variant
 * depending on the card type.
 */
static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg,
				      unsigned int qty)
{
	if (mmc_card_sd(card))
		return mmc_sd_erase_timeout(card, arg, qty);

	return mmc_mmc_erase_timeout(card, arg, qty);
}
1965
/*
 * Issue one erase sequence (group start, group end, erase) covering
 * the inclusive sector range [@from, @to] with erase argument @arg,
 * then wait for the card to leave the programming state.
 *
 * Returns 0 on success or -EIO on any command/status failure.
 */
static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {};
	unsigned int qty = 0, busy_timeout = 0;
	bool use_r1b_resp = false;
	unsigned long timeout;
	int loop_udelay=64, udelay_max=32768;
	int err;

	/* Hold off retuning while the card is busy erasing */
	mmc_retune_hold(card->host);

	/*
	 * qty counts how many erase groups (or SD allocation units) the
	 * range spans; it feeds the busy timeout calculation.  A range
	 * entirely inside one group still counts as one.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	/* Byte-addressed cards take byte offsets, not sector numbers */
	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	busy_timeout = mmc_erase_timeout(card, arg, qty);

	/*
	 * Only ask for an R1B response (host waits out the busy signal)
	 * when the required timeout fits within what the host can
	 * handle; otherwise use R1 and poll with CMD13 below.
	 */
	if (card->host->max_busy_timeout &&
	    busy_timeout > card->host->max_busy_timeout) {
		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = busy_timeout;
		use_r1b_resp = true;
	}

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	/*
	 * If the host handled the busy wait itself (R1B plus
	 * MMC_CAP_WAIT_WHILE_BUSY), no status polling is needed.
	 */
	if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
		goto out;

	/* Poll CMD13 with exponential backoff until the card is ready */
	timeout = jiffies + msecs_to_jiffies(busy_timeout);
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
				err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}

		/* Give up if the card stays busy past the computed timeout */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s\n",
				mmc_hostname(card->host), __func__);
			err = -EIO;
			goto out;
		}
		if ((cmd.resp[0] & R1_READY_FOR_DATA) &&
		    R1_CURRENT_STATE(cmd.resp[0]) != R1_STATE_PRG)
			break;

		usleep_range(loop_udelay, loop_udelay*2);
		if (loop_udelay < udelay_max)
			loop_udelay *= 2;
	} while (1);

out:
	mmc_retune_release(card->host);
	return err;
}
2111
2112static unsigned int mmc_align_erase_size(struct mmc_card *card,
2113 unsigned int *from,
2114 unsigned int *to,
2115 unsigned int nr)
2116{
2117 unsigned int from_new = *from, nr_new = nr, rem;
2118
2119
2120
2121
2122
2123 if (is_power_of_2(card->erase_size)) {
2124 unsigned int temp = from_new;
2125
2126 from_new = round_up(temp, card->erase_size);
2127 rem = from_new - temp;
2128
2129 if (nr_new > rem)
2130 nr_new -= rem;
2131 else
2132 return 0;
2133
2134 nr_new = round_down(nr_new, card->erase_size);
2135 } else {
2136 rem = from_new % card->erase_size;
2137 if (rem) {
2138 rem = card->erase_size - rem;
2139 from_new += rem;
2140 if (nr_new > rem)
2141 nr_new -= rem;
2142 else
2143 return 0;
2144 }
2145
2146 rem = nr_new % card->erase_size;
2147 if (rem)
2148 nr_new -= rem;
2149 }
2150
2151 if (nr_new == 0)
2152 return 0;
2153
2154 *to = from_new + nr_new;
2155 *from = from_new;
2156
2157 return nr_new;
2158}
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
/*
 * mmc_erase - erase sectors on a card
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (MMC_ERASE_ARG, MMC_TRIM_ARG, ...)
 *
 * Validates that the host, card and argument combination supports the
 * requested operation, aligns plain erases to the erase-group size,
 * and issues the erase (split in two when a trim crosses an erase
 * group boundary on cards that need it).
 *
 * Returns 0 on success, -EOPNOTSUPP/-EINVAL on unsupported or
 * misaligned requests, or the error from mmc_do_erase().
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;
	int err;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	/* SD only supports the plain erase argument */
	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	/* Secure erase must already be aligned to the erase size */
	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG)
		nr = mmc_align_erase_size(card, &from, &to, nr);

	if (nr == 0)
		return 0;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive for the erase commands */
	to -= 1;

	/*
	 * On cards with an erase-group boundary quirk (eg_boundary), a
	 * trim spanning a boundary is split into two commands so each
	 * stays within one erase group.
	 */
	rem = card->erase_size - (from % card->erase_size);
	if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
		err = mmc_do_erase(card, from, from + rem - 1, arg);
		from += rem;
		if ((err) || (to <= from))
			return err;
	}

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
2229
2230int mmc_can_erase(struct mmc_card *card)
2231{
2232 if ((card->host->caps & MMC_CAP_ERASE) &&
2233 (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
2234 return 1;
2235 return 0;
2236}
2237EXPORT_SYMBOL(mmc_can_erase);
2238
2239int mmc_can_trim(struct mmc_card *card)
2240{
2241 if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
2242 (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
2243 return 1;
2244 return 0;
2245}
2246EXPORT_SYMBOL(mmc_can_trim);
2247
2248int mmc_can_discard(struct mmc_card *card)
2249{
2250
2251
2252
2253
2254 if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
2255 return 1;
2256 return 0;
2257}
2258EXPORT_SYMBOL(mmc_can_discard);
2259
2260int mmc_can_sanitize(struct mmc_card *card)
2261{
2262 if (!mmc_can_trim(card) && !mmc_can_erase(card))
2263 return 0;
2264 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
2265 return 1;
2266 return 0;
2267}
2268EXPORT_SYMBOL(mmc_can_sanitize);
2269
2270int mmc_can_secure_erase_trim(struct mmc_card *card)
2271{
2272 if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
2273 !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
2274 return 1;
2275 return 0;
2276}
2277EXPORT_SYMBOL(mmc_can_secure_erase_trim);
2278
2279int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
2280 unsigned int nr)
2281{
2282 if (!card->erase_size)
2283 return 0;
2284 if (from % card->erase_size || nr % card->erase_size)
2285 return 0;
2286 return 1;
2287}
2288EXPORT_SYMBOL(mmc_erase_group_aligned);
2289
/*
 * Compute the largest discard (in sectors) for erase argument @arg
 * that still completes within the host's busy timeout (or a 60 s
 * default when the host imposes no limit).
 *
 * The search doubles a candidate quantity of erase groups until the
 * computed timeout exceeds the limit or stops growing, accumulating
 * the largest workable qty.
 */
static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
	unsigned int last_timeout = 0;
	unsigned int max_busy_timeout = host->max_busy_timeout ?
			host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;

	/* Express limits in erase groups (or sectors for SD w/o shift) */
	if (card->erase_shift) {
		max_qty = UINT_MAX >> card->erase_shift;
		min_qty = card->pref_erase >> card->erase_shift;
	} else if (mmc_card_sd(card)) {
		max_qty = UINT_MAX;
		min_qty = card->pref_erase;
	} else {
		max_qty = UINT_MAX / card->erase_size;
		min_qty = card->pref_erase / card->erase_size;
	}

	/*
	 * Grow qty by successively smaller powers of two while the
	 * timeout stays within max_busy_timeout.  Quantities up to
	 * min_qty (the preferred erase size) are allowed even if they
	 * exceed the timeout, so discard never drops below pref_erase.
	 * A timeout that stops increasing also ends the search.
	 */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);

			if (qty + x > min_qty && timeout > max_busy_timeout)
				break;

			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	/*
	 * When only a single erase group fits in the budget, a discard
	 * may still cross a group boundary; record that so mmc_erase()
	 * can split such requests (eg_boundary).  Otherwise reserve one
	 * group of slack by decrementing qty.
	 */
	if (qty == 1)
		card->eg_boundary = 1;
	else
		qty--;

	/* Convert the group count back to sectors */
	if (card->erase_shift)
		max_discard = qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty + 1;
	else
		max_discard = qty * card->erase_size;

	return max_discard;
}
2367
2368unsigned int mmc_calc_max_discard(struct mmc_card *card)
2369{
2370 struct mmc_host *host = card->host;
2371 unsigned int max_discard, max_trim;
2372
2373
2374
2375
2376
2377
2378 if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
2379 return card->pref_erase;
2380
2381 max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
2382 if (max_discard && mmc_can_trim(card)) {
2383 max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
2384 if (max_trim < max_discard)
2385 max_discard = max_trim;
2386 } else if (max_discard < card->erase_size) {
2387 max_discard = 0;
2388 }
2389 pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
2390 mmc_hostname(host), max_discard, host->max_busy_timeout ?
2391 host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
2392 return max_discard;
2393}
2394EXPORT_SYMBOL(mmc_calc_max_discard);
2395
2396bool mmc_card_is_blockaddr(struct mmc_card *card)
2397{
2398 return card ? mmc_card_blockaddr(card) : false;
2399}
2400EXPORT_SYMBOL(mmc_card_is_blockaddr);
2401
2402int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
2403{
2404 struct mmc_command cmd = {};
2405
2406 if (mmc_card_blockaddr(card) || mmc_card_ddr52(card) ||
2407 mmc_card_hs400(card) || mmc_card_hs400es(card))
2408 return 0;
2409
2410 cmd.opcode = MMC_SET_BLOCKLEN;
2411 cmd.arg = blocklen;
2412 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2413 return mmc_wait_for_cmd(card->host, &cmd, 5);
2414}
2415EXPORT_SYMBOL(mmc_set_blocklen);
2416
2417int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
2418 bool is_rel_write)
2419{
2420 struct mmc_command cmd = {};
2421
2422 cmd.opcode = MMC_SET_BLOCK_COUNT;
2423 cmd.arg = blockcount & 0x0000FFFF;
2424 if (is_rel_write)
2425 cmd.arg |= 1 << 31;
2426 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2427 return mmc_wait_for_cmd(card->host, &cmd, 5);
2428}
2429EXPORT_SYMBOL(mmc_set_blockcount);
2430
/*
 * Reset the card hardware before initialization: run the power
 * sequence reset, then the host's hw_reset hook if the host both
 * supports and implements it.
 */
static void mmc_hw_reset_for_init(struct mmc_host *host)
{
	mmc_pwrseq_reset(host);

	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return;
	host->ops->hw_reset(host);
}
2439
2440int mmc_hw_reset(struct mmc_host *host)
2441{
2442 int ret;
2443
2444 if (!host->card)
2445 return -EINVAL;
2446
2447 mmc_bus_get(host);
2448 if (!host->bus_ops || host->bus_dead || !host->bus_ops->hw_reset) {
2449 mmc_bus_put(host);
2450 return -EOPNOTSUPP;
2451 }
2452
2453 ret = host->bus_ops->hw_reset(host);
2454 mmc_bus_put(host);
2455
2456 if (ret)
2457 pr_warn("%s: tried to HW reset card, got error %d\n",
2458 mmc_hostname(host), ret);
2459
2460 return ret;
2461}
2462EXPORT_SYMBOL(mmc_hw_reset);
2463
2464int mmc_sw_reset(struct mmc_host *host)
2465{
2466 int ret;
2467
2468 if (!host->card)
2469 return -EINVAL;
2470
2471 mmc_bus_get(host);
2472 if (!host->bus_ops || host->bus_dead || !host->bus_ops->sw_reset) {
2473 mmc_bus_put(host);
2474 return -EOPNOTSUPP;
2475 }
2476
2477 ret = host->bus_ops->sw_reset(host);
2478 mmc_bus_put(host);
2479
2480 if (ret)
2481 pr_warn("%s: tried to SW reset card, got error %d\n",
2482 mmc_hostname(host), ret);
2483
2484 return ret;
2485}
2486EXPORT_SYMBOL(mmc_sw_reset);
2487
/*
 * Try to initialize a card at the given clock frequency.
 *
 * Powers up the bus and probes the protocols in order: SDIO, then SD,
 * then (e)MMC, skipping any the host has opted out of via caps2.
 * Returns 0 when a card attaches; powers off and returns -EIO when
 * nothing responds at this frequency.
 */
static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

	pr_debug("%s: %s: trying to init card at %u Hz\n",
		mmc_hostname(host), __func__, host->f_init);

	mmc_power_up(host, host->ocr_avail);

	/* Give the card a hardware reset before identification */
	mmc_hw_reset_for_init(host);

	/*
	 * sdio_reset() sends CMD52; SD/eMMC cards are expected to
	 * ignore it, so it is only issued when SDIO is possible.
	 */
	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
		sdio_reset(host);

	mmc_go_idle(host);

	if (!(host->caps2 & MMC_CAP2_NO_SD))
		mmc_send_if_cond(host, host->ocr_avail);

	/* Probe order matters: SDIO first, then SD, then MMC */
	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
		if (!mmc_attach_sdio(host))
			return 0;

	if (!(host->caps2 & MMC_CAP2_NO_SD))
		if (!mmc_attach_sd(host))
			return 0;

	if (!(host->caps2 & MMC_CAP2_NO_MMC))
		if (!mmc_attach_mmc(host))
			return 0;

	mmc_power_off(host);
	return -EIO;
}
2533
/*
 * Check whether the current card has been removed, using the bus
 * handler's alive() test.  Returns 1 when the card is gone (and marks
 * it removed), 0 when it still responds.
 */
int _mmc_detect_card_removed(struct mmc_host *host)
{
	int ret;

	if (!host->card || mmc_card_removed(host->card))
		return 1;

	ret = host->bus_ops->alive(host);

	/*
	 * The card still answers commands but the card-detect line says
	 * it is gone: the removal is happening right now ("removed too
	 * slowly"), so schedule another detect pass shortly.
	 */
	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
		mmc_detect_change(host, msecs_to_jiffies(200));
		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
	}

	if (ret) {
		mmc_card_set_removed(host->card);
		pr_debug("%s: card remove detected\n", mmc_hostname(host));
	}

	return ret;
}
2562
/*
 * mmc_detect_card_removed - check (and cache) whether the card is gone
 * @host: host to check; must be claimed by the caller
 *
 * Returns 1 if the card has been removed, 0 if present.  The actual
 * bus-level check is only performed when a detect event is pending or
 * the host polls for card presence; otherwise the cached removed flag
 * is returned.
 */
int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	if (!card)
		return 1;

	/* Non-removable cards cannot disappear */
	if (!mmc_card_is_removable(host))
		return 0;

	ret = mmc_card_removed(card);

	/* Without a pending event or polling, trust the cached state */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
		return ret;

	host->detect_change = 0;
	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
			/*
			 * Re-run detection immediately so the polled
			 * host tears down the removed card promptly.
			 */
			cancel_delayed_work(&host->detect);
			_mmc_detect_change(host, 0, false);
		}
	}

	return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);
2600
/*
 * mmc_rescan - delayed work that (re)scans a host for cards
 * @work: embedded in host->detect
 *
 * Lets an attached bus handler verify its card is still present, and
 * when no bus is attached, attempts to initialize a new card at each
 * of the standard init frequencies.  Reschedules itself for hosts
 * that need polling.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;

	if (host->rescan_disable)
		return;

	/* Non-removable slots only need to be scanned once */
	if (!mmc_card_is_removable(host) && host->rescan_entered)
		return;
	host->rescan_entered = 1;

	if (host->trigger_card_event && host->ops->card_event) {
		mmc_claim_host(host);
		host->ops->card_event(host);
		mmc_release_host(host);
		host->trigger_card_event = false;
	}

	mmc_bus_get(host);

	/* Let the attached bus handler check whether its card is alive */
	if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
		host->bus_ops->detect(host);

	host->detect_change = 0;

	/*
	 * The detect() call above may have released the last bus
	 * reference; drop and retake ours so a dead bus is actually
	 * torn down before we inspect bus_ops again.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* A bus is still attached: nothing more to do */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	mmc_bus_put(host);

	mmc_claim_host(host);
	/* Card-detect says the slot is empty: just make sure power is off */
	if (mmc_card_is_removable(host) && host->ops->get_cd &&
			host->ops->get_cd(host) == 0) {
		mmc_power_off(host);
		mmc_release_host(host);
		goto out;
	}

	/* Try each init frequency, highest first, until a card attaches */
	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
			break;
		if (freqs[i] <= host->f_min)
			break;
	}
	mmc_release_host(host);

 out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
2672
/*
 * mmc_start_host - begin operating a newly added host
 * @host: host to start
 *
 * Initializes the starting frequency and power state, optionally
 * powers the slot up before the first scan, wires up the card-detect
 * GPIO interrupt and kicks off an initial detect pass.
 */
void mmc_start_host(struct mmc_host *host)
{
	host->f_init = max(freqs[0], host->f_min);
	host->rescan_disable = 0;
	host->ios.power_mode = MMC_POWER_UNDEFINED;

	/* Pre-scan power-up unless the host opted out */
	if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
		mmc_claim_host(host);
		mmc_power_up(host, host->ocr_avail);
		mmc_release_host(host);
	}

	mmc_gpiod_request_cd_irq(host);
	_mmc_detect_change(host, 0, false);
}
2688
/*
 * mmc_stop_host - stop operating a host that is being removed
 * @host: host to stop
 *
 * Quiesces card detection (IRQ and delayed work), removes any
 * attached card/bus handler, and powers the slot off.
 */
void mmc_stop_host(struct mmc_host *host)
{
	/* Silence the card-detect interrupt first */
	if (host->slot.cd_irq >= 0) {
		mmc_gpio_set_cd_wake(host, false);
		disable_irq(host->slot.cd_irq);
	}

	host->rescan_disable = 1;
	cancel_delayed_work_sync(&host->detect);

	/* Clear PM flags; the host is going away */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Tear down the attached card, then the bus, then power */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	mmc_claim_host(host);
	mmc_power_off(host);
	mmc_release_host(host);
}
2719
2720int mmc_power_save_host(struct mmc_host *host)
2721{
2722 int ret = 0;
2723
2724 pr_debug("%s: %s: powering down\n", mmc_hostname(host), __func__);
2725
2726 mmc_bus_get(host);
2727
2728 if (!host->bus_ops || host->bus_dead) {
2729 mmc_bus_put(host);
2730 return -EINVAL;
2731 }
2732
2733 if (host->bus_ops->power_save)
2734 ret = host->bus_ops->power_save(host);
2735
2736 mmc_bus_put(host);
2737
2738 mmc_power_off(host);
2739
2740 return ret;
2741}
2742EXPORT_SYMBOL(mmc_power_save_host);
2743
2744int mmc_power_restore_host(struct mmc_host *host)
2745{
2746 int ret;
2747
2748 pr_debug("%s: %s: powering up\n", mmc_hostname(host), __func__);
2749
2750 mmc_bus_get(host);
2751
2752 if (!host->bus_ops || host->bus_dead) {
2753 mmc_bus_put(host);
2754 return -EINVAL;
2755 }
2756
2757 mmc_power_up(host, host->card->ocr);
2758 ret = host->bus_ops->power_restore(host);
2759
2760 mmc_bus_put(host);
2761
2762 return ret;
2763}
2764EXPORT_SYMBOL(mmc_power_restore_host);
2765
2766#ifdef CONFIG_PM_SLEEP
2767
2768
2769
2770
/*
 * System suspend/resume notifier.
 *
 * On suspend: disable rescans, flush pending detect work, and give
 * the bus handler a chance to veto via pre_suspend; if it does, a
 * removable card is forcibly removed so it can be re-probed on
 * resume.  On resume: re-enable rescans and schedule a detect pass.
 */
static int mmc_pm_notify(struct notifier_block *notify_block,
			unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;
	int err = 0;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		/* Block new rescans and wait out any in flight */
		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops)
			break;

		/* err != 0 means the handler refused to suspend as-is */
		if (host->bus_ops->pre_suspend)
			err = host->bus_ops->pre_suspend(host);
		if (!err)
			break;

		if (!mmc_card_is_removable(host)) {
			dev_warn(mmc_dev(host),
				 "pre_suspend failed for non-removable host: "
				 "%d\n", err);
			/* Nothing else we can do: proceed with suspend */
			break;
		}

		/* Remove the card so it is re-detected after resume */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		_mmc_detect_change(host, 0, false);

	}

	return 0;
}
2827
/* Hook this host's mmc_pm_notify() into the system PM notifier chain. */
void mmc_register_pm_notifier(struct mmc_host *host)
{
	host->pm_notify.notifier_call = mmc_pm_notify;
	register_pm_notifier(&host->pm_notify);
}
2833
/* Remove this host's PM notifier (counterpart of the register call). */
void mmc_unregister_pm_notifier(struct mmc_host *host)
{
	unregister_pm_notifier(&host->pm_notify);
}
2838#endif
2839
2840static int __init mmc_init(void)
2841{
2842 int ret;
2843
2844 ret = mmc_register_bus();
2845 if (ret)
2846 return ret;
2847
2848 ret = mmc_register_host_class();
2849 if (ret)
2850 goto unregister_bus;
2851
2852 ret = sdio_register_bus();
2853 if (ret)
2854 goto unregister_host_class;
2855
2856 return 0;
2857
2858unregister_host_class:
2859 mmc_unregister_host_class();
2860unregister_bus:
2861 mmc_unregister_bus();
2862 return ret;
2863}
2864
/* Module exit: unregister everything in reverse registration order. */
static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
}
2871
2872subsys_initcall(mmc_init);
2873module_exit(mmc_exit);
2874
2875MODULE_LICENSE("GPL");
2876