/*
 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller
 * Interface driver
 *
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/swiotlb.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("AC12 err: 0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_ACMD12_ERR),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (ktime_after(ktime_get(), timeout)) {
			pr_err("%s: Reset 0x%x never completed.\n",
			       mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears the preset value enable */
		host->preset_enabled = false;
	}
}

static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_set_default_irqs(host);

	host->cqe_on = false;

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	led_classdev_unregister(&host->led);
}

/* With LEDS_CLASS, the LED is driven via its trigger, not the request path */
static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
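
/*
 * Illustrative note (not driver code): the Buffer Data Port register is
 * read 32 bits at a time and unpacked least-significant byte first, so a
 * FIFO word of 0x44332211 lands in memory as 0x11, 0x22, 0x33, 0x44.
 * A minimal sketch of the unpacking done by the loop above:
 *
 *	u32 scratch = 0x44332211;
 *	u8 out[4];
 *	int i;
 *
 *	for (i = 0; i < 4; i++) {
 *		out[i] = scratch & 0xFF;	// 0x11, 0x22, 0x33, 0x44
 *		scratch >>= 8;
 *	}
 */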

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
	    (host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	/* Bounce write data onto the buffer */
	if (host->bounce_buffer) {
		unsigned int length = data->blksz * data->blocks;

		if (length > host->bounce_buffer_size) {
			pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
			       mmc_hostname(host->mmc), length,
			       host->bounce_buffer_size);
			return -EIO;
		}
		if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
			/* Copy the data to the bounce buffer */
			sg_copy_to_buffer(data->sg, data->sg_len,
					  host->bounce_buffer,
					  length);
		}
		/* Switch ownership to the DMA */
		dma_sync_single_for_device(host->mmc->parent,
					   host->bounce_addr,
					   host->bounce_buffer_size,
					   mmc_get_dma_dir(data));
		/* Just a dummy value */
		sg_count = 1;
	} else {
		/* Just access the data directly from memory */
		sg_count = dma_map_sg(mmc_dev(host->mmc),
				      data->sg, data->sg_len,
				      mmc_get_dma_dir(data));
	}

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}
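
/*
 * Illustrative example (not driver code): a 4 KiB chunk at DMA address
 * 0x12345678 written via sdhci_adma_write_desc() as a "tran, valid"
 * descriptor ends up laid out (little-endian) as:
 *
 *	dma_desc->cmd     = cpu_to_le16(ADMA2_TRAN_VALID);
 *	dma_desc->len     = cpu_to_le16(4096);
 *	dma_desc->addr_lo = cpu_to_le32(0x12345678);
 *
 * addr_hi is only written when SDHCI_USE_64_BIT_DMA is set.
 */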

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		if (len) {
			/* tran, valid */
			sdhci_adma_write_desc(host, desc, addr, len,
					      ADMA2_TRAN_VALID);
			desc += host->desc_sz;
		}

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}

static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

static u32 sdhci_sdma_address(struct sdhci_host *host)
{
	if (host->bounce_buffer)
		return host->bounce_addr;
	else
		return sg_dma_address(host->data->sg);
}

static unsigned int sdhci_target_timeout(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 struct mmc_data *data)
{
	unsigned int target_timeout;

	/* timeout in us */
	if (!data) {
		target_timeout = cmd->busy_timeout * 1000;
	} else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz.  target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz.  Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	return target_timeout;
}

static void sdhci_calc_sw_timeout(struct sdhci_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios *ios = &mmc->ios;
	unsigned char bus_width = 1 << ios->bus_width;
	unsigned int blksz;
	unsigned int freq;
	u64 target_timeout;
	u64 transfer_time;

	target_timeout = sdhci_target_timeout(host, cmd, data);
	target_timeout *= NSEC_PER_USEC;

	if (data) {
		blksz = data->blksz;
		freq = host->mmc->actual_clock ? : host->clock;
		transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
		do_div(transfer_time, freq);
		/* multiply by '2' to account for any unknowns */
		transfer_time = transfer_time * 2;
		/* calculate timeout for the entire data */
		host->data_timeout = data->blocks * target_timeout +
				     transfer_time;
	} else {
		host->data_timeout = target_timeout;
	}

	if (host->data_timeout)
		host->data_timeout += MMC_CMD_TRANSFER_TIME;
}
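
/*
 * Worked example for sdhci_calc_sw_timeout() (illustrative): a 512-byte
 * block on a 4-bit bus at 50 MHz gives
 *	transfer_time = 512 * NSEC_PER_SEC * (8 / 4) / 50000000 = 20480 ns,
 * doubled to ~41 us per block to cover unknowns, then added to
 * data->blocks * target_timeout to form host->data_timeout.
 */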

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
			     bool *too_big)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	*too_big = true;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE.  The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	target_timeout = sdhci_target_timeout(host, cmd, data);

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
			DBG("Too large timeout 0x%x requested for CMD%d!\n",
			    count, cmd->opcode);
		count = 0xE;
	} else {
		*too_big = false;
	}

	return count;
}
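
/*
 * Worked example for sdhci_calc_timeout() (illustrative): with
 * host->timeout_clk = 50000 (kHz, i.e. 50 MHz) the smallest hardware
 * timeout is (1 << 13) * 1000 / 50000 = 163 us. For a 100000 us target
 * the loop doubles 163 us ten times (163 * 2^10 = ~167 ms >= 100 ms), so
 * count = 10 is written to the Timeout Control register, selecting a
 * hardware timeout of TMCLK * 2^(13 + 10) cycles.
 */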

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
{
	if (enable)
		host->ier |= SDHCI_INT_DATA_TIMEOUT;
	else
		host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		bool too_big = false;

		count = sdhci_calc_timeout(host, cmd, &too_big);

		if (too_big &&
		    host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
			sdhci_calc_sw_timeout(host, cmd);
			sdhci_set_data_timeout_irq(host, false);
		} else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
			sdhci_set_data_timeout_irq(host, true);
		}

		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;

	host->data_timeout = 0;

	if (sdhci_data_line_cmd(cmd))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);

			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				sdhci_writel(host,
					     (u64)host->adma_addr >> 32,
					     SDHCI_ADMA_ADDRESS_HI);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_writel(host, sdhci_sdma_address(host),
				     SDHCI_DMA_ADDRESS);
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
		    (host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
		     SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}

static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (sdhci_auto_cmd12(host, cmd->mrq) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
				(mrq->data->stop && mrq->data->stop->error))) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);

	tasklet_schedule(&host->finish_tasklet);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	__sdhci_finish_mrq(host, mrq);
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !data->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			if (!host->cmd || host->cmd == data_cmd)
				sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		/*
		 * 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is upper layer's
		 * responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			sdhci_send_command(host, data->stop);
		}
	} else {
		sdhci_finish_mrq(host, data->mrq);
	}
}

static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			sdhci_finish_mrq(host, cmd->mrq);
			return;
		}
		timeout--;
		mdelay(1);
	}

	host->cmd = cmd;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
	}

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
		       mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		sdhci_finish_mrq(host, cmd->mrq);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	timeout = jiffies;
	if (host->data_timeout)
		timeout += nsecs_to_jiffies(host->data_timeout);
	else if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);

static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, reg;

	for (i = 0; i < 4; i++) {
		reg = SDHCI_RESPONSE + (3 - i) * 4;
		cmd->resp[i] = sdhci_readl(host, reg);
	}

	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
		return;

	/* CRC is stripped so we need to do some shifting */
	for (i = 0; i < 4; i++) {
		cmd->resp[i] <<= 8;
		if (i != 3)
			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
	}
}
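
/*
 * Illustrative example (not driver code): the hardware strips the CRC7 and
 * end bit, so the remaining 120 response bits sit 8 bits "too low" in the
 * response registers. With raw[] holding the register values as read, the
 * shifting loop above rebuilds the conventional layout:
 *
 *	resp[0] = (raw[0] << 8) | (raw[1] >> 24);
 *	resp[1] = (raw[1] << 8) | (raw[2] >> 24);
 *	resp[2] = (raw[2] << 8) | (raw[3] >> 24);
 *	resp[3] =  raw[3] << 8;
 */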

static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			sdhci_read_rsp_136(host, cmd);
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		sdhci_send_command(host, cmd->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			sdhci_finish_mrq(host, cmd->mrq);
	}
}

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);
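
/*
 * Worked example for sdhci_calc_clk() (illustrative): a v3.00 host with
 * max_clk = 200 MHz, no programmable clock and a 25 MHz request walks the
 * even divisors until 200 MHz / 8 <= 25 MHz, so real_div = 8 (reported via
 * *actual_clock) and div >> 1 = 4 is written into the 10-bit divider field:
 * SDCLK = base clock / (2 * N), with N being the register value.
 */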

void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
	ktime_t timeout;

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (ktime_after(ktime_get(), timeout)) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_clk);

void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
	sdhci_enable_clk(host, clk);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);

static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}

void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
			   unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
		/*
		 * Without a regulator, SDHCI does not support 2.0v
		 * so we only get here if the driver deliberately
		 * added the 2.0v range to ocr_avail. Map it to 1.8v
		 * for the purpose of turning on the power.
		 */
		case MMC_VDD_20_21:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on the power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10 ms delay before they
		 * can apply clock after applying power.
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);

void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	if (IS_ERR(host->mmc->supply.vmmc))
		sdhci_set_power_noreg(host, mode, vdd);
	else
		sdhci_set_power_reg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	sdhci_led_activate(host);

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (sdhci_auto_cmd12(host, mrq)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		mrq->cmd->error = -ENOMEDIUM;
		sdhci_finish_mrq(host, mrq);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);

void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u8 ctrl;

	if (ios->power_mode == MMC_POWER_UNDEFINED)
		return;

	if (host->flags & SDHCI_DEVICE_DEAD) {
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
		if (ios->timing == MMC_TIMING_SD_HS ||
		    ios->timing == MMC_TIMING_MMC_HS ||
		    ios->timing == MMC_TIMING_MMC_HS400 ||
		    ios->timing == MMC_TIMING_MMC_HS200 ||
		    ios->timing == MMC_TIMING_MMC_DDR52 ||
		    ios->timing == MMC_TIMING_UHS_SDR50 ||
		    ios->timing == MMC_TIMING_UHS_SDR104 ||
		    ios->timing == MMC_TIMING_UHS_DDR50 ||
		    ios->timing == MMC_TIMING_UHS_SDR25)
			ctrl |= SDHCI_CTRL_HISPD;
		else
			ctrl &= ~SDHCI_CTRL_HISPD;
	}

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers misbehave on some ios operations,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
}
EXPORT_SYMBOL_GPL(sdhci_set_ios);

static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (!mmc_card_is_removable(host->mmc))
		return 1;

	/*
	 * Try slot gpio detect; if defined it takes precedence
	 * over the built-in controller functionality.
	 */
	if (gpio_cd >= 0)
		return !!gpio_cd;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}

static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}

#define SAMPLE_COUNT	5

static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (sdhci_check_ro(host)) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}

static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}

static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	}
}

void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	if (enable)
		pm_runtime_get_noresume(host->mmc->parent);

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);

	if (!enable)
		pm_runtime_put_noidle(host->mmc->parent);
}
EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);

int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
				      struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		if (!(host->flags & SDHCI_SIGNALING_330))
			return -EINVAL;
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

		pr_warn("%s: 3.3V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!(host->flags & SDHCI_SIGNALING_180))
			return -EINVAL;
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register.
		 */
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		/* Some controllers need to do more when switching */
		if (host->ops->voltage_switch)
			host->ops->voltage_switch(host);

		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;

		pr_warn("%s: 1.8V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
		if (!(host->flags & SDHCI_SIGNALING_120))
			return -EINVAL;
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		return 0;
	default:
		/* No signal voltage switch required */
		return 0;
	}
}
EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);

static int sdhci_card_busy(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 present_state;

	/* Check whether DAT[0] is 0 */
	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);

	return !(present_state & SDHCI_DATA_0_LVL_MASK);
}

static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->flags |= SDHCI_HS400_TUNING;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}

static void sdhci_start_tuning(struct sdhci_host *host)
{
	u16 ctrl;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
		ctrl |= SDHCI_CTRL_TUNED_CLK;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, tuning command
	 * generates Buffer Read Ready interrupt, so enable that.
	 *
	 * Note: The spec clearly says that when tuning sequence
	 * is being performed, the controller does not generate
	 * interrupts other than Buffer Read Ready, so enable only
	 * Buffer Read Ready interrupt here.
	 */
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_end_tuning(struct sdhci_host *host)
{
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_reset_tuning(struct sdhci_host *host)
{
	u16 ctrl;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl &= ~SDHCI_CTRL_TUNED_CLK;
	ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
}

static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
{
	sdhci_reset_tuning(host);

	sdhci_do_reset(host, SDHCI_RESET_CMD);
	sdhci_do_reset(host, SDHCI_RESET_DATA);

	sdhci_end_tuning(host);

	mmc_abort_tuning(host->mmc, opcode);
}

/*
 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
 * tuning command does not have a data payload (or rather the hardware does it
 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
 * interrupt setup is different to other commands and there is no timeout
 * interrupt so special handling is needed.
 */
static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
{
	struct mmc_host *mmc = host->mmc;
	struct mmc_command cmd = {};
	struct mmc_request mrq = {};
	unsigned long flags;
	u32 b = host->sdma_boundary;

	spin_lock_irqsave(&host->lock, flags);

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
	cmd.mrq = &mrq;

	mrq.cmd = &cmd;
	/*
	 * In response to CMD19, the card sends 64 bytes of tuning
	 * block to the Host Controller. So we set the block size
	 * to 64 here.
	 */
	if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
	    mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
	else
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);

	/*
	 * The tuning block is sent by the card to the host controller.
	 * So we set the TRNS_READ bit in the Transfer Mode register.
	 * This also takes care of setting DMA Enable and Multi Block
	 * Select in the same register to 0.
	 */
	sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

	sdhci_send_command(host, &cmd);

	host->cmd = NULL;

	sdhci_del_timer(host, &mrq);

	host->tuning_done = 0;

	spin_unlock_irqrestore(&host->lock, flags);

	/* Wait for Buffer Read Ready interrupt */
	wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
			   msecs_to_jiffies(50));
}

static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	int i;

	/*
	 * Issue opcode repeatedly till Execute Tuning is set to 0 or the
	 * number of loops reaches MAX_TUNING_LOOP (40).
	 */
	for (i = 0; i < MAX_TUNING_LOOP; i++) {
		u16 ctrl;

		sdhci_send_tuning(host, opcode);

		if (!host->tuning_done) {
			pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
				mmc_hostname(host->mmc));
			sdhci_abort_tuning(host, opcode);
			return;
		}

		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
			if (ctrl & SDHCI_CTRL_TUNED_CLK)
				return; /* Success! */
			break;
		}

		/* Spec does not require a delay between tuning cycles */
		if (host->tuning_delay > 0)
			mdelay(host->tuning_delay);
	}

	pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
		mmc_hostname(host->mmc));
	sdhci_reset_tuning(host);
}

int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err = 0;
	unsigned int tuning_count = 0;
	bool hs400_tuning;

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;

	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
		tuning_count = host->tuning_count;

	/*
	 * The Host Controller needs tuning in case of SDR104 and DDR50
	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
	 * the Capabilities register.
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
	 */
	switch (host->timing) {
	/* HS400 tuning is done in HS200 mode */
	case MMC_TIMING_MMC_HS400:
		err = -EINVAL;
		goto out;

	case MMC_TIMING_MMC_HS200:
		/*
		 * Periodic re-tuning for HS400 is not expected to be needed, so
		 * disable it here.
		 */
		if (hs400_tuning)
			tuning_count = 0;
		break;

	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_UHS_DDR50:
		break;

	case MMC_TIMING_UHS_SDR50:
		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
			break;
		/* FALLTHROUGH */

	default:
		goto out;
	}

	if (host->ops->platform_execute_tuning) {
		err = host->ops->platform_execute_tuning(host, opcode);
		goto out;
	}

	host->mmc->retune_period = tuning_count;

	if (host->tuning_delay < 0)
		host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;

	sdhci_start_tuning(host);

	__sdhci_execute_tuning(host, opcode);

	sdhci_end_tuning(host);
out:
	host->flags &= ~SDHCI_HS400_TUNING;

	return err;
}
EXPORT_SYMBOL_GPL(sdhci_execute_tuning);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
	/* Host Controller v3.00 defines preset value registers */
	if (host->version < SDHCI_SPEC_300)
		return;

	/*
	 * We only enable or disable Preset Value if they are not already
	 * enabled or disabled respectively. Otherwise, we bail out.
	 */
	if (host->preset_enabled != enable) {
		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		if (enable)
			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
		else
			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (enable)
			host->flags |= SDHCI_PV_ENABLED;
		else
			host->flags &= ~SDHCI_PV_ENABLED;

		host->preset_enabled = enable;
	}
}
2345
2346static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2347 int err)
2348{
2349 struct sdhci_host *host = mmc_priv(mmc);
2350 struct mmc_data *data = mrq->data;
2351
2352 if (data->host_cookie != COOKIE_UNMAPPED)
2353 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2354 mmc_get_dma_dir(data));
2355
2356 data->host_cookie = COOKIE_UNMAPPED;
2357}
2358
2359static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2360{
2361 struct sdhci_host *host = mmc_priv(mmc);
2362
2363 mrq->data->host_cookie = COOKIE_UNMAPPED;
2364
2365
2366
2367
2368
2369
2370 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
2371 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2372}
2373
2374static inline bool sdhci_has_requests(struct sdhci_host *host)
2375{
2376 return host->cmd || host->data_cmd;
2377}
2378
2379static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2380{
2381 if (host->data_cmd) {
2382 host->data_cmd->error = err;
2383 sdhci_finish_mrq(host, host->data_cmd->mrq);
2384 }
2385
2386 if (host->cmd) {
2387 host->cmd->error = err;
2388 sdhci_finish_mrq(host, host->cmd->mrq);
2389 }
2390}
2391
2392static void sdhci_card_event(struct mmc_host *mmc)
2393{
2394 struct sdhci_host *host = mmc_priv(mmc);
2395 unsigned long flags;
2396 int present;
2397
	/* First check if client has provided their own card event */
2399 if (host->ops->card_event)
2400 host->ops->card_event(host);
2401
2402 present = mmc->ops->get_cd(mmc);
2403
2404 spin_lock_irqsave(&host->lock, flags);
2405
	/* Check sdhci_has_requests() first in case we are runtime suspended */
2407 if (sdhci_has_requests(host) && !present) {
2408 pr_err("%s: Card removed during transfer!\n",
2409 mmc_hostname(host->mmc));
2410 pr_err("%s: Resetting controller.\n",
2411 mmc_hostname(host->mmc));
2412
2413 sdhci_do_reset(host, SDHCI_RESET_CMD);
2414 sdhci_do_reset(host, SDHCI_RESET_DATA);
2415
2416 sdhci_error_out_mrqs(host, -ENOMEDIUM);
2417 }
2418
2419 spin_unlock_irqrestore(&host->lock, flags);
2420}
2421
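/*
 * Default mmc_host_ops. sdhci_alloc_host() copies this table into
 * host->mmc_host_ops so that glue drivers can override individual
 * callbacks without affecting other SDHCI hosts.
 */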
2422static const struct mmc_host_ops sdhci_ops = {
2423 .request = sdhci_request,
2424 .post_req = sdhci_post_req,
2425 .pre_req = sdhci_pre_req,
2426 .set_ios = sdhci_set_ios,
2427 .get_cd = sdhci_get_cd,
2428 .get_ro = sdhci_get_ro,
2429 .hw_reset = sdhci_hw_reset,
2430 .enable_sdio_irq = sdhci_enable_sdio_irq,
2431 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
2432 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
2433 .execute_tuning = sdhci_execute_tuning,
2434 .card_event = sdhci_card_event,
2435 .card_busy = sdhci_card_busy,
2436};
2437
/*****************************************************************************\
 *                                                                           *
 * Tasklets                                                                  *
 *                                                                           *
\*****************************************************************************/

2444static bool sdhci_request_done(struct sdhci_host *host)
2445{
2446 unsigned long flags;
2447 struct mmc_request *mrq;
2448 int i;
2449
2450 spin_lock_irqsave(&host->lock, flags);
2451
2452 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
2453 mrq = host->mrqs_done[i];
2454 if (mrq)
2455 break;
2456 }
2457
2458 if (!mrq) {
2459 spin_unlock_irqrestore(&host->lock, flags);
2460 return true;
2461 }
2462
2463 sdhci_del_timer(host, mrq);
2464
	/*
	 * Always unmap the data buffers if they were mapped by
	 * sdhci_prepare_data() whenever we finish with a request.
	 * This avoids leaking DMA mappings on error.
	 */
2470 if (host->flags & SDHCI_REQ_USE_DMA) {
2471 struct mmc_data *data = mrq->data;
2472
2473 if (data && data->host_cookie == COOKIE_MAPPED) {
2474 if (host->bounce_buffer) {
				/*
				 * On reads, copy the bounced data into the
				 * sglist
				 */
2479 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
2480 unsigned int length = data->bytes_xfered;
2481
2482 if (length > host->bounce_buffer_size) {
2483 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
2484 mmc_hostname(host->mmc),
2485 host->bounce_buffer_size,
2486 data->bytes_xfered);
2487
2488 length = host->bounce_buffer_size;
2489 }
2490 dma_sync_single_for_cpu(
2491 host->mmc->parent,
2492 host->bounce_addr,
2493 host->bounce_buffer_size,
2494 DMA_FROM_DEVICE);
2495 sg_copy_from_buffer(data->sg,
2496 data->sg_len,
2497 host->bounce_buffer,
2498 length);
2499 } else {
				/* No copying, just switch ownership */
2501 dma_sync_single_for_cpu(
2502 host->mmc->parent,
2503 host->bounce_addr,
2504 host->bounce_buffer_size,
2505 mmc_get_dma_dir(data));
2506 }
2507 } else {
			/* Unmap the raw data */
2509 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
2510 data->sg_len,
2511 mmc_get_dma_dir(data));
2512 }
2513 data->host_cookie = COOKIE_UNMAPPED;
2514 }
2515 }
2516
	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
2521 if (sdhci_needs_reset(host, mrq)) {
		/*
		 * Do not finish until command and data lines are available
		 * for reset. Note there can only be one other mrq, so it
		 * cannot also be in mrqs_done, otherwise host->cmd and
		 * host->data_cmd would both be null.
		 */
2528 if (host->cmd || host->data_cmd) {
2529 spin_unlock_irqrestore(&host->lock, flags);
2530 return true;
2531 }
2532
		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
			host->ops->set_clock(host, host->clock);
2537
		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
2540 sdhci_do_reset(host, SDHCI_RESET_CMD);
2541 sdhci_do_reset(host, SDHCI_RESET_DATA);
2542
2543 host->pending_reset = false;
2544 }
2545
2546 if (!sdhci_has_requests(host))
2547 sdhci_led_deactivate(host);
2548
2549 host->mrqs_done[i] = NULL;
2550
2551 spin_unlock_irqrestore(&host->lock, flags);
2552
2553 mmc_request_done(host->mmc, mrq);
2554
2555 return false;
2556}
2557
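/*
 * Completion work runs from a tasklet and simply drains host->mrqs_done;
 * sdhci_request_done() returns true once no finished requests remain.
 */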
2558static void sdhci_tasklet_finish(unsigned long param)
2559{
2560 struct sdhci_host *host = (struct sdhci_host *)param;
2561
2562 while (!sdhci_request_done(host))
2563 ;
2564}
2565
2566static void sdhci_timeout_timer(struct timer_list *t)
2567{
2568 struct sdhci_host *host;
2569 unsigned long flags;
2570
2571 host = from_timer(host, t, timer);
2572
2573 spin_lock_irqsave(&host->lock, flags);
2574
2575 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
2576 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
2577 mmc_hostname(host->mmc));
2578 sdhci_dumpregs(host);
2579
2580 host->cmd->error = -ETIMEDOUT;
2581 sdhci_finish_mrq(host, host->cmd->mrq);
2582 }
2583
2584 spin_unlock_irqrestore(&host->lock, flags);
2585}
2586
2587static void sdhci_timeout_data_timer(struct timer_list *t)
2588{
2589 struct sdhci_host *host;
2590 unsigned long flags;
2591
2592 host = from_timer(host, t, data_timer);
2593
2594 spin_lock_irqsave(&host->lock, flags);
2595
2596 if (host->data || host->data_cmd ||
2597 (host->cmd && sdhci_data_line_cmd(host->cmd))) {
2598 pr_err("%s: Timeout waiting for hardware interrupt.\n",
2599 mmc_hostname(host->mmc));
2600 sdhci_dumpregs(host);
2601
2602 if (host->data) {
2603 host->data->error = -ETIMEDOUT;
2604 sdhci_finish_data(host);
2605 } else if (host->data_cmd) {
2606 host->data_cmd->error = -ETIMEDOUT;
2607 sdhci_finish_mrq(host, host->data_cmd->mrq);
2608 } else {
2609 host->cmd->error = -ETIMEDOUT;
2610 sdhci_finish_mrq(host, host->cmd->mrq);
2611 }
2612 }
2613
2614 spin_unlock_irqrestore(&host->lock, flags);
2615}
2616
/*****************************************************************************\
 *                                                                           *
 * Interrupt handling                                                        *
 *                                                                           *
\*****************************************************************************/

2623static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
2624{
2625 if (!host->cmd) {
		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits.  Until that is done, there very well might be
		 * more interrupts, so ignore them in that case.
		 */
2631 if (host->pending_reset)
2632 return;
2633 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
2634 mmc_hostname(host->mmc), (unsigned)intmask);
2635 sdhci_dumpregs(host);
2636 return;
2637 }
2638
2639 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
2640 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
2641 if (intmask & SDHCI_INT_TIMEOUT)
2642 host->cmd->error = -ETIMEDOUT;
2643 else
2644 host->cmd->error = -EILSEQ;
2645
		/*
		 * If this command initiates a data phase and a response
		 * CRC error is signalled, the card can start transferring
		 * data - the card may have received the command without
		 * error.  We must not terminate the mmc_request early.
		 *
		 * If the card did not receive the command or returned an
		 * error which prevented it sending data, the data phase
		 * will time out.
		 */
2656 if (host->cmd->data &&
2657 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
2658 SDHCI_INT_CRC) {
2659 host->cmd = NULL;
2660 return;
2661 }
2662
2663 sdhci_finish_mrq(host, host->cmd->mrq);
2664 return;
2665 }
2666
2667 if (intmask & SDHCI_INT_RESPONSE)
2668 sdhci_finish_command(host);
2669}
2670
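/*
 * Dump the ADMA descriptor table after an ADMA error. The 64-bit
 * descriptor layout is a superset of the 32-bit one, so the same struct
 * is used for both; addr_hi is only meaningful in 64-bit mode.
 */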
2671static void sdhci_adma_show_error(struct sdhci_host *host)
2672{
2673 void *desc = host->adma_table;
2674
2675 sdhci_dumpregs(host);
2676
2677 while (true) {
2678 struct sdhci_adma2_64_desc *dma_desc = desc;
2679
2680 if (host->flags & SDHCI_USE_64_BIT_DMA)
2681 DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2682 desc, le32_to_cpu(dma_desc->addr_hi),
2683 le32_to_cpu(dma_desc->addr_lo),
2684 le16_to_cpu(dma_desc->len),
2685 le16_to_cpu(dma_desc->cmd));
2686 else
2687 DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2688 desc, le32_to_cpu(dma_desc->addr_lo),
2689 le16_to_cpu(dma_desc->len),
2690 le16_to_cpu(dma_desc->cmd));
2691
2692 desc += host->desc_sz;
2693
2694 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2695 break;
2696 }
2697}
2698
2699static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2700{
2701 u32 command;
2702
	/* CMD19 generates _only_ Buffer Read Ready interrupt */
2704 if (intmask & SDHCI_INT_DATA_AVAIL) {
2705 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
2706 if (command == MMC_SEND_TUNING_BLOCK ||
2707 command == MMC_SEND_TUNING_BLOCK_HS200) {
2708 host->tuning_done = 1;
2709 wake_up(&host->buf_ready_int);
2710 return;
2711 }
2712 }
2713
2714 if (!host->data) {
2715 struct mmc_command *data_cmd = host->data_cmd;
2716
		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
2722 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
2723 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2724 host->data_cmd = NULL;
2725 data_cmd->error = -ETIMEDOUT;
2726 sdhci_finish_mrq(host, data_cmd->mrq);
2727 return;
2728 }
2729 if (intmask & SDHCI_INT_DATA_END) {
2730 host->data_cmd = NULL;
				/*
				 * Some cards handle busy-end interrupt
				 * before the command completed, so make
				 * sure we do things in the proper order.
				 */
2736 if (host->cmd == data_cmd)
2737 return;
2738
2739 sdhci_finish_mrq(host, data_cmd->mrq);
2740 return;
2741 }
2742 }
2743
		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits. Until that is done, there very well might be
		 * more interrupts, so ignore them in that case.
		 */
2749 if (host->pending_reset)
2750 return;
2751
2752 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
2753 mmc_hostname(host->mmc), (unsigned)intmask);
2754 sdhci_dumpregs(host);
2755
2756 return;
2757 }
2758
2759 if (intmask & SDHCI_INT_DATA_TIMEOUT)
2760 host->data->error = -ETIMEDOUT;
2761 else if (intmask & SDHCI_INT_DATA_END_BIT)
2762 host->data->error = -EILSEQ;
2763 else if ((intmask & SDHCI_INT_DATA_CRC) &&
2764 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2765 != MMC_BUS_TEST_R)
2766 host->data->error = -EILSEQ;
2767 else if (intmask & SDHCI_INT_ADMA_ERROR) {
2768 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2769 sdhci_adma_show_error(host);
2770 host->data->error = -EIO;
2771 if (host->ops->adma_workaround)
2772 host->ops->adma_workaround(host, intmask);
2773 }
2774
2775 if (host->data->error)
2776 sdhci_finish_data(host);
2777 else {
2778 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2779 sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 *
		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
		 * should return a valid address to continue from, but as
		 * some controllers are faulty, don't trust them.
		 */
2790 if (intmask & SDHCI_INT_DMA_END) {
2791 u32 dmastart, dmanow;
2792
2793 dmastart = sdhci_sdma_address(host);
2794 dmanow = dmastart + host->data->bytes_xfered;
			/*
			 * Force update to the next DMA block boundary.
			 */
2798 dmanow = (dmanow &
2799 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2800 SDHCI_DEFAULT_BOUNDARY_SIZE;
2801 host->data->bytes_xfered = dmanow - dmastart;
2802 DBG("DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
2803 dmastart, host->data->bytes_xfered, dmanow);
2804 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
2805 }
2806
2807 if (intmask & SDHCI_INT_DATA_END) {
2808 if (host->cmd == host->data_cmd) {
2809
2810
2811
2812
2813
2814 host->data_early = 1;
2815 } else {
2816 sdhci_finish_data(host);
2817 }
2818 }
2819 }
2820}
2821
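/*
 * Hard interrupt handler: read and dispatch SDHCI_INT_STATUS bits, looping
 * (bounded by max_loops) while new status bits keep arriving. Card-detect
 * and SDIO card interrupts are deferred to the threaded handler by
 * returning IRQ_WAKE_THREAD.
 */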
2822static irqreturn_t sdhci_irq(int irq, void *dev_id)
2823{
2824 irqreturn_t result = IRQ_NONE;
2825 struct sdhci_host *host = dev_id;
2826 u32 intmask, mask, unexpected = 0;
2827 int max_loops = 16;
2828
2829 spin_lock(&host->lock);
2830
2831 if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
2832 spin_unlock(&host->lock);
2833 return IRQ_NONE;
2834 }
2835
2836 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2837 if (!intmask || intmask == 0xffffffff) {
2838 result = IRQ_NONE;
2839 goto out;
2840 }
2841
2842 do {
2843 DBG("IRQ status 0x%08x\n", intmask);
2844
2845 if (host->ops->irq) {
2846 intmask = host->ops->irq(host, intmask);
2847 if (!intmask)
2848 goto cont;
2849 }
2850
		/* Clear selected interrupts. */
2852 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2853 SDHCI_INT_BUS_POWER);
2854 sdhci_writel(host, mask, SDHCI_INT_STATUS);
2855
2856 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2857 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
2858 SDHCI_CARD_PRESENT;
2859
			/*
			 * There is an observation on i.MX eSDHC: the INSERT
			 * bit will be immediately set again when it gets
			 * cleared, if a card is inserted. We have to mask
			 * the irq to prevent an interrupt storm which would
			 * freeze the system. The REMOVE bit gets into the
			 * same situation.
			 *
			 * More testing is needed here to ensure this works
			 * for other platforms though.
			 */
2871 host->ier &= ~(SDHCI_INT_CARD_INSERT |
2872 SDHCI_INT_CARD_REMOVE);
2873 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
2874 SDHCI_INT_CARD_INSERT;
2875 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2876 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2877
2878 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
2879 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
2880
2881 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
2882 SDHCI_INT_CARD_REMOVE);
2883 result = IRQ_WAKE_THREAD;
2884 }
2885
2886 if (intmask & SDHCI_INT_CMD_MASK)
2887 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
2888
2889 if (intmask & SDHCI_INT_DATA_MASK)
2890 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
2891
2892 if (intmask & SDHCI_INT_BUS_POWER)
2893 pr_err("%s: Card is consuming too much power!\n",
2894 mmc_hostname(host->mmc));
2895
2896 if (intmask & SDHCI_INT_RETUNE)
2897 mmc_retune_needed(host->mmc);
2898
2899 if ((intmask & SDHCI_INT_CARD_INT) &&
2900 (host->ier & SDHCI_INT_CARD_INT)) {
2901 sdhci_enable_sdio_irq_nolock(host, false);
2902 host->thread_isr |= SDHCI_INT_CARD_INT;
2903 result = IRQ_WAKE_THREAD;
2904 }
2905
2906 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
2907 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2908 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
2909 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
2910
2911 if (intmask) {
2912 unexpected |= intmask;
2913 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
2914 }
2915cont:
2916 if (result == IRQ_NONE)
2917 result = IRQ_HANDLED;
2918
2919 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2920 } while (intmask && --max_loops);
2921out:
2922 spin_unlock(&host->lock);
2923
2924 if (unexpected) {
2925 pr_err("%s: Unexpected interrupt 0x%08x.\n",
2926 mmc_hostname(host->mmc), unexpected);
2927 sdhci_dumpregs(host);
2928 }
2929
2930 return result;
2931}
2932
2933static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
2934{
2935 struct sdhci_host *host = dev_id;
2936 unsigned long flags;
2937 u32 isr;
2938
2939 spin_lock_irqsave(&host->lock, flags);
2940 isr = host->thread_isr;
2941 host->thread_isr = 0;
2942 spin_unlock_irqrestore(&host->lock, flags);
2943
2944 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2945 struct mmc_host *mmc = host->mmc;
2946
2947 mmc->ops->card_event(mmc);
2948 mmc_detect_change(mmc, msecs_to_jiffies(200));
2949 }
2950
2951 if (isr & SDHCI_INT_CARD_INT) {
2952 sdio_run_irqs(host->mmc);
2953
2954 spin_lock_irqsave(&host->lock, flags);
2955 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2956 sdhci_enable_sdio_irq_nolock(host, true);
2957 spin_unlock_irqrestore(&host->lock, flags);
2958 }
2959
2960 return isr ? IRQ_HANDLED : IRQ_NONE;
2961}
2962
/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/

2969#ifdef CONFIG_PM
2970
2971static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
2972{
2973 return mmc_card_is_removable(host->mmc) &&
2974 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
2975 !mmc_can_gpio_cd(host->mmc);
2976}
2977
/*
 * To enable wakeup events, the corresponding events have to be enabled in
 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
 * Table' in the SD Host Controller Standard Specification.
 * It is useless to restore SDHCI_INT_ENABLE state in
 * sdhci_disable_irq_wakeups() since it will be set by
 * sdhci_enable_card_detection() or sdhci_init() later.
 */
2986static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
2987{
2988 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
2989 SDHCI_WAKE_ON_INT;
2990 u32 irq_val = 0;
2991 u8 wake_val = 0;
2992 u8 val;
2993
2994 if (sdhci_cd_irq_can_wakeup(host)) {
2995 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
2996 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
2997 }
2998
2999 if (mmc_card_wake_sdio_irq(host->mmc)) {
3000 wake_val |= SDHCI_WAKE_ON_INT;
3001 irq_val |= SDHCI_INT_CARD_INT;
3002 }
3003
3004 if (!irq_val)
3005 return false;
3006
3007 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3008 val &= ~mask;
3009 val |= wake_val;
3010 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3011
3012 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
3013
3014 host->irq_wake_enabled = !enable_irq_wake(host->irq);
3015
3016 return host->irq_wake_enabled;
3017}
3018
3019static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3020{
3021 u8 val;
3022 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3023 | SDHCI_WAKE_ON_INT;
3024
3025 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3026 val &= ~mask;
3027 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3028
3029 disable_irq_wake(host->irq);
3030
3031 host->irq_wake_enabled = false;
3032}
3033
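/**
 * sdhci_suspend_host - suspend an SDHCI host
 * @host: the host to suspend
 *
 * If the device may wake the system and wakeup IRQs can be armed, the
 * interrupt is kept alive as a wake source; otherwise all interrupts are
 * masked and the IRQ handler is freed until sdhci_resume_host().
 */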
3034int sdhci_suspend_host(struct sdhci_host *host)
3035{
3036 sdhci_disable_card_detection(host);
3037
3038 mmc_retune_timer_stop(host->mmc);
3039
3040 if (!device_may_wakeup(mmc_dev(host->mmc)) ||
3041 !sdhci_enable_irq_wakeups(host)) {
3042 host->ier = 0;
3043 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3044 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3045 free_irq(host->irq, host);
3046 }
3047
3048 return 0;
3049}
3050
3051EXPORT_SYMBOL_GPL(sdhci_suspend_host);
3052
3053int sdhci_resume_host(struct sdhci_host *host)
3054{
3055 struct mmc_host *mmc = host->mmc;
3056 int ret = 0;
3057
3058 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3059 if (host->ops->enable_dma)
3060 host->ops->enable_dma(host);
3061 }
3062
3063 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
3064 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
		/* Card keeps power but host controller does not */
3066 sdhci_init(host, 0);
3067 host->pwr = 0;
3068 host->clock = 0;
3069 mmc->ops->set_ios(mmc, &mmc->ios);
3070 } else {
3071 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
3072 }
3073
3074 if (host->irq_wake_enabled) {
3075 sdhci_disable_irq_wakeups(host);
3076 } else {
3077 ret = request_threaded_irq(host->irq, sdhci_irq,
3078 sdhci_thread_irq, IRQF_SHARED,
3079 mmc_hostname(host->mmc), host);
3080 if (ret)
3081 return ret;
3082 }
3083
3084 sdhci_enable_card_detection(host);
3085
3086 return ret;
3087}
3088
3089EXPORT_SYMBOL_GPL(sdhci_resume_host);
3090
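/*
 * Runtime PM: on suspend, mask every interrupt except the SDIO card
 * interrupt (which may still be needed as a wake source) and wait for any
 * in-flight hard IRQ to finish before marking the host suspended.
 */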
3091int sdhci_runtime_suspend_host(struct sdhci_host *host)
3092{
3093 unsigned long flags;
3094
3095 mmc_retune_timer_stop(host->mmc);
3096
3097 spin_lock_irqsave(&host->lock, flags);
3098 host->ier &= SDHCI_INT_CARD_INT;
3099 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3100 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3101 spin_unlock_irqrestore(&host->lock, flags);
3102
3103 synchronize_hardirq(host->irq);
3104
3105 spin_lock_irqsave(&host->lock, flags);
3106 host->runtime_suspended = true;
3107 spin_unlock_irqrestore(&host->lock, flags);
3108
3109 return 0;
3110}
3111EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3112
3113int sdhci_runtime_resume_host(struct sdhci_host *host)
3114{
3115 struct mmc_host *mmc = host->mmc;
3116 unsigned long flags;
3117 int host_flags = host->flags;
3118
3119 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3120 if (host->ops->enable_dma)
3121 host->ops->enable_dma(host);
3122 }
3123
3124 sdhci_init(host, 0);
3125
3126 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3127 mmc->ios.power_mode != MMC_POWER_OFF) {
		/* Force clock and power re-program */
3129 host->pwr = 0;
3130 host->clock = 0;
3131 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3132 mmc->ops->set_ios(mmc, &mmc->ios);
3133
3134 if ((host_flags & SDHCI_PV_ENABLED) &&
3135 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3136 spin_lock_irqsave(&host->lock, flags);
3137 sdhci_enable_preset_value(host, true);
3138 spin_unlock_irqrestore(&host->lock, flags);
3139 }
3140
3141 if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3142 mmc->ops->hs400_enhanced_strobe)
3143 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3144 }
3145
3146 spin_lock_irqsave(&host->lock, flags);
3147
3148 host->runtime_suspended = false;
3149
	/* Enable SDIO IRQ */
3151 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
3152 sdhci_enable_sdio_irq_nolock(host, true);
3153
	/* Enable Card Detection */
3155 sdhci_enable_card_detection(host);
3156
3157 spin_unlock_irqrestore(&host->lock, flags);
3158
3159 return 0;
3160}
3161EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
3162
#endif /* CONFIG_PM */
3164
/*****************************************************************************\
 *                                                                           *
 * Command Queue Engine                                                      *
 *                                                                           *
\*****************************************************************************/

3171void sdhci_cqe_enable(struct mmc_host *mmc)
3172{
3173 struct sdhci_host *host = mmc_priv(mmc);
3174 unsigned long flags;
3175 u8 ctrl;
3176
3177 spin_lock_irqsave(&host->lock, flags);
3178
3179 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3180 ctrl &= ~SDHCI_CTRL_DMA_MASK;
3181 if (host->flags & SDHCI_USE_64_BIT_DMA)
3182 ctrl |= SDHCI_CTRL_ADMA64;
3183 else
3184 ctrl |= SDHCI_CTRL_ADMA32;
3185 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3186
3187 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3188 SDHCI_BLOCK_SIZE);
3189
	/* Set maximum timeout */
3191 sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL);
3192
3193 host->ier = host->cqe_ier;
3194
3195 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3196 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3197
3198 host->cqe_on = true;
3199
3200 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3201 mmc_hostname(mmc), host->ier,
3202 sdhci_readl(host, SDHCI_INT_STATUS));
3203
3204 spin_unlock_irqrestore(&host->lock, flags);
3205}
3206EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3207
3208void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3209{
3210 struct sdhci_host *host = mmc_priv(mmc);
3211 unsigned long flags;
3212
3213 spin_lock_irqsave(&host->lock, flags);
3214
3215 sdhci_set_default_irqs(host);
3216
3217 host->cqe_on = false;
3218
3219 if (recovery) {
3220 sdhci_do_reset(host, SDHCI_RESET_CMD);
3221 sdhci_do_reset(host, SDHCI_RESET_DATA);
3222 }
3223
3224 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3225 mmc_hostname(mmc), host->ier,
3226 sdhci_readl(host, SDHCI_INT_STATUS));
3227
3228 spin_unlock_irqrestore(&host->lock, flags);
3229}
3230EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3231
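/*
 * Called from the glue driver's interrupt handler while CQE is active.
 * Returns true when the interrupt was consumed here (with any command or
 * data error decoded into *cmd_error / *data_error), false when CQE is off
 * and the caller should fall back to the regular SDHCI IRQ path.
 */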
3232bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3233 int *data_error)
3234{
3235 u32 mask;
3236
3237 if (!host->cqe_on)
3238 return false;
3239
3240 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3241 *cmd_error = -EILSEQ;
3242 else if (intmask & SDHCI_INT_TIMEOUT)
3243 *cmd_error = -ETIMEDOUT;
3244 else
3245 *cmd_error = 0;
3246
3247 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3248 *data_error = -EILSEQ;
3249 else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3250 *data_error = -ETIMEDOUT;
3251 else if (intmask & SDHCI_INT_ADMA_ERROR)
3252 *data_error = -EIO;
3253 else
3254 *data_error = 0;
3255
	/* Clear selected interrupts. */
3257 mask = intmask & host->cqe_ier;
3258 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3259
3260 if (intmask & SDHCI_INT_BUS_POWER)
3261 pr_err("%s: Card is consuming too much power!\n",
3262 mmc_hostname(host->mmc));
3263
3264 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3265 if (intmask) {
3266 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3267 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3268 mmc_hostname(host->mmc), intmask);
3269 sdhci_dumpregs(host);
3270 }
3271
3272 return true;
3273}
3274EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
3275
/*****************************************************************************\
 *                                                                           *
 * Device allocation/registration                                            *
 *                                                                           *
\*****************************************************************************/

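/**
 * sdhci_alloc_host - allocate an SDHCI host structure
 * @dev: parent device
 * @priv_size: extra bytes to allocate for the caller's private data
 *
 * The private area follows the sdhci_host structure. A minimal sketch of
 * the usual calling sequence from a glue driver's probe follows; the names
 * "my_pdev", "my_priv" and "my_sdhci_ops" are placeholders, not symbols
 * defined in this file:
 *
 *	host = sdhci_alloc_host(&my_pdev->dev, sizeof(struct my_priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	host->hw_name = "my-sdhci";
 *	host->ops = &my_sdhci_ops;
 *	host->irq = platform_get_irq(my_pdev, 0);
 *	ret = sdhci_add_host(host);
 *	if (ret)
 *		sdhci_free_host(host);
 */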
3282struct sdhci_host *sdhci_alloc_host(struct device *dev,
3283 size_t priv_size)
3284{
3285 struct mmc_host *mmc;
3286 struct sdhci_host *host;
3287
3288 WARN_ON(dev == NULL);
3289
3290 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3291 if (!mmc)
3292 return ERR_PTR(-ENOMEM);
3293
3294 host = mmc_priv(mmc);
3295 host->mmc = mmc;
3296 host->mmc_host_ops = sdhci_ops;
3297 mmc->ops = &host->mmc_host_ops;
3298
3299 host->flags = SDHCI_SIGNALING_330;
3300
3301 host->cqe_ier = SDHCI_CQE_INT_MASK;
3302 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
3303
3304 host->tuning_delay = -1;
3305
3306 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
3307
3308 return host;
3309}
3310
3311EXPORT_SYMBOL_GPL(sdhci_alloc_host);
3312
3313static int sdhci_set_dma_mask(struct sdhci_host *host)
3314{
3315 struct mmc_host *mmc = host->mmc;
3316 struct device *dev = mmc_dev(mmc);
3317 int ret = -EINVAL;
3318
3319 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
3320 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3321
	/* Try 64-bit mask if hardware is capable of it */
3323 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3324 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3325 if (ret) {
3326 pr_warn("%s: Failed to set 64-bit DMA mask.\n",
3327 mmc_hostname(mmc));
3328 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3329 }
3330 }
3331
	/* 32-bit mask as default & fallback */
3333 if (ret) {
3334 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3335 if (ret)
3336 pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3337 mmc_hostname(mmc));
3338 }
3339
3340 return ret;
3341}
3342
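/*
 * Read the capabilities registers, allowing callers to override the raw
 * values and device tree to patch them via the "sdhci-caps-mask" and
 * "sdhci-caps" properties (mask bits are cleared first, then the new bits
 * are ORed in). An illustrative device tree fragment, with made-up values:
 *
 *	sdhci-caps-mask = /bits/ 64 <0x0000000500000007>;
 *	sdhci-caps      = /bits/ 64 <0x0000000000000001>;
 */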
3343void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
3344{
3345 u16 v;
3346 u64 dt_caps_mask = 0;
3347 u64 dt_caps = 0;
3348
3349 if (host->read_caps)
3350 return;
3351
3352 host->read_caps = true;
3353
3354 if (debug_quirks)
3355 host->quirks = debug_quirks;
3356
3357 if (debug_quirks2)
3358 host->quirks2 = debug_quirks2;
3359
3360 sdhci_do_reset(host, SDHCI_RESET_ALL);
3361
3362 of_property_read_u64(mmc_dev(host->mmc)->of_node,
3363 "sdhci-caps-mask", &dt_caps_mask);
3364 of_property_read_u64(mmc_dev(host->mmc)->of_node,
3365 "sdhci-caps", &dt_caps);
3366
3367 v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
3368 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
3369
3370 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
3371 return;
3372
3373 if (caps) {
3374 host->caps = *caps;
3375 } else {
3376 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
3377 host->caps &= ~lower_32_bits(dt_caps_mask);
3378 host->caps |= lower_32_bits(dt_caps);
3379 }
3380
3381 if (host->version < SDHCI_SPEC_300)
3382 return;
3383
3384 if (caps1) {
3385 host->caps1 = *caps1;
3386 } else {
3387 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
3388 host->caps1 &= ~upper_32_bits(dt_caps_mask);
3389 host->caps1 |= upper_32_bits(dt_caps);
3390 }
3391}
3392EXPORT_SYMBOL_GPL(__sdhci_read_caps);
3393
3394static int sdhci_allocate_bounce_buffer(struct sdhci_host *host)
3395{
3396 struct mmc_host *mmc = host->mmc;
3397 unsigned int max_blocks;
3398 unsigned int bounce_size;
3399 int ret;
3400
	/*
	 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
	 * has proven to make at least kvm local storage slower for this
	 * case, so use this value for now and maybe make it configurable
	 * later.
	 */
3406 bounce_size = SZ_64K;
3407
	/*
	 * Adjust downwards to maximum request size if this is less,
	 * else it can be too big
	 */
3412 if (mmc->max_req_size < bounce_size)
3413 bounce_size = mmc->max_req_size;
3414 max_blocks = bounce_size / 512;
3415
	/*
	 * When we just support one segment, we can get significant
	 * speedups by the help of a bounce buffer to group scattered
	 * reads/writes together.
	 */
3421 host->bounce_buffer = devm_kmalloc(mmc->parent,
3422 bounce_size,
3423 GFP_KERNEL);
3424 if (!host->bounce_buffer) {
3425 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
3426 mmc_hostname(mmc),
3427 bounce_size);
		/*
		 * Exiting with zero here makes sure we proceed with
		 * mmc->max_segs == 1.
		 */
3432 return 0;
3433 }
3434
3435 host->bounce_addr = dma_map_single(mmc->parent,
3436 host->bounce_buffer,
3437 bounce_size,
3438 DMA_BIDIRECTIONAL);
3439 ret = dma_mapping_error(mmc->parent, host->bounce_addr);
3440 if (ret)
		/* Again fall back to max_segs == 1 */
3442 return 0;
3443 host->bounce_buffer_size = bounce_size;
3444
	/* Lie about this since we're bouncing */
3446 mmc->max_segs = max_blocks;
3447 mmc->max_seg_size = bounce_size;
3448 mmc->max_req_size = bounce_size;
3449
3450 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
3451 mmc_hostname(mmc), max_blocks, bounce_size);
3452
3453 return 0;
3454}
3455
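/*
 * First half of host registration: parse capabilities and quirks into
 * mmc_host parameters and set up DMA resources. Pairs with
 * sdhci_cleanup_host() on failure and is followed by __sdhci_add_host().
 */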
3456int sdhci_setup_host(struct sdhci_host *host)
3457{
3458 struct mmc_host *mmc;
3459 u32 max_current_caps;
3460 unsigned int ocr_avail;
3461 unsigned int override_timeout_clk;
3462 u32 max_clk;
3463 int ret;
3464
3465 WARN_ON(host == NULL);
3466 if (host == NULL)
3467 return -EINVAL;
3468
3469 mmc = host->mmc;
3470
	/*
	 * If there are external regulators, get them. Note this must be done
	 * early before resetting the host and reading the capabilities so
	 * that the host can take the appropriate action if regulators are
	 * not available.
	 */
3477 ret = mmc_regulator_get_supply(mmc);
3478 if (ret)
3479 return ret;
3480
3481 DBG("Version: 0x%08x | Present: 0x%08x\n",
3482 sdhci_readw(host, SDHCI_HOST_VERSION),
3483 sdhci_readl(host, SDHCI_PRESENT_STATE));
3484 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n",
3485 sdhci_readl(host, SDHCI_CAPABILITIES),
3486 sdhci_readl(host, SDHCI_CAPABILITIES_1));
3487
3488 sdhci_read_caps(host);
3489
3490 override_timeout_clk = host->timeout_clk;
3491
3492 if (host->version > SDHCI_SPEC_300) {
3493 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
3494 mmc_hostname(mmc), host->version);
3495 }
3496
3497 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
3498 host->flags |= SDHCI_USE_SDMA;
3499 else if (!(host->caps & SDHCI_CAN_DO_SDMA))
3500 DBG("Controller doesn't have SDMA capability\n");
3501 else
3502 host->flags |= SDHCI_USE_SDMA;
3503
3504 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
3505 (host->flags & SDHCI_USE_SDMA)) {
3506 DBG("Disabling DMA as it is marked broken\n");
3507 host->flags &= ~SDHCI_USE_SDMA;
3508 }
3509
3510 if ((host->version >= SDHCI_SPEC_200) &&
3511 (host->caps & SDHCI_CAN_DO_ADMA2))
3512 host->flags |= SDHCI_USE_ADMA;
3513
3514 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
3515 (host->flags & SDHCI_USE_ADMA)) {
3516 DBG("Disabling ADMA as it is marked broken\n");
3517 host->flags &= ~SDHCI_USE_ADMA;
3518 }
3519
	/*
	 * It is assumed that a 64-bit capable device has set a 64-bit DMA
	 * mask and *must* do 64-bit DMA.  A driver has the opportunity to
	 * change that during the first call to ->enable_dma().  Similarly
	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
	 * implement.
	 */
3527 if (host->caps & SDHCI_CAN_64BIT)
3528 host->flags |= SDHCI_USE_64_BIT_DMA;
3529
3530 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3531 ret = sdhci_set_dma_mask(host);
3532
3533 if (!ret && host->ops->enable_dma)
3534 ret = host->ops->enable_dma(host);
3535
3536 if (ret) {
3537 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
3538 mmc_hostname(mmc));
3539 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
3540
3541 ret = 0;
3542 }
3543 }
3544
	/* SDMA does not support 64-bit DMA */
3546 if (host->flags & SDHCI_USE_64_BIT_DMA)
3547 host->flags &= ~SDHCI_USE_SDMA;
3548
3549 if (host->flags & SDHCI_USE_ADMA) {
3550 dma_addr_t dma;
3551 void *buf;
3552
		/*
		 * The DMA descriptor table size is calculated as the maximum
		 * number of segments times 2, to allow for an alignment
		 * descriptor for each segment, plus 1 for a nop end
		 * descriptor, all multiplied by the descriptor size.
		 */
3559 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3560 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3561 SDHCI_ADMA2_64_DESC_SZ;
3562 host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
3563 } else {
3564 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3565 SDHCI_ADMA2_32_DESC_SZ;
3566 host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
3567 }
3568
3569 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
3570 buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
3571 host->adma_table_sz, &dma, GFP_KERNEL);
3572 if (!buf) {
3573 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
3574 mmc_hostname(mmc));
3575 host->flags &= ~SDHCI_USE_ADMA;
3576 } else if ((dma + host->align_buffer_sz) &
3577 (SDHCI_ADMA2_DESC_ALIGN - 1)) {
3578 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
3579 mmc_hostname(mmc));
3580 host->flags &= ~SDHCI_USE_ADMA;
3581 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3582 host->adma_table_sz, buf, dma);
3583 } else {
3584 host->align_buffer = buf;
3585 host->align_addr = dma;
3586
3587 host->adma_table = buf + host->align_buffer_sz;
3588 host->adma_addr = dma + host->align_buffer_sz;
3589 }
3590 }
3591
	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
3597 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
3598 host->dma_mask = DMA_BIT_MASK(64);
3599 mmc_dev(mmc)->dma_mask = &host->dma_mask;
3600 }
3601
3602 if (host->version >= SDHCI_SPEC_300)
3603 host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
3604 >> SDHCI_CLOCK_BASE_SHIFT;
3605 else
3606 host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
3607 >> SDHCI_CLOCK_BASE_SHIFT;
3608
3609 host->max_clk *= 1000000;
3610 if (host->max_clk == 0 || host->quirks &
3611 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
3612 if (!host->ops->get_max_clock) {
3613 pr_err("%s: Hardware doesn't specify base clock frequency.\n",
3614 mmc_hostname(mmc));
3615 ret = -ENODEV;
3616 goto undma;
3617 }
3618 host->max_clk = host->ops->get_max_clock(host);
3619 }
3620
	/*
	 * In case of Host Controller v3.00, find out whether clock
	 * multiplier is supported.
	 */
3625 host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
3626 SDHCI_CLOCK_MUL_SHIFT;
3627
	/*
	 * In case the value in Clock Multiplier is 0, then programmable
	 * clock mode is not supported, otherwise the actual clock
	 * multiplier is one more than the value of Clock Multiplier
	 * in the Capabilities Register.
	 */
3634 if (host->clk_mul)
3635 host->clk_mul += 1;
3636
	/*
	 * Set host parameters.
	 */
3640 max_clk = host->max_clk;
3641
3642 if (host->ops->get_min_clock)
3643 mmc->f_min = host->ops->get_min_clock(host);
3644 else if (host->version >= SDHCI_SPEC_300) {
3645 if (host->clk_mul) {
3646 mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
3647 max_clk = host->max_clk * host->clk_mul;
3648 } else
3649 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3650 } else
3651 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3652
3653 if (!mmc->f_max || mmc->f_max > max_clk)
3654 mmc->f_max = max_clk;
3655
3656 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3657 host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
3658 SDHCI_TIMEOUT_CLK_SHIFT;
3659
3660 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
3661 host->timeout_clk *= 1000;
3662
3663 if (host->timeout_clk == 0) {
3664 if (!host->ops->get_timeout_clock) {
3665 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
3666 mmc_hostname(mmc));
3667 ret = -ENODEV;
3668 goto undma;
3669 }
3670
3671 host->timeout_clk =
3672 DIV_ROUND_UP(host->ops->get_timeout_clock(host),
3673 1000);
3674 }
3675
3676 if (override_timeout_clk)
3677 host->timeout_clk = override_timeout_clk;
3678
3679 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
3680 host->ops->get_max_timeout_count(host) : 1 << 27;
3681 mmc->max_busy_timeout /= host->timeout_clk;
3682 }
3683
3684 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
3685 !host->ops->get_max_timeout_count)
3686 mmc->max_busy_timeout = 0;
3687
3688 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
3689 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
3690
3691 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
3692 host->flags |= SDHCI_AUTO_CMD12;
3693
	/* Auto-CMD23 stuff only works in ADMA or PIO. */
3695 if ((host->version >= SDHCI_SPEC_300) &&
3696 ((host->flags & SDHCI_USE_ADMA) ||
3697 !(host->flags & SDHCI_USE_SDMA)) &&
3698 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
3699 host->flags |= SDHCI_AUTO_CMD23;
3700 DBG("Auto-CMD23 available\n");
3701 } else {
3702 DBG("Auto-CMD23 unavailable\n");
3703 }
3704
	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out.  Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and
	 * then we'll use 8-bit width for all supported cards.
	 */
3712 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
3713 mmc->caps |= MMC_CAP_4_BIT_DATA;
3714
3715 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
3716 mmc->caps &= ~MMC_CAP_CMD23;
3717
3718 if (host->caps & SDHCI_CAN_DO_HISPD)
3719 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3720
3721 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3722 mmc_card_is_removable(mmc) &&
3723 mmc_gpio_get_cd(host->mmc) < 0)
3724 mmc->caps |= MMC_CAP_NEEDS_POLL;
3725
	/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
3727 if (!IS_ERR(mmc->supply.vqmmc)) {
3728 ret = regulator_enable(mmc->supply.vqmmc);
3729 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
3730 1950000))
3731 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
3732 SDHCI_SUPPORT_SDR50 |
3733 SDHCI_SUPPORT_DDR50);
3734 if (ret) {
3735 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
3736 mmc_hostname(mmc), ret);
3737 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
3738 }
3739 }
3740
3741 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
3742 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3743 SDHCI_SUPPORT_DDR50);
		/*
		 * The SDHCI controller in a SoC might support HS200/HS400
		 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt
		 * property), but if the board is modeled such that the IO
		 * lines are not connected to 1.8V then HS200/HS400 cannot be
		 * supported.  Disable HS200/HS400 if the board does not have
		 * 1.8V connected to the IO lines. (Applicable for other
		 * modes in 1.8V)
		 */
3752 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
3753 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
3754 }
3755
	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
3757 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3758 SDHCI_SUPPORT_DDR50))
3759 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
3760
	/* SDR104 support also implies SDR50 support */
3762 if (host->caps1 & SDHCI_SUPPORT_SDR104) {
3763 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
3767 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
3768 mmc->caps2 |= MMC_CAP2_HS200;
3769 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
3770 mmc->caps |= MMC_CAP_UHS_SDR50;
3771 }
3772
3773 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
3774 (host->caps1 & SDHCI_SUPPORT_HS400))
3775 mmc->caps2 |= MMC_CAP2_HS400;
3776
3777 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
3778 (IS_ERR(mmc->supply.vqmmc) ||
3779 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
3780 1300000)))
3781 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
3782
3783 if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
3784 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
3785 mmc->caps |= MMC_CAP_UHS_DDR50;
3786
	/* Does the host need tuning for SDR50? */
3788 if (host->caps1 & SDHCI_USE_SDR50_TUNING)
3789 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
3790
	/* Driver Type(s) (A, C, D) supported by the host */
3792 if (host->caps1 & SDHCI_DRIVER_TYPE_A)
3793 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
3794 if (host->caps1 & SDHCI_DRIVER_TYPE_C)
3795 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
3796 if (host->caps1 & SDHCI_DRIVER_TYPE_D)
3797 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
3798
	/* Initial value for re-tuning timer count */
3800 host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
3801 SDHCI_RETUNING_TIMER_COUNT_SHIFT;
3802
	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
3807 if (host->tuning_count)
3808 host->tuning_count = 1 << (host->tuning_count - 1);
3809
	/* Re-tuning mode supported by the Host Controller */
3811 host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
3812 SDHCI_RETUNING_MODE_SHIFT;
3813
3814 ocr_avail = 0;
3815
	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the
	 * Capabilities register is set. The actual current value is 4 times
	 * the register value.
	 */
3823 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
3824 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
3825 int curr = regulator_get_current_limit(mmc->supply.vmmc);
3826 if (curr > 0) {
3827
			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr / 1000;  /* convert to mA */
			curr = curr / SDHCI_MAX_CURRENT_MULTIPLIER;
3831
3832 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
3833 max_current_caps =
3834 (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
3835 (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
3836 (curr << SDHCI_MAX_CURRENT_180_SHIFT);
3837 }
3838 }
3839
3840 if (host->caps & SDHCI_CAN_VDD_330) {
3841 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
3842
3843 mmc->max_current_330 = ((max_current_caps &
3844 SDHCI_MAX_CURRENT_330_MASK) >>
3845 SDHCI_MAX_CURRENT_330_SHIFT) *
3846 SDHCI_MAX_CURRENT_MULTIPLIER;
3847 }
3848 if (host->caps & SDHCI_CAN_VDD_300) {
3849 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
3850
3851 mmc->max_current_300 = ((max_current_caps &
3852 SDHCI_MAX_CURRENT_300_MASK) >>
3853 SDHCI_MAX_CURRENT_300_SHIFT) *
3854 SDHCI_MAX_CURRENT_MULTIPLIER;
3855 }
3856 if (host->caps & SDHCI_CAN_VDD_180) {
3857 ocr_avail |= MMC_VDD_165_195;
3858
3859 mmc->max_current_180 = ((max_current_caps &
3860 SDHCI_MAX_CURRENT_180_MASK) >>
3861 SDHCI_MAX_CURRENT_180_SHIFT) *
3862 SDHCI_MAX_CURRENT_MULTIPLIER;
3863 }
3864
3865
	/* If OCR set by host, use it instead. */
3867 ocr_avail = host->ocr_mask;
3868
	/* If OCR set by external regulators, give it highest prio. */
3870 if (mmc->ocr_avail)
3871 ocr_avail = mmc->ocr_avail;
3872
3873 mmc->ocr_avail = ocr_avail;
3874 mmc->ocr_avail_sdio = ocr_avail;
3875 if (host->ocr_avail_sdio)
3876 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
3877 mmc->ocr_avail_sd = ocr_avail;
3878 if (host->ocr_avail_sd)
3879 mmc->ocr_avail_sd &= host->ocr_avail_sd;
3880 else
3881 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
3882 mmc->ocr_avail_mmc = ocr_avail;
3883 if (host->ocr_avail_mmc)
3884 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
3885
3886 if (mmc->ocr_avail == 0) {
3887 pr_err("%s: Hardware doesn't report any support voltages.\n",
3888 mmc_hostname(mmc));
3889 ret = -ENODEV;
3890 goto unreg;
3891 }
3892
3893 if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
3894 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
3895 MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
3896 (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
3897 host->flags |= SDHCI_SIGNALING_180;
3898
3899 if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
3900 host->flags |= SDHCI_SIGNALING_120;
3901
3902 spin_lock_init(&host->lock);
3903
	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA
	 * boundary size (512KiB). Note some tuning modes impose a 4MiB
	 * limit, but this is less anyway.
	 */
3909 mmc->max_req_size = 524288;
3910
	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
3915 if (host->flags & SDHCI_USE_ADMA) {
3916 mmc->max_segs = SDHCI_MAX_SEGS;
3917 } else if (host->flags & SDHCI_USE_SDMA) {
3918 mmc->max_segs = 1;
3919 if (swiotlb_max_segment()) {
3920 unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
3921 IO_TLB_SEGSIZE;
3922 mmc->max_req_size = min(mmc->max_req_size,
3923 max_req_size);
3924 }
3925 } else {
3926 mmc->max_segs = SDHCI_MAX_SEGS;
3927 }
3928
	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
3934 if (host->flags & SDHCI_USE_ADMA) {
3935 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
3936 mmc->max_seg_size = 65535;
3937 else
3938 mmc->max_seg_size = 65536;
3939 } else {
3940 mmc->max_seg_size = mmc->max_req_size;
3941 }
3942
	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
3947 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
3948 mmc->max_blk_size = 2;
3949 } else {
3950 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
3951 SDHCI_MAX_BLOCK_SHIFT;
3952 if (mmc->max_blk_size >= 3) {
3953 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
3954 mmc_hostname(mmc));
3955 mmc->max_blk_size = 0;
3956 }
3957 }
3958
3959 mmc->max_blk_size = 512 << mmc->max_blk_size;
3960
	/*
	 * Maximum block count.
	 */
3964 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
3965
3966 if (mmc->max_segs == 1) {
		/* This may alter mmc->*_blk_* parameters */
3968 ret = sdhci_allocate_bounce_buffer(host);
3969 if (ret)
3970 return ret;
3971 }
3972
3973 return 0;
3974
3975unreg:
3976 if (!IS_ERR(mmc->supply.vqmmc))
3977 regulator_disable(mmc->supply.vqmmc);
3978undma:
3979 if (host->align_buffer)
3980 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3981 host->adma_table_sz, host->align_buffer,
3982 host->align_addr);
3983 host->adma_table = NULL;
3984 host->align_buffer = NULL;
3985
3986 return ret;
3987}
3988EXPORT_SYMBOL_GPL(sdhci_setup_host);
3989
3990void sdhci_cleanup_host(struct sdhci_host *host)
3991{
3992 struct mmc_host *mmc = host->mmc;
3993
3994 if (!IS_ERR(mmc->supply.vqmmc))
3995 regulator_disable(mmc->supply.vqmmc);
3996
3997 if (host->align_buffer)
3998 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3999 host->adma_table_sz, host->align_buffer,
4000 host->align_addr);
4001 host->adma_table = NULL;
4002 host->align_buffer = NULL;
4003}
4004EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
4005
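/*
 * Second half of host registration: set up the completion tasklet, the
 * timeout timers and the (threaded) interrupt handler, then register the
 * host with the mmc core.
 */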
4006int __sdhci_add_host(struct sdhci_host *host)
4007{
4008 struct mmc_host *mmc = host->mmc;
4009 int ret;
4010
	/*
	 * Init tasklets.
	 */
4014 tasklet_init(&host->finish_tasklet,
4015 sdhci_tasklet_finish, (unsigned long)host);
4016
4017 timer_setup(&host->timer, sdhci_timeout_timer, 0);
4018 timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
4019
4020 init_waitqueue_head(&host->buf_ready_int);
4021
4022 sdhci_init(host, 0);
4023
4024 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
4025 IRQF_SHARED, mmc_hostname(mmc), host);
4026 if (ret) {
4027 pr_err("%s: Failed to request IRQ %d: %d\n",
4028 mmc_hostname(mmc), host->irq, ret);
4029 goto untasklet;
4030 }
4031
4032 ret = sdhci_led_register(host);
4033 if (ret) {
4034 pr_err("%s: Failed to register LED device: %d\n",
4035 mmc_hostname(mmc), ret);
4036 goto unirq;
4037 }
4038
4039 ret = mmc_add_host(mmc);
4040 if (ret)
4041 goto unled;
4042
4043 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
4044 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
4045 (host->flags & SDHCI_USE_ADMA) ?
4046 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
4047 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
4048
4049 sdhci_enable_card_detection(host);
4050
4051 return 0;
4052
4053unled:
4054 sdhci_led_unregister(host);
4055unirq:
4056 sdhci_do_reset(host, SDHCI_RESET_ALL);
4057 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4058 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4059 free_irq(host->irq, host);
4060untasklet:
4061 tasklet_kill(&host->finish_tasklet);
4062
4063 return ret;
4064}
4065EXPORT_SYMBOL_GPL(__sdhci_add_host);
4066
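/**
 * sdhci_add_host - register an SDHCI host with the mmc core
 * @host: host to register, from sdhci_alloc_host()
 *
 * Convenience wrapper that runs sdhci_setup_host() and __sdhci_add_host()
 * back to back and unwinds the former if the latter fails. Drivers that
 * need to adjust mmc caps in between call the two halves directly.
 */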
4067int sdhci_add_host(struct sdhci_host *host)
4068{
4069 int ret;
4070
4071 ret = sdhci_setup_host(host);
4072 if (ret)
4073 return ret;
4074
4075 ret = __sdhci_add_host(host);
4076 if (ret)
4077 goto cleanup;
4078
4079 return 0;
4080
4081cleanup:
4082 sdhci_cleanup_host(host);
4083
4084 return ret;
4085}
4086EXPORT_SYMBOL_GPL(sdhci_add_host);
4087
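/*
 * Unregister a host. @dead should be non-zero when the device is already
 * gone (for example after surprise removal): outstanding requests are then
 * errored out with -ENOMEDIUM and the final controller reset is skipped.
 */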
4088void sdhci_remove_host(struct sdhci_host *host, int dead)
4089{
4090 struct mmc_host *mmc = host->mmc;
4091 unsigned long flags;
4092
4093 if (dead) {
4094 spin_lock_irqsave(&host->lock, flags);
4095
4096 host->flags |= SDHCI_DEVICE_DEAD;
4097
4098 if (sdhci_has_requests(host)) {
			pr_err("%s: Controller removed during transfer!\n",
			       mmc_hostname(mmc));
4101 sdhci_error_out_mrqs(host, -ENOMEDIUM);
4102 }
4103
4104 spin_unlock_irqrestore(&host->lock, flags);
4105 }
4106
4107 sdhci_disable_card_detection(host);
4108
4109 mmc_remove_host(mmc);
4110
4111 sdhci_led_unregister(host);
4112
4113 if (!dead)
4114 sdhci_do_reset(host, SDHCI_RESET_ALL);
4115
4116 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4117 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4118 free_irq(host->irq, host);
4119
4120 del_timer_sync(&host->timer);
4121 del_timer_sync(&host->data_timer);
4122
4123 tasklet_kill(&host->finish_tasklet);
4124
4125 if (!IS_ERR(mmc->supply.vqmmc))
4126 regulator_disable(mmc->supply.vqmmc);
4127
4128 if (host->align_buffer)
4129 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4130 host->adma_table_sz, host->align_buffer,
4131 host->align_addr);
4132
4133 host->adma_table = NULL;
4134 host->align_buffer = NULL;
4135}
4136
4137EXPORT_SYMBOL_GPL(sdhci_remove_host);
4138
4139void sdhci_free_host(struct sdhci_host *host)
4140{
4141 mmc_free_host(host->mmc);
4142}
4143
4144EXPORT_SYMBOL_GPL(sdhci_free_host);
4145
/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

4152static int __init sdhci_drv_init(void)
4153{
4154 pr_info(DRIVER_NAME
4155 ": Secure Digital Host Controller Interface driver\n");
4156 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
4157
4158 return 0;
4159}
4160
4161static void __exit sdhci_drv_exit(void)
4162{
4163}
4164
4165module_init(sdhci_drv_init);
4166module_exit(sdhci_drv_exit);
4167
4168module_param(debug_quirks, uint, 0444);
4169module_param(debug_quirks2, uint, 0444);
4170
4171MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
4172MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
4173MODULE_LICENSE("GPL");
4174
4175MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
4176MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
4177