1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#include <linux/delay.h>
17#include <linux/ktime.h>
18#include <linux/highmem.h>
19#include <linux/io.h>
20#include <linux/module.h>
21#include <linux/dma-mapping.h>
22#include <linux/slab.h>
23#include <linux/scatterlist.h>
24#include <linux/sizes.h>
25#include <linux/swiotlb.h>
26#include <linux/regulator/consumer.h>
27#include <linux/pm_runtime.h>
28#include <linux/of.h>
29
30#include <linux/leds.h>
31
32#include <linux/mmc/mmc.h>
33#include <linux/mmc/host.h>
34#include <linux/mmc/card.h>
35#include <linux/mmc/sdio.h>
36#include <linux/mmc/slot-gpio.h>
37
38#include "sdhci.h"
39
40#define DRIVER_NAME "sdhci"
41
42#define DBG(f, x...) \
43 pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
44
45#define SDHCI_DUMP(f, x...) \
46 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
47
48#define MAX_TUNING_LOOP 40
49
50static unsigned int debug_quirks = 0;
51static unsigned int debug_quirks2;
52
53static void sdhci_finish_data(struct sdhci_host *);
54
55static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
56
57void sdhci_dumpregs(struct sdhci_host *host)
58{
59 SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");
60
61 SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
62 sdhci_readl(host, SDHCI_DMA_ADDRESS),
63 sdhci_readw(host, SDHCI_HOST_VERSION));
64 SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
65 sdhci_readw(host, SDHCI_BLOCK_SIZE),
66 sdhci_readw(host, SDHCI_BLOCK_COUNT));
67 SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
68 sdhci_readl(host, SDHCI_ARGUMENT),
69 sdhci_readw(host, SDHCI_TRANSFER_MODE));
70 SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
71 sdhci_readl(host, SDHCI_PRESENT_STATE),
72 sdhci_readb(host, SDHCI_HOST_CONTROL));
73 SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
74 sdhci_readb(host, SDHCI_POWER_CONTROL),
75 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
76 SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
77 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
78 sdhci_readw(host, SDHCI_CLOCK_CONTROL));
79 SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
80 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
81 sdhci_readl(host, SDHCI_INT_STATUS));
82 SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
83 sdhci_readl(host, SDHCI_INT_ENABLE),
84 sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
85 SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
86 sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
87 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
88 SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
89 sdhci_readl(host, SDHCI_CAPABILITIES),
90 sdhci_readl(host, SDHCI_CAPABILITIES_1));
91 SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
92 sdhci_readw(host, SDHCI_COMMAND),
93 sdhci_readl(host, SDHCI_MAX_CURRENT));
94 SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
95 sdhci_readl(host, SDHCI_RESPONSE),
96 sdhci_readl(host, SDHCI_RESPONSE + 4));
97 SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
98 sdhci_readl(host, SDHCI_RESPONSE + 8),
99 sdhci_readl(host, SDHCI_RESPONSE + 12));
100 SDHCI_DUMP("Host ctl2: 0x%08x\n",
101 sdhci_readw(host, SDHCI_HOST_CONTROL2));
102
103 if (host->flags & SDHCI_USE_ADMA) {
104 if (host->flags & SDHCI_USE_64_BIT_DMA) {
105 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
106 sdhci_readl(host, SDHCI_ADMA_ERROR),
107 sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
108 sdhci_readl(host, SDHCI_ADMA_ADDRESS));
109 } else {
110 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
111 sdhci_readl(host, SDHCI_ADMA_ERROR),
112 sdhci_readl(host, SDHCI_ADMA_ADDRESS));
113 }
114 }
115
116 SDHCI_DUMP("============================================\n");
117}
118EXPORT_SYMBOL_GPL(sdhci_dumpregs);
119
120
121
122
123
124
125
126static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
127{
128 u16 ctrl2;
129
130 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
131 if (ctrl2 & SDHCI_CTRL_V4_MODE)
132 return;
133
134 ctrl2 |= SDHCI_CTRL_V4_MODE;
135 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
136}
137
138
139
140
141
142void sdhci_enable_v4_mode(struct sdhci_host *host)
143{
144 host->v4_mode = true;
145 sdhci_do_enable_v4_mode(host);
146}
147EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
148
149static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
150{
151 return cmd->data || cmd->flags & MMC_RSP_BUSY;
152}
153
154static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
155{
156 u32 present;
157
158 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
159 !mmc_card_is_removable(host->mmc))
160 return;
161
162 if (enable) {
163 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
164 SDHCI_CARD_PRESENT;
165
166 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
167 SDHCI_INT_CARD_INSERT;
168 } else {
169 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
170 }
171
172 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
173 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
174}
175
176static void sdhci_enable_card_detection(struct sdhci_host *host)
177{
178 sdhci_set_card_detection(host, true);
179}
180
181static void sdhci_disable_card_detection(struct sdhci_host *host)
182{
183 sdhci_set_card_detection(host, false);
184}
185
186static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
187{
188 if (host->bus_on)
189 return;
190 host->bus_on = true;
191 pm_runtime_get_noresume(host->mmc->parent);
192}
193
194static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
195{
196 if (!host->bus_on)
197 return;
198 host->bus_on = false;
199 pm_runtime_put_noidle(host->mmc->parent);
200}
201
202void sdhci_reset(struct sdhci_host *host, u8 mask)
203{
204 ktime_t timeout;
205
206 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
207
208 if (mask & SDHCI_RESET_ALL) {
209 host->clock = 0;
210
211 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
212 sdhci_runtime_pm_bus_off(host);
213 }
214
215
216 timeout = ktime_add_ms(ktime_get(), 100);
217
218
219 while (1) {
220 bool timedout = ktime_after(ktime_get(), timeout);
221
222 if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
223 break;
224 if (timedout) {
225 pr_err("%s: Reset 0x%x never completed.\n",
226 mmc_hostname(host->mmc), (int)mask);
227 sdhci_dumpregs(host);
228 return;
229 }
230 udelay(10);
231 }
232}
233EXPORT_SYMBOL_GPL(sdhci_reset);
234
235static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
236{
237 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
238 struct mmc_host *mmc = host->mmc;
239
240 if (!mmc->ops->get_cd(mmc))
241 return;
242 }
243
244 host->ops->reset(host, mask);
245
246 if (mask & SDHCI_RESET_ALL) {
247 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
248 if (host->ops->enable_dma)
249 host->ops->enable_dma(host);
250 }
251
252
253 host->preset_enabled = false;
254 }
255}
256
257static void sdhci_set_default_irqs(struct sdhci_host *host)
258{
259 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
260 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
261 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
262 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
263 SDHCI_INT_RESPONSE;
264
265 if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
266 host->tuning_mode == SDHCI_TUNING_MODE_3)
267 host->ier |= SDHCI_INT_RETUNE;
268
269 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
270 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
271}
272
273static void sdhci_config_dma(struct sdhci_host *host)
274{
275 u8 ctrl;
276 u16 ctrl2;
277
278 if (host->version < SDHCI_SPEC_200)
279 return;
280
281 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
282
283
284
285
286
287
288 ctrl &= ~SDHCI_CTRL_DMA_MASK;
289 if (!(host->flags & SDHCI_REQ_USE_DMA))
290 goto out;
291
292
293 if (host->flags & SDHCI_USE_ADMA)
294 ctrl |= SDHCI_CTRL_ADMA32;
295
296 if (host->flags & SDHCI_USE_64_BIT_DMA) {
297
298
299
300
301
302 if (host->v4_mode) {
303 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
304 ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
305 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
306 } else if (host->flags & SDHCI_USE_ADMA) {
307
308
309
310
311 ctrl |= SDHCI_CTRL_ADMA64;
312 }
313 }
314
315out:
316 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
317}
318
319static void sdhci_init(struct sdhci_host *host, int soft)
320{
321 struct mmc_host *mmc = host->mmc;
322
323 if (soft)
324 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
325 else
326 sdhci_do_reset(host, SDHCI_RESET_ALL);
327
328 if (host->v4_mode)
329 sdhci_do_enable_v4_mode(host);
330
331 sdhci_set_default_irqs(host);
332
333 host->cqe_on = false;
334
335 if (soft) {
336
337 host->clock = 0;
338 mmc->ops->set_ios(mmc, &mmc->ios);
339 }
340}
341
342static void sdhci_reinit(struct sdhci_host *host)
343{
344 sdhci_init(host, 0);
345 sdhci_enable_card_detection(host);
346}
347
348static void __sdhci_led_activate(struct sdhci_host *host)
349{
350 u8 ctrl;
351
352 if (host->quirks & SDHCI_QUIRK_NO_LED)
353 return;
354
355 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
356 ctrl |= SDHCI_CTRL_LED;
357 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
358}
359
360static void __sdhci_led_deactivate(struct sdhci_host *host)
361{
362 u8 ctrl;
363
364 if (host->quirks & SDHCI_QUIRK_NO_LED)
365 return;
366
367 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
368 ctrl &= ~SDHCI_CTRL_LED;
369 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
370}
371
372#if IS_REACHABLE(CONFIG_LEDS_CLASS)
373static void sdhci_led_control(struct led_classdev *led,
374 enum led_brightness brightness)
375{
376 struct sdhci_host *host = container_of(led, struct sdhci_host, led);
377 unsigned long flags;
378
379 spin_lock_irqsave(&host->lock, flags);
380
381 if (host->runtime_suspended)
382 goto out;
383
384 if (brightness == LED_OFF)
385 __sdhci_led_deactivate(host);
386 else
387 __sdhci_led_activate(host);
388out:
389 spin_unlock_irqrestore(&host->lock, flags);
390}
391
392static int sdhci_led_register(struct sdhci_host *host)
393{
394 struct mmc_host *mmc = host->mmc;
395
396 if (host->quirks & SDHCI_QUIRK_NO_LED)
397 return 0;
398
399 snprintf(host->led_name, sizeof(host->led_name),
400 "%s::", mmc_hostname(mmc));
401
402 host->led.name = host->led_name;
403 host->led.brightness = LED_OFF;
404 host->led.default_trigger = mmc_hostname(mmc);
405 host->led.brightness_set = sdhci_led_control;
406
407 return led_classdev_register(mmc_dev(mmc), &host->led);
408}
409
410static void sdhci_led_unregister(struct sdhci_host *host)
411{
412 if (host->quirks & SDHCI_QUIRK_NO_LED)
413 return;
414
415 led_classdev_unregister(&host->led);
416}
417
418static inline void sdhci_led_activate(struct sdhci_host *host)
419{
420}
421
422static inline void sdhci_led_deactivate(struct sdhci_host *host)
423{
424}
425
426#else
427
428static inline int sdhci_led_register(struct sdhci_host *host)
429{
430 return 0;
431}
432
433static inline void sdhci_led_unregister(struct sdhci_host *host)
434{
435}
436
437static inline void sdhci_led_activate(struct sdhci_host *host)
438{
439 __sdhci_led_activate(host);
440}
441
442static inline void sdhci_led_deactivate(struct sdhci_host *host)
443{
444 __sdhci_led_deactivate(host);
445}
446
447#endif
448
449
450
451
452
453
454
455static void sdhci_read_block_pio(struct sdhci_host *host)
456{
457 unsigned long flags;
458 size_t blksize, len, chunk;
459 u32 uninitialized_var(scratch);
460 u8 *buf;
461
462 DBG("PIO reading\n");
463
464 blksize = host->data->blksz;
465 chunk = 0;
466
467 local_irq_save(flags);
468
469 while (blksize) {
470 BUG_ON(!sg_miter_next(&host->sg_miter));
471
472 len = min(host->sg_miter.length, blksize);
473
474 blksize -= len;
475 host->sg_miter.consumed = len;
476
477 buf = host->sg_miter.addr;
478
479 while (len) {
480 if (chunk == 0) {
481 scratch = sdhci_readl(host, SDHCI_BUFFER);
482 chunk = 4;
483 }
484
485 *buf = scratch & 0xFF;
486
487 buf++;
488 scratch >>= 8;
489 chunk--;
490 len--;
491 }
492 }
493
494 sg_miter_stop(&host->sg_miter);
495
496 local_irq_restore(flags);
497}
498
499static void sdhci_write_block_pio(struct sdhci_host *host)
500{
501 unsigned long flags;
502 size_t blksize, len, chunk;
503 u32 scratch;
504 u8 *buf;
505
506 DBG("PIO writing\n");
507
508 blksize = host->data->blksz;
509 chunk = 0;
510 scratch = 0;
511
512 local_irq_save(flags);
513
514 while (blksize) {
515 BUG_ON(!sg_miter_next(&host->sg_miter));
516
517 len = min(host->sg_miter.length, blksize);
518
519 blksize -= len;
520 host->sg_miter.consumed = len;
521
522 buf = host->sg_miter.addr;
523
524 while (len) {
525 scratch |= (u32)*buf << (chunk * 8);
526
527 buf++;
528 chunk++;
529 len--;
530
531 if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
532 sdhci_writel(host, scratch, SDHCI_BUFFER);
533 chunk = 0;
534 scratch = 0;
535 }
536 }
537 }
538
539 sg_miter_stop(&host->sg_miter);
540
541 local_irq_restore(flags);
542}
543
544static void sdhci_transfer_pio(struct sdhci_host *host)
545{
546 u32 mask;
547
548 if (host->blocks == 0)
549 return;
550
551 if (host->data->flags & MMC_DATA_READ)
552 mask = SDHCI_DATA_AVAILABLE;
553 else
554 mask = SDHCI_SPACE_AVAILABLE;
555
556
557
558
559
560
561 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
562 (host->data->blocks == 1))
563 mask = ~0;
564
565 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
566 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
567 udelay(100);
568
569 if (host->data->flags & MMC_DATA_READ)
570 sdhci_read_block_pio(host);
571 else
572 sdhci_write_block_pio(host);
573
574 host->blocks--;
575 if (host->blocks == 0)
576 break;
577 }
578
579 DBG("PIO transfer complete.\n");
580}
581
582static int sdhci_pre_dma_transfer(struct sdhci_host *host,
583 struct mmc_data *data, int cookie)
584{
585 int sg_count;
586
587
588
589
590
591 if (data->host_cookie == COOKIE_PRE_MAPPED)
592 return data->sg_count;
593
594
595 if (host->bounce_buffer) {
596 unsigned int length = data->blksz * data->blocks;
597
598 if (length > host->bounce_buffer_size) {
599 pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
600 mmc_hostname(host->mmc), length,
601 host->bounce_buffer_size);
602 return -EIO;
603 }
604 if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
605
606 sg_copy_to_buffer(data->sg, data->sg_len,
607 host->bounce_buffer,
608 length);
609 }
610
611 dma_sync_single_for_device(host->mmc->parent,
612 host->bounce_addr,
613 host->bounce_buffer_size,
614 mmc_get_dma_dir(data));
615
616 sg_count = 1;
617 } else {
618
619 sg_count = dma_map_sg(mmc_dev(host->mmc),
620 data->sg, data->sg_len,
621 mmc_get_dma_dir(data));
622 }
623
624 if (sg_count == 0)
625 return -ENOSPC;
626
627 data->sg_count = sg_count;
628 data->host_cookie = cookie;
629
630 return sg_count;
631}
632
633static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
634{
635 local_irq_save(*flags);
636 return kmap_atomic(sg_page(sg)) + sg->offset;
637}
638
639static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
640{
641 kunmap_atomic(buffer);
642 local_irq_restore(*flags);
643}
644
645void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
646 dma_addr_t addr, int len, unsigned int cmd)
647{
648 struct sdhci_adma2_64_desc *dma_desc = *desc;
649
650
651 dma_desc->cmd = cpu_to_le16(cmd);
652 dma_desc->len = cpu_to_le16(len);
653 dma_desc->addr_lo = cpu_to_le32((u32)addr);
654
655 if (host->flags & SDHCI_USE_64_BIT_DMA)
656 dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
657
658 *desc += host->desc_sz;
659}
660EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);
661
662static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
663 void **desc, dma_addr_t addr,
664 int len, unsigned int cmd)
665{
666 if (host->ops->adma_write_desc)
667 host->ops->adma_write_desc(host, desc, addr, len, cmd);
668 else
669 sdhci_adma_write_desc(host, desc, addr, len, cmd);
670}
671
672static void sdhci_adma_mark_end(void *desc)
673{
674 struct sdhci_adma2_64_desc *dma_desc = desc;
675
676
677 dma_desc->cmd |= cpu_to_le16(ADMA2_END);
678}
679
680static void sdhci_adma_table_pre(struct sdhci_host *host,
681 struct mmc_data *data, int sg_count)
682{
683 struct scatterlist *sg;
684 unsigned long flags;
685 dma_addr_t addr, align_addr;
686 void *desc, *align;
687 char *buffer;
688 int len, offset, i;
689
690
691
692
693
694
695 host->sg_count = sg_count;
696
697 desc = host->adma_table;
698 align = host->align_buffer;
699
700 align_addr = host->align_addr;
701
702 for_each_sg(data->sg, sg, host->sg_count, i) {
703 addr = sg_dma_address(sg);
704 len = sg_dma_len(sg);
705
706
707
708
709
710
711
712 offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
713 SDHCI_ADMA2_MASK;
714 if (offset) {
715 if (data->flags & MMC_DATA_WRITE) {
716 buffer = sdhci_kmap_atomic(sg, &flags);
717 memcpy(align, buffer, offset);
718 sdhci_kunmap_atomic(buffer, &flags);
719 }
720
721
722 __sdhci_adma_write_desc(host, &desc, align_addr,
723 offset, ADMA2_TRAN_VALID);
724
725 BUG_ON(offset > 65536);
726
727 align += SDHCI_ADMA2_ALIGN;
728 align_addr += SDHCI_ADMA2_ALIGN;
729
730 addr += offset;
731 len -= offset;
732 }
733
734 BUG_ON(len > 65536);
735
736
737 if (len)
738 __sdhci_adma_write_desc(host, &desc, addr, len,
739 ADMA2_TRAN_VALID);
740
741
742
743
744
745 WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
746 }
747
748 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
749
750 if (desc != host->adma_table) {
751 desc -= host->desc_sz;
752 sdhci_adma_mark_end(desc);
753 }
754 } else {
755
756 __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
757 }
758}
759
760static void sdhci_adma_table_post(struct sdhci_host *host,
761 struct mmc_data *data)
762{
763 struct scatterlist *sg;
764 int i, size;
765 void *align;
766 char *buffer;
767 unsigned long flags;
768
769 if (data->flags & MMC_DATA_READ) {
770 bool has_unaligned = false;
771
772
773 for_each_sg(data->sg, sg, host->sg_count, i)
774 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
775 has_unaligned = true;
776 break;
777 }
778
779 if (has_unaligned) {
780 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
781 data->sg_len, DMA_FROM_DEVICE);
782
783 align = host->align_buffer;
784
785 for_each_sg(data->sg, sg, host->sg_count, i) {
786 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
787 size = SDHCI_ADMA2_ALIGN -
788 (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
789
790 buffer = sdhci_kmap_atomic(sg, &flags);
791 memcpy(buffer, align, size);
792 sdhci_kunmap_atomic(buffer, &flags);
793
794 align += SDHCI_ADMA2_ALIGN;
795 }
796 }
797 }
798 }
799}
800
801static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
802{
803 if (host->bounce_buffer)
804 return host->bounce_addr;
805 else
806 return sg_dma_address(host->data->sg);
807}
808
809static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
810{
811 if (host->v4_mode) {
812 sdhci_writel(host, addr, SDHCI_ADMA_ADDRESS);
813 if (host->flags & SDHCI_USE_64_BIT_DMA)
814 sdhci_writel(host, (u64)addr >> 32, SDHCI_ADMA_ADDRESS_HI);
815 } else {
816 sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
817 }
818}
819
820static unsigned int sdhci_target_timeout(struct sdhci_host *host,
821 struct mmc_command *cmd,
822 struct mmc_data *data)
823{
824 unsigned int target_timeout;
825
826
827 if (!data) {
828 target_timeout = cmd->busy_timeout * 1000;
829 } else {
830 target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
831 if (host->clock && data->timeout_clks) {
832 unsigned long long val;
833
834
835
836
837
838
839 val = 1000000ULL * data->timeout_clks;
840 if (do_div(val, host->clock))
841 target_timeout++;
842 target_timeout += val;
843 }
844 }
845
846 return target_timeout;
847}
848
849static void sdhci_calc_sw_timeout(struct sdhci_host *host,
850 struct mmc_command *cmd)
851{
852 struct mmc_data *data = cmd->data;
853 struct mmc_host *mmc = host->mmc;
854 struct mmc_ios *ios = &mmc->ios;
855 unsigned char bus_width = 1 << ios->bus_width;
856 unsigned int blksz;
857 unsigned int freq;
858 u64 target_timeout;
859 u64 transfer_time;
860
861 target_timeout = sdhci_target_timeout(host, cmd, data);
862 target_timeout *= NSEC_PER_USEC;
863
864 if (data) {
865 blksz = data->blksz;
866 freq = host->mmc->actual_clock ? : host->clock;
867 transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
868 do_div(transfer_time, freq);
869
870 transfer_time = transfer_time * 2;
871
872 host->data_timeout = data->blocks * target_timeout +
873 transfer_time;
874 } else {
875 host->data_timeout = target_timeout;
876 }
877
878 if (host->data_timeout)
879 host->data_timeout += MMC_CMD_TRANSFER_TIME;
880}
881
882static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
883 bool *too_big)
884{
885 u8 count;
886 struct mmc_data *data;
887 unsigned target_timeout, current_timeout;
888
889 *too_big = true;
890
891
892
893
894
895
896
897 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
898 return 0xE;
899
900
901 if (cmd == NULL)
902 return 0xE;
903
904 data = cmd->data;
905
906 if (!data && !cmd->busy_timeout)
907 return 0xE;
908
909
910 target_timeout = sdhci_target_timeout(host, cmd, data);
911
912
913
914
915
916
917
918
919
920
921
922 count = 0;
923 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
924 while (current_timeout < target_timeout) {
925 count++;
926 current_timeout <<= 1;
927 if (count >= 0xF)
928 break;
929 }
930
931 if (count >= 0xF) {
932 if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
933 DBG("Too large timeout 0x%x requested for CMD%d!\n",
934 count, cmd->opcode);
935 count = 0xE;
936 } else {
937 *too_big = false;
938 }
939
940 return count;
941}
942
943static void sdhci_set_transfer_irqs(struct sdhci_host *host)
944{
945 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
946 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
947
948 if (host->flags & SDHCI_REQ_USE_DMA)
949 host->ier = (host->ier & ~pio_irqs) | dma_irqs;
950 else
951 host->ier = (host->ier & ~dma_irqs) | pio_irqs;
952
953 if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
954 host->ier |= SDHCI_INT_AUTO_CMD_ERR;
955 else
956 host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;
957
958 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
959 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
960}
961
962static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
963{
964 if (enable)
965 host->ier |= SDHCI_INT_DATA_TIMEOUT;
966 else
967 host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
968 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
969 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
970}
971
972static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
973{
974 u8 count;
975
976 if (host->ops->set_timeout) {
977 host->ops->set_timeout(host, cmd);
978 } else {
979 bool too_big = false;
980
981 count = sdhci_calc_timeout(host, cmd, &too_big);
982
983 if (too_big &&
984 host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
985 sdhci_calc_sw_timeout(host, cmd);
986 sdhci_set_data_timeout_irq(host, false);
987 } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
988 sdhci_set_data_timeout_irq(host, true);
989 }
990
991 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
992 }
993}
994
995static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
996{
997 struct mmc_data *data = cmd->data;
998
999 host->data_timeout = 0;
1000
1001 if (sdhci_data_line_cmd(cmd))
1002 sdhci_set_timeout(host, cmd);
1003
1004 if (!data)
1005 return;
1006
1007 WARN_ON(host->data);
1008
1009
1010 BUG_ON(data->blksz * data->blocks > 524288);
1011 BUG_ON(data->blksz > host->mmc->max_blk_size);
1012 BUG_ON(data->blocks > 65535);
1013
1014 host->data = data;
1015 host->data_early = 0;
1016 host->data->bytes_xfered = 0;
1017
1018 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
1019 struct scatterlist *sg;
1020 unsigned int length_mask, offset_mask;
1021 int i;
1022
1023 host->flags |= SDHCI_REQ_USE_DMA;
1024
1025
1026
1027
1028
1029
1030
1031
1032 length_mask = 0;
1033 offset_mask = 0;
1034 if (host->flags & SDHCI_USE_ADMA) {
1035 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
1036 length_mask = 3;
1037
1038
1039
1040
1041
1042 offset_mask = 3;
1043 }
1044 } else {
1045 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
1046 length_mask = 3;
1047 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
1048 offset_mask = 3;
1049 }
1050
1051 if (unlikely(length_mask | offset_mask)) {
1052 for_each_sg(data->sg, sg, data->sg_len, i) {
1053 if (sg->length & length_mask) {
1054 DBG("Reverting to PIO because of transfer size (%d)\n",
1055 sg->length);
1056 host->flags &= ~SDHCI_REQ_USE_DMA;
1057 break;
1058 }
1059 if (sg->offset & offset_mask) {
1060 DBG("Reverting to PIO because of bad alignment\n");
1061 host->flags &= ~SDHCI_REQ_USE_DMA;
1062 break;
1063 }
1064 }
1065 }
1066 }
1067
1068 if (host->flags & SDHCI_REQ_USE_DMA) {
1069 int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1070
1071 if (sg_cnt <= 0) {
1072
1073
1074
1075
1076 WARN_ON(1);
1077 host->flags &= ~SDHCI_REQ_USE_DMA;
1078 } else if (host->flags & SDHCI_USE_ADMA) {
1079 sdhci_adma_table_pre(host, data, sg_cnt);
1080
1081 sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
1082 if (host->flags & SDHCI_USE_64_BIT_DMA)
1083 sdhci_writel(host,
1084 (u64)host->adma_addr >> 32,
1085 SDHCI_ADMA_ADDRESS_HI);
1086 } else {
1087 WARN_ON(sg_cnt != 1);
1088 sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
1089 }
1090 }
1091
1092 sdhci_config_dma(host);
1093
1094 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
1095 int flags;
1096
1097 flags = SG_MITER_ATOMIC;
1098 if (host->data->flags & MMC_DATA_READ)
1099 flags |= SG_MITER_TO_SG;
1100 else
1101 flags |= SG_MITER_FROM_SG;
1102 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
1103 host->blocks = data->blocks;
1104 }
1105
1106 sdhci_set_transfer_irqs(host);
1107
1108
1109 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
1110 SDHCI_BLOCK_SIZE);
1111
1112
1113
1114
1115
1116 if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
1117 (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
1118 if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
1119 sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
1120 sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
1121 } else {
1122 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
1123 }
1124}
1125
1126static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
1127 struct mmc_request *mrq)
1128{
1129 return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
1130 !mrq->cap_cmd_during_tfr;
1131}
1132
1133static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
1134 struct mmc_command *cmd,
1135 u16 *mode)
1136{
1137 bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
1138 (cmd->opcode != SD_IO_RW_EXTENDED);
1139 bool use_cmd23 = cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
1140 u16 ctrl2;
1141
1142
1143
1144
1145
1146
1147 if (host->version >= SDHCI_SPEC_410 && (use_cmd12 || use_cmd23)) {
1148 *mode |= SDHCI_TRNS_AUTO_SEL;
1149
1150 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1151 if (use_cmd23)
1152 ctrl2 |= SDHCI_CMD23_ENABLE;
1153 else
1154 ctrl2 &= ~SDHCI_CMD23_ENABLE;
1155 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
1156
1157 return;
1158 }
1159
1160
1161
1162
1163
1164 if (use_cmd12)
1165 *mode |= SDHCI_TRNS_AUTO_CMD12;
1166 else if (use_cmd23)
1167 *mode |= SDHCI_TRNS_AUTO_CMD23;
1168}
1169
1170static void sdhci_set_transfer_mode(struct sdhci_host *host,
1171 struct mmc_command *cmd)
1172{
1173 u16 mode = 0;
1174 struct mmc_data *data = cmd->data;
1175
1176 if (data == NULL) {
1177 if (host->quirks2 &
1178 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
1179
1180 if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
1181 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
1182 } else {
1183
1184 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
1185 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
1186 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
1187 }
1188 return;
1189 }
1190
1191 WARN_ON(!host->data);
1192
1193 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
1194 mode = SDHCI_TRNS_BLK_CNT_EN;
1195
1196 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
1197 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
1198 sdhci_auto_cmd_select(host, cmd, &mode);
1199 if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23))
1200 sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
1201 }
1202
1203 if (data->flags & MMC_DATA_READ)
1204 mode |= SDHCI_TRNS_READ;
1205 if (host->flags & SDHCI_REQ_USE_DMA)
1206 mode |= SDHCI_TRNS_DMA;
1207
1208 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
1209}
1210
1211static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
1212{
1213 return (!(host->flags & SDHCI_DEVICE_DEAD) &&
1214 ((mrq->cmd && mrq->cmd->error) ||
1215 (mrq->sbc && mrq->sbc->error) ||
1216 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
1217 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
1218}
1219
1220static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1221{
1222 int i;
1223
1224 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1225 if (host->mrqs_done[i] == mrq) {
1226 WARN_ON(1);
1227 return;
1228 }
1229 }
1230
1231 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1232 if (!host->mrqs_done[i]) {
1233 host->mrqs_done[i] = mrq;
1234 break;
1235 }
1236 }
1237
1238 WARN_ON(i >= SDHCI_MAX_MRQS);
1239
1240 tasklet_schedule(&host->finish_tasklet);
1241}
1242
1243static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1244{
1245 if (host->cmd && host->cmd->mrq == mrq)
1246 host->cmd = NULL;
1247
1248 if (host->data_cmd && host->data_cmd->mrq == mrq)
1249 host->data_cmd = NULL;
1250
1251 if (host->data && host->data->mrq == mrq)
1252 host->data = NULL;
1253
1254 if (sdhci_needs_reset(host, mrq))
1255 host->pending_reset = true;
1256
1257 __sdhci_finish_mrq(host, mrq);
1258}
1259
1260static void sdhci_finish_data(struct sdhci_host *host)
1261{
1262 struct mmc_command *data_cmd = host->data_cmd;
1263 struct mmc_data *data = host->data;
1264
1265 host->data = NULL;
1266 host->data_cmd = NULL;
1267
1268
1269
1270
1271
1272 if (data->error) {
1273 if (!host->cmd || host->cmd == data_cmd)
1274 sdhci_do_reset(host, SDHCI_RESET_CMD);
1275 sdhci_do_reset(host, SDHCI_RESET_DATA);
1276 }
1277
1278 if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
1279 (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
1280 sdhci_adma_table_post(host, data);
1281
1282
1283
1284
1285
1286
1287
1288
1289 if (data->error)
1290 data->bytes_xfered = 0;
1291 else
1292 data->bytes_xfered = data->blksz * data->blocks;
1293
1294
1295
1296
1297
1298
1299 if (data->stop &&
1300 (data->error ||
1301 !data->mrq->sbc)) {
1302
1303
1304
1305
1306
1307 if (data->mrq->cap_cmd_during_tfr) {
1308 sdhci_finish_mrq(host, data->mrq);
1309 } else {
1310
1311 host->cmd = NULL;
1312 sdhci_send_command(host, data->stop);
1313 }
1314 } else {
1315 sdhci_finish_mrq(host, data->mrq);
1316 }
1317}
1318
1319static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
1320 unsigned long timeout)
1321{
1322 if (sdhci_data_line_cmd(mrq->cmd))
1323 mod_timer(&host->data_timer, timeout);
1324 else
1325 mod_timer(&host->timer, timeout);
1326}
1327
1328static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
1329{
1330 if (sdhci_data_line_cmd(mrq->cmd))
1331 del_timer(&host->data_timer);
1332 else
1333 del_timer(&host->timer);
1334}
1335
1336void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
1337{
1338 int flags;
1339 u32 mask;
1340 unsigned long timeout;
1341
1342 WARN_ON(host->cmd);
1343
1344
1345 cmd->error = 0;
1346
1347 if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
1348 cmd->opcode == MMC_STOP_TRANSMISSION)
1349 cmd->flags |= MMC_RSP_BUSY;
1350
1351
1352 timeout = 10;
1353
1354 mask = SDHCI_CMD_INHIBIT;
1355 if (sdhci_data_line_cmd(cmd))
1356 mask |= SDHCI_DATA_INHIBIT;
1357
1358
1359
1360 if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
1361 mask &= ~SDHCI_DATA_INHIBIT;
1362
1363 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
1364 if (timeout == 0) {
1365 pr_err("%s: Controller never released inhibit bit(s).\n",
1366 mmc_hostname(host->mmc));
1367 sdhci_dumpregs(host);
1368 cmd->error = -EIO;
1369 sdhci_finish_mrq(host, cmd->mrq);
1370 return;
1371 }
1372 timeout--;
1373 mdelay(1);
1374 }
1375
1376 host->cmd = cmd;
1377 if (sdhci_data_line_cmd(cmd)) {
1378 WARN_ON(host->data_cmd);
1379 host->data_cmd = cmd;
1380 }
1381
1382 sdhci_prepare_data(host, cmd);
1383
1384 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
1385
1386 sdhci_set_transfer_mode(host, cmd);
1387
1388 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
1389 pr_err("%s: Unsupported response type!\n",
1390 mmc_hostname(host->mmc));
1391 cmd->error = -EINVAL;
1392 sdhci_finish_mrq(host, cmd->mrq);
1393 return;
1394 }
1395
1396 if (!(cmd->flags & MMC_RSP_PRESENT))
1397 flags = SDHCI_CMD_RESP_NONE;
1398 else if (cmd->flags & MMC_RSP_136)
1399 flags = SDHCI_CMD_RESP_LONG;
1400 else if (cmd->flags & MMC_RSP_BUSY)
1401 flags = SDHCI_CMD_RESP_SHORT_BUSY;
1402 else
1403 flags = SDHCI_CMD_RESP_SHORT;
1404
1405 if (cmd->flags & MMC_RSP_CRC)
1406 flags |= SDHCI_CMD_CRC;
1407 if (cmd->flags & MMC_RSP_OPCODE)
1408 flags |= SDHCI_CMD_INDEX;
1409
1410
1411 if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
1412 cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
1413 flags |= SDHCI_CMD_DATA;
1414
1415 timeout = jiffies;
1416 if (host->data_timeout)
1417 timeout += nsecs_to_jiffies(host->data_timeout);
1418 else if (!cmd->data && cmd->busy_timeout > 9000)
1419 timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
1420 else
1421 timeout += 10 * HZ;
1422 sdhci_mod_timer(host, cmd->mrq, timeout);
1423
1424 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
1425}
1426EXPORT_SYMBOL_GPL(sdhci_send_command);
1427
1428static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
1429{
1430 int i, reg;
1431
1432 for (i = 0; i < 4; i++) {
1433 reg = SDHCI_RESPONSE + (3 - i) * 4;
1434 cmd->resp[i] = sdhci_readl(host, reg);
1435 }
1436
1437 if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
1438 return;
1439
1440
1441 for (i = 0; i < 4; i++) {
1442 cmd->resp[i] <<= 8;
1443 if (i != 3)
1444 cmd->resp[i] |= cmd->resp[i + 1] >> 24;
1445 }
1446}
1447
1448static void sdhci_finish_command(struct sdhci_host *host)
1449{
1450 struct mmc_command *cmd = host->cmd;
1451
1452 host->cmd = NULL;
1453
1454 if (cmd->flags & MMC_RSP_PRESENT) {
1455 if (cmd->flags & MMC_RSP_136) {
1456 sdhci_read_rsp_136(host, cmd);
1457 } else {
1458 cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
1459 }
1460 }
1461
1462 if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
1463 mmc_command_done(host->mmc, cmd->mrq);
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475 if (cmd->flags & MMC_RSP_BUSY) {
1476 if (cmd->data) {
1477 DBG("Cannot wait for busy signal when also doing a data transfer");
1478 } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
1479 cmd == host->data_cmd) {
1480
1481 return;
1482 }
1483 }
1484
1485
1486 if (cmd == cmd->mrq->sbc) {
1487 sdhci_send_command(host, cmd->mrq->cmd);
1488 } else {
1489
1490
1491 if (host->data && host->data_early)
1492 sdhci_finish_data(host);
1493
1494 if (!cmd->data)
1495 sdhci_finish_mrq(host, cmd->mrq);
1496 }
1497}
1498
1499static u16 sdhci_get_preset_value(struct sdhci_host *host)
1500{
1501 u16 preset = 0;
1502
1503 switch (host->timing) {
1504 case MMC_TIMING_UHS_SDR12:
1505 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1506 break;
1507 case MMC_TIMING_UHS_SDR25:
1508 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1509 break;
1510 case MMC_TIMING_UHS_SDR50:
1511 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1512 break;
1513 case MMC_TIMING_UHS_SDR104:
1514 case MMC_TIMING_MMC_HS200:
1515 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1516 break;
1517 case MMC_TIMING_UHS_DDR50:
1518 case MMC_TIMING_MMC_DDR52:
1519 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1520 break;
1521 case MMC_TIMING_MMC_HS400:
1522 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
1523 break;
1524 default:
1525 pr_warn("%s: Invalid UHS-I mode selected\n",
1526 mmc_hostname(host->mmc));
1527 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1528 break;
1529 }
1530 return preset;
1531}
1532
1533u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
1534 unsigned int *actual_clock)
1535{
1536 int div = 0;
1537 int real_div = div, clk_mul = 1;
1538 u16 clk = 0;
1539 bool switch_base_clk = false;
1540
1541 if (host->version >= SDHCI_SPEC_300) {
1542 if (host->preset_enabled) {
1543 u16 pre_val;
1544
1545 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1546 pre_val = sdhci_get_preset_value(host);
1547 div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
1548 >> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
1549 if (host->clk_mul &&
1550 (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
1551 clk = SDHCI_PROG_CLOCK_MODE;
1552 real_div = div + 1;
1553 clk_mul = host->clk_mul;
1554 } else {
1555 real_div = max_t(int, 1, div << 1);
1556 }
1557 goto clock_set;
1558 }
1559
1560
1561
1562
1563
1564 if (host->clk_mul) {
1565 for (div = 1; div <= 1024; div++) {
1566 if ((host->max_clk * host->clk_mul / div)
1567 <= clock)
1568 break;
1569 }
1570 if ((host->max_clk * host->clk_mul / div) <= clock) {
1571
1572
1573
1574
1575 clk = SDHCI_PROG_CLOCK_MODE;
1576 real_div = div;
1577 clk_mul = host->clk_mul;
1578 div--;
1579 } else {
1580
1581
1582
1583
1584 switch_base_clk = true;
1585 }
1586 }
1587
1588 if (!host->clk_mul || switch_base_clk) {
1589
1590 if (host->max_clk <= clock)
1591 div = 1;
1592 else {
1593 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1594 div += 2) {
1595 if ((host->max_clk / div) <= clock)
1596 break;
1597 }
1598 }
1599 real_div = div;
1600 div >>= 1;
1601 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
1602 && !div && host->max_clk <= 25000000)
1603 div = 1;
1604 }
1605 } else {
1606
1607 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1608 if ((host->max_clk / div) <= clock)
1609 break;
1610 }
1611 real_div = div;
1612 div >>= 1;
1613 }
1614
1615clock_set:
1616 if (real_div)
1617 *actual_clock = (host->max_clk * clk_mul) / real_div;
1618 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1619 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1620 << SDHCI_DIVIDER_HI_SHIFT;
1621
1622 return clk;
1623}
1624EXPORT_SYMBOL_GPL(sdhci_calc_clk);
1625
1626void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1627{
1628 ktime_t timeout;
1629
1630 clk |= SDHCI_CLOCK_INT_EN;
1631 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1632
1633
1634 timeout = ktime_add_ms(ktime_get(), 20);
1635 while (1) {
1636 bool timedout = ktime_after(ktime_get(), timeout);
1637
1638 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1639 if (clk & SDHCI_CLOCK_INT_STABLE)
1640 break;
1641 if (timedout) {
1642 pr_err("%s: Internal clock never stabilised.\n",
1643 mmc_hostname(host->mmc));
1644 sdhci_dumpregs(host);
1645 return;
1646 }
1647 udelay(10);
1648 }
1649
1650 clk |= SDHCI_CLOCK_CARD_EN;
1651 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1652}
1653EXPORT_SYMBOL_GPL(sdhci_enable_clk);
1654
1655void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1656{
1657 u16 clk;
1658
1659 host->mmc->actual_clock = 0;
1660
1661 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1662
1663 if (clock == 0)
1664 return;
1665
1666 clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
1667 sdhci_enable_clk(host, clk);
1668}
1669EXPORT_SYMBOL_GPL(sdhci_set_clock);
1670
1671static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
1672 unsigned short vdd)
1673{
1674 struct mmc_host *mmc = host->mmc;
1675
1676 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
1677
1678 if (mode != MMC_POWER_OFF)
1679 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
1680 else
1681 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1682}
1683
1684void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
1685 unsigned short vdd)
1686{
1687 u8 pwr = 0;
1688
1689 if (mode != MMC_POWER_OFF) {
1690 switch (1 << vdd) {
1691 case MMC_VDD_165_195:
1692
1693
1694
1695
1696
1697
1698 case MMC_VDD_20_21:
1699 pwr = SDHCI_POWER_180;
1700 break;
1701 case MMC_VDD_29_30:
1702 case MMC_VDD_30_31:
1703 pwr = SDHCI_POWER_300;
1704 break;
1705 case MMC_VDD_32_33:
1706 case MMC_VDD_33_34:
1707 pwr = SDHCI_POWER_330;
1708 break;
1709 default:
1710 WARN(1, "%s: Invalid vdd %#x\n",
1711 mmc_hostname(host->mmc), vdd);
1712 break;
1713 }
1714 }
1715
1716 if (host->pwr == pwr)
1717 return;
1718
1719 host->pwr = pwr;
1720
1721 if (pwr == 0) {
1722 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1723 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1724 sdhci_runtime_pm_bus_off(host);
1725 } else {
1726
1727
1728
1729
1730 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
1731 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1732
1733
1734
1735
1736
1737
1738 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
1739 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1740
1741 pwr |= SDHCI_POWER_ON;
1742
1743 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1744
1745 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1746 sdhci_runtime_pm_bus_on(host);
1747
1748
1749
1750
1751
1752 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1753 mdelay(10);
1754 }
1755}
1756EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
1757
1758void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1759 unsigned short vdd)
1760{
1761 if (IS_ERR(host->mmc->supply.vmmc))
1762 sdhci_set_power_noreg(host, mode, vdd);
1763 else
1764 sdhci_set_power_reg(host, mode, vdd);
1765}
1766EXPORT_SYMBOL_GPL(sdhci_set_power);
1767
1768
1769
1770
1771
1772
1773
1774void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1775{
1776 struct sdhci_host *host;
1777 int present;
1778 unsigned long flags;
1779
1780 host = mmc_priv(mmc);
1781
1782
1783 present = mmc->ops->get_cd(mmc);
1784
1785 spin_lock_irqsave(&host->lock, flags);
1786
1787 sdhci_led_activate(host);
1788
1789
1790
1791
1792
1793 if (sdhci_auto_cmd12(host, mrq)) {
1794 if (mrq->stop) {
1795 mrq->data->stop = NULL;
1796 mrq->stop = NULL;
1797 }
1798 }
1799
1800 if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1801 mrq->cmd->error = -ENOMEDIUM;
1802 sdhci_finish_mrq(host, mrq);
1803 } else {
1804 if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
1805 sdhci_send_command(host, mrq->sbc);
1806 else
1807 sdhci_send_command(host, mrq->cmd);
1808 }
1809
1810 mmiowb();
1811 spin_unlock_irqrestore(&host->lock, flags);
1812}
1813EXPORT_SYMBOL_GPL(sdhci_request);
1814
1815void sdhci_set_bus_width(struct sdhci_host *host, int width)
1816{
1817 u8 ctrl;
1818
1819 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1820 if (width == MMC_BUS_WIDTH_8) {
1821 ctrl &= ~SDHCI_CTRL_4BITBUS;
1822 ctrl |= SDHCI_CTRL_8BITBUS;
1823 } else {
1824 if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
1825 ctrl &= ~SDHCI_CTRL_8BITBUS;
1826 if (width == MMC_BUS_WIDTH_4)
1827 ctrl |= SDHCI_CTRL_4BITBUS;
1828 else
1829 ctrl &= ~SDHCI_CTRL_4BITBUS;
1830 }
1831 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1832}
1833EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
1834
1835void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
1836{
1837 u16 ctrl_2;
1838
1839 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1840
1841 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1842 if ((timing == MMC_TIMING_MMC_HS200) ||
1843 (timing == MMC_TIMING_UHS_SDR104))
1844 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1845 else if (timing == MMC_TIMING_UHS_SDR12)
1846 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1847 else if (timing == MMC_TIMING_UHS_SDR25)
1848 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
1849 else if (timing == MMC_TIMING_UHS_SDR50)
1850 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
1851 else if ((timing == MMC_TIMING_UHS_DDR50) ||
1852 (timing == MMC_TIMING_MMC_DDR52))
1853 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1854 else if (timing == MMC_TIMING_MMC_HS400)
1855 ctrl_2 |= SDHCI_CTRL_HS400;
1856 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1857}
1858EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
1859
1860void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1861{
1862 struct sdhci_host *host = mmc_priv(mmc);
1863 u8 ctrl;
1864
1865 if (ios->power_mode == MMC_POWER_UNDEFINED)
1866 return;
1867
1868 if (host->flags & SDHCI_DEVICE_DEAD) {
1869 if (!IS_ERR(mmc->supply.vmmc) &&
1870 ios->power_mode == MMC_POWER_OFF)
1871 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1872 return;
1873 }
1874
1875
1876
1877
1878
1879 if (ios->power_mode == MMC_POWER_OFF) {
1880 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
1881 sdhci_reinit(host);
1882 }
1883
1884 if (host->version >= SDHCI_SPEC_300 &&
1885 (ios->power_mode == MMC_POWER_UP) &&
1886 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
1887 sdhci_enable_preset_value(host, false);
1888
1889 if (!ios->clock || ios->clock != host->clock) {
1890 host->ops->set_clock(host, ios->clock);
1891 host->clock = ios->clock;
1892
1893 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
1894 host->clock) {
1895 host->timeout_clk = host->mmc->actual_clock ?
1896 host->mmc->actual_clock / 1000 :
1897 host->clock / 1000;
1898 host->mmc->max_busy_timeout =
1899 host->ops->get_max_timeout_count ?
1900 host->ops->get_max_timeout_count(host) :
1901 1 << 27;
1902 host->mmc->max_busy_timeout /= host->timeout_clk;
1903 }
1904 }
1905
1906 if (host->ops->set_power)
1907 host->ops->set_power(host, ios->power_mode, ios->vdd);
1908 else
1909 sdhci_set_power(host, ios->power_mode, ios->vdd);
1910
1911 if (host->ops->platform_send_init_74_clocks)
1912 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
1913
1914 host->ops->set_bus_width(host, ios->bus_width);
1915
1916 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1917
1918 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
1919 if (ios->timing == MMC_TIMING_SD_HS ||
1920 ios->timing == MMC_TIMING_MMC_HS ||
1921 ios->timing == MMC_TIMING_MMC_HS400 ||
1922 ios->timing == MMC_TIMING_MMC_HS200 ||
1923 ios->timing == MMC_TIMING_MMC_DDR52 ||
1924 ios->timing == MMC_TIMING_UHS_SDR50 ||
1925 ios->timing == MMC_TIMING_UHS_SDR104 ||
1926 ios->timing == MMC_TIMING_UHS_DDR50 ||
1927 ios->timing == MMC_TIMING_UHS_SDR25)
1928 ctrl |= SDHCI_CTRL_HISPD;
1929 else
1930 ctrl &= ~SDHCI_CTRL_HISPD;
1931 }
1932
1933 if (host->version >= SDHCI_SPEC_300) {
1934 u16 clk, ctrl_2;
1935
1936 if (!host->preset_enabled) {
1937 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1938
1939
1940
1941
1942 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1943 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
1944 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
1945 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
1946 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
1947 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1948 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
1949 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
1950 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
1951 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
1952 else {
1953 pr_warn("%s: invalid driver type, default to driver type B\n",
1954 mmc_hostname(mmc));
1955 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1956 }
1957
1958 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1959 } else {
1960
1961
1962
1963
1964
1965
1966
1967
1968 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1969 clk &= ~SDHCI_CLOCK_CARD_EN;
1970 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1971
1972 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1973
1974
1975 host->ops->set_clock(host, host->clock);
1976 }
1977
1978
1979 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1980 clk &= ~SDHCI_CLOCK_CARD_EN;
1981 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1982
1983 host->ops->set_uhs_signaling(host, ios->timing);
1984 host->timing = ios->timing;
1985
1986 if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
1987 ((ios->timing == MMC_TIMING_UHS_SDR12) ||
1988 (ios->timing == MMC_TIMING_UHS_SDR25) ||
1989 (ios->timing == MMC_TIMING_UHS_SDR50) ||
1990 (ios->timing == MMC_TIMING_UHS_SDR104) ||
1991 (ios->timing == MMC_TIMING_UHS_DDR50) ||
1992 (ios->timing == MMC_TIMING_MMC_DDR52))) {
1993 u16 preset;
1994
1995 sdhci_enable_preset_value(host, true);
1996 preset = sdhci_get_preset_value(host);
1997 ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
1998 >> SDHCI_PRESET_DRV_SHIFT;
1999 }
2000
2001
2002 host->ops->set_clock(host, host->clock);
2003 } else
2004 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2005
2006
2007
2008
2009
2010
2011 if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
2012 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
2013
2014 mmiowb();
2015}
2016EXPORT_SYMBOL_GPL(sdhci_set_ios);
2017
2018static int sdhci_get_cd(struct mmc_host *mmc)
2019{
2020 struct sdhci_host *host = mmc_priv(mmc);
2021 int gpio_cd = mmc_gpio_get_cd(mmc);
2022
2023 if (host->flags & SDHCI_DEVICE_DEAD)
2024 return 0;
2025
2026
2027 if (!mmc_card_is_removable(host->mmc))
2028 return 1;
2029
2030
2031
2032
2033
2034 if (gpio_cd >= 0)
2035 return !!gpio_cd;
2036
2037
2038 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2039 return 1;
2040
2041
2042 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2043}
2044
2045static int sdhci_check_ro(struct sdhci_host *host)
2046{
2047 unsigned long flags;
2048 int is_readonly;
2049
2050 spin_lock_irqsave(&host->lock, flags);
2051
2052 if (host->flags & SDHCI_DEVICE_DEAD)
2053 is_readonly = 0;
2054 else if (host->ops->get_ro)
2055 is_readonly = host->ops->get_ro(host);
2056 else if (mmc_can_gpio_ro(host->mmc))
2057 is_readonly = mmc_gpio_get_ro(host->mmc);
2058 else
2059 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
2060 & SDHCI_WRITE_PROTECT);
2061
2062 spin_unlock_irqrestore(&host->lock, flags);
2063
2064
2065 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2066 !is_readonly : is_readonly;
2067}
2068
2069#define SAMPLE_COUNT 5
2070
2071static int sdhci_get_ro(struct mmc_host *mmc)
2072{
2073 struct sdhci_host *host = mmc_priv(mmc);
2074 int i, ro_count;
2075
2076 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
2077 return sdhci_check_ro(host);
2078
2079 ro_count = 0;
2080 for (i = 0; i < SAMPLE_COUNT; i++) {
2081 if (sdhci_check_ro(host)) {
2082 if (++ro_count > SAMPLE_COUNT / 2)
2083 return 1;
2084 }
2085 msleep(30);
2086 }
2087 return 0;
2088}
2089
2090static void sdhci_hw_reset(struct mmc_host *mmc)
2091{
2092 struct sdhci_host *host = mmc_priv(mmc);
2093
2094 if (host->ops && host->ops->hw_reset)
2095 host->ops->hw_reset(host);
2096}
2097
2098static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
2099{
2100 if (!(host->flags & SDHCI_DEVICE_DEAD)) {
2101 if (enable)
2102 host->ier |= SDHCI_INT_CARD_INT;
2103 else
2104 host->ier &= ~SDHCI_INT_CARD_INT;
2105
2106 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2107 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2108 mmiowb();
2109 }
2110}
2111
2112void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
2113{
2114 struct sdhci_host *host = mmc_priv(mmc);
2115 unsigned long flags;
2116
2117 if (enable)
2118 pm_runtime_get_noresume(host->mmc->parent);
2119
2120 spin_lock_irqsave(&host->lock, flags);
2121 if (enable)
2122 host->flags |= SDHCI_SDIO_IRQ_ENABLED;
2123 else
2124 host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
2125
2126 sdhci_enable_sdio_irq_nolock(host, enable);
2127 spin_unlock_irqrestore(&host->lock, flags);
2128
2129 if (!enable)
2130 pm_runtime_put_noidle(host->mmc->parent);
2131}
2132EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
2133
2134int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
2135 struct mmc_ios *ios)
2136{
2137 struct sdhci_host *host = mmc_priv(mmc);
2138 u16 ctrl;
2139 int ret;
2140
2141
2142
2143
2144
2145 if (host->version < SDHCI_SPEC_300)
2146 return 0;
2147
2148 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2149
2150 switch (ios->signal_voltage) {
2151 case MMC_SIGNAL_VOLTAGE_330:
2152 if (!(host->flags & SDHCI_SIGNALING_330))
2153 return -EINVAL;
2154
2155 ctrl &= ~SDHCI_CTRL_VDD_180;
2156 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2157
2158 if (!IS_ERR(mmc->supply.vqmmc)) {
2159 ret = mmc_regulator_set_vqmmc(mmc, ios);
2160 if (ret) {
2161 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
2162 mmc_hostname(mmc));
2163 return -EIO;
2164 }
2165 }
2166
2167 usleep_range(5000, 5500);
2168
2169
2170 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2171 if (!(ctrl & SDHCI_CTRL_VDD_180))
2172 return 0;
2173
2174 pr_warn("%s: 3.3V regulator output did not became stable\n",
2175 mmc_hostname(mmc));
2176
2177 return -EAGAIN;
2178 case MMC_SIGNAL_VOLTAGE_180:
2179 if (!(host->flags & SDHCI_SIGNALING_180))
2180 return -EINVAL;
2181 if (!IS_ERR(mmc->supply.vqmmc)) {
2182 ret = mmc_regulator_set_vqmmc(mmc, ios);
2183 if (ret) {
2184 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2185 mmc_hostname(mmc));
2186 return -EIO;
2187 }
2188 }
2189
2190
2191
2192
2193
2194 ctrl |= SDHCI_CTRL_VDD_180;
2195 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2196
2197
2198 if (host->ops->voltage_switch)
2199 host->ops->voltage_switch(host);
2200
2201
2202 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2203 if (ctrl & SDHCI_CTRL_VDD_180)
2204 return 0;
2205
2206 pr_warn("%s: 1.8V regulator output did not became stable\n",
2207 mmc_hostname(mmc));
2208
2209 return -EAGAIN;
2210 case MMC_SIGNAL_VOLTAGE_120:
2211 if (!(host->flags & SDHCI_SIGNALING_120))
2212 return -EINVAL;
2213 if (!IS_ERR(mmc->supply.vqmmc)) {
2214 ret = mmc_regulator_set_vqmmc(mmc, ios);
2215 if (ret) {
2216 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2217 mmc_hostname(mmc));
2218 return -EIO;
2219 }
2220 }
2221 return 0;
2222 default:
2223
2224 return 0;
2225 }
2226}
2227EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2228
2229static int sdhci_card_busy(struct mmc_host *mmc)
2230{
2231 struct sdhci_host *host = mmc_priv(mmc);
2232 u32 present_state;
2233
2234
2235 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2236
2237 return !(present_state & SDHCI_DATA_0_LVL_MASK);
2238}
2239
2240static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2241{
2242 struct sdhci_host *host = mmc_priv(mmc);
2243 unsigned long flags;
2244
2245 spin_lock_irqsave(&host->lock, flags);
2246 host->flags |= SDHCI_HS400_TUNING;
2247 spin_unlock_irqrestore(&host->lock, flags);
2248
2249 return 0;
2250}
2251
2252void sdhci_start_tuning(struct sdhci_host *host)
2253{
2254 u16 ctrl;
2255
2256 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2257 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2258 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2259 ctrl |= SDHCI_CTRL_TUNED_CLK;
2260 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2273 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2274}
2275EXPORT_SYMBOL_GPL(sdhci_start_tuning);
2276
2277void sdhci_end_tuning(struct sdhci_host *host)
2278{
2279 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2280 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2281}
2282EXPORT_SYMBOL_GPL(sdhci_end_tuning);
2283
2284void sdhci_reset_tuning(struct sdhci_host *host)
2285{
2286 u16 ctrl;
2287
2288 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2289 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2290 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2291 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2292}
2293EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
2294
2295static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2296{
2297 sdhci_reset_tuning(host);
2298
2299 sdhci_do_reset(host, SDHCI_RESET_CMD);
2300 sdhci_do_reset(host, SDHCI_RESET_DATA);
2301
2302 sdhci_end_tuning(host);
2303
2304 mmc_abort_tuning(host->mmc, opcode);
2305}
2306
2307
2308
2309
2310
2311
2312
2313
2314void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2315{
2316 struct mmc_host *mmc = host->mmc;
2317 struct mmc_command cmd = {};
2318 struct mmc_request mrq = {};
2319 unsigned long flags;
2320 u32 b = host->sdma_boundary;
2321
2322 spin_lock_irqsave(&host->lock, flags);
2323
2324 cmd.opcode = opcode;
2325 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2326 cmd.mrq = &mrq;
2327
2328 mrq.cmd = &cmd;
2329
2330
2331
2332
2333
2334 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2335 mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2336 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2337 else
2338 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2339
2340
2341
2342
2343
2344
2345
2346 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2347
2348 sdhci_send_command(host, &cmd);
2349
2350 host->cmd = NULL;
2351
2352 sdhci_del_timer(host, &mrq);
2353
2354 host->tuning_done = 0;
2355
2356 mmiowb();
2357 spin_unlock_irqrestore(&host->lock, flags);
2358
2359
2360 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2361 msecs_to_jiffies(50));
2362
2363}
2364EXPORT_SYMBOL_GPL(sdhci_send_tuning);
2365
2366static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2367{
2368 int i;
2369
2370
2371
2372
2373
2374 for (i = 0; i < MAX_TUNING_LOOP; i++) {
2375 u16 ctrl;
2376
2377 sdhci_send_tuning(host, opcode);
2378
2379 if (!host->tuning_done) {
2380 pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
2381 mmc_hostname(host->mmc));
2382 sdhci_abort_tuning(host, opcode);
2383 return -ETIMEDOUT;
2384 }
2385
		/* Spec does not require a delay between tuning cycles */
2387 if (host->tuning_delay > 0)
2388 mdelay(host->tuning_delay);
2389
2390 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2391 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2392 if (ctrl & SDHCI_CTRL_TUNED_CLK)
				return 0; /* Success! */
2394 break;
2395 }
2396
2397 }
2398
2399 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2400 mmc_hostname(host->mmc));
2401 sdhci_reset_tuning(host);
2402 return -EAGAIN;
2403}
2404
2405int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2406{
2407 struct sdhci_host *host = mmc_priv(mmc);
2408 int err = 0;
2409 unsigned int tuning_count = 0;
2410 bool hs400_tuning;
2411
2412 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2413
2414 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2415 tuning_count = host->tuning_count;
2416
	/*
	 * The Host Controller needs tuning in case of SDR104 and DDR50
	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
	 * the Capabilities register.
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
	 */
2424 switch (host->timing) {
	/* When switching to HS400, tuning is done in HS200 mode */
2426 case MMC_TIMING_MMC_HS400:
2427 err = -EINVAL;
2428 goto out;
2429
2430 case MMC_TIMING_MMC_HS200:
		/*
		 * Periodic re-tuning for HS400 is not expected to be
		 * needed, so disable it here.
		 */
2435 if (hs400_tuning)
2436 tuning_count = 0;
2437 break;
2438
2439 case MMC_TIMING_UHS_SDR104:
2440 case MMC_TIMING_UHS_DDR50:
2441 break;
2442
2443 case MMC_TIMING_UHS_SDR50:
2444 if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2445 break;
		/* FALLTHROUGH */

2448 default:
2449 goto out;
2450 }
2451
2452 if (host->ops->platform_execute_tuning) {
2453 err = host->ops->platform_execute_tuning(host, opcode);
2454 goto out;
2455 }
2456
2457 host->mmc->retune_period = tuning_count;
2458
2459 if (host->tuning_delay < 0)
2460 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2461
2462 sdhci_start_tuning(host);
2463
2464 host->tuning_err = __sdhci_execute_tuning(host, opcode);
2465
2466 sdhci_end_tuning(host);
2467out:
2468 host->flags &= ~SDHCI_HS400_TUNING;
2469
2470 return err;
2471}
2472EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
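
/*
 * Alternatively, a variant driver can take over the whole sequence through
 * the ->platform_execute_tuning() hook checked above.  A rough sketch with
 * hypothetical foo_* names:
 *
 *	static int foo_platform_execute_tuning(struct sdhci_host *host,
 *					       u32 opcode)
 *	{
 *		... program the vendor delay chain, then verify each tap
 *		    by issuing CMD19/CMD21 tuning blocks ...
 *		return 0;
 *	}
 *
 *	static const struct sdhci_ops foo_sdhci_ops = {
 *		.platform_execute_tuning = foo_platform_execute_tuning,
 *	};
 */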
2473
2474static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2475{
	/* Host Controller v3.00 defines preset value registers */
2477 if (host->version < SDHCI_SPEC_300)
2478 return;
2479
	/*
	 * We only enable or disable Preset Value if they are not already
	 * enabled or disabled respectively. Otherwise, we bail out.
	 */
2484 if (host->preset_enabled != enable) {
2485 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2486
2487 if (enable)
2488 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2489 else
2490 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2491
2492 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2493
2494 if (enable)
2495 host->flags |= SDHCI_PV_ENABLED;
2496 else
2497 host->flags &= ~SDHCI_PV_ENABLED;
2498
2499 host->preset_enabled = enable;
2500 }
2501}
2502
2503static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2504 int err)
2505{
2506 struct sdhci_host *host = mmc_priv(mmc);
2507 struct mmc_data *data = mrq->data;
2508
2509 if (data->host_cookie != COOKIE_UNMAPPED)
2510 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2511 mmc_get_dma_dir(data));
2512
2513 data->host_cookie = COOKIE_UNMAPPED;
2514}
2515
2516static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2517{
2518 struct sdhci_host *host = mmc_priv(mmc);
2519
2520 mrq->data->host_cookie = COOKIE_UNMAPPED;
2521
	/*
	 * No pre-mapping in the pre hook if we're using the bounce buffer,
	 * for that we would need two bounce buffers since one buffer is
	 * in flight when this is getting called.
	 */
2527 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
2528 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2529}
2530
2531static inline bool sdhci_has_requests(struct sdhci_host *host)
2532{
2533 return host->cmd || host->data_cmd;
2534}
2535
2536static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2537{
2538 if (host->data_cmd) {
2539 host->data_cmd->error = err;
2540 sdhci_finish_mrq(host, host->data_cmd->mrq);
2541 }
2542
2543 if (host->cmd) {
2544 host->cmd->error = err;
2545 sdhci_finish_mrq(host, host->cmd->mrq);
2546 }
2547}
2548
2549static void sdhci_card_event(struct mmc_host *mmc)
2550{
2551 struct sdhci_host *host = mmc_priv(mmc);
2552 unsigned long flags;
2553 int present;
2554
	/* First check if client has provided their own card event */
2556 if (host->ops->card_event)
2557 host->ops->card_event(host);
2558
2559 present = mmc->ops->get_cd(mmc);
2560
2561 spin_lock_irqsave(&host->lock, flags);
2562
	/* Check sdhci_has_requests() first in case we are runtime suspended */
2564 if (sdhci_has_requests(host) && !present) {
2565 pr_err("%s: Card removed during transfer!\n",
2566 mmc_hostname(host->mmc));
2567 pr_err("%s: Resetting controller.\n",
2568 mmc_hostname(host->mmc));
2569
2570 sdhci_do_reset(host, SDHCI_RESET_CMD);
2571 sdhci_do_reset(host, SDHCI_RESET_DATA);
2572
2573 sdhci_error_out_mrqs(host, -ENOMEDIUM);
2574 }
2575
2576 spin_unlock_irqrestore(&host->lock, flags);
2577}
2578
2579static const struct mmc_host_ops sdhci_ops = {
2580 .request = sdhci_request,
2581 .post_req = sdhci_post_req,
2582 .pre_req = sdhci_pre_req,
2583 .set_ios = sdhci_set_ios,
2584 .get_cd = sdhci_get_cd,
2585 .get_ro = sdhci_get_ro,
2586 .hw_reset = sdhci_hw_reset,
2587 .enable_sdio_irq = sdhci_enable_sdio_irq,
2588 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
2589 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
2590 .execute_tuning = sdhci_execute_tuning,
2591 .card_event = sdhci_card_event,
2592 .card_busy = sdhci_card_busy,
2593};
2594
/*****************************************************************************\
 *                                                                           *
 * Tasklets                                                                  *
 *                                                                           *
\*****************************************************************************/

2601static bool sdhci_request_done(struct sdhci_host *host)
2602{
2603 unsigned long flags;
2604 struct mmc_request *mrq;
2605 int i;
2606
2607 spin_lock_irqsave(&host->lock, flags);
2608
2609 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
2610 mrq = host->mrqs_done[i];
2611 if (mrq)
2612 break;
2613 }
2614
2615 if (!mrq) {
2616 spin_unlock_irqrestore(&host->lock, flags);
2617 return true;
2618 }
2619
2620 sdhci_del_timer(host, mrq);
2621
	/*
	 * Always unmap the data buffers if they were mapped by
	 * sdhci_prepare_data() whenever we finish with a request.
	 * This avoids leaking DMA mappings on error.
	 */
2627 if (host->flags & SDHCI_REQ_USE_DMA) {
2628 struct mmc_data *data = mrq->data;
2629
2630 if (data && data->host_cookie == COOKIE_MAPPED) {
2631 if (host->bounce_buffer) {
				/*
				 * On reads, copy the bounced data into the
				 * sglist
				 */
2636 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
2637 unsigned int length = data->bytes_xfered;
2638
2639 if (length > host->bounce_buffer_size) {
2640 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
2641 mmc_hostname(host->mmc),
2642 host->bounce_buffer_size,
2643 data->bytes_xfered);
					/* Cap it down and continue */
2645 length = host->bounce_buffer_size;
2646 }
2647 dma_sync_single_for_cpu(
2648 host->mmc->parent,
2649 host->bounce_addr,
2650 host->bounce_buffer_size,
2651 DMA_FROM_DEVICE);
2652 sg_copy_from_buffer(data->sg,
2653 data->sg_len,
2654 host->bounce_buffer,
2655 length);
2656 } else {
				/* No copying, just switch ownership */
2658 dma_sync_single_for_cpu(
2659 host->mmc->parent,
2660 host->bounce_addr,
2661 host->bounce_buffer_size,
2662 mmc_get_dma_dir(data));
2663 }
2664 } else {
			/* Unmap the raw data */
2666 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
2667 data->sg_len,
2668 mmc_get_dma_dir(data));
2669 }
2670 data->host_cookie = COOKIE_UNMAPPED;
2671 }
2672 }
2673
	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
2678 if (sdhci_needs_reset(host, mrq)) {
		/*
		 * Do not finish until command and data lines are available
		 * for reset.  Note there can only be one other mrq, so it
		 * cannot also be in mrqs_done, otherwise host->cmd and
		 * host->data_cmd would both be null.
		 */
2685 if (host->cmd || host->data_cmd) {
2686 spin_unlock_irqrestore(&host->lock, flags);
2687 return true;
2688 }
2689
		/* Some controllers need this kick or reset won't work here */
2691 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
2693 host->ops->set_clock(host, host->clock);
2694
		/*
		 * The spec says we should do both at the same time, but
		 * Ricoh controllers do not like that.
		 */
2697 sdhci_do_reset(host, SDHCI_RESET_CMD);
2698 sdhci_do_reset(host, SDHCI_RESET_DATA);
2699
2700 host->pending_reset = false;
2701 }
2702
2703 if (!sdhci_has_requests(host))
2704 sdhci_led_deactivate(host);
2705
2706 host->mrqs_done[i] = NULL;
2707
2708 mmiowb();
2709 spin_unlock_irqrestore(&host->lock, flags);
2710
2711 mmc_request_done(host->mmc, mrq);
2712
2713 return false;
2714}
2715
2716static void sdhci_tasklet_finish(unsigned long param)
2717{
2718 struct sdhci_host *host = (struct sdhci_host *)param;
2719
2720 while (!sdhci_request_done(host))
2721 ;
2722}
2723
2724static void sdhci_timeout_timer(struct timer_list *t)
2725{
2726 struct sdhci_host *host;
2727 unsigned long flags;
2728
2729 host = from_timer(host, t, timer);
2730
2731 spin_lock_irqsave(&host->lock, flags);
2732
2733 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
2734 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
2735 mmc_hostname(host->mmc));
2736 sdhci_dumpregs(host);
2737
2738 host->cmd->error = -ETIMEDOUT;
2739 sdhci_finish_mrq(host, host->cmd->mrq);
2740 }
2741
2742 mmiowb();
2743 spin_unlock_irqrestore(&host->lock, flags);
2744}
2745
2746static void sdhci_timeout_data_timer(struct timer_list *t)
2747{
2748 struct sdhci_host *host;
2749 unsigned long flags;
2750
2751 host = from_timer(host, t, data_timer);
2752
2753 spin_lock_irqsave(&host->lock, flags);
2754
2755 if (host->data || host->data_cmd ||
2756 (host->cmd && sdhci_data_line_cmd(host->cmd))) {
2757 pr_err("%s: Timeout waiting for hardware interrupt.\n",
2758 mmc_hostname(host->mmc));
2759 sdhci_dumpregs(host);
2760
2761 if (host->data) {
2762 host->data->error = -ETIMEDOUT;
2763 sdhci_finish_data(host);
2764 } else if (host->data_cmd) {
2765 host->data_cmd->error = -ETIMEDOUT;
2766 sdhci_finish_mrq(host, host->data_cmd->mrq);
2767 } else {
2768 host->cmd->error = -ETIMEDOUT;
2769 sdhci_finish_mrq(host, host->cmd->mrq);
2770 }
2771 }
2772
2773 mmiowb();
2774 spin_unlock_irqrestore(&host->lock, flags);
2775}
2776
/*****************************************************************************\
 *                                                                           *
 * Interrupt handling                                                        *
 *                                                                           *
\*****************************************************************************/
2782
2783static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
2784{
	/* Handle auto-CMD12 error */
2786 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
2787 struct mmc_request *mrq = host->data_cmd->mrq;
2788 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
2789 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
2790 SDHCI_INT_DATA_TIMEOUT :
2791 SDHCI_INT_DATA_CRC;
2792
		/* Treat auto-CMD12 error the same as data error */
2794 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
2795 *intmask_p |= data_err_bit;
2796 return;
2797 }
2798 }
2799
2800 if (!host->cmd) {
		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits.  Until that is done, there very well might be
		 * more interrupts, so ignore them in that case.
		 */
2806 if (host->pending_reset)
2807 return;
2808 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
2809 mmc_hostname(host->mmc), (unsigned)intmask);
2810 sdhci_dumpregs(host);
2811 return;
2812 }
2813
2814 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
2815 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
2816 if (intmask & SDHCI_INT_TIMEOUT)
2817 host->cmd->error = -ETIMEDOUT;
2818 else
2819 host->cmd->error = -EILSEQ;
2820
		/* Treat data command CRC error the same as data CRC error */
2822 if (host->cmd->data &&
2823 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
2824 SDHCI_INT_CRC) {
2825 host->cmd = NULL;
2826 *intmask_p |= SDHCI_INT_DATA_CRC;
2827 return;
2828 }
2829
2830 sdhci_finish_mrq(host, host->cmd->mrq);
2831 return;
2832 }
2833
	/* Handle auto-CMD23 error */
2835 if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
2836 struct mmc_request *mrq = host->cmd->mrq;
2837 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
2838 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
2839 -ETIMEDOUT :
2840 -EILSEQ;
2841
2842 if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
2843 mrq->sbc->error = err;
2844 sdhci_finish_mrq(host, mrq);
2845 return;
2846 }
2847 }
2848
2849 if (intmask & SDHCI_INT_RESPONSE)
2850 sdhci_finish_command(host);
2851}
2852
2853static void sdhci_adma_show_error(struct sdhci_host *host)
2854{
2855 void *desc = host->adma_table;
2856
2857 sdhci_dumpregs(host);
2858
2859 while (true) {
2860 struct sdhci_adma2_64_desc *dma_desc = desc;
2861
2862 if (host->flags & SDHCI_USE_64_BIT_DMA)
2863 DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2864 desc, le32_to_cpu(dma_desc->addr_hi),
2865 le32_to_cpu(dma_desc->addr_lo),
2866 le16_to_cpu(dma_desc->len),
2867 le16_to_cpu(dma_desc->cmd));
2868 else
2869 DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2870 desc, le32_to_cpu(dma_desc->addr_lo),
2871 le16_to_cpu(dma_desc->len),
2872 le16_to_cpu(dma_desc->cmd));
2873
2874 desc += host->desc_sz;
2875
2876 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2877 break;
2878 }
2879}
2880
2881static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2882{
2883 u32 command;
2884
	/* CMD19 generates _only_ Buffer Read Ready interrupt */
2886 if (intmask & SDHCI_INT_DATA_AVAIL) {
2887 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
2888 if (command == MMC_SEND_TUNING_BLOCK ||
2889 command == MMC_SEND_TUNING_BLOCK_HS200) {
2890 host->tuning_done = 1;
2891 wake_up(&host->buf_ready_int);
2892 return;
2893 }
2894 }
2895
2896 if (!host->data) {
2897 struct mmc_command *data_cmd = host->data_cmd;
2898
		/*
		 * The "data complete" interrupt is also used to indicate
		 * that a busy state has ended. See the comment above in
		 * sdhci_cmd_irq().
		 */
2904 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
2905 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2906 host->data_cmd = NULL;
2907 data_cmd->error = -ETIMEDOUT;
2908 sdhci_finish_mrq(host, data_cmd->mrq);
2909 return;
2910 }
2911 if (intmask & SDHCI_INT_DATA_END) {
2912 host->data_cmd = NULL;
				/*
				 * Some cards handle busy-end interrupt
				 * before the command completed, so make
				 * sure we do things in the proper order.
				 */
2918 if (host->cmd == data_cmd)
2919 return;
2920
2921 sdhci_finish_mrq(host, data_cmd->mrq);
2922 return;
2923 }
2924 }
2925
		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits. Until that is done, there very well might be
		 * more interrupts, so ignore them in that case.
		 */
2931 if (host->pending_reset)
2932 return;
2933
2934 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
2935 mmc_hostname(host->mmc), (unsigned)intmask);
2936 sdhci_dumpregs(host);
2937
2938 return;
2939 }
2940
2941 if (intmask & SDHCI_INT_DATA_TIMEOUT)
2942 host->data->error = -ETIMEDOUT;
2943 else if (intmask & SDHCI_INT_DATA_END_BIT)
2944 host->data->error = -EILSEQ;
2945 else if ((intmask & SDHCI_INT_DATA_CRC) &&
2946 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2947 != MMC_BUS_TEST_R)
2948 host->data->error = -EILSEQ;
2949 else if (intmask & SDHCI_INT_ADMA_ERROR) {
2950 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2951 sdhci_adma_show_error(host);
2952 host->data->error = -EIO;
2953 if (host->ops->adma_workaround)
2954 host->ops->adma_workaround(host, intmask);
2955 }
2956
2957 if (host->data->error)
2958 sdhci_finish_data(host);
2959 else {
2960 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2961 sdhci_transfer_pio(host);
2962
		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 *
		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
		 * should return a valid address to continue from, but as
		 * some controllers are faulty, don't trust them.
		 */
2972 if (intmask & SDHCI_INT_DMA_END) {
2973 dma_addr_t dmastart, dmanow;
2974
2975 dmastart = sdhci_sdma_address(host);
2976 dmanow = dmastart + host->data->bytes_xfered;
2977
			/*
			 * Force update to the next DMA block boundary.
			 */
2980 dmanow = (dmanow &
2981 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2982 SDHCI_DEFAULT_BOUNDARY_SIZE;
2983 host->data->bytes_xfered = dmanow - dmastart;
2984 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
2985 &dmastart, host->data->bytes_xfered, &dmanow);
2986 sdhci_set_sdma_addr(host, dmanow);
2987 }
2988
2989 if (intmask & SDHCI_INT_DATA_END) {
2990 if (host->cmd == host->data_cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
2996 host->data_early = 1;
2997 } else {
2998 sdhci_finish_data(host);
2999 }
3000 }
3001 }
3002}
3003
3004static irqreturn_t sdhci_irq(int irq, void *dev_id)
3005{
3006 irqreturn_t result = IRQ_NONE;
3007 struct sdhci_host *host = dev_id;
3008 u32 intmask, mask, unexpected = 0;
	int max_loops = 16;	/* bound the loop so a stuck source cannot livelock */
3010
3011 spin_lock(&host->lock);
3012
3013 if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
3014 spin_unlock(&host->lock);
3015 return IRQ_NONE;
3016 }
3017
3018 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3019 if (!intmask || intmask == 0xffffffff) {
3020 result = IRQ_NONE;
3021 goto out;
3022 }
3023
3024 do {
3025 DBG("IRQ status 0x%08x\n", intmask);
3026
3027 if (host->ops->irq) {
3028 intmask = host->ops->irq(host, intmask);
3029 if (!intmask)
3030 goto cont;
3031 }
3032
		/* Clear selected interrupts. */
3034 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3035 SDHCI_INT_BUS_POWER);
3036 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3037
3038 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3039 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3040 SDHCI_CARD_PRESENT;
3041
			/*
			 * There is an observation on i.MX esdhc: the INSERT
			 * bit will be immediately set again when it gets
			 * cleared, if a card is inserted.  We have to mask
			 * the irq to prevent an interrupt storm which would
			 * freeze the system.  The REMOVE bit gets the same
			 * treatment.
			 *
			 * More testing is needed here to ensure this works
			 * for other platforms though.
			 */
3053 host->ier &= ~(SDHCI_INT_CARD_INSERT |
3054 SDHCI_INT_CARD_REMOVE);
3055 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
3056 SDHCI_INT_CARD_INSERT;
3057 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3058 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3059
3060 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
3061 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
3062
3063 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
3064 SDHCI_INT_CARD_REMOVE);
3065 result = IRQ_WAKE_THREAD;
3066 }
3067
3068 if (intmask & SDHCI_INT_CMD_MASK)
3069 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
3070
3071 if (intmask & SDHCI_INT_DATA_MASK)
3072 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
3073
3074 if (intmask & SDHCI_INT_BUS_POWER)
3075 pr_err("%s: Card is consuming too much power!\n",
3076 mmc_hostname(host->mmc));
3077
3078 if (intmask & SDHCI_INT_RETUNE)
3079 mmc_retune_needed(host->mmc);
3080
3081 if ((intmask & SDHCI_INT_CARD_INT) &&
3082 (host->ier & SDHCI_INT_CARD_INT)) {
3083 sdhci_enable_sdio_irq_nolock(host, false);
3084 host->thread_isr |= SDHCI_INT_CARD_INT;
3085 result = IRQ_WAKE_THREAD;
3086 }
3087
3088 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
3089 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3090 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
3091 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
3092
3093 if (intmask) {
3094 unexpected |= intmask;
3095 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3096 }
3097cont:
3098 if (result == IRQ_NONE)
3099 result = IRQ_HANDLED;
3100
3101 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3102 } while (intmask && --max_loops);
3103out:
3104 spin_unlock(&host->lock);
3105
3106 if (unexpected) {
3107 pr_err("%s: Unexpected interrupt 0x%08x.\n",
3108 mmc_hostname(host->mmc), unexpected);
3109 sdhci_dumpregs(host);
3110 }
3111
3112 return result;
3113}
3114
3115static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
3116{
3117 struct sdhci_host *host = dev_id;
3118 unsigned long flags;
3119 u32 isr;
3120
3121 spin_lock_irqsave(&host->lock, flags);
3122 isr = host->thread_isr;
3123 host->thread_isr = 0;
3124 spin_unlock_irqrestore(&host->lock, flags);
3125
3126 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3127 struct mmc_host *mmc = host->mmc;
3128
3129 mmc->ops->card_event(mmc);
3130 mmc_detect_change(mmc, msecs_to_jiffies(200));
3131 }
3132
3133 if (isr & SDHCI_INT_CARD_INT) {
3134 sdio_run_irqs(host->mmc);
3135
3136 spin_lock_irqsave(&host->lock, flags);
3137 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
3138 sdhci_enable_sdio_irq_nolock(host, true);
3139 spin_unlock_irqrestore(&host->lock, flags);
3140 }
3141
3142 return isr ? IRQ_HANDLED : IRQ_NONE;
3143}
3144
/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/
3150
3151#ifdef CONFIG_PM
3152
3153static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
3154{
3155 return mmc_card_is_removable(host->mmc) &&
3156 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3157 !mmc_can_gpio_cd(host->mmc);
3158}
3159
/*
 * To enable wakeup events, the corresponding events have to be enabled in
 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
 * Generation' in the SD Host Controller Standard Specification.
 * It is useless to restore SDHCI_INT_ENABLE state in
 * sdhci_disable_irq_wakeups() since it will be set by
 * sdhci_enable_card_detection() or sdhci_init() later.
 */
3168static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
3169{
3170 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
3171 SDHCI_WAKE_ON_INT;
3172 u32 irq_val = 0;
3173 u8 wake_val = 0;
3174 u8 val;
3175
3176 if (sdhci_cd_irq_can_wakeup(host)) {
3177 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
3178 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
3179 }
3180
3181 if (mmc_card_wake_sdio_irq(host->mmc)) {
3182 wake_val |= SDHCI_WAKE_ON_INT;
3183 irq_val |= SDHCI_INT_CARD_INT;
3184 }
3185
3186 if (!irq_val)
3187 return false;
3188
3189 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3190 val &= ~mask;
3191 val |= wake_val;
3192 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3193
3194 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
3195
3196 host->irq_wake_enabled = !enable_irq_wake(host->irq);
3197
3198 return host->irq_wake_enabled;
3199}
3200
3201static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3202{
3203 u8 val;
3204 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3205 | SDHCI_WAKE_ON_INT;
3206
3207 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3208 val &= ~mask;
3209 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3210
3211 disable_irq_wake(host->irq);
3212
3213 host->irq_wake_enabled = false;
3214}
3215
3216int sdhci_suspend_host(struct sdhci_host *host)
3217{
3218 sdhci_disable_card_detection(host);
3219
3220 mmc_retune_timer_stop(host->mmc);
3221
3222 if (!device_may_wakeup(mmc_dev(host->mmc)) ||
3223 !sdhci_enable_irq_wakeups(host)) {
3224 host->ier = 0;
3225 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3226 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3227 free_irq(host->irq, host);
3228 }
3229
3230 return 0;
3231}
3233EXPORT_SYMBOL_GPL(sdhci_suspend_host);
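
/*
 * Illustrative sketch (hypothetical foo_* glue driver): sdhci_suspend_host()
 * and sdhci_resume_host() are meant to be called from the glue driver's
 * dev_pm_ops, e.g.:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_suspend_host(host);
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_resume_host(host);
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 */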
3234
3235int sdhci_resume_host(struct sdhci_host *host)
3236{
3237 struct mmc_host *mmc = host->mmc;
3238 int ret = 0;
3239
3240 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3241 if (host->ops->enable_dma)
3242 host->ops->enable_dma(host);
3243 }
3244
3245 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
3246 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
		/* Card keeps power but host controller does not */
3248 sdhci_init(host, 0);
3249 host->pwr = 0;
3250 host->clock = 0;
3251 mmc->ops->set_ios(mmc, &mmc->ios);
3252 } else {
3253 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
3254 mmiowb();
3255 }
3256
3257 if (host->irq_wake_enabled) {
3258 sdhci_disable_irq_wakeups(host);
3259 } else {
3260 ret = request_threaded_irq(host->irq, sdhci_irq,
3261 sdhci_thread_irq, IRQF_SHARED,
3262 mmc_hostname(host->mmc), host);
3263 if (ret)
3264 return ret;
3265 }
3266
3267 sdhci_enable_card_detection(host);
3268
3269 return ret;
3270}
3272EXPORT_SYMBOL_GPL(sdhci_resume_host);
3273
3274int sdhci_runtime_suspend_host(struct sdhci_host *host)
3275{
3276 unsigned long flags;
3277
3278 mmc_retune_timer_stop(host->mmc);
3279
3280 spin_lock_irqsave(&host->lock, flags);
3281 host->ier &= SDHCI_INT_CARD_INT;
3282 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3283 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3284 spin_unlock_irqrestore(&host->lock, flags);
3285
3286 synchronize_hardirq(host->irq);
3287
3288 spin_lock_irqsave(&host->lock, flags);
3289 host->runtime_suspended = true;
3290 spin_unlock_irqrestore(&host->lock, flags);
3291
3292 return 0;
3293}
3294EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3295
3296int sdhci_runtime_resume_host(struct sdhci_host *host)
3297{
3298 struct mmc_host *mmc = host->mmc;
3299 unsigned long flags;
3300 int host_flags = host->flags;
3301
3302 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3303 if (host->ops->enable_dma)
3304 host->ops->enable_dma(host);
3305 }
3306
3307 sdhci_init(host, 0);
3308
3309 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3310 mmc->ios.power_mode != MMC_POWER_OFF) {
		/* Force clock and power re-program */
3312 host->pwr = 0;
3313 host->clock = 0;
3314 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3315 mmc->ops->set_ios(mmc, &mmc->ios);
3316
3317 if ((host_flags & SDHCI_PV_ENABLED) &&
3318 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3319 spin_lock_irqsave(&host->lock, flags);
3320 sdhci_enable_preset_value(host, true);
3321 spin_unlock_irqrestore(&host->lock, flags);
3322 }
3323
3324 if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3325 mmc->ops->hs400_enhanced_strobe)
3326 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3327 }
3328
3329 spin_lock_irqsave(&host->lock, flags);
3330
3331 host->runtime_suspended = false;
3332
	/* Enable SDIO IRQ */
3334 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
3335 sdhci_enable_sdio_irq_nolock(host, true);
3336
	/* Enable Card Detection */
3338 sdhci_enable_card_detection(host);
3339
3340 spin_unlock_irqrestore(&host->lock, flags);
3341
3342 return 0;
3343}
3344EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
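
/*
 * A minimal runtime PM wiring, again with hypothetical foo_* names (a real
 * glue driver would typically also gate its clocks around these calls):
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *		int ret = sdhci_runtime_suspend_host(host);
 *
 *		if (!ret)
 *			clk_disable_unprepare(foo->clk);
 *		return ret;
 *	}
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		clk_prepare_enable(foo->clk);
 *		return sdhci_runtime_resume_host(host);
 *	}
 */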
3345
#endif /* CONFIG_PM */
3347
/*****************************************************************************\
 *                                                                           *
 * Command Queue Engine (CQE)                                                *
 *                                                                           *
\*****************************************************************************/
3353
3354void sdhci_cqe_enable(struct mmc_host *mmc)
3355{
3356 struct sdhci_host *host = mmc_priv(mmc);
3357 unsigned long flags;
3358 u8 ctrl;
3359
3360 spin_lock_irqsave(&host->lock, flags);
3361
3362 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3363 ctrl &= ~SDHCI_CTRL_DMA_MASK;
	/*
	 * Hosts from v4.10 onwards support the ADMA3 DMA type.  ADMA3
	 * uses integrated descriptors, which suit command queuing since
	 * both command and transfer descriptors are fetched together.
	 */
3369 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3))
3370 ctrl |= SDHCI_CTRL_ADMA3;
3371 else if (host->flags & SDHCI_USE_64_BIT_DMA)
3372 ctrl |= SDHCI_CTRL_ADMA64;
3373 else
3374 ctrl |= SDHCI_CTRL_ADMA32;
3375 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3376
3377 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3378 SDHCI_BLOCK_SIZE);
3379
	/* Set maximum timeout */
3381 sdhci_set_timeout(host, NULL);
3382
3383 host->ier = host->cqe_ier;
3384
3385 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3386 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3387
3388 host->cqe_on = true;
3389
3390 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3391 mmc_hostname(mmc), host->ier,
3392 sdhci_readl(host, SDHCI_INT_STATUS));
3393
3394 mmiowb();
3395 spin_unlock_irqrestore(&host->lock, flags);
3396}
3397EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3398
3399void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3400{
3401 struct sdhci_host *host = mmc_priv(mmc);
3402 unsigned long flags;
3403
3404 spin_lock_irqsave(&host->lock, flags);
3405
3406 sdhci_set_default_irqs(host);
3407
3408 host->cqe_on = false;
3409
3410 if (recovery) {
3411 sdhci_do_reset(host, SDHCI_RESET_CMD);
3412 sdhci_do_reset(host, SDHCI_RESET_DATA);
3413 }
3414
3415 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3416 mmc_hostname(mmc), host->ier,
3417 sdhci_readl(host, SDHCI_INT_STATUS));
3418
3419 mmiowb();
3420 spin_unlock_irqrestore(&host->lock, flags);
3421}
3422EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3423
3424bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3425 int *data_error)
3426{
3427 u32 mask;
3428
3429 if (!host->cqe_on)
3430 return false;
3431
3432 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3433 *cmd_error = -EILSEQ;
3434 else if (intmask & SDHCI_INT_TIMEOUT)
3435 *cmd_error = -ETIMEDOUT;
3436 else
3437 *cmd_error = 0;
3438
3439 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3440 *data_error = -EILSEQ;
3441 else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3442 *data_error = -ETIMEDOUT;
3443 else if (intmask & SDHCI_INT_ADMA_ERROR)
3444 *data_error = -EIO;
3445 else
3446 *data_error = 0;
3447
	/* Clear selected interrupts. */
3449 mask = intmask & host->cqe_ier;
3450 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3451
3452 if (intmask & SDHCI_INT_BUS_POWER)
3453 pr_err("%s: Card is consuming too much power!\n",
3454 mmc_hostname(host->mmc));
3455
3456 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3457 if (intmask) {
3458 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3459 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3460 mmc_hostname(host->mmc), intmask);
3461 sdhci_dumpregs(host);
3462 }
3463
3464 return true;
3465}
3466EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
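
/*
 * A minimal sketch of how a glue driver feeds its interrupts through
 * sdhci_cqe_irq() and on to the CQHCI library (hypothetical foo_* names;
 * cqhci_irq() is the entry point declared in cqhci.h):
 *
 *	static u32 foo_cqhci_irq(struct sdhci_host *host, u32 intmask)
 *	{
 *		int cmd_error = 0;
 *		int data_error = 0;
 *
 *		if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
 *			return intmask;
 *
 *		cqhci_irq(host->mmc, intmask, cmd_error, data_error);
 *
 *		return 0;
 *	}
 *
 * with .irq = foo_cqhci_irq in the variant's sdhci_ops, and
 * .enable = sdhci_cqe_enable, .disable = sdhci_cqe_disable in its
 * cqhci_host_ops.
 */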
3467
/*****************************************************************************\
 *                                                                           *
 * Device allocation/registration                                            *
 *                                                                           *
\*****************************************************************************/
3473
3474struct sdhci_host *sdhci_alloc_host(struct device *dev,
3475 size_t priv_size)
3476{
3477 struct mmc_host *mmc;
3478 struct sdhci_host *host;
3479
3480 WARN_ON(dev == NULL);
3481
3482 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3483 if (!mmc)
3484 return ERR_PTR(-ENOMEM);
3485
3486 host = mmc_priv(mmc);
3487 host->mmc = mmc;
3488 host->mmc_host_ops = sdhci_ops;
3489 mmc->ops = &host->mmc_host_ops;
3490
3491 host->flags = SDHCI_SIGNALING_330;
3492
3493 host->cqe_ier = SDHCI_CQE_INT_MASK;
3494 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
3495
3496 host->tuning_delay = -1;
3497
3498 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
3499
	/*
	 * The DMA table descriptor count is calculated as the maximum
	 * number of segments times 2, to allow for an alignment
	 * descriptor for each segment, plus 1 for a nop end descriptor.
	 */
3505 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
3506
3507 return host;
3508}
3510EXPORT_SYMBOL_GPL(sdhci_alloc_host);
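
/*
 * Typical probe/remove flow for a platform glue driver (a rough sketch with
 * hypothetical names; real drivers usually go through sdhci-pltfm instead):
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(struct foo_priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	host->hw_name = "foo";
 *	host->ops = &foo_sdhci_ops;
 *	host->irq = platform_get_irq(pdev, 0);
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
 *	ret = sdhci_add_host(host);
 *	...
 *	sdhci_remove_host(host, 0);
 *	sdhci_free_host(host);
 */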
3511
3512static int sdhci_set_dma_mask(struct sdhci_host *host)
3513{
3514 struct mmc_host *mmc = host->mmc;
3515 struct device *dev = mmc_dev(mmc);
3516 int ret = -EINVAL;
3517
3518 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
3519 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3520
	/* Try 64-bit mask if hardware is capable of it */
3522 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3523 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3524 if (ret) {
3525 pr_warn("%s: Failed to set 64-bit DMA mask.\n",
3526 mmc_hostname(mmc));
3527 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3528 }
3529 }
3530
	/* 32-bit mask as default & fallback */
3532 if (ret) {
3533 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3534 if (ret)
3535 pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3536 mmc_hostname(mmc));
3537 }
3538
3539 return ret;
3540}
3541
3542void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
3543{
3544 u16 v;
3545 u64 dt_caps_mask = 0;
3546 u64 dt_caps = 0;
3547
3548 if (host->read_caps)
3549 return;
3550
3551 host->read_caps = true;
3552
3553 if (debug_quirks)
3554 host->quirks = debug_quirks;
3555
3556 if (debug_quirks2)
3557 host->quirks2 = debug_quirks2;
3558
3559 sdhci_do_reset(host, SDHCI_RESET_ALL);
3560
3561 if (host->v4_mode)
3562 sdhci_do_enable_v4_mode(host);
3563
3564 of_property_read_u64(mmc_dev(host->mmc)->of_node,
3565 "sdhci-caps-mask", &dt_caps_mask);
3566 of_property_read_u64(mmc_dev(host->mmc)->of_node,
3567 "sdhci-caps", &dt_caps);
3568
3569 v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
3570 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
3571
3572 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
3573 return;
3574
3575 if (caps) {
3576 host->caps = *caps;
3577 } else {
3578 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
3579 host->caps &= ~lower_32_bits(dt_caps_mask);
3580 host->caps |= lower_32_bits(dt_caps);
3581 }
3582
3583 if (host->version < SDHCI_SPEC_300)
3584 return;
3585
3586 if (caps1) {
3587 host->caps1 = *caps1;
3588 } else {
3589 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
3590 host->caps1 &= ~upper_32_bits(dt_caps_mask);
3591 host->caps1 |= upper_32_bits(dt_caps);
3592 }
3593}
3594EXPORT_SYMBOL_GPL(__sdhci_read_caps);
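
/*
 * The sdhci-caps-mask/sdhci-caps device-tree properties read above let a
 * board correct a controller that advertises broken capability bits.  Both
 * are 64-bit values: the upper 32 bits are applied to SDHCI_CAPABILITIES_1
 * and the lower 32 bits to SDHCI_CAPABILITIES.  For example, to mask out an
 * advertised-but-broken DDR50 (bit 2 of CAPABILITIES_1):
 *
 *	sdhci-caps-mask = <0x00000004 0x00000000>;
 */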
3595
3596static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
3597{
3598 struct mmc_host *mmc = host->mmc;
3599 unsigned int max_blocks;
3600 unsigned int bounce_size;
3601 int ret;
3602
	/*
	 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
	 * has diminishing returns, probably because SD/MMC cards are
	 * usually optimized to handle this size of requests.
	 */
3608 bounce_size = SZ_64K;
3609
	/*
	 * Adjust downwards to the maximum request size if this is less
	 * than our segment size, else hammer down the maximum request
	 * size to the maximum buffer size.
	 */
3614 if (mmc->max_req_size < bounce_size)
3615 bounce_size = mmc->max_req_size;
3616 max_blocks = bounce_size / 512;
3617
	/*
	 * When we just support one segment, we can get significant
	 * speedups by the help of a bounce buffer to group scattered
	 * reads/writes together.
	 */
3623 host->bounce_buffer = devm_kmalloc(mmc->parent,
3624 bounce_size,
3625 GFP_KERNEL);
3626 if (!host->bounce_buffer) {
3627 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
3628 mmc_hostname(mmc),
3629 bounce_size);
		/*
		 * Exiting with zero here makes sure we proceed with
		 * mmc->max_segs == 1.
		 */
3634 return;
3635 }
3636
3637 host->bounce_addr = dma_map_single(mmc->parent,
3638 host->bounce_buffer,
3639 bounce_size,
3640 DMA_BIDIRECTIONAL);
3641 ret = dma_mapping_error(mmc->parent, host->bounce_addr);
3642 if (ret)
		/* Again fall back to max_segs == 1 */
3644 return;
3645 host->bounce_buffer_size = bounce_size;
3646
	/* Lie about this since we're bouncing */
3648 mmc->max_segs = max_blocks;
3649 mmc->max_seg_size = bounce_size;
3650 mmc->max_req_size = bounce_size;
3651
3652 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
3653 mmc_hostname(mmc), max_blocks, bounce_size);
3654}
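
/*
 * Worked example: with the default 64KB bounce buffer and 512-byte blocks,
 * max_blocks = 65536 / 512 = 128, so the core sees one 64KB segment that
 * can gather up to 128 blocks per request.
 */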
3655
3656static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
3657{
	/*
	 * According to SD Host Controller spec v4.10, bit[27] added from
	 * version 4.10 in the Capabilities Register is used as 64-bit
	 * System Address support for V4 mode.
	 */
3663 if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
3664 return host->caps & SDHCI_CAN_64BIT_V4;
3665
3666 return host->caps & SDHCI_CAN_64BIT;
3667}
3668
3669int sdhci_setup_host(struct sdhci_host *host)
3670{
3671 struct mmc_host *mmc;
3672 u32 max_current_caps;
3673 unsigned int ocr_avail;
3674 unsigned int override_timeout_clk;
3675 u32 max_clk;
3676 int ret;
3677
3678 WARN_ON(host == NULL);
3679 if (host == NULL)
3680 return -EINVAL;
3681
3682 mmc = host->mmc;
3683
	/*
	 * If there are external regulators, get them.  Note this must be
	 * done early, before resetting the host and reading the
	 * capabilities, so that the host can take the appropriate action
	 * if regulators are not available.
	 */
3690 ret = mmc_regulator_get_supply(mmc);
3691 if (ret)
3692 return ret;
3693
3694 DBG("Version: 0x%08x | Present: 0x%08x\n",
3695 sdhci_readw(host, SDHCI_HOST_VERSION),
3696 sdhci_readl(host, SDHCI_PRESENT_STATE));
3697 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n",
3698 sdhci_readl(host, SDHCI_CAPABILITIES),
3699 sdhci_readl(host, SDHCI_CAPABILITIES_1));
3700
3701 sdhci_read_caps(host);
3702
3703 override_timeout_clk = host->timeout_clk;
3704
3705 if (host->version > SDHCI_SPEC_420) {
3706 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
3707 mmc_hostname(mmc), host->version);
3708 }
3709
3710 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
3711 host->flags |= SDHCI_USE_SDMA;
3712 else if (!(host->caps & SDHCI_CAN_DO_SDMA))
3713 DBG("Controller doesn't have SDMA capability\n");
3714 else
3715 host->flags |= SDHCI_USE_SDMA;
3716
3717 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
3718 (host->flags & SDHCI_USE_SDMA)) {
3719 DBG("Disabling DMA as it is marked broken\n");
3720 host->flags &= ~SDHCI_USE_SDMA;
3721 }
3722
3723 if ((host->version >= SDHCI_SPEC_200) &&
3724 (host->caps & SDHCI_CAN_DO_ADMA2))
3725 host->flags |= SDHCI_USE_ADMA;
3726
3727 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
3728 (host->flags & SDHCI_USE_ADMA)) {
3729 DBG("Disabling ADMA as it is marked broken\n");
3730 host->flags &= ~SDHCI_USE_ADMA;
3731 }
3732
	/*
	 * It is assumed that a 64-bit capable device has set a 64-bit DMA
	 * mask and *must* do 64-bit DMA.  A driver has the opportunity to
	 * change that during the first call to ->enable_dma().  Similarly
	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
	 * implement.
	 */
3740 if (sdhci_can_64bit_dma(host))
3741 host->flags |= SDHCI_USE_64_BIT_DMA;
3742
3743 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3744 ret = sdhci_set_dma_mask(host);
3745
3746 if (!ret && host->ops->enable_dma)
3747 ret = host->ops->enable_dma(host);
3748
3749 if (ret) {
3750 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
3751 mmc_hostname(mmc));
3752 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
3753
3754 ret = 0;
3755 }
3756 }
3757
	/* SDMA does not support 64-bit DMA if v4 mode is not set */
3759 if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
3760 host->flags &= ~SDHCI_USE_SDMA;
3761
3762 if (host->flags & SDHCI_USE_ADMA) {
3763 dma_addr_t dma;
3764 void *buf;
3765
3766 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3767 host->adma_table_sz = host->adma_table_cnt *
3768 SDHCI_ADMA2_64_DESC_SZ(host);
3769 host->desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
3770 } else {
3771 host->adma_table_sz = host->adma_table_cnt *
3772 SDHCI_ADMA2_32_DESC_SZ;
3773 host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
3774 }
3775
3776 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
3777
		/*
		 * Use a zeroing allocation so the reserved high 32 bits of
		 * 128-bit descriptors never need to be written.
		 */
3781 buf = dma_alloc_coherent(mmc_dev(mmc),
3782 host->align_buffer_sz + host->adma_table_sz,
3783 &dma, GFP_KERNEL);
3784 if (!buf) {
3785 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
3786 mmc_hostname(mmc));
3787 host->flags &= ~SDHCI_USE_ADMA;
3788 } else if ((dma + host->align_buffer_sz) &
3789 (SDHCI_ADMA2_DESC_ALIGN - 1)) {
3790 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
3791 mmc_hostname(mmc));
3792 host->flags &= ~SDHCI_USE_ADMA;
3793 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3794 host->adma_table_sz, buf, dma);
3795 } else {
3796 host->align_buffer = buf;
3797 host->align_addr = dma;
3798
3799 host->adma_table = buf + host->align_buffer_sz;
3800 host->adma_addr = dma + host->align_buffer_sz;
3801 }
3802 }
3803
	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
3809 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
3810 host->dma_mask = DMA_BIT_MASK(64);
3811 mmc_dev(mmc)->dma_mask = &host->dma_mask;
3812 }
3813
3814 if (host->version >= SDHCI_SPEC_300)
3815 host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
3816 >> SDHCI_CLOCK_BASE_SHIFT;
3817 else
3818 host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
3819 >> SDHCI_CLOCK_BASE_SHIFT;
3820
3821 host->max_clk *= 1000000;
3822 if (host->max_clk == 0 || host->quirks &
3823 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
3824 if (!host->ops->get_max_clock) {
3825 pr_err("%s: Hardware doesn't specify base clock frequency.\n",
3826 mmc_hostname(mmc));
3827 ret = -ENODEV;
3828 goto undma;
3829 }
3830 host->max_clk = host->ops->get_max_clock(host);
3831 }
3832
	/*
	 * In case of Host Controller v3.00, find out whether the clock
	 * multiplier is supported.
	 */
3837 host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
3838 SDHCI_CLOCK_MUL_SHIFT;
3839
	/*
	 * In case the value in the Clock Multiplier field is 0, then
	 * programmable clock mode is not supported.  Otherwise the actual
	 * clock multiplier is one more than the value of the Clock
	 * Multiplier field in the Capabilities Register.
	 */
3846 if (host->clk_mul)
3847 host->clk_mul += 1;
3848
	/*
	 * Set host parameters.
	 */
3852 max_clk = host->max_clk;
3853
3854 if (host->ops->get_min_clock)
3855 mmc->f_min = host->ops->get_min_clock(host);
3856 else if (host->version >= SDHCI_SPEC_300) {
3857 if (host->clk_mul) {
3858 mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
3859 max_clk = host->max_clk * host->clk_mul;
3860 } else
3861 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3862 } else
3863 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3864
3865 if (!mmc->f_max || mmc->f_max > max_clk)
3866 mmc->f_max = max_clk;
3867
3868 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3869 host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
3870 SDHCI_TIMEOUT_CLK_SHIFT;
3871
3872 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
3873 host->timeout_clk *= 1000;
3874
3875 if (host->timeout_clk == 0) {
3876 if (!host->ops->get_timeout_clock) {
3877 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
3878 mmc_hostname(mmc));
3879 ret = -ENODEV;
3880 goto undma;
3881 }
3882
3883 host->timeout_clk =
3884 DIV_ROUND_UP(host->ops->get_timeout_clock(host),
3885 1000);
3886 }
3887
3888 if (override_timeout_clk)
3889 host->timeout_clk = override_timeout_clk;
3890
3891 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
3892 host->ops->get_max_timeout_count(host) : 1 << 27;
3893 mmc->max_busy_timeout /= host->timeout_clk;
3894 }
3895
3896 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
3897 !host->ops->get_max_timeout_count)
3898 mmc->max_busy_timeout = 0;
3899
3900 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
3901 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
3902
3903 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
3904 host->flags |= SDHCI_AUTO_CMD12;
3905
	/*
	 * For v3 mode, Auto-CMD23 only works in ADMA or PIO.
	 * For v4 mode, SDMA may use Auto-CMD23 as well.
	 */
3910 if ((host->version >= SDHCI_SPEC_300) &&
3911 ((host->flags & SDHCI_USE_ADMA) ||
3912 !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
3913 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
3914 host->flags |= SDHCI_AUTO_CMD23;
3915 DBG("Auto-CMD23 available\n");
3916 } else {
3917 DBG("Auto-CMD23 unavailable\n");
3918 }
3919
	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out.  Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
3927 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
3928 mmc->caps |= MMC_CAP_4_BIT_DATA;
3929
3930 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
3931 mmc->caps &= ~MMC_CAP_CMD23;
3932
3933 if (host->caps & SDHCI_CAN_DO_HISPD)
3934 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3935
3936 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3937 mmc_card_is_removable(mmc) &&
3938 mmc_gpio_get_cd(host->mmc) < 0)
3939 mmc->caps |= MMC_CAP_NEEDS_POLL;
3940
3941 if (!IS_ERR(mmc->supply.vqmmc)) {
3942 ret = regulator_enable(mmc->supply.vqmmc);
3943
		/* If vqmmc provides no 1.8V signalling, we can't support UHS */
3945 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
3946 1950000))
3947 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
3948 SDHCI_SUPPORT_SDR50 |
3949 SDHCI_SUPPORT_DDR50);
3950
		/* In the eMMC case vqmmc might be a fixed 1.8V regulator */
3952 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
3953 3600000))
3954 host->flags &= ~SDHCI_SIGNALING_330;
3955
3956 if (ret) {
3957 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
3958 mmc_hostname(mmc), ret);
3959 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
3960 }
3961 }
3962
3963 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
3964 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3965 SDHCI_SUPPORT_DDR50);
		/*
		 * The SDHCI controller in a SoC might support HS200/HS400
		 * (indicated by the mmc-hs200-1_8v/mmc-hs400-1_8v dt
		 * properties), but if the board is modeled such that the
		 * IO lines are not connected to 1.8V then HS200/HS400
		 * cannot be supported.  Disable HS200/HS400 if the board
		 * does not have 1.8V connected to the IO lines. (This
		 * also applies to the other 1.8V modes.)
		 */
3974 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
3975 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
3976 }
3977
	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
3979 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3980 SDHCI_SUPPORT_DDR50))
3981 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
3982
	/* SDR104 support also implies SDR50 support */
3984 if (host->caps1 & SDHCI_SUPPORT_SDR104) {
3985 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3986
		/*
		 * SD3.0: SDR104 is supported, so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
3989 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
3990 mmc->caps2 |= MMC_CAP2_HS200;
3991 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
3992 mmc->caps |= MMC_CAP_UHS_SDR50;
3993 }
3994
3995 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
3996 (host->caps1 & SDHCI_SUPPORT_HS400))
3997 mmc->caps2 |= MMC_CAP2_HS400;
3998
3999 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
4000 (IS_ERR(mmc->supply.vqmmc) ||
4001 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
4002 1300000)))
4003 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
4004
4005 if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
4006 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
4007 mmc->caps |= MMC_CAP_UHS_DDR50;
4008
	/* Does the host need tuning for SDR50? */
4010 if (host->caps1 & SDHCI_USE_SDR50_TUNING)
4011 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
4012
	/* Driver Type(s) (A, C, D) supported by the host */
4014 if (host->caps1 & SDHCI_DRIVER_TYPE_A)
4015 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
4016 if (host->caps1 & SDHCI_DRIVER_TYPE_C)
4017 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
4018 if (host->caps1 & SDHCI_DRIVER_TYPE_D)
4019 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
4020
	/* Initial value for re-tuning timer count */
4022 host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
4023 SDHCI_RETUNING_TIMER_COUNT_SHIFT;
4024
	/*
	 * In case the Re-tuning Timer is not disabled, the actual
	 * re-tuning period will be 2 ^ (n - 1) seconds.
	 */
4029 if (host->tuning_count)
4030 host->tuning_count = 1 << (host->tuning_count - 1);
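	/* e.g. a Capabilities field value of n = 4 gives 2^3 = 8 seconds */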
4031
	/* Re-tuning mode supported by the Host Controller */
4033 host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
4034 SDHCI_RETUNING_MODE_SHIFT;
4035
4036 ocr_avail = 0;
4037
	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, the Host Driver should set XPC to 1.
	 * Also the value is meaningful only if Voltage Support in the
	 * Capabilities register is set. The actual current value is 4
	 * times the register value.
	 */
4045 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
4046 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
4047 int curr = regulator_get_current_limit(mmc->supply.vmmc);
4048 if (curr > 0) {
4049
			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr / 1000;  /* convert uA to mA */
			curr = curr / SDHCI_MAX_CURRENT_MULTIPLIER;
4053
4054 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
4055 max_current_caps =
4056 (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
4057 (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
4058 (curr << SDHCI_MAX_CURRENT_180_SHIFT);
4059 }
4060 }
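
	/*
	 * Worked example: a 400000 uA regulator limit becomes 400 mA, i.e.
	 * 100 in the 4 mA units of SDHCI_MAX_CURRENT_MULTIPLIER, replicated
	 * into the 3.3V, 3.0V and 1.8V fields of max_current_caps.
	 */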
4061
4062 if (host->caps & SDHCI_CAN_VDD_330) {
4063 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
4064
4065 mmc->max_current_330 = ((max_current_caps &
4066 SDHCI_MAX_CURRENT_330_MASK) >>
4067 SDHCI_MAX_CURRENT_330_SHIFT) *
4068 SDHCI_MAX_CURRENT_MULTIPLIER;
4069 }
4070 if (host->caps & SDHCI_CAN_VDD_300) {
4071 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
4072
4073 mmc->max_current_300 = ((max_current_caps &
4074 SDHCI_MAX_CURRENT_300_MASK) >>
4075 SDHCI_MAX_CURRENT_300_SHIFT) *
4076 SDHCI_MAX_CURRENT_MULTIPLIER;
4077 }
4078 if (host->caps & SDHCI_CAN_VDD_180) {
4079 ocr_avail |= MMC_VDD_165_195;
4080
4081 mmc->max_current_180 = ((max_current_caps &
4082 SDHCI_MAX_CURRENT_180_MASK) >>
4083 SDHCI_MAX_CURRENT_180_SHIFT) *
4084 SDHCI_MAX_CURRENT_MULTIPLIER;
4085 }
4086
	/* If OCR set by host, use it instead. */
4088 if (host->ocr_mask)
4089 ocr_avail = host->ocr_mask;
4090
	/* If OCR set by external regulators, give it highest prio. */
4092 if (mmc->ocr_avail)
4093 ocr_avail = mmc->ocr_avail;
4094
4095 mmc->ocr_avail = ocr_avail;
4096 mmc->ocr_avail_sdio = ocr_avail;
4097 if (host->ocr_avail_sdio)
4098 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
4099 mmc->ocr_avail_sd = ocr_avail;
4100 if (host->ocr_avail_sd)
4101 mmc->ocr_avail_sd &= host->ocr_avail_sd;
4102 else
4103 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
4104 mmc->ocr_avail_mmc = ocr_avail;
4105 if (host->ocr_avail_mmc)
4106 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
4107
4108 if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
4110 mmc_hostname(mmc));
4111 ret = -ENODEV;
4112 goto unreg;
4113 }
4114
4115 if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
4116 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
4117 MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
4118 (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
4119 host->flags |= SDHCI_SIGNALING_180;
4120
4121 if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
4122 host->flags |= SDHCI_SIGNALING_120;
4123
4124 spin_lock_init(&host->lock);
4125
	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * is less anyway.
	 */
4131 mmc->max_req_size = 524288;
4132
	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
4137 if (host->flags & SDHCI_USE_ADMA) {
4138 mmc->max_segs = SDHCI_MAX_SEGS;
4139 } else if (host->flags & SDHCI_USE_SDMA) {
4140 mmc->max_segs = 1;
4141 if (swiotlb_max_segment()) {
4142 unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
4143 IO_TLB_SEGSIZE;
4144 mmc->max_req_size = min(mmc->max_req_size,
4145 max_req_size);
4146 }
4147 } else {
4148 mmc->max_segs = SDHCI_MAX_SEGS;
4149 }
4150
	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64KiB though.
	 */
4156 if (host->flags & SDHCI_USE_ADMA) {
4157 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
4158 mmc->max_seg_size = 65535;
4159 else
4160 mmc->max_seg_size = 65536;
4161 } else {
4162 mmc->max_seg_size = mmc->max_req_size;
4163 }
4164
	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
4169 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
4170 mmc->max_blk_size = 2;
4171 } else {
4172 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
4173 SDHCI_MAX_BLOCK_SHIFT;
4174 if (mmc->max_blk_size >= 3) {
4175 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
4176 mmc_hostname(mmc));
4177 mmc->max_blk_size = 0;
4178 }
4179 }
4180
	mmc->max_blk_size = 512 << mmc->max_blk_size;	/* 0 => 512, 1 => 1024, 2 => 2048 */
4182
	/*
	 * Maximum block count.
	 */
4186 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
4187
4188 if (mmc->max_segs == 1)
		/* This may alter mmc->*_blk_* parameters */
4190 sdhci_allocate_bounce_buffer(host);
4191
4192 return 0;
4193
4194unreg:
4195 if (!IS_ERR(mmc->supply.vqmmc))
4196 regulator_disable(mmc->supply.vqmmc);
4197undma:
4198 if (host->align_buffer)
4199 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4200 host->adma_table_sz, host->align_buffer,
4201 host->align_addr);
4202 host->adma_table = NULL;
4203 host->align_buffer = NULL;
4204
4205 return ret;
4206}
4207EXPORT_SYMBOL_GPL(sdhci_setup_host);
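
/*
 * sdhci_setup_host() and __sdhci_add_host() are split so that a glue driver
 * can adjust the mmc_host capabilities computed above (for example OCR
 * masks or DMA flags) between the two calls, before interrupts and the LED
 * are wired up; plain sdhci_add_host() below simply runs both back to back.
 */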
4208
4209void sdhci_cleanup_host(struct sdhci_host *host)
4210{
4211 struct mmc_host *mmc = host->mmc;
4212
4213 if (!IS_ERR(mmc->supply.vqmmc))
4214 regulator_disable(mmc->supply.vqmmc);
4215
4216 if (host->align_buffer)
4217 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4218 host->adma_table_sz, host->align_buffer,
4219 host->align_addr);
4220 host->adma_table = NULL;
4221 host->align_buffer = NULL;
4222}
4223EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
4224
4225int __sdhci_add_host(struct sdhci_host *host)
4226{
4227 struct mmc_host *mmc = host->mmc;
4228 int ret;
4229
	/*
	 * Init tasklets.
	 */
4233 tasklet_init(&host->finish_tasklet,
4234 sdhci_tasklet_finish, (unsigned long)host);
4235
4236 timer_setup(&host->timer, sdhci_timeout_timer, 0);
4237 timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
4238
4239 init_waitqueue_head(&host->buf_ready_int);
4240
4241 sdhci_init(host, 0);
4242
4243 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
4244 IRQF_SHARED, mmc_hostname(mmc), host);
4245 if (ret) {
4246 pr_err("%s: Failed to request IRQ %d: %d\n",
4247 mmc_hostname(mmc), host->irq, ret);
4248 goto untasklet;
4249 }
4250
4251 ret = sdhci_led_register(host);
4252 if (ret) {
4253 pr_err("%s: Failed to register LED device: %d\n",
4254 mmc_hostname(mmc), ret);
4255 goto unirq;
4256 }
4257
4258 mmiowb();
4259
4260 ret = mmc_add_host(mmc);
4261 if (ret)
4262 goto unled;
4263
4264 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
4265 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
4266 (host->flags & SDHCI_USE_ADMA) ?
4267 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
4268 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
4269
4270 sdhci_enable_card_detection(host);
4271
4272 return 0;
4273
4274unled:
4275 sdhci_led_unregister(host);
4276unirq:
4277 sdhci_do_reset(host, SDHCI_RESET_ALL);
4278 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4279 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4280 free_irq(host->irq, host);
4281untasklet:
4282 tasklet_kill(&host->finish_tasklet);
4283
4284 return ret;
4285}
4286EXPORT_SYMBOL_GPL(__sdhci_add_host);
4287
4288int sdhci_add_host(struct sdhci_host *host)
4289{
4290 int ret;
4291
4292 ret = sdhci_setup_host(host);
4293 if (ret)
4294 return ret;
4295
4296 ret = __sdhci_add_host(host);
4297 if (ret)
4298 goto cleanup;
4299
4300 return 0;
4301
4302cleanup:
4303 sdhci_cleanup_host(host);
4304
4305 return ret;
4306}
4307EXPORT_SYMBOL_GPL(sdhci_add_host);
4308
4309void sdhci_remove_host(struct sdhci_host *host, int dead)
4310{
4311 struct mmc_host *mmc = host->mmc;
4312 unsigned long flags;
4313
4314 if (dead) {
4315 spin_lock_irqsave(&host->lock, flags);
4316
4317 host->flags |= SDHCI_DEVICE_DEAD;
4318
4319 if (sdhci_has_requests(host)) {
			pr_err("%s: Controller removed during transfer!\n",
			       mmc_hostname(mmc));
4322 sdhci_error_out_mrqs(host, -ENOMEDIUM);
4323 }
4324
4325 spin_unlock_irqrestore(&host->lock, flags);
4326 }
4327
4328 sdhci_disable_card_detection(host);
4329
4330 mmc_remove_host(mmc);
4331
4332 sdhci_led_unregister(host);
4333
4334 if (!dead)
4335 sdhci_do_reset(host, SDHCI_RESET_ALL);
4336
4337 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4338 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4339 free_irq(host->irq, host);
4340
4341 del_timer_sync(&host->timer);
4342 del_timer_sync(&host->data_timer);
4343
4344 tasklet_kill(&host->finish_tasklet);
4345
4346 if (!IS_ERR(mmc->supply.vqmmc))
4347 regulator_disable(mmc->supply.vqmmc);
4348
4349 if (host->align_buffer)
4350 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4351 host->adma_table_sz, host->align_buffer,
4352 host->align_addr);
4353
4354 host->adma_table = NULL;
4355 host->align_buffer = NULL;
4356}
4358EXPORT_SYMBOL_GPL(sdhci_remove_host);
4359
4360void sdhci_free_host(struct sdhci_host *host)
4361{
4362 mmc_free_host(host->mmc);
4363}
4365EXPORT_SYMBOL_GPL(sdhci_free_host);
4366
/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/
4372
4373static int __init sdhci_drv_init(void)
4374{
4375 pr_info(DRIVER_NAME
4376 ": Secure Digital Host Controller Interface driver\n");
4377 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
4378
4379 return 0;
4380}
4381
4382static void __exit sdhci_drv_exit(void)
4383{
4384}
4385
4386module_init(sdhci_drv_init);
4387module_exit(sdhci_drv_exit);
4388
4389module_param(debug_quirks, uint, 0444);
4390module_param(debug_quirks2, uint, 0444);
4391
4392MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
4393MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
4394MODULE_LICENSE("GPL");
4395
4396MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
4397MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
4398