/*
 * Core driver for the SD / MMC / SDIO controller IP found in TMIO MFD
 * devices and compatible SoCs.
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/mmc/sdio.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/workqueue.h>

#include "tmio_mmc.h"

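/* Thin wrappers around the optional SoC-specific DMA ops (host->dma_ops) */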
static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
				      struct mmc_data *data)
{
	if (host->dma_ops)
		host->dma_ops->start(host, data);
}

static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
	if (host->dma_ops)
		host->dma_ops->enable(host, enable);
}

static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
					struct tmio_mmc_data *pdata)
{
	if (host->dma_ops) {
		host->dma_ops->request(host, pdata);
	} else {
		host->chan_tx = NULL;
		host->chan_rx = NULL;
	}
}

static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->dma_ops)
		host->dma_ops->release(host);
}

static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
	if (host->dma_ops)
		host->dma_ops->abort(host);
}

static inline void tmio_mmc_dataend_dma(struct tmio_mmc_host *host)
{
	if (host->dma_ops)
		host->dma_ops->dataend(host);
}

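/*
 * host->sdcard_irq_mask shadows the CTL_IRQ_MASK register; the helpers below
 * update the shadow and write it back in one place.
 */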
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
EXPORT_SYMBOL_GPL(tmio_mmc_enable_mmc_irqs);

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
EXPORT_SYMBOL_GPL(tmio_mmc_disable_mmc_irqs);

static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, ~i);
}

static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;
	return --host->sg_len;
}

#define CMDREQ_TIMEOUT	5000

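/*
 * Enabling the SDIO card interrupt takes a runtime PM reference so the
 * controller stays powered while the card may raise an IRQ; disabling it
 * drops the reference again via autosuspend.
 */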
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable && !host->sdio_irq_enabled) {
		u16 sdio_status;

		/* Keep the device powered while the SDIO IRQ is enabled */
		pm_runtime_get_sync(mmc_dev(mmc));

		host->sdio_irq_enabled = true;
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ;

		/* Clear obsolete interrupts before enabling */
		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS) & ~TMIO_SDIO_MASK_ALL;
		if (host->pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
			sdio_status |= TMIO_SDIO_SETBITS_MASK;
		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
	} else if (!enable && host->sdio_irq_enabled) {
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);

		host->sdio_irq_enabled = false;
		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));
	}
}

static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));

	/* only the non R-Car Gen2+ variants need this settle delay */
	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
		usleep_range(10000, 11000);

	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		usleep_range(10000, 11000);
	}
}

static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		usleep_range(10000, 11000);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));

	/* only the non R-Car Gen2+ variants need this settle delay */
	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
		usleep_range(10000, 11000);
}

static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
			       unsigned int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock == 0) {
		tmio_mmc_clk_stop(host);
		return;
	}

	if (host->clk_update)
		clock = host->clk_update(host, new_clock) / 512;
	else
		clock = host->mmc->f_min;

	for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1)
		clock <<= 1;

	/* 1/1 clock is an option on controllers with TMIO_MMC_CLK_ACTUAL */
	if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) && ((clk >> 22) & 0x1))
		clk |= 0xff;

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk >> 22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
			sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
		usleep_range(10000, 11000);

	tmio_mmc_clk_start(host);
}

static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	/* assert the SD (and, where present, SDIO) soft reset, then release it */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	usleep_range(10000, 11000);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	usleep_range(10000, 11000);

	if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
	}
}

static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * Is the request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), a .set_ios() call can preempt us, so we also
	 * have to check for IS_ERR(host->mrq).
	 */
	if (IS_ERR_OR_NULL(mrq) ||
	    time_is_after_jiffies(host->last_req_ts +
				  msecs_to_jiffies(CMDREQ_TIMEOUT))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		 "timeout waiting for hardware interrupt (CMD%u)\n",
		 mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	/* Ready for new calls */
	host->mrq = NULL;

	tmio_mmc_abort_dma(host);
	mmc_request_done(host->mmc, mrq);
}

/*
 * Bits set in the CTL_SD_CMD register to encode the response type and
 * transfer mode of a command.
 */
#define APP_CMD		0x0040
#define RESP_NONE	0x0300
#define RESP_R1		0x0400
#define RESP_R1B	0x0500
#define RESP_R2		0x0600
#define RESP_R3		0x0700
#define DATA_PRESENT	0x0800
#define TRANSFER_READ	0x1000
#define TRANSFER_MULTI	0x2000
#define SECURITY_CMD	0x4000
#define NO_CMD12_ISSUE	0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL */

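/*
 * Build the CTL_SD_CMD value for a command (response type, data direction,
 * multi-block handling), unmask the command IRQs and issue the command.
 */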
static int tmio_mmc_start_command(struct tmio_mmc_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;
	u32 irq_mask = TMIO_MASK_CMD;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:
	case MMC_RSP_R1_NO_CRC:
			   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, TMIO_STOP_SEC);
			c |= TRANSFER_MULTI;

			/*
			 * Disable auto CMD12 at IO_RW_EXTENDED and
			 * SET_BLOCK_COUNT when doing multiple block transfer
			 */
			if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) &&
			    (cmd->opcode == SD_IO_RW_EXTENDED || host->mrq->sbc))
				c |= NO_CMD12_ISSUE;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	tmio_mmc_enable_mmc_irqs(host, irq_mask);

	/* Fire off the command */
	sd_ctrl_write32_as_16_and_16(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}

static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
				   unsigned short *buf,
				   unsigned int count)
{
	int is_read = host->data->flags & MMC_DATA_READ;
	u8 *buf8;

	/*
	 * Transfer the data; some controllers have a 32-bit wide data port,
	 * the rest use a 16-bit port.
	 */
	if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) {
		u32 data = 0;
		u32 *buf32 = (u32 *)buf;

		if (is_read)
			sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, buf32,
					   count >> 2);
		else
			sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, buf32,
					    count >> 2);

		/* nothing left over if count was a multiple of 4 */
		if (!(count & 0x3))
			return;

		buf32 += count >> 2;
		count %= 4;

		if (is_read) {
			sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, &data, 1);
			memcpy(buf32, &data, count);
		} else {
			memcpy(&data, buf32, count);
			sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, &data, 1);
		}

		return;
	}

	if (is_read)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	/* if count was an even number, we are done */
	if (!(count & 0x1))
		return;

	/* if count was odd, one byte is left over */
	buf8 = (u8 *)(buf + (count >> 1));

	/*
	 * Handle the leftover byte. Note: this assumes the data port is
	 * little endian, i.e. the valid byte sits in the low half of the
	 * 16-bit word.
	 */
	if (is_read)
		*buf8 = sd_ctrl_read16(host, CTL_SD_DATA_PORT) & 0xff;
	else
		sd_ctrl_write16(host, CTL_SD_DATA_PORT, *buf8);
}

/*
 * PIO data IRQ handler: copy up to one block between the data port FIFO and
 * the current scatterlist segment, then advance to the next segment when the
 * current one is exhausted.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	tmio_mmc_transfer_data(host, buf, count);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);
}

static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);

		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}

/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* on error, report no bytes transferred */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * The controller auto-generates CMD12 for multi-block transfers (see
	 * CTL_STOP_INTERNAL_ACTION in tmio_mmc_start_command()), so the stop
	 * command supplied by the upper layers is not sent as-is; only
	 * CMD12 with argument 0 is supported here.
	 */
	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop && !host->mrq->sbc) {
		if (stop->opcode != MMC_STOP_TRANSMISSION || stop->arg)
			dev_err(&host->pdev->dev, "unsupported stop: CMD%u,0x%x. We did CMD12,0\n",
				stop->opcode, stop->arg);

		/* fill in the response from the auto CMD12 */
		stop->resp[0] = sd_ctrl_read16_and_16_as_32(host, CTL_RESPONSE);

		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0);
	}

	schedule_work(&host->done);
}
EXPORT_SYMBOL_GPL(tmio_mmc_do_data_irq);

static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
{
	struct mmc_data *data;

	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (stat & TMIO_STAT_CRCFAIL || stat & TMIO_STAT_STOPBIT_ERR ||
	    stat & TMIO_STAT_TXUNDERRUN)
		data->error = -EILSEQ;
	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
		bool done = false;

		/*
		 * For DMA writes, make sure the controller has really finished
		 * before completing the DMA transfer: depending on the
		 * variant, this is signalled by SCLKDIVEN being set or by
		 * CMD_BUSY being clear.
		 */
		if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
			if (status & TMIO_STAT_SCLKDIVEN)
				done = true;
		} else {
			if (!(status & TMIO_STAT_CMD_BUSY))
				done = true;
		}

		if (done) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tmio_mmc_dataend_dma(host);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_dataend_dma(host);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}

static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	/*
	 * The response is held in four 32-bit reads starting at CTL_RESPONSE;
	 * 136-bit responses additionally need their bytes re-packed below.
	 */
	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read16_and_16_as_32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if ((stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) ||
		 stat & TMIO_STAT_STOPBIT_ERR ||
		 stat & TMIO_STAT_CMD_IDX_ERR)
		cmd->error = -EILSEQ;

	/*
	 * If there is data to handle, enable the data IRQs here (or kick off
	 * DMA) and finish the request later in the data end handler; if there
	 * is no data or we hit a fatal error, finish now.
	 */
	if (host->data && (!cmd->error || cmd->error == -EILSEQ)) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx) {
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			} else {
				tmio_mmc_disable_mmc_irqs(host,
							  TMIO_MASK_READOP);
				tasklet_schedule(&host->dma_issue);
			}
		} else {
			if (host->force_pio || !host->chan_tx) {
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			} else {
				tmio_mmc_disable_mmc_irqs(host,
							  TMIO_MASK_WRITEOP);
				tasklet_schedule(&host->dma_issue);
			}
		}
	} else {
		schedule_work(&host->done);
	}

out:
	spin_unlock(&host->lock);
}

static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
				       int ireg, int status)
{
	struct mmc_host *mmc = host->mmc;

	/* Card insert / remove attempts */
	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
				      TMIO_STAT_CARD_REMOVE);
		if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
		     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
		    !work_pending(&mmc->detect.work))
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		return true;
	}

	return false;
}

static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg,
				  int status)
{
	/* Command completion */
	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CMDRESPEND |
				      TMIO_STAT_CMDTIMEOUT);
		tmio_mmc_cmd_irq(host, status);
		return true;
	}

	/* Data transfer */
	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
		tmio_mmc_pio_irq(host);
		return true;
	}

	/* Data transfer completion */
	if (ireg & TMIO_STAT_DATAEND) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_data_irq(host, status);
		return true;
	}

	return false;
}

static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, status;
	unsigned int sdio_status;

	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
		return;

	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;

	sdio_status = status & ~TMIO_SDIO_MASK_ALL;
	if (pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
		sdio_status |= TMIO_SDIO_SETBITS_MASK;

	sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
		mmc_signal_sdio_irq(mmc);
}

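/*
 * Top-level interrupt handler: read the card status and dispatch to the
 * card-detect, SD/MMC command/data and SDIO sub-handlers in turn.
 */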
irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	unsigned int ireg, status;

	status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
	ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;

	/* Clear the status except the interrupt status */
	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ);

	if (__tmio_mmc_card_detect_irq(host, ireg, status))
		return IRQ_HANDLED;
	if (__tmio_mmc_sdcard_irq(host, ireg, status))
		return IRQ_HANDLED;

	__tmio_mmc_sdio_irq(host);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(tmio_mmc_irq);

static int tmio_mmc_start_data(struct tmio_mmc_host *host,
			       struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4/8 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4 ||
	    host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4/8 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;
	host->force_pio = false;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}

static void tmio_mmc_hw_reset(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (host->hw_reset)
		host->hw_reset(host);
}

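/*
 * Tuning: try the tuning command twice at each tap, record which taps
 * returned clean data in host->taps, then let the SoC-specific
 * select_tuning() callback pick the tap to use.
 */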
static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int i, ret = 0;

	if (!host->init_tuning || !host->select_tuning)
		/* Tuning is not supported */
		goto out;

	host->tap_num = host->init_tuning(host);
	if (!host->tap_num)
		/* Tuning is not supported */
		goto out;

	if (host->tap_num * 2 >= sizeof(host->taps) * BITS_PER_BYTE) {
		dev_warn_once(&host->pdev->dev,
			      "Too many taps, skipping tuning. Please consider updating size of taps field of tmio_mmc_host\n");
		goto out;
	}

	bitmap_zero(host->taps, host->tap_num * 2);

	/* Issue the tuning command twice for each tap */
	for (i = 0; i < 2 * host->tap_num; i++) {
		if (host->prepare_tuning)
			host->prepare_tuning(host, i % host->tap_num);

		ret = mmc_send_tuning(mmc, opcode, NULL);
		if (ret && ret != -EILSEQ)
			goto out;
		if (ret == 0)
			set_bit(i, host->taps);

		usleep_range(1000, 1200);
	}

	ret = host->select_tuning(host);

out:
	if (ret < 0) {
		dev_warn(&host->pdev->dev, "Tuning procedure failed\n");
		tmio_mmc_hw_reset(mmc);
	}

	return ret;
}

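/*
 * Issue the next command of a request: SET_BLOCK_COUNT (sbc) first when one
 * is present, otherwise the main command with its data, and arm the command
 * timeout worker.
 */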
static void tmio_process_mrq(struct tmio_mmc_host *host,
			     struct mmc_request *mrq)
{
	struct mmc_command *cmd;
	int ret;

	if (mrq->sbc && host->cmd != mrq->sbc) {
		cmd = mrq->sbc;
	} else {
		cmd = mrq->cmd;
		if (mrq->data) {
			ret = tmio_mmc_start_data(host, mrq->data);
			if (ret)
				goto fail;
		}
	}

	ret = tmio_mmc_start_command(host, cmd);
	if (ret)
		goto fail;

	schedule_delayed_work(&host->delayed_reset_work,
			      msecs_to_jiffies(CMDREQ_TIMEOUT));
	return;

fail:
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(host->mmc, mrq);
}

/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_process_mrq(host, mrq);
}

static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	mrq = host->mrq;
	if (IS_ERR_OR_NULL(mrq)) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	/* If not SET_BLOCK_COUNT, clear old data */
	if (host->cmd != mrq->sbc) {
		host->cmd = NULL;
		host->data = NULL;
		host->mrq = NULL;
	}

	cancel_delayed_work(&host->delayed_reset_work);

	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->cmd->error || (mrq->data && mrq->data->error))
		tmio_mmc_abort_dma(host);

	if (host->check_scc_error)
		host->check_scc_error(host);

	/* If SET_BLOCK_COUNT, continue with the main command */
	if (host->mrq && !mrq->cmd->error) {
		tmio_process_mrq(host, mrq);
		return;
	}

	mmc_request_done(host->mmc, mrq);
}

static void tmio_mmc_done_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  done);
	tmio_mmc_finish_request(host);
}

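/*
 * Card power-up order: optional platform set_pwr() hook first, then the card
 * supply (vmmc), then the I/O supply (vqmmc).
 */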
static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	/* Optional platform callback to switch slot power */
	if (host->set_pwr)
		host->set_pwr(host->pdev, 1);

	if (!IS_ERR(mmc->supply.vmmc)) {
		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		/*
		 * Give the card supply a moment to settle before continuing;
		 * some cards need this for reliable detection.
		 */
		usleep_range(200, 300);
	}

	/*
	 * VccQ (vqmmc) is only switched on after Vcc (vmmc) came up
	 * successfully.
	 */
	if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
		ret = regulator_enable(mmc->supply.vqmmc);
		usleep_range(200, 300);
	}

	if (ret < 0)
		dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
			ret);
}

static void tmio_mmc_power_off(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

	if (host->set_pwr)
		host->set_pwr(host->pdev, 0);
}

static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
				   unsigned char bus_width)
{
	u16 reg = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT)
				& ~(CARD_OPT_WIDTH | CARD_OPT_WIDTH8);

	/* with both width bits cleared, reg selects 4-bit mode */
	if (bus_width == MMC_BUS_WIDTH_1)
		reg |= CARD_OPT_WIDTH;
	else if (bus_width == MMC_BUS_WIDTH_8)
		reg |= CARD_OPT_WIDTH8;

	sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, reg);
}

/*
 * Set clocking, power and bus width for the card. Concurrent .set_ios()
 * calls and in-flight requests are detected via host->mrq, which is set to
 * an ERR_PTR while the ios update is in progress.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct device *dev = &host->pdev->dev;
	unsigned long flags;

	mutex_lock(&host->ios_lock);

	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {
		if (IS_ERR(host->mrq)) {
			dev_dbg(dev,
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
			host->mrq = ERR_PTR(-EINTR);
		} else {
			dev_dbg(dev,
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts,
				jiffies);
		}
		spin_unlock_irqrestore(&host->lock, flags);

		mutex_unlock(&host->ios_lock);
		return;
	}

	host->mrq = ERR_PTR(-EBUSY);

	spin_unlock_irqrestore(&host->lock, flags);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		tmio_mmc_power_off(host);
		tmio_mmc_clk_stop(host);
		break;
	case MMC_POWER_UP:
		tmio_mmc_power_on(host, ios->vdd);
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	case MMC_POWER_ON:
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	}

	/* Let things settle before accepting new requests */
	usleep_range(140, 200);
	if (PTR_ERR(host->mrq) == -EINTR)
		dev_dbg(&host->pdev->dev,
			"%s.%d: IOS interrupted: clk %u, mode %u",
			current->comm, task_pid_nr(current),
			ios->clock, ios->power_mode);
	host->mrq = NULL;

	host->clk_cache = ios->clock;

	mutex_unlock(&host->ios_lock);
}

static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	return !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) &
		 TMIO_STAT_WRPROTECT);
}

static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	return !!(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) &
		  TMIO_STAT_SIGSTATE);
}

static int tmio_multi_io_quirk(struct mmc_card *card,
			       unsigned int direction, int blk_size)
{
	struct tmio_mmc_host *host = mmc_priv(card->host);

	if (host->multi_io_quirk)
		return host->multi_io_quirk(card, direction, blk_size);

	return blk_size;
}

static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= tmio_mmc_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
	.multi_io_quirk	= tmio_multi_io_quirk,
	.hw_reset	= tmio_mmc_hw_reset,
	.execute_tuning = tmio_mmc_execute_tuning,
};

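/*
 * Determine the available OCR mask: prefer the vmmc regulator, fall back to
 * the platform data mask, and defer probing if neither is known yet.
 */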
static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
	struct tmio_mmc_data *pdata = host->pdata;
	struct mmc_host *mmc = host->mmc;
	int err;

	err = mmc_regulator_get_supply(mmc);
	if (err)
		return err;

	/* use ocr_mask if no regulator */
	if (!mmc->ocr_avail)
		mmc->ocr_avail = pdata->ocr_mask;

	/*
	 * Still nothing? The regulator may not have been probed yet,
	 * so ask to be retried later.
	 */
	if (!mmc->ocr_avail)
		return -EPROBE_DEFER;

	return 0;
}

static void tmio_mmc_of_parse(struct platform_device *pdev,
			      struct mmc_host *mmc)
{
	const struct device_node *np = pdev->dev.of_node;

	if (!np)
		return;

	/*
	 * The generic "disable-wp" binding is handled by mmc_of_parse();
	 * this vendor property is kept for older device trees.
	 */
	if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL))
		mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
}

struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
					  struct tmio_mmc_data *pdata)
{
	struct tmio_mmc_host *host;
	struct mmc_host *mmc;
	struct resource *res;
	void __iomem *ctl;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctl = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ctl))
		return ERR_CAST(ctl);

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->ctl = ctl;
	host->mmc = mmc;
	host->pdev = pdev;
	host->pdata = pdata;
	host->ops = tmio_mmc_ops;
	mmc->ops = &host->ops;

	ret = mmc_of_parse(host->mmc);
	if (ret) {
		host = ERR_PTR(ret);
		goto free;
	}

	tmio_mmc_of_parse(pdev, mmc);

	platform_set_drvdata(pdev, host);

	return host;
free:
	mmc_free_host(mmc);

	return host;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_alloc);

void tmio_mmc_host_free(struct tmio_mmc_host *host)
{
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_free);

int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
{
	struct platform_device *pdev = _host->pdev;
	struct tmio_mmc_data *pdata = _host->pdata;
	struct mmc_host *mmc = _host->mmc;
	int ret;

	/*
	 * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
	 * looping forever...
	 */
	if (mmc->f_min == 0)
		return -EINVAL;

	if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
		_host->write16_hook = NULL;

	_host->set_pwr = pdata->set_pwr;
	_host->set_clk_div = pdata->set_clk_div;

	ret = tmio_mmc_init_ocr(_host);
	if (ret < 0)
		return ret;

	if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
		ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
		if (ret)
			return ret;
	}

	mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->caps2 |= pdata->capabilities2;
	mmc->max_segs = pdata->max_segs ? : 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = pdata->max_blk_count ? :
		(PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;

	/*
	 * swiotlb limits the size of a single mapping, so cap max_req_size
	 * to the largest contiguous bounce buffer swiotlb can provide.
	 */
	if (swiotlb_max_segment()) {
		unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;

		if (mmc->max_req_size > max_size)
			mmc->max_req_size = max_size;
	}
	mmc->max_seg_size = mmc->max_req_size;

	if (mmc_can_gpio_ro(mmc))
		_host->ops.get_ro = mmc_gpio_get_ro;

	if (mmc_can_gpio_cd(mmc))
		_host->ops.get_cd = mmc_gpio_get_cd;

	_host->native_hotplug = !(mmc_can_gpio_cd(mmc) ||
				  mmc->caps & MMC_CAP_NEEDS_POLL ||
				  !mmc_card_is_removable(mmc));

	/*
	 * R-Car Gen2+ controllers always use the controller's own
	 * card-detect logic.
	 */
	if (pdata->flags & TMIO_MMC_MIN_RCAR2)
		_host->native_hotplug = true;

	/*
	 * While using internal tmio hardware logic for card detection, we need
	 * to ensure it stays powered for it to work.
	 */
	if (_host->native_hotplug)
		pm_runtime_get_noresume(&pdev->dev);

	_host->sdio_irq_enabled = false;
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		_host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;

	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	_host->sdcard_irq_mask = sd_ctrl_read16_and_16_as_32(_host, CTL_IRQ_MASK);
	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);

	if (_host->native_hotplug)
		tmio_mmc_enable_mmc_irqs(_host,
				TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);

	spin_lock_init(&_host->lock);
	mutex_init(&_host->ios_lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
	INIT_WORK(&_host->done, tmio_mmc_done_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(_host, pdata);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = mmc_add_host(mmc);
	if (ret)
		goto remove_host;

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	return 0;

remove_host:
	tmio_mmc_host_remove(_host);
	return ret;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_probe);

void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	struct platform_device *pdev = host->pdev;
	struct mmc_host *mmc = host->mmc;

	if (host->pdata->flags & TMIO_MMC_SDIO_IRQ)
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);

	if (!host->native_hotplug)
		pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(mmc);
	cancel_work_sync(&host->done);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);

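/*
 * Runtime PM: suspend masks the card IRQs and gates the clock; resume resets
 * the controller, restores the cached clock and re-enables card-detect IRQs
 * and DMA.
 */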
#ifdef CONFIG_PM
static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
{
	if (!host->clk_enable)
		return -ENOTSUPP;

	return host->clk_enable(host);
}

static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
{
	if (host->clk_disable)
		host->clk_disable(host);
}

int tmio_mmc_host_runtime_suspend(struct device *dev)
{
	struct tmio_mmc_host *host = dev_get_drvdata(dev);

	tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

	if (host->clk_cache)
		tmio_mmc_clk_stop(host);

	tmio_mmc_clk_disable(host);

	return 0;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_suspend);

static bool tmio_mmc_can_retune(struct tmio_mmc_host *host)
{
	return host->tap_num && mmc_can_retune(host->mmc);
}

int tmio_mmc_host_runtime_resume(struct device *dev)
{
	struct tmio_mmc_host *host = dev_get_drvdata(dev);

	tmio_mmc_reset(host);
	tmio_mmc_clk_enable(host);

	if (host->clk_cache)
		tmio_mmc_set_clock(host, host->clk_cache);

	if (host->native_hotplug)
		tmio_mmc_enable_mmc_irqs(host,
				TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);

	tmio_mmc_enable_dma(host, true);

	if (tmio_mmc_can_retune(host) && host->select_tuning(host))
		dev_warn(&host->pdev->dev, "Tuning selection failed\n");

	return 0;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_resume);
#endif

MODULE_LICENSE("GPL v2");