/*
 * linux/drivers/mmc/host/tmio_mmc_pio.c
 *
 * Driver core for the MMC/SD/SDIO IP found in TC6393XB, TC6391XB,
 * TC6387XB, T7L66XB, ASIC3 and SH-Mobile SoCs: PIO data transfers,
 * interrupt handling and the mmc_host_ops implementation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/mmc/sdio.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "tmio_mmc.h"

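/*
 * IRQ mask helpers: the hardware mask register disables an interrupt source
 * when its bit is set, so "enable" clears bits and "disable" sets them. Only
 * bits within TMIO_MASK_IRQ are ever touched, and host->sdcard_irq_mask
 * caches the register contents.
 */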
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}

static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, ~i);
}

static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;
	return --host->sg_len;
}

/* Time (in ms) to wait for a command request to complete */
#define CMDREQ_TIMEOUT	5000

#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)

static void pr_debug_status(u32 status)
{
	int i = 0;
	pr_debug("status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}

#else
#define pr_debug_status(s)  do { } while (0)
#endif

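/*
 * Enable or disable the SDIO card interrupt. The host is kept runtime-PM
 * active for as long as the SDIO IRQ is enabled, so the interrupt cannot be
 * missed while the controller is suspended.
 */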
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable && !host->sdio_irq_enabled) {
		/* Keep device active while SDIO irq is enabled */
		pm_runtime_get_sync(mmc_dev(mmc));
		host->sdio_irq_enabled = true;

		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL &
					~TMIO_SDIO_STAT_IOIRQ;
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
	} else if (!enable && host->sdio_irq_enabled) {
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);

		host->sdio_irq_enabled = false;
		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));
	}
}

static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(host->pdata->flags & TMIO_MMC_MIN_RCAR2 ? 1 : 10);

	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		msleep(10);
	}
}

static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		msleep(10);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(host->pdata->flags & TMIO_MMC_MIN_RCAR2 ? 5 : 10);
}

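/*
 * Program the SD clock divider for the requested frequency and restart the
 * clock. A request of 0 Hz simply stops the clock.
 */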
static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
			       unsigned int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock == 0) {
		tmio_mmc_clk_stop(host);
		return;
	}

	if (host->clk_update)
		clock = host->clk_update(host, new_clock) / 512;
	else
		clock = host->mmc->f_min;

	for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1)
		clock <<= 1;

	/* 1/1 clock is option */
	if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) && ((clk >> 22) & 0x1))
		clk |= 0xff;

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk >> 22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
			sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
		msleep(10);

	tmio_mmc_clk_start(host);
}

static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}

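/*
 * Delayed-work handler that fires when a command request has not completed
 * within CMDREQ_TIMEOUT: it fails the stuck request with -ETIMEDOUT, resets
 * the controller and aborts any DMA in flight.
 */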
static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * Is the request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), it can happen that a .set_ios() call preempts
	 * us, so we have to check for IS_ERR(host->mrq).
	 */
	if (IS_ERR_OR_NULL(mrq)
	    || time_is_after_jiffies(host->last_req_ts +
				     msecs_to_jiffies(CMDREQ_TIMEOUT))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		 "timeout waiting for hardware interrupt (CMD%u)\n",
		 mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	/* Ready for new calls */
	host->mrq = NULL;

	tmio_mmc_abort_dma(host);
	mmc_request_done(host->mmc, mrq);
}

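/*
 * Complete the current request: clear the per-request host state, cancel the
 * watchdog reset work, abort DMA if the request failed and hand the request
 * back to the MMC core.
 */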
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	mrq = host->mrq;
	if (IS_ERR_OR_NULL(mrq)) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	cancel_delayed_work(&host->delayed_reset_work);

	host->mrq = NULL;
	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->cmd->error || (mrq->data && mrq->data->error))
		tmio_mmc_abort_dma(host);

	mmc_request_done(host->mmc, mrq);
}

static void tmio_mmc_done_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  done);
	tmio_mmc_finish_request(host);
}

/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD		0x0040
#define RESP_NONE	0x0300
#define RESP_R1		0x0400
#define RESP_R1B	0x0500
#define RESP_R2		0x0600
#define RESP_R3		0x0700
#define DATA_PRESENT	0x0800
#define TRANSFER_READ	0x1000
#define TRANSFER_MULTI	0x2000
#define SECURITY_CMD	0x4000
#define NO_CMD12_ISSUE	0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL */

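/*
 * Translate an mmc_command into the controller's command register encoding
 * and fire it off. Returns 0 on success or a negative errno if the response
 * type is not supported.
 */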
static int tmio_mmc_start_command(struct tmio_mmc_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;
	u32 irq_mask = TMIO_MASK_CMD;

	/* CMD12 is handled by hardware */
	if (cmd->opcode == MMC_STOP_TRANSMISSION && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:
	case MMC_RSP_R1_NO_CRC:
			   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggests this bit
 *         should be set when issuing app commands.
 *	if (cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;

			/*
			 * Disable auto CMD12 at IO_RW_EXTENDED when
			 * doing multiple block transfers
			 */
			if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) &&
			    (cmd->opcode == SD_IO_RW_EXTENDED))
				c |= NO_CMD12_ISSUE;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	if (!host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
	tmio_mmc_enable_mmc_irqs(host, irq_mask);

	/* Fire off the command */
	sd_ctrl_write32_as_16_and_16(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}

static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
				   unsigned short *buf,
				   unsigned int count)
{
	int is_read = host->data->flags & MMC_DATA_READ;
	u8 *buf8;

	/*
	 * Transfer the data
	 */
	if (is_read)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	/* if count was an even number, we are done */
	if (!(count & 0x1))
		return;

	/* if count was an odd number, transfer the last byte separately */
	buf8 = (u8 *)(buf + (count >> 1));

	/*
	 * FIXME
	 *
	 * driver and this function are assuming that
	 * it is used as little endian
	 */
	if (is_read)
		*buf8 = sd_ctrl_read16(host, CTL_SD_DATA_PORT) & 0xff;
	else
		sd_ctrl_write16(host, CTL_SD_DATA_PORT, *buf8);
}

/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	tmio_mmc_transfer_data(host, buf, count);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);
}

static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}

/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 *        which we don't do, as the chip can auto generate them.
	 *        Perhaps we can be smarter about when to use auto CMD12 and
	 *        only issue the auto request when we know this is the desired
	 *        stop command, allowing fallback to the stop command the
	 *        upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop) {
		if (stop->opcode == MMC_STOP_TRANSMISSION && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	schedule_work(&host->done);
}

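/*
 * DATAEND interrupt handler: decides whether the data phase completed via
 * DMA or PIO and either schedules the DMA completion tasklet or finishes the
 * request directly.
 */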
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data;
	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
		bool done = false;

		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt comes already with the
		 * BUSY status bit clear, but on some operations, like mount or
		 * synchronous reads, it can be delayed by an arbitrary (or
		 * rather long) amount of time. So we better wait for data to be
		 * written to the chip, in our case wait for SCLKDIVEN.
		 */
		if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
			if (status & TMIO_STAT_SCLKDIVEN)
				done = true;
		} else {
			if (!(status & TMIO_STAT_CMD_BUSY))
				done = true;
		}

		if (done) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}

static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
			     unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */
	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read16_and_16_as_32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		schedule_work(&host->done);
	}

out:
	spin_unlock(&host->lock);
}

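/*
 * Card hotplug handling: acknowledge insert/remove status bits and ask the
 * MMC core to rescan the slot, unless a detect job is already pending.
 * Returns true if the interrupt was consumed.
 */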
static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
				       int ireg, int status)
{
	struct mmc_host *mmc = host->mmc;

	/* Card insert/remove */
	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
				      TMIO_STAT_CARD_REMOVE);
		if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
		     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
		    !work_pending(&mmc->detect.work))
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		return true;
	}

	return false;
}

static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
				  int ireg, int status)
{
	/* Command completion */
	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
		tmio_mmc_ack_mmc_irqs(host,
				      TMIO_STAT_CMDRESPEND |
				      TMIO_STAT_CMDTIMEOUT);
		tmio_mmc_cmd_irq(host, status);
		return true;
	}

	/* Data transfer */
	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
		tmio_mmc_pio_irq(host);
		return true;
	}

	/* Data transfer completion */
	if (ireg & TMIO_STAT_DATAEND) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_data_irq(host);
		return true;
	}

	return false;
}

static void tmio_mmc_sdio_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	struct mmc_host *mmc = host->mmc;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, status;
	unsigned int sdio_status;

	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
		return;

	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
	/* Mask with the SDIO IRQ mask, not the SD-card one */
	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;

	sdio_status = status & ~TMIO_SDIO_MASK_ALL;
	if (pdata->flags & TMIO_MMC_SDIO_STATUS_QUIRK)
		sdio_status |= 6;

	sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
		mmc_signal_sdio_irq(mmc);
}

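/*
 * Top-level interrupt handler: clears the status register and dispatches to
 * the card-detect, SD-card and SDIO sub-handlers in turn.
 */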
irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	unsigned int ireg, status;

	status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
	ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;

	pr_debug_status(status);
	pr_debug_status(ireg);

	/* Clear the status except the interrupt status */
	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ);

	if (__tmio_mmc_card_detect_irq(host, ireg, status))
		return IRQ_HANDLED;
	if (__tmio_mmc_sdcard_irq(host, ireg, status))
		return IRQ_HANDLED;

	tmio_mmc_sdio_irq(irq, devid);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_irq);

static int tmio_mmc_start_data(struct tmio_mmc_host *host,
			       struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4/8 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4 ||
	    host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if ((data->blksz < 2) || (data->blksz & 0x1 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4/8 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}

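/* Process requests from the MMC layer */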
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->data) {
		ret = tmio_mmc_start_data(host, mrq->data);
		if (ret)
			goto fail;
	}

	ret = tmio_mmc_start_command(host, mrq->cmd);
	if (!ret) {
		schedule_delayed_work(&host->delayed_reset_work,
				      msecs_to_jiffies(CMDREQ_TIMEOUT));
		return;
	}

fail:
	host->force_pio = false;
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);
}

static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
{
	if (!host->clk_enable)
		return -ENOTSUPP;

	return host->clk_enable(host);
}

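/* Power up the card: platform power hook first, then Vcc, then VccQ */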
static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	/* .set_ios() is returning void, so, no chance to report an error */

	if (host->set_pwr)
		host->set_pwr(host->pdev, 1);

	if (!IS_ERR(mmc->supply.vmmc)) {
		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		/*
		 * Attention: empiric value. With a b43 WiFi SDIO card this
		 * delay proved necessary for reliable card-insertion probing.
		 * 100us were not enough. Is this the same 140us delay, as in
		 * tmio_mmc_set_ios()?
		 */
		udelay(200);
	}
	/*
	 * It seems, VccQ should be switched on after Vcc, this is also what the
	 * omap_hsmmc.c driver does.
	 */
	if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
		ret = regulator_enable(mmc->supply.vqmmc);
		udelay(200);
	}

	if (ret < 0)
		dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
			ret);
}

static void tmio_mmc_power_off(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

	if (host->set_pwr)
		host->set_pwr(host->pdev, 0);
}

static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
				   unsigned char bus_width)
{
	u16 reg = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT)
				& ~(CARD_OPT_WIDTH | CARD_OPT_WIDTH8);

	/* reg now applies to MMC_BUS_WIDTH_4 */
	if (bus_width == MMC_BUS_WIDTH_1)
		reg |= CARD_OPT_WIDTH;
	else if (bus_width == MMC_BUS_WIDTH_8)
		reg |= CARD_OPT_WIDTH8;

	sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, reg);
}

/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
 * MMC won't run that fast, it has to be clocked at 12MHz which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct device *dev = &host->pdev->dev;
	unsigned long flags;

	mutex_lock(&host->ios_lock);

	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {
		if (IS_ERR(host->mrq)) {
			dev_dbg(dev,
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
			host->mrq = ERR_PTR(-EINTR);
		} else {
			dev_dbg(dev,
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts, jiffies);
		}
		spin_unlock_irqrestore(&host->lock, flags);

		mutex_unlock(&host->ios_lock);
		return;
	}

	host->mrq = ERR_PTR(-EBUSY);

	spin_unlock_irqrestore(&host->lock, flags);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		tmio_mmc_power_off(host);
		tmio_mmc_clk_stop(host);
		break;
	case MMC_POWER_UP:
		tmio_mmc_power_on(host, ios->vdd);
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	case MMC_POWER_ON:
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	}

	/* Let things settle. delay taken from winCE driver */
	udelay(140);
	if (PTR_ERR(host->mrq) == -EINTR)
		dev_dbg(&host->pdev->dev,
			"%s.%d: IOS interrupted: clk %u, mode %u",
			current->comm, task_pid_nr(current),
			ios->clock, ios->power_mode);
	host->mrq = NULL;

	host->clk_cache = ios->clock;

	mutex_unlock(&host->ios_lock);
}

static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;
	int ret = mmc_gpio_get_ro(mmc);

	if (ret >= 0)
		return ret;

	ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));

	return ret;
}

static int tmio_multi_io_quirk(struct mmc_card *card,
			       unsigned int direction, int blk_size)
{
	struct tmio_mmc_host *host = mmc_priv(card->host);

	if (host->multi_io_quirk)
		return host->multi_io_quirk(card, direction, blk_size);

	return blk_size;
}

static struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= mmc_gpio_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
	.multi_io_quirk	= tmio_multi_io_quirk,
};

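/*
 * Determine the available OCR mask: a vmmc regulator takes precedence,
 * platform data is the fallback, and probing is deferred when neither has
 * produced a mask yet.
 */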
static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
	struct tmio_mmc_data *pdata = host->pdata;
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_get_supply(mmc);

	/* use ocr_mask if no regulator */
	if (!mmc->ocr_avail)
		mmc->ocr_avail = pdata->ocr_mask;

	/*
	 * try again.
	 * There is a possibility that the regulator has not been probed yet.
	 */
	if (!mmc->ocr_avail)
		return -EPROBE_DEFER;

	return 0;
}

static void tmio_mmc_of_parse(struct platform_device *pdev,
			      struct tmio_mmc_data *pdata)
{
	const struct device_node *np = pdev->dev.of_node;

	if (!np)
		return;

	if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL))
		pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE;
}

struct tmio_mmc_host*
tmio_mmc_host_alloc(struct platform_device *pdev)
{
	struct tmio_mmc_host *host;
	struct mmc_host *mmc;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return NULL;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->pdev = pdev;

	return host;
}
EXPORT_SYMBOL(tmio_mmc_host_alloc);

void tmio_mmc_host_free(struct tmio_mmc_host *host)
{
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_free);

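/*
 * Shared probe helper for the platform glue drivers: maps the controller
 * registers, initialises the mmc_host, sets up IRQ masking, DMA and runtime
 * PM, and registers the host with the MMC core.
 */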
int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
			struct tmio_mmc_data *pdata)
{
	struct platform_device *pdev = _host->pdev;
	struct mmc_host *mmc = _host->mmc;
	struct resource *res_ctl;
	int ret;
	u32 irq_mask = TMIO_MASK_CMD;

	tmio_mmc_of_parse(pdev, pdata);

	if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
		_host->write16_hook = NULL;

	res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		return -EINVAL;

	ret = mmc_of_parse(mmc);
	if (ret < 0)
		goto host_free;

	_host->pdata = pdata;
	platform_set_drvdata(pdev, mmc);

	_host->set_pwr = pdata->set_pwr;
	_host->set_clk_div = pdata->set_clk_div;

	ret = tmio_mmc_init_ocr(_host);
	if (ret < 0)
		goto host_free;

	_host->ctl = devm_ioremap(&pdev->dev,
				  res_ctl->start, resource_size(res_ctl));
	if (!_host->ctl) {
		ret = -ENOMEM;
		goto host_free;
	}

	tmio_mmc_ops.card_busy = _host->card_busy;
	tmio_mmc_ops.start_signal_voltage_switch = _host->start_signal_voltage_switch;
	mmc->ops = &tmio_mmc_ops;

	mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->caps2 |= pdata->capabilities2;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = (PAGE_SIZE / mmc->max_blk_size) *
		mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
				  mmc->caps & MMC_CAP_NEEDS_POLL ||
				  !mmc_card_is_removable(mmc) ||
				  mmc->slot.cd_irq >= 0);

	/*
	 * On Gen2+, eMMC with NONREMOVABLE currently fails because native
	 * hotplug gets disabled. It seems RuntimePM related yet we need further
	 * research. Remove the warning and call to mmc_card_is_removable()
	 * when the problem is fixed.
	 */
	if (pdata->flags & TMIO_MMC_MIN_RCAR2)
		_host->native_hotplug = true;

	if (tmio_mmc_clk_enable(_host) < 0) {
		mmc->f_max = pdata->hclk;
		mmc->f_min = mmc->f_max / 512;
	}

	/*
	 * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
	 * looping forever...
	 */
	if (mmc->f_min == 0) {
		ret = -EINVAL;
		goto host_free;
	}

	/*
	 * While using internal tmio hardware logic for card detection, we need
	 * to ensure it stays powered for it to work.
	 */
	if (_host->native_hotplug)
		pm_runtime_get_noresume(&pdev->dev);

	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	_host->sdcard_irq_mask = sd_ctrl_read16_and_16_as_32(_host, CTL_IRQ_MASK);
	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);

	/* Unmask the IRQs we want to know about */
	if (!_host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!_host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;
	if (!_host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);

	_host->sdcard_irq_mask &= ~irq_mask;

	_host->sdio_irq_enabled = false;
	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
		_host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(_host, CTL_SDIO_IRQ_MASK, _host->sdio_irq_mask);
		sd_ctrl_write16(_host, CTL_TRANSACTION_CTL, 0x0000);
	}

	spin_lock_init(&_host->lock);
	mutex_init(&_host->ios_lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
	INIT_WORK(&_host->done, tmio_mmc_done_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(_host, pdata);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = mmc_add_host(mmc);
	if (ret < 0) {
		tmio_mmc_host_remove(_host);
		return ret;
	}

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
		ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
		if (ret < 0) {
			tmio_mmc_host_remove(_host);
			return ret;
		}
		mmc_gpiod_request_cd_irq(mmc);
	}

	return 0;

host_free:

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);

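/*
 * Undo tmio_mmc_host_probe(): unregister the host, flush the work items and
 * release DMA and runtime PM references.
 */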
void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	struct platform_device *pdev = host->pdev;
	struct mmc_host *mmc = host->mmc;

	if (!host->native_hotplug)
		pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(mmc);
	cancel_work_sync(&host->done);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);

#ifdef CONFIG_PM
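/*
 * Runtime PM: mask all card interrupts and gate the clock on suspend; reset
 * the controller and restore clock and DMA state on resume.
 */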
int tmio_mmc_host_runtime_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

	if (host->clk_cache)
		tmio_mmc_clk_stop(host);

	if (host->clk_disable)
		host->clk_disable(host);

	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);

int tmio_mmc_host_runtime_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	tmio_mmc_reset(host);
	tmio_mmc_clk_enable(host);

	if (host->clk_cache)
		tmio_mmc_set_clock(host, host->clk_cache);

	tmio_mmc_enable_dma(host, true);

	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);
#endif

MODULE_LICENSE("GPL v2");