/*
 * Core driver for the MMC / SD / SDIO host controller IP found in
 * TMIO MFDs and Renesas SDHI hardware.
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/mmc/sdio.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/workqueue.h>

#include "tmio_mmc.h"

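/*
 * Thin wrappers around the optional DMA ops. When no DMA backend is
 * registered, these quietly fall back to PIO-only operation.
 */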
static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
				      struct mmc_data *data)
{
	if (host->dma_ops)
		host->dma_ops->start(host, data);
}

static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
	if (host->dma_ops)
		host->dma_ops->enable(host, enable);
}

static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
					struct tmio_mmc_data *pdata)
{
	if (host->dma_ops) {
		host->dma_ops->request(host, pdata);
	} else {
		host->chan_tx = NULL;
		host->chan_rx = NULL;
	}
}

static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->dma_ops)
		host->dma_ops->release(host);
}

static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
	if (host->dma_ops)
		host->dma_ops->abort(host);
}

static inline void tmio_mmc_dataend_dma(struct tmio_mmc_host *host)
{
	if (host->dma_ops)
		host->dma_ops->dataend(host);
}

void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
EXPORT_SYMBOL_GPL(tmio_mmc_enable_mmc_irqs);

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
EXPORT_SYMBOL_GPL(tmio_mmc_disable_mmc_irqs);

static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, ~i);
}

static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;
	return --host->sg_len;
}

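/* Give a command up to 5 seconds before the delayed reset work kicks in */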
#define CMDREQ_TIMEOUT	5000

static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable && !host->sdio_irq_enabled) {
		u16 sdio_status;

		/* Keep the device active while the SDIO IRQ is enabled */
		pm_runtime_get_sync(mmc_dev(mmc));

		host->sdio_irq_enabled = true;
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ;

		/* Clear obsolete interrupts before enabling */
		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS) & ~TMIO_SDIO_MASK_ALL;
		if (host->pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
			sdio_status |= TMIO_SDIO_SETBITS_MASK;
		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
	} else if (!enable && host->sdio_irq_enabled) {
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);

		host->sdio_irq_enabled = false;
		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));
	}
}

static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));

	/* R-Car Gen2+ variants do not need this settling delay */
	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
		usleep_range(10000, 11000);

	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		usleep_range(10000, 11000);
	}
}

static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		usleep_range(10000, 11000);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));

	/* R-Car Gen2+ variants do not need this settling delay */
	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
		usleep_range(10000, 11000);
}

static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
			       unsigned int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock == 0) {
		tmio_mmc_clk_stop(host);
		return;
	}

	if (host->clk_update)
		clock = host->clk_update(host, new_clock) / 512;
	else
		clock = host->mmc->f_min;

	for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1)
		clock <<= 1;

	/* 1/1 clock is an option on controllers reporting the actual clock */
	if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) && ((clk >> 22) & 0x1))
		clk |= 0xff;

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk >> 22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
		usleep_range(10000, 11000);

	tmio_mmc_clk_start(host);
}

static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	/* FIXME - should we set the stop clock register here? */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	usleep_range(10000, 11000);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	usleep_range(10000, 11000);

	if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
	}
}

static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * Is the request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), it can happen that a .set_ios() call
	 * preempts us, so we have to check for IS_ERR(host->mrq).
	 */
	if (IS_ERR_OR_NULL(mrq) ||
	    time_is_after_jiffies(host->last_req_ts +
				  msecs_to_jiffies(CMDREQ_TIMEOUT))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		 "timeout waiting for hardware interrupt (CMD%u)\n",
		 mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	/* Ready for new calls */
	host->mrq = NULL;

	tmio_mmc_abort_dma(host);
	mmc_request_done(host->mmc, mrq);
}

/*
 * These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme.
 */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
#define NO_CMD12_ISSUE 0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL */

static int tmio_mmc_start_command(struct tmio_mmc_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;
	u32 irq_mask = TMIO_MASK_CMD;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:
	case MMC_RSP_R1_NO_CRC:
			   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

	/*
	 * FIXME: the APP_CMD flag appears unnecessary in practice, but the
	 * spec suggests it should be set when issuing application commands.
	 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, TMIO_STOP_SEC);
			c |= TRANSFER_MULTI;

			/*
			 * Disable auto CMD12 at IO_RW_EXTENDED and
			 * SET_BLOCK_COUNT when doing multiple block transfers
			 */
			if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) &&
			    (cmd->opcode == SD_IO_RW_EXTENDED || host->mrq->sbc))
				c |= NO_CMD12_ISSUE;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	if (!host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
	tmio_mmc_enable_mmc_irqs(host, irq_mask);

	/* Fire off the command */
	sd_ctrl_write32_as_16_and_16(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}

static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
				   unsigned short *buf,
				   unsigned int count)
{
	int is_read = host->data->flags & MMC_DATA_READ;
	u8 *buf8;

	/*
	 * Transfer the data
	 */
	if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) {
		u32 data = 0;
		u32 *buf32 = (u32 *)buf;

		if (is_read)
			sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, buf32,
					   count >> 2);
		else
			sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, buf32,
					    count >> 2);

		/* if count was a multiple of 4 */
		if (!(count & 0x3))
			return;

		buf32 += count >> 2;
		count %= 4;

		if (is_read) {
			sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, &data, 1);
			memcpy(buf32, &data, count);
		} else {
			memcpy(&data, buf32, count);
			sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, &data, 1);
		}

		return;
	}

	if (is_read)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	/* if count was an even number */
	if (!(count & 0x1))
		return;

	/* if count was an odd number */
	buf8 = (u8 *)(buf + (count >> 1));

	/*
	 * FIXME
	 *
	 * The driver and this function assume that the hardware is used in
	 * little-endian mode.
	 */
	if (is_read)
		*buf8 = sd_ctrl_read16(host, CTL_SD_DATA_PORT) & 0xff;
	else
		sd_ctrl_write16(host, CTL_SD_DATA_PORT, *buf8);
}

/*
 * This chip always returns (at least?) as much data as you ask for. It is
 * unclear what happens if you ask for less than a block, so short reads
 * should be treated with care.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	tmio_mmc_transfer_data(host, buf, count);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);
}

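/*
 * When a read was routed through the DMA bounce buffer, copy the received
 * data back into the caller's original scatterlist entry.
 */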
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);

		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}

/* Needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return the correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 *        which we don't do, as the chip can auto generate them.
	 *        Perhaps we can be smarter about when to use auto CMD12 and
	 *        only issue the auto request when we know this is the desired
	 *        stop command, allowing fallback to the stop command the
	 *        upper layers expect. For now, we do what works.
	 */
	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop && !host->mrq->sbc) {
		if (stop->opcode != MMC_STOP_TRANSMISSION || stop->arg)
			dev_err(&host->pdev->dev, "unsupported stop: CMD%u,0x%x. We did CMD12,0\n",
				stop->opcode, stop->arg);

		/* fill in the response from the auto CMD12 */
		stop->resp[0] = sd_ctrl_read16_and_16_as_32(host, CTL_RESPONSE);

		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0);
	}

	schedule_work(&host->done);
}
EXPORT_SYMBOL_GPL(tmio_mmc_do_data_irq);

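/*
 * DATAEND interrupt: decide whether the transfer is really finished (PIO, or
 * DMA with the controller no longer busy) and either hand off to the DMA
 * backend or complete the data request directly.
 */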
static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
{
	struct mmc_data *data;

	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (stat & TMIO_STAT_CRCFAIL || stat & TMIO_STAT_STOPBIT_ERR ||
	    stat & TMIO_STAT_TXUNDERRUN)
		data->error = -EILSEQ;
	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
		bool done = false;

		/*
		 * Has all data been written out yet? The first DATAEND
		 * interrupt often arrives with the BUSY bit already clear, but
		 * on some operations it does not. Since we cannot wait in
		 * interrupt context, check the busy status here and, if the
		 * controller is still busy, wait for the next interrupt. Use
		 * the idle-wait status bit where the hardware has one.
		 */
		if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
			if (status & TMIO_STAT_SCLKDIVEN)
				done = true;
		} else {
			if (!(status & TMIO_STAT_CMD_BUSY))
				done = true;
		}

		if (done) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tmio_mmc_dataend_dma(host);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_dataend_dma(host);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}

static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	/*
	 * This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */
	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read16_and_16_as_32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if ((stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) ||
		 stat & TMIO_STAT_STOPBIT_ERR ||
		 stat & TMIO_STAT_CMD_IDX_ERR)
		cmd->error = -EILSEQ;

	/*
	 * If there is data to handle, enable data IRQs here; the request is
	 * then ultimately finished in the data end handler. If there is no
	 * data or we encountered an error, finish now.
	 */
	if (host->data && (!cmd->error || cmd->error == -EILSEQ)) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		schedule_work(&host->done);
	}

out:
	spin_unlock(&host->lock);
}

static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
				       int ireg, int status)
{
	struct mmc_host *mmc = host->mmc;

	/* Card insert / remove events */
	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
				      TMIO_STAT_CARD_REMOVE);
		if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
		     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
		    !work_pending(&mmc->detect.work))
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		return true;
	}

	return false;
}

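/* Handle command completion, PIO FIFO and data end interrupts for SD/MMC */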
static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg,
				  int status)
{
	/* Command completion */
	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CMDRESPEND |
				      TMIO_STAT_CMDTIMEOUT);
		tmio_mmc_cmd_irq(host, status);
		return true;
	}

	/* Data transfer */
	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
		tmio_mmc_pio_irq(host);
		return true;
	}

	/* Data transfer completion */
	if (ireg & TMIO_STAT_DATAEND) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_data_irq(host, status);
		return true;
	}

	return false;
}

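/* Acknowledge and, if enabled, signal SDIO card interrupts to the MMC core */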
static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, status;
	unsigned int sdio_status;

	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
		return;

	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;

	sdio_status = status & ~TMIO_SDIO_MASK_ALL;
	if (pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
		sdio_status |= TMIO_SDIO_SETBITS_MASK;

	sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
		mmc_signal_sdio_irq(mmc);
}

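/* Top-level interrupt handler: dispatch card detect, SD/MMC and SDIO events */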
irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	unsigned int ireg, status;

	status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
	ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;

	/* Clear the status except the interrupt status */
	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ);

	if (__tmio_mmc_card_detect_irq(host, ireg, status))
		return IRQ_HANDLED;
	if (__tmio_mmc_sdcard_irq(host, ireg, status))
		return IRQ_HANDLED;

	__tmio_mmc_sdio_irq(host);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(tmio_mmc_irq);

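/*
 * Program block size and block count and kick off the (optional) DMA engine
 * for a data transfer.
 */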
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
			       struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4/8 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4 ||
	    host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4/8 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}

static void tmio_mmc_hw_reset(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (host->hw_reset)
		host->hw_reset(host);
}

static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int i, ret = 0;

	if (!host->init_tuning || !host->select_tuning)
		/* Tuning is not supported */
		goto out;

	host->tap_num = host->init_tuning(host);
	if (!host->tap_num)
		/* Tuning is not supported */
		goto out;

	if (host->tap_num * 2 >= sizeof(host->taps) * BITS_PER_BYTE) {
		dev_warn_once(&host->pdev->dev,
			      "Too many taps, skipping tuning. Please consider updating size of taps field of tmio_mmc_host\n");
		goto out;
	}

	bitmap_zero(host->taps, host->tap_num * 2);

	/* Issue the tuning command twice for each tap */
	for (i = 0; i < 2 * host->tap_num; i++) {
		if (host->prepare_tuning)
			host->prepare_tuning(host, i % host->tap_num);

		ret = mmc_send_tuning(mmc, opcode, NULL);
		if (ret && ret != -EILSEQ)
			goto out;
		if (ret == 0)
			set_bit(i, host->taps);

		mdelay(1);
	}

	ret = host->select_tuning(host);

out:
	if (ret < 0) {
		dev_warn(&host->pdev->dev, "Tuning procedure failed\n");
		tmio_mmc_hw_reset(mmc);
	}

	return ret;
}

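/*
 * Issue the next command of a request: the SET_BLOCK_COUNT precommand first
 * (if any), otherwise the main command, starting the data phase as needed.
 */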
static void tmio_process_mrq(struct tmio_mmc_host *host,
			     struct mmc_request *mrq)
{
	struct mmc_command *cmd;
	int ret;

	if (mrq->sbc && host->cmd != mrq->sbc) {
		cmd = mrq->sbc;
	} else {
		cmd = mrq->cmd;
		if (mrq->data) {
			ret = tmio_mmc_start_data(host, mrq->data);
			if (ret)
				goto fail;
		}
	}

	ret = tmio_mmc_start_command(host, cmd);
	if (ret)
		goto fail;

	schedule_delayed_work(&host->delayed_reset_work,
			      msecs_to_jiffies(CMDREQ_TIMEOUT));
	return;

fail:
	host->force_pio = false;
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(host->mmc, mrq);
}

/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_process_mrq(host, mrq);
}

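/*
 * Called from the done work: clear the per-request state, stop the timeout
 * work and either continue with the main command (after SET_BLOCK_COUNT) or
 * report the finished request to the MMC core.
 */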
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	mrq = host->mrq;
	if (IS_ERR_OR_NULL(mrq)) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	/* If not SET_BLOCK_COUNT, clear the old data */
	if (host->cmd != mrq->sbc) {
		host->cmd = NULL;
		host->data = NULL;
		host->force_pio = false;
		host->mrq = NULL;
	}

	cancel_delayed_work(&host->delayed_reset_work);

	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->cmd->error || (mrq->data && mrq->data->error))
		tmio_mmc_abort_dma(host);

	if (host->check_scc_error)
		host->check_scc_error(host);

	/* If SET_BLOCK_COUNT, continue with the main command */
	if (host->mrq) {
		tmio_process_mrq(host, mrq);
		return;
	}

	mmc_request_done(host->mmc, mrq);
}

static void tmio_mmc_done_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  done);
	tmio_mmc_finish_request(host);
}

static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
{
	if (!host->clk_enable)
		return -ENOTSUPP;

	return host->clk_enable(host);
}

static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
{
	if (host->clk_disable)
		host->clk_disable(host);
}

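/* Apply card power: the platform set_pwr() hook first, then the regulators */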
static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	/* .set_ios() is returning void, so, no chance to report an error */

	if (host->set_pwr)
		host->set_pwr(host->pdev, 1);

	if (!IS_ERR(mmc->supply.vmmc)) {
		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		/*
		 * This delay is empirical; some SDIO cards need it for
		 * reliable card detection after switching Vcc on.
		 */
		udelay(200);
	}

	/*
	 * It seems VccQ should be switched on after Vcc; this is also what
	 * the omap_hsmmc.c driver does.
	 */
	if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
		ret = regulator_enable(mmc->supply.vqmmc);
		udelay(200);
	}

	if (ret < 0)
		dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
			ret);
}

static void tmio_mmc_power_off(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

	if (host->set_pwr)
		host->set_pwr(host->pdev, 0);
}

static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
				   unsigned char bus_width)
{
	u16 reg = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT)
				& ~(CARD_OPT_WIDTH | CARD_OPT_WIDTH8);

	/* reg now applies to MMC_BUS_WIDTH_4 */
	if (bus_width == MMC_BUS_WIDTH_1)
		reg |= CARD_OPT_WIDTH;
	else if (bus_width == MMC_BUS_WIDTH_8)
		reg |= CARD_OPT_WIDTH8;

	sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, reg);
}

/*
 * Set MMC clock and power.
 *
 * Note: This controller uses a simple divider scheme, so it cannot always run
 * a card at its full rated speed; the closest lower divider setting is used
 * instead.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct device *dev = &host->pdev->dev;
	unsigned long flags;

	mutex_lock(&host->ios_lock);

	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {
		if (IS_ERR(host->mrq)) {
			dev_dbg(dev,
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
			host->mrq = ERR_PTR(-EINTR);
		} else {
			dev_dbg(dev,
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts,
				jiffies);
		}
		spin_unlock_irqrestore(&host->lock, flags);

		mutex_unlock(&host->ios_lock);
		return;
	}

	host->mrq = ERR_PTR(-EBUSY);

	spin_unlock_irqrestore(&host->lock, flags);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		tmio_mmc_power_off(host);
		tmio_mmc_clk_stop(host);
		break;
	case MMC_POWER_UP:
		tmio_mmc_power_on(host, ios->vdd);
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	case MMC_POWER_ON:
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	}

	/* Let things settle. Delay taken from a WinCE driver */
	udelay(140);
	if (PTR_ERR(host->mrq) == -EINTR)
		dev_dbg(&host->pdev->dev,
			"%s.%d: IOS interrupted: clk %u, mode %u",
			current->comm, task_pid_nr(current),
			ios->clock, ios->power_mode);
	host->mrq = NULL;

	host->clk_cache = ios->clock;

	mutex_unlock(&host->ios_lock);
}

static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;
	int ret = mmc_gpio_get_ro(mmc);

	if (ret >= 0)
		return ret;

	ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));

	return ret;
}

static int tmio_multi_io_quirk(struct mmc_card *card,
			       unsigned int direction, int blk_size)
{
	struct tmio_mmc_host *host = mmc_priv(card->host);

	if (host->multi_io_quirk)
		return host->multi_io_quirk(card, direction, blk_size);

	return blk_size;
}

static struct mmc_host_ops tmio_mmc_ops = {
	.request		= tmio_mmc_request,
	.set_ios		= tmio_mmc_set_ios,
	.get_ro			= tmio_mmc_get_ro,
	.get_cd			= mmc_gpio_get_cd,
	.enable_sdio_irq	= tmio_mmc_enable_sdio_irq,
	.multi_io_quirk		= tmio_multi_io_quirk,
	.hw_reset		= tmio_mmc_hw_reset,
	.execute_tuning		= tmio_mmc_execute_tuning,
};

static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
	struct tmio_mmc_data *pdata = host->pdata;
	struct mmc_host *mmc = host->mmc;
	int err;

	err = mmc_regulator_get_supply(mmc);
	if (err)
		return err;

	/* use ocr_mask if no regulator */
	if (!mmc->ocr_avail)
		mmc->ocr_avail = pdata->ocr_mask;

	/*
	 * try again.
	 * There is a possibility that a regulator has not been probed yet.
	 */
	if (!mmc->ocr_avail)
		return -EPROBE_DEFER;

	return 0;
}

static void tmio_mmc_of_parse(struct platform_device *pdev,
			      struct tmio_mmc_data *pdata)
{
	const struct device_node *np = pdev->dev.of_node;

	if (!np)
		return;

	if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL))
		pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE;
}

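/* Allocate an mmc_host with tmio_mmc_host private data attached to pdev */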
struct tmio_mmc_host*
tmio_mmc_host_alloc(struct platform_device *pdev)
{
	struct tmio_mmc_host *host;
	struct mmc_host *mmc;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return NULL;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->pdev = pdev;

	return host;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_alloc);

void tmio_mmc_host_free(struct tmio_mmc_host *host)
{
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_free);

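/*
 * Common probe path for the platform glue drivers: map registers, set up
 * host capabilities and limits, hook up DMA and register the mmc host.
 */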
int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
			struct tmio_mmc_data *pdata,
			const struct tmio_mmc_dma_ops *dma_ops)
{
	struct platform_device *pdev = _host->pdev;
	struct mmc_host *mmc = _host->mmc;
	struct resource *res_ctl;
	int ret;
	u32 irq_mask = TMIO_MASK_CMD;

	tmio_mmc_of_parse(pdev, pdata);

	if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
		_host->write16_hook = NULL;

	res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		return -EINVAL;

	ret = mmc_of_parse(mmc);
	if (ret < 0)
		return ret;

	_host->pdata = pdata;
	platform_set_drvdata(pdev, mmc);

	_host->set_pwr = pdata->set_pwr;
	_host->set_clk_div = pdata->set_clk_div;

	ret = tmio_mmc_init_ocr(_host);
	if (ret < 0)
		return ret;

	_host->ctl = devm_ioremap(&pdev->dev,
				  res_ctl->start, resource_size(res_ctl));
	if (!_host->ctl)
		return -ENOMEM;

	tmio_mmc_ops.card_busy = _host->card_busy;
	tmio_mmc_ops.start_signal_voltage_switch =
		_host->start_signal_voltage_switch;
	mmc->ops = &tmio_mmc_ops;

	mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->caps2 |= pdata->capabilities2;
	mmc->max_segs = pdata->max_segs ? : 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = pdata->max_blk_count ? :
		(PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;

	/*
	 * Since swiotlb has a memory size limitation, calculate the maximum
	 * segment size locally (there is no API for it at the moment) and
	 * clamp max_req_size to it as a workaround.
	 */
	if (swiotlb_max_segment()) {
		unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;

		if (mmc->max_req_size > max_size)
			mmc->max_req_size = max_size;
	}
	mmc->max_seg_size = mmc->max_req_size;

	_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
				  mmc->caps & MMC_CAP_NEEDS_POLL ||
				  !mmc_card_is_removable(mmc));

	/*
	 * On R-Car Gen2+ parts, keep native hotplug enabled even for
	 * nonremovable cards; see the runtime PM handling below.
	 */
	if (pdata->flags & TMIO_MMC_MIN_RCAR2)
		_host->native_hotplug = true;

	if (tmio_mmc_clk_enable(_host) < 0) {
		mmc->f_max = pdata->hclk;
		mmc->f_min = mmc->f_max / 512;
	}

	/*
	 * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
	 * looping forever...
	 */
	if (mmc->f_min == 0)
		return -EINVAL;

	/*
	 * While using the internal tmio hardware logic for card detection, we
	 * need to ensure it stays powered for it to work.
	 */
	if (_host->native_hotplug)
		pm_runtime_get_noresume(&pdev->dev);

	_host->sdio_irq_enabled = false;
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		_host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;

	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	_host->sdcard_irq_mask = sd_ctrl_read16_and_16_as_32(_host, CTL_IRQ_MASK);
	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);

	/* Unmask the IRQs we want to know about */
	if (!_host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!_host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;
	if (!_host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);

	_host->sdcard_irq_mask &= ~irq_mask;

	spin_lock_init(&_host->lock);
	mutex_init(&_host->ios_lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
	INIT_WORK(&_host->done, tmio_mmc_done_work);

	/* See if we also get DMA */
	_host->dma_ops = dma_ops;
	tmio_mmc_request_dma(_host, pdata);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = mmc_add_host(mmc);
	if (ret)
		goto remove_host;

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
		ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
		if (ret)
			goto remove_host;

		mmc_gpiod_request_cd_irq(mmc);
	}

	return 0;

remove_host:
	tmio_mmc_host_remove(_host);
	return ret;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_probe);

void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	struct platform_device *pdev = host->pdev;
	struct mmc_host *mmc = host->mmc;

	if (host->pdata->flags & TMIO_MMC_SDIO_IRQ)
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);

	if (!host->native_hotplug)
		pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(mmc);
	cancel_work_sync(&host->done);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	tmio_mmc_clk_disable(host);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);

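/* Runtime PM hooks used by the platform glue drivers */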
#ifdef CONFIG_PM
int tmio_mmc_host_runtime_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

	if (host->clk_cache)
		tmio_mmc_clk_stop(host);

	tmio_mmc_clk_disable(host);

	return 0;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_suspend);

static bool tmio_mmc_can_retune(struct tmio_mmc_host *host)
{
	return host->tap_num && mmc_can_retune(host->mmc);
}

int tmio_mmc_host_runtime_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	tmio_mmc_reset(host);
	tmio_mmc_clk_enable(host);

	if (host->clk_cache)
		tmio_mmc_set_clock(host, host->clk_cache);

	tmio_mmc_enable_dma(host, true);

	if (tmio_mmc_can_retune(host) && host->select_tuning(host))
		dev_warn(&host->pdev->dev, "Tuning selection failed\n");

	return 0;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_resume);
#endif

MODULE_LICENSE("GPL v2");