1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/module.h>
24#include <linux/ioport.h>
25#include <linux/platform_device.h>
26#include <linux/clk.h>
27#include <linux/err.h>
28#include <linux/cpufreq.h>
29#include <linux/mmc/host.h>
30#include <linux/io.h>
31#include <linux/irq.h>
32#include <linux/delay.h>
33#include <linux/dmaengine.h>
34#include <linux/dma-mapping.h>
35#include <linux/edma.h>
36#include <linux/mmc/mmc.h>
37#include <linux/of.h>
38#include <linux/of_device.h>
39
40#include <linux/platform_data/edma.h>
41#include <linux/platform_data/mmc-davinci.h>
42
43
44
45
46#define DAVINCI_MMCCTL 0x00
47#define DAVINCI_MMCCLK 0x04
48#define DAVINCI_MMCST0 0x08
49#define DAVINCI_MMCST1 0x0C
50#define DAVINCI_MMCIM 0x10
51#define DAVINCI_MMCTOR 0x14
52#define DAVINCI_MMCTOD 0x18
53#define DAVINCI_MMCBLEN 0x1C
54#define DAVINCI_MMCNBLK 0x20
55#define DAVINCI_MMCNBLC 0x24
56#define DAVINCI_MMCDRR 0x28
57#define DAVINCI_MMCDXR 0x2C
58#define DAVINCI_MMCCMD 0x30
59#define DAVINCI_MMCARGHL 0x34
60#define DAVINCI_MMCRSP01 0x38
61#define DAVINCI_MMCRSP23 0x3C
62#define DAVINCI_MMCRSP45 0x40
63#define DAVINCI_MMCRSP67 0x44
64#define DAVINCI_MMCDRSP 0x48
65#define DAVINCI_MMCETOK 0x4C
66#define DAVINCI_MMCCIDX 0x50
67#define DAVINCI_MMCCKC 0x54
68#define DAVINCI_MMCTORC 0x58
69#define DAVINCI_MMCTODC 0x5C
70#define DAVINCI_MMCBLNC 0x60
71#define DAVINCI_SDIOCTL 0x64
72#define DAVINCI_SDIOST0 0x68
73#define DAVINCI_SDIOIEN 0x6C
74#define DAVINCI_SDIOIST 0x70
75#define DAVINCI_MMCFIFOCTL 0x74
76
77
78#define MMCCTL_DATRST (1 << 0)
79#define MMCCTL_CMDRST (1 << 1)
80#define MMCCTL_WIDTH_8_BIT (1 << 8)
81#define MMCCTL_WIDTH_4_BIT (1 << 2)
82#define MMCCTL_DATEG_DISABLED (0 << 6)
83#define MMCCTL_DATEG_RISING (1 << 6)
84#define MMCCTL_DATEG_FALLING (2 << 6)
85#define MMCCTL_DATEG_BOTH (3 << 6)
86#define MMCCTL_PERMDR_LE (0 << 9)
87#define MMCCTL_PERMDR_BE (1 << 9)
88#define MMCCTL_PERMDX_LE (0 << 10)
89#define MMCCTL_PERMDX_BE (1 << 10)
90
91
92#define MMCCLK_CLKEN (1 << 8)
93#define MMCCLK_CLKRT_MASK (0xFF << 0)
94
95
96#define MMCST0_DATDNE BIT(0)
97#define MMCST0_BSYDNE BIT(1)
98#define MMCST0_RSPDNE BIT(2)
99#define MMCST0_TOUTRD BIT(3)
100#define MMCST0_TOUTRS BIT(4)
101#define MMCST0_CRCWR BIT(5)
102#define MMCST0_CRCRD BIT(6)
103#define MMCST0_CRCRS BIT(7)
104#define MMCST0_DXRDY BIT(9)
105#define MMCST0_DRRDY BIT(10)
106#define MMCST0_DATED BIT(11)
107#define MMCST0_TRNDNE BIT(12)
108
109
110#define MMCST1_BUSY (1 << 0)
111
112
113#define MMCCMD_CMD_MASK (0x3F << 0)
114#define MMCCMD_PPLEN (1 << 7)
115#define MMCCMD_BSYEXP (1 << 8)
116#define MMCCMD_RSPFMT_MASK (3 << 9)
117#define MMCCMD_RSPFMT_NONE (0 << 9)
118#define MMCCMD_RSPFMT_R1456 (1 << 9)
119#define MMCCMD_RSPFMT_R2 (2 << 9)
120#define MMCCMD_RSPFMT_R3 (3 << 9)
121#define MMCCMD_DTRW (1 << 11)
122#define MMCCMD_STRMTP (1 << 12)
123#define MMCCMD_WDATX (1 << 13)
124#define MMCCMD_INITCK (1 << 14)
125#define MMCCMD_DCLR (1 << 15)
126#define MMCCMD_DMATRIG (1 << 16)
127
128
129#define MMCFIFOCTL_FIFORST (1 << 0)
130#define MMCFIFOCTL_FIFODIR_WR (1 << 1)
131#define MMCFIFOCTL_FIFODIR_RD (0 << 1)
132#define MMCFIFOCTL_FIFOLEV (1 << 2)
133#define MMCFIFOCTL_ACCWD_4 (0 << 3)
134#define MMCFIFOCTL_ACCWD_3 (1 << 3)
135#define MMCFIFOCTL_ACCWD_2 (2 << 3)
136#define MMCFIFOCTL_ACCWD_1 (3 << 3)
137
138
139#define SDIOST0_DAT1_HI BIT(0)
140
141
142#define SDIOIEN_IOINTEN BIT(0)
143
144
145#define SDIOIST_IOINT BIT(0)
146
147
148#define MMCSD_INIT_CLOCK 200000
149
150
151
152
153
154
155
156
157
158#define MAX_CCNT ((1 << 16) - 1)
159
160#define MAX_NR_SG 16
161
162static unsigned rw_threshold = 32;
163module_param(rw_threshold, uint, S_IRUGO);
164MODULE_PARM_DESC(rw_threshold,
165 "Read/Write threshold. Default = 32");
166
167static unsigned poll_threshold = 128;
168module_param(poll_threshold, uint, S_IRUGO);
169MODULE_PARM_DESC(poll_threshold,
170 "Polling transaction size threshold. Default = 128");
171
172static unsigned poll_loopcount = 32;
173module_param(poll_loopcount, uint, S_IRUGO);
174MODULE_PARM_DESC(poll_loopcount,
175 "Maximum polling loop count. Default = 32");
176
177static unsigned __initdata use_dma = 1;
178module_param(use_dma, uint, 0);
179MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1");
180
/* Per-controller driver state. */
struct mmc_davinci_host {
	struct mmc_command *cmd;	/* command in flight, NULL when idle */
	struct mmc_data *data;		/* data phase of the current request */
	struct mmc_host *mmc;
	struct clk *clk;
	unsigned int mmc_input_clk;	/* functional clock rate, in Hz */
	void __iomem *base;
	struct resource *mem_res;
	int mmc_irq, sdio_irq;
	unsigned char bus_mode;		/* MMC_BUSMODE_* from the core */

/* data_dir values: direction of the current data phase */
#define DAVINCI_MMC_DATADIR_NONE 0
#define DAVINCI_MMC_DATADIR_READ 1
#define DAVINCI_MMC_DATADIR_WRITE 2
	unsigned char data_dir;
	unsigned char suspended;

	/*
	 * PIO cursor: buffer points into the current scatterlist segment,
	 * buffer_bytes_left is what remains in that segment, bytes_left
	 * is what remains for the whole transfer.
	 */
	u8 *buffer;
	u32 buffer_bytes_left;
	u32 bytes_left;

	u32 rxdma, txdma;		/* EDMA request line numbers */
	struct dma_chan *dma_tx;
	struct dma_chan *dma_rx;
	bool use_dma;			/* DMA channels acquired for this host */
	bool do_dma;			/* current transfer goes through DMA */
	bool sdio_int;			/* SDIO card interrupt armed */
	bool active_request;		/* request in flight (polling flag) */

	/* scatterlist walk state for PIO */
	unsigned int sg_len;
	struct scatterlist *sg;

	/* controller IP revision, one of MMC_CTLR_VERSION_* */
	u8 version;
	/* duration of one card clock cycle, for ns -> cycle conversion */
	unsigned ns_in_one_cycle;
	/* maximum scatterlist segments accepted */
	u8 nr_sg;
#ifdef CONFIG_CPU_FREQ
	struct notifier_block freq_transition;
#endif
};
228
229static irqreturn_t mmc_davinci_irq(int irq, void *dev_id);
230
231
232static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
233{
234 host->buffer_bytes_left = sg_dma_len(host->sg);
235 host->buffer = sg_virt(host->sg);
236 if (host->buffer_bytes_left > host->bytes_left)
237 host->buffer_bytes_left = host->bytes_left;
238}
239
240static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
241 unsigned int n)
242{
243 u8 *p;
244 unsigned int i;
245
246 if (host->buffer_bytes_left == 0) {
247 host->sg = sg_next(host->data->sg);
248 mmc_davinci_sg_to_buf(host);
249 }
250
251 p = host->buffer;
252 if (n > host->buffer_bytes_left)
253 n = host->buffer_bytes_left;
254 host->buffer_bytes_left -= n;
255 host->bytes_left -= n;
256
257
258
259
260
261 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
262 for (i = 0; i < (n >> 2); i++) {
263 writel(*((u32 *)p), host->base + DAVINCI_MMCDXR);
264 p = p + 4;
265 }
266 if (n & 3) {
267 iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3));
268 p = p + (n & 3);
269 }
270 } else {
271 for (i = 0; i < (n >> 2); i++) {
272 *((u32 *)p) = readl(host->base + DAVINCI_MMCDRR);
273 p = p + 4;
274 }
275 if (n & 3) {
276 ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3));
277 p = p + (n & 3);
278 }
279 }
280 host->buffer = p;
281}
282
/*
 * Build the MMCCMD word for @cmd from its response type and data
 * direction, unmask the interrupts the transfer needs, and issue the
 * command.  Small PIO transfers are polled for a bounded number of
 * iterations instead of waiting for the interrupt.
 */
static void mmc_davinci_start_command(struct mmc_davinci_host *host,
		struct mmc_command *cmd)
{
	u32 cmd_reg = 0;
	u32 im_val;

	dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n",
		cmd->opcode, cmd->arg,
		({ char *s;
		switch (mmc_resp_type(cmd)) {
		case MMC_RSP_R1:
			s = ", R1/R5/R6/R7 response";
			break;
		case MMC_RSP_R1B:
			s = ", R1b response";
			break;
		case MMC_RSP_R2:
			s = ", R2 response";
			break;
		case MMC_RSP_R3:
			s = ", R3/R4 response";
			break;
		default:
			s = ", (R? response)";
			break;
		}; s; }));
	host->cmd = cmd;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1B:
		/*
		 * R1b is an R1 response with a busy indication on DAT0;
		 * have the controller wait for busy to deassert.
		 */
		cmd_reg |= MMCCMD_BSYEXP;
		/* fallthrough - R1b uses the R1 response format */
	case MMC_RSP_R1:
		cmd_reg |= MMCCMD_RSPFMT_R1456;
		break;
	case MMC_RSP_R2:
		cmd_reg |= MMCCMD_RSPFMT_R2;
		break;
	case MMC_RSP_R3:
		cmd_reg |= MMCCMD_RSPFMT_R3;
		break;
	default:
		cmd_reg |= MMCCMD_RSPFMT_NONE;
		dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n",
			mmc_resp_type(cmd));
		break;
	}

	/* Set command index */
	cmd_reg |= cmd->opcode;

	/* Trigger DMA from the command when the transfer uses it */
	if (host->do_dma)
		cmd_reg |= MMCCMD_DMATRIG;

	/* Version 2 controllers also need DMATRIG set for reads */
	if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL &&
			host->data_dir == DAVINCI_MMC_DATADIR_READ)
		cmd_reg |= MMCCMD_DMATRIG;

	/* Command involves a data phase */
	if (cmd->data)
		cmd_reg |= MMCCMD_WDATX;

	/*
	 * NOTE(review): MMC_DATA_STREAM is a data->flags bit elsewhere in
	 * this file; testing it on cmd->flags here looks suspicious --
	 * confirm which field the stream-transfer indication lives in.
	 */
	if (cmd->flags & MMC_DATA_STREAM)
		cmd_reg |= MMCCMD_STRMTP;

	/* Data direction: write vs. read */
	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
		cmd_reg |= MMCCMD_DTRW;

	if (host->bus_mode == MMC_BUSMODE_PUSHPULL)
		cmd_reg |= MMCCMD_PPLEN;

	/* Set the command/response timeout */
	writel(0x1FFF, host->base + DAVINCI_MMCTOR);

	/* Compute the interrupt mask now; it is only written once the
	 * command is known to still be pending (see below). */
	im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS;
	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
		im_val |= MMCST0_DATDNE | MMCST0_CRCWR;

		if (!host->do_dma)
			im_val |= MMCST0_DXRDY;
	} else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) {
		im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD;

		if (!host->do_dma)
			im_val |= MMCST0_DRRDY;
	}

	/*
	 * For PIO writes, pre-fill the transmit FIFO so data is ready the
	 * moment the controller starts clocking it out.
	 */
	if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE))
		davinci_fifo_data_trans(host, rw_threshold);

	writel(cmd->arg, host->base + DAVINCI_MMCARGHL);
	writel(cmd_reg, host->base + DAVINCI_MMCCMD);

	host->active_request = true;

	/*
	 * Small PIO transfers usually finish quickly: poll the interrupt
	 * handler directly a bounded number of times to skip IRQ latency.
	 */
	if (!host->do_dma && host->bytes_left <= poll_threshold) {
		u32 count = poll_loopcount;

		while (host->active_request && count--) {
			mmc_davinci_irq(0, host);
			cpu_relax();
		}
	}

	/* Still pending: fall back to interrupt-driven completion */
	if (host->active_request)
		writel(im_val, host->base + DAVINCI_MMCIM);
}
402
403
404
405
406
407static void davinci_abort_dma(struct mmc_davinci_host *host)
408{
409 struct dma_chan *sync_dev;
410
411 if (host->data_dir == DAVINCI_MMC_DATADIR_READ)
412 sync_dev = host->dma_rx;
413 else
414 sync_dev = host->dma_tx;
415
416 dmaengine_terminate_all(sync_dev);
417}
418
419static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
420 struct mmc_data *data)
421{
422 struct dma_chan *chan;
423 struct dma_async_tx_descriptor *desc;
424 int ret = 0;
425
426 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
427 struct dma_slave_config dma_tx_conf = {
428 .direction = DMA_MEM_TO_DEV,
429 .dst_addr = host->mem_res->start + DAVINCI_MMCDXR,
430 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
431 .dst_maxburst =
432 rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
433 };
434 chan = host->dma_tx;
435 dmaengine_slave_config(host->dma_tx, &dma_tx_conf);
436
437 desc = dmaengine_prep_slave_sg(host->dma_tx,
438 data->sg,
439 host->sg_len,
440 DMA_MEM_TO_DEV,
441 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
442 if (!desc) {
443 dev_dbg(mmc_dev(host->mmc),
444 "failed to allocate DMA TX descriptor");
445 ret = -1;
446 goto out;
447 }
448 } else {
449 struct dma_slave_config dma_rx_conf = {
450 .direction = DMA_DEV_TO_MEM,
451 .src_addr = host->mem_res->start + DAVINCI_MMCDRR,
452 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
453 .src_maxburst =
454 rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
455 };
456 chan = host->dma_rx;
457 dmaengine_slave_config(host->dma_rx, &dma_rx_conf);
458
459 desc = dmaengine_prep_slave_sg(host->dma_rx,
460 data->sg,
461 host->sg_len,
462 DMA_DEV_TO_MEM,
463 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
464 if (!desc) {
465 dev_dbg(mmc_dev(host->mmc),
466 "failed to allocate DMA RX descriptor");
467 ret = -1;
468 goto out;
469 }
470 }
471
472 dmaengine_submit(desc);
473 dma_async_issue_pending(chan);
474
475out:
476 return ret;
477}
478
479static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
480 struct mmc_data *data)
481{
482 int i;
483 int mask = rw_threshold - 1;
484 int ret = 0;
485
486 host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
487 ((data->flags & MMC_DATA_WRITE)
488 ? DMA_TO_DEVICE
489 : DMA_FROM_DEVICE));
490
491
492 for (i = 0; i < host->sg_len; i++) {
493 if (sg_dma_len(data->sg + i) & mask) {
494 dma_unmap_sg(mmc_dev(host->mmc),
495 data->sg, data->sg_len,
496 (data->flags & MMC_DATA_WRITE)
497 ? DMA_TO_DEVICE
498 : DMA_FROM_DEVICE);
499 return -1;
500 }
501 }
502
503 host->do_dma = 1;
504 ret = mmc_davinci_send_dma_request(host, data);
505
506 return ret;
507}
508
509static void __init_or_module
510davinci_release_dma_channels(struct mmc_davinci_host *host)
511{
512 if (!host->use_dma)
513 return;
514
515 dma_release_channel(host->dma_tx);
516 dma_release_channel(host->dma_rx);
517}
518
519static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
520{
521 int r;
522 dma_cap_mask_t mask;
523
524 dma_cap_zero(mask);
525 dma_cap_set(DMA_SLAVE, mask);
526
527 host->dma_tx =
528 dma_request_slave_channel_compat(mask, edma_filter_fn,
529 &host->txdma, mmc_dev(host->mmc), "tx");
530 if (!host->dma_tx) {
531 dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n");
532 return -ENODEV;
533 }
534
535 host->dma_rx =
536 dma_request_slave_channel_compat(mask, edma_filter_fn,
537 &host->rxdma, mmc_dev(host->mmc), "rx");
538 if (!host->dma_rx) {
539 dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n");
540 r = -ENODEV;
541 goto free_master_write;
542 }
543
544 return 0;
545
546free_master_write:
547 dma_release_channel(host->dma_tx);
548
549 return r;
550}
551
552
553
/*
 * Latch @req's data parameters into the controller: timeout, block
 * count/size and FIFO direction/level.  Prefers DMA when the transfer
 * length is a multiple of rw_threshold; otherwise sets up the PIO
 * scatterlist cursor.
 */
static void
mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
{
	int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0;
	int timeout;
	struct mmc_data *data = req->data;

	/* Version 2 controllers use the 64-byte FIFO threshold */
	if (host->version == MMC_CTLR_VERSION_2)
		fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0;

	host->data = data;
	if (data == NULL) {
		/* No data phase: clear block registers and bail */
		host->data_dir = DAVINCI_MMC_DATADIR_NONE;
		writel(0, host->base + DAVINCI_MMCBLEN);
		writel(0, host->base + DAVINCI_MMCNBLK);
		return;
	}

	dev_dbg(mmc_dev(host->mmc), "%s %s, %d blocks of %d bytes\n",
		(data->flags & MMC_DATA_STREAM) ? "stream" : "block",
		(data->flags & MMC_DATA_WRITE) ? "write" : "read",
		data->blocks, data->blksz);
	dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n",
		data->timeout_clks, data->timeout_ns);
	/* Convert the requested timeout to cycles, clamped to 16 bits */
	timeout = data->timeout_clks +
		(data->timeout_ns / host->ns_in_one_cycle);
	if (timeout > 0xffff)
		timeout = 0xffff;

	writel(timeout, host->base + DAVINCI_MMCTOD);
	writel(data->blocks, host->base + DAVINCI_MMCNBLK);
	writel(data->blksz, host->base + DAVINCI_MMCBLEN);

	/* Configure the FIFO: pulse reset, then set direction and level */
	switch (data->flags & MMC_DATA_WRITE) {
	case MMC_DATA_WRITE:
		host->data_dir = DAVINCI_MMC_DATADIR_WRITE;
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST,
			host->base + DAVINCI_MMCFIFOCTL);
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR,
			host->base + DAVINCI_MMCFIFOCTL);
		break;

	default:
		host->data_dir = DAVINCI_MMC_DATADIR_READ;
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST,
			host->base + DAVINCI_MMCFIFOCTL);
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD,
			host->base + DAVINCI_MMCFIFOCTL);
		break;
	}

	host->buffer = NULL;
	host->bytes_left = data->blocks * data->blksz;

	/*
	 * Try DMA only when the total length is a whole number of FIFO
	 * bursts (mmc_davinci_start_dma_transfer additionally checks each
	 * scatterlist segment); anything else goes through PIO.
	 */
	if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0
			&& mmc_davinci_start_dma_transfer(host, data) == 0) {
		/* DMA owns the whole transfer; nothing left for PIO */
		host->bytes_left = 0;
	} else {
		/* Revert to CPU copy through the scatterlist */
		host->sg_len = data->sg_len;
		host->sg = host->data->sg;
		mmc_davinci_sg_to_buf(host);
	}
}
628
629static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req)
630{
631 struct mmc_davinci_host *host = mmc_priv(mmc);
632 unsigned long timeout = jiffies + msecs_to_jiffies(900);
633 u32 mmcst1 = 0;
634
635
636
637
638 while (time_before(jiffies, timeout)) {
639 mmcst1 = readl(host->base + DAVINCI_MMCST1);
640 if (!(mmcst1 & MMCST1_BUSY))
641 break;
642 cpu_relax();
643 }
644 if (mmcst1 & MMCST1_BUSY) {
645 dev_err(mmc_dev(host->mmc), "still BUSY? bad ... \n");
646 req->cmd->error = -ETIMEDOUT;
647 mmc_request_done(mmc, req);
648 return;
649 }
650
651 host->do_dma = 0;
652 mmc_davinci_prepare_data(host, req);
653 mmc_davinci_start_command(host, req->cmd);
654}
655
/*
 * Compute the push-pull clock divider giving the highest card clock not
 * exceeding @mmc_req_freq; the resulting card clock is
 * mmc_pclk / (2 * (divisor + 1)).  Also refreshes host->ns_in_one_cycle,
 * used to convert data timeouts from ns to clock cycles.
 */
static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host,
	unsigned int mmc_req_freq)
{
	unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0;

	mmc_pclk = host->mmc_input_clk;
	if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq))
		mmc_push_pull_divisor = ((unsigned int)mmc_pclk
				/ (2 * mmc_req_freq)) - 1;
	else
		mmc_push_pull_divisor = 0;

	mmc_freq = (unsigned int)mmc_pclk
		/ (2 * (mmc_push_pull_divisor + 1));

	/* Integer truncation may still overshoot the request; back off */
	if (mmc_freq > mmc_req_freq)
		mmc_push_pull_divisor = mmc_push_pull_divisor + 1;

	/* Cycle time in ns: kHz math below 400 kHz for precision, MHz
	 * math above to avoid dividing by zero */
	if (mmc_req_freq <= 400000)
		host->ns_in_one_cycle = (1000000) / (((mmc_pclk
				/ (2 * (mmc_push_pull_divisor + 1)))/1000));
	else
		host->ns_in_one_cycle = (1000000) / (((mmc_pclk
				/ (2 * (mmc_push_pull_divisor + 1)))/1000000));

	return mmc_push_pull_divisor;
}
683
/*
 * Program the MMC clock divider for the current bus mode and requested
 * frequency.  The clock is stopped around divider changes in push-pull
 * mode.
 */
static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios)
{
	unsigned int open_drain_freq = 0, mmc_pclk = 0;
	unsigned int mmc_push_pull_freq = 0;
	struct mmc_davinci_host *host = mmc_priv(mmc);

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		u32 temp;

		/*
		 * NOTE(review): mmc_pclk is still 0 here, so this unsigned
		 * expression underflows to UINT_MAX and is clamped to 0xFF
		 * below -- i.e. open-drain (card init) mode always gets the
		 * slowest divider.  If that is intentional it deserves
		 * saying so; if not, mmc_pclk should be loaded from
		 * host->mmc_input_clk first.  Confirm before changing.
		 */
		open_drain_freq = ((unsigned int)mmc_pclk
				/ (2 * MMCSD_INIT_CLOCK)) - 1;

		if (open_drain_freq > 0xFF)
			open_drain_freq = 0xFF;

		temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
		temp |= open_drain_freq;
		writel(temp, host->base + DAVINCI_MMCCLK);

		/* Cycle time derived from the nominal init clock */
		host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000);
	} else {
		u32 temp;
		mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock);

		if (mmc_push_pull_freq > 0xFF)
			mmc_push_pull_freq = 0xFF;

		/* Gate the clock before touching the divider */
		temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN;
		writel(temp, host->base + DAVINCI_MMCCLK);

		udelay(10);

		temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
		temp |= mmc_push_pull_freq;
		writel(temp, host->base + DAVINCI_MMCCLK);

		/* Re-enable the clock with the new divider in place */
		writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);

		udelay(10);
	}
}
729
/*
 * Apply core-requested bus settings: power (through the board-supplied
 * callback), bus width, clock divider and bus mode.  On MMC_POWER_UP it
 * also issues the initialization clock sequence (MMCCMD_INITCK) and
 * briefly polls for its completion.
 */
static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_davinci_host *host = mmc_priv(mmc);
	struct platform_device *pdev = to_platform_device(mmc->parent);
	struct davinci_mmc_config *config = pdev->dev.platform_data;

	dev_dbg(mmc_dev(host->mmc),
		"clock %dHz busmode %d powermode %d Vdd %04x\n",
		ios->clock, ios->bus_mode, ios->power_mode,
		ios->vdd);

	/* Board-specific power switching, when provided */
	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (config && config->set_power)
			config->set_power(pdev->id, false);
		break;
	case MMC_POWER_UP:
		if (config && config->set_power)
			config->set_power(pdev->id, true);
		break;
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n");
		writel((readl(host->base + DAVINCI_MMCCTL) &
			~MMCCTL_WIDTH_4_BIT) | MMCCTL_WIDTH_8_BIT,
			host->base + DAVINCI_MMCCTL);
		break;
	case MMC_BUS_WIDTH_4:
		dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n");
		/* Only version 2 controllers have the 8-bit width bit */
		if (host->version == MMC_CTLR_VERSION_2)
			writel((readl(host->base + DAVINCI_MMCCTL) &
				~MMCCTL_WIDTH_8_BIT) | MMCCTL_WIDTH_4_BIT,
				host->base + DAVINCI_MMCCTL);
		else
			writel(readl(host->base + DAVINCI_MMCCTL) |
				MMCCTL_WIDTH_4_BIT,
				host->base + DAVINCI_MMCCTL);
		break;
	case MMC_BUS_WIDTH_1:
		dev_dbg(mmc_dev(host->mmc), "Enabling 1 bit mode\n");
		if (host->version == MMC_CTLR_VERSION_2)
			writel(readl(host->base + DAVINCI_MMCCTL) &
				~(MMCCTL_WIDTH_8_BIT | MMCCTL_WIDTH_4_BIT),
				host->base + DAVINCI_MMCCTL);
		else
			writel(readl(host->base + DAVINCI_MMCCTL) &
				~MMCCTL_WIDTH_4_BIT,
				host->base + DAVINCI_MMCCTL);
		break;
	}

	calculate_clk_divider(mmc, ios);

	host->bus_mode = ios->bus_mode;
	if (ios->power_mode == MMC_POWER_UP) {
		unsigned long timeout = jiffies + msecs_to_jiffies(50);
		bool lose = true;

		/* Send init clock cycles, then poll for completion */
		writel(0, host->base + DAVINCI_MMCARGHL);
		writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD);
		while (time_before(jiffies, timeout)) {
			u32 tmp = readl(host->base + DAVINCI_MMCST0);

			if (tmp & MMCST0_RSPDNE) {
				lose = false;
				break;
			}
			cpu_relax();
		}
		if (lose)
			dev_warn(mmc_dev(host->mmc), "powerup timeout\n");
	}

	/* NOTE(review): nothing resets the controller on MMC_POWER_OFF --
	 * confirm whether that is required for this IP. */
}
808
/*
 * Complete the data phase of a request: recheck the SDIO interrupt line,
 * tear down DMA, and either finish the request or issue its stop command.
 */
static void
mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
{
	host->data = NULL;

	if (host->mmc->caps & MMC_CAP_SDIO_IRQ) {
		/*
		 * With the SDIO interrupt armed, re-sample DAT1 after the
		 * transfer and forward a pending card interrupt manually --
		 * presumably because the controller can miss interrupts
		 * raised mid-transfer (hardware quirk; confirm against the
		 * controller errata).
		 */
		if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) &
					SDIOST0_DAT1_HI)) {
			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
			mmc_signal_sdio_irq(host->mmc);
		}
	}

	if (host->do_dma) {
		davinci_abort_dma(host);

		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_WRITE)
			     ? DMA_TO_DEVICE
			     : DMA_FROM_DEVICE);
		host->do_dma = false;
	}
	host->data_dir = DAVINCI_MMC_DATADIR_NONE;

	if (!data->stop || (host->cmd && host->cmd->error)) {
		/* No stop command, or the command failed: request done */
		mmc_request_done(host->mmc, data->mrq);
		writel(0, host->base + DAVINCI_MMCIM);
		host->active_request = false;
	} else
		mmc_davinci_start_command(host, data->stop);
}
845
/*
 * Complete the command phase: copy the response out of the controller's
 * RSP registers and, when there is no data phase (or the command
 * failed), finish the whole request.
 */
static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
				 struct mmc_command *cmd)
{
	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* 136-bit response spans RSP01..RSP67, most
			 * significant word in RSP67 */
			cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01);
			cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23);
			cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45);
			cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
		} else {
			/* short response lives entirely in RSP67 */
			cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
		}
	}

	if (host->data == NULL || cmd->error) {
		/* A timed-out command is not worth retrying */
		if (cmd->error == -ETIMEDOUT)
			cmd->mrq->cmd->retries = 0;
		mmc_request_done(host->mmc, cmd->mrq);
		writel(0, host->base + DAVINCI_MMCIM);
		host->active_request = false;
	}
}
872
873static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host,
874 int val)
875{
876 u32 temp;
877
878 temp = readl(host->base + DAVINCI_MMCCTL);
879 if (val)
880 temp |= MMCCTL_CMDRST | MMCCTL_DATRST;
881 else
882 temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
883
884 writel(temp, host->base + DAVINCI_MMCCTL);
885 udelay(10);
886}
887
/*
 * Recover from a failed data transfer by pulsing the CMD/DAT reset.
 * @data is unused; the parameter is kept for call-site symmetry.
 */
static void
davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
{
	mmc_davinci_reset_ctrl(host, 1);
	mmc_davinci_reset_ctrl(host, 0);
}
894
895static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
896{
897 struct mmc_davinci_host *host = dev_id;
898 unsigned int status;
899
900 status = readl(host->base + DAVINCI_SDIOIST);
901 if (status & SDIOIST_IOINT) {
902 dev_dbg(mmc_dev(host->mmc),
903 "SDIO interrupt status %x\n", status);
904 writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
905 mmc_signal_sdio_irq(host->mmc);
906 }
907 return IRQ_HANDLED;
908}
909
910static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
911{
912 struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
913 unsigned int status, qstatus;
914 int end_command = 0;
915 int end_transfer = 0;
916 struct mmc_data *data = host->data;
917
918 if (host->cmd == NULL && host->data == NULL) {
919 status = readl(host->base + DAVINCI_MMCST0);
920 dev_dbg(mmc_dev(host->mmc),
921 "Spurious interrupt 0x%04x\n", status);
922
923 writel(0, host->base + DAVINCI_MMCIM);
924 return IRQ_NONE;
925 }
926
927 status = readl(host->base + DAVINCI_MMCST0);
928 qstatus = status;
929
930
931
932
933
934
935
936
937 if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) {
938 unsigned long im_val;
939
940
941
942
943
944
945
946
947 im_val = readl(host->base + DAVINCI_MMCIM);
948 writel(0, host->base + DAVINCI_MMCIM);
949
950 do {
951 davinci_fifo_data_trans(host, rw_threshold);
952 status = readl(host->base + DAVINCI_MMCST0);
953 qstatus |= status;
954 } while (host->bytes_left &&
955 (status & (MMCST0_DXRDY | MMCST0_DRRDY)));
956
957
958
959
960
961
962
963 writel(im_val, host->base + DAVINCI_MMCIM);
964 }
965
966 if (qstatus & MMCST0_DATDNE) {
967
968 if (data != NULL) {
969 if ((host->do_dma == 0) && (host->bytes_left > 0)) {
970
971
972
973 davinci_fifo_data_trans(host, host->bytes_left);
974 }
975 end_transfer = 1;
976 data->bytes_xfered = data->blocks * data->blksz;
977 } else {
978 dev_err(mmc_dev(host->mmc),
979 "DATDNE with no host->data\n");
980 }
981 }
982
983 if (qstatus & MMCST0_TOUTRD) {
984
985 data->error = -ETIMEDOUT;
986 end_transfer = 1;
987
988 dev_dbg(mmc_dev(host->mmc),
989 "read data timeout, status %x\n",
990 qstatus);
991
992 davinci_abort_data(host, data);
993 }
994
995 if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) {
996
997 data->error = -EILSEQ;
998 end_transfer = 1;
999
1000
1001
1002
1003
1004
1005
1006 if (qstatus & MMCST0_CRCWR) {
1007 u32 temp = readb(host->base + DAVINCI_MMCDRSP);
1008
1009 if (temp == 0x9f)
1010 data->error = -ETIMEDOUT;
1011 }
1012 dev_dbg(mmc_dev(host->mmc), "data %s %s error\n",
1013 (qstatus & MMCST0_CRCWR) ? "write" : "read",
1014 (data->error == -ETIMEDOUT) ? "timeout" : "CRC");
1015
1016 davinci_abort_data(host, data);
1017 }
1018
1019 if (qstatus & MMCST0_TOUTRS) {
1020
1021 if (host->cmd) {
1022 dev_dbg(mmc_dev(host->mmc),
1023 "CMD%d timeout, status %x\n",
1024 host->cmd->opcode, qstatus);
1025 host->cmd->error = -ETIMEDOUT;
1026 if (data) {
1027 end_transfer = 1;
1028 davinci_abort_data(host, data);
1029 } else
1030 end_command = 1;
1031 }
1032 }
1033
1034 if (qstatus & MMCST0_CRCRS) {
1035
1036 dev_dbg(mmc_dev(host->mmc), "Command CRC error\n");
1037 if (host->cmd) {
1038 host->cmd->error = -EILSEQ;
1039 end_command = 1;
1040 }
1041 }
1042
1043 if (qstatus & MMCST0_RSPDNE) {
1044
1045 end_command = (int) host->cmd;
1046 }
1047
1048 if (end_command)
1049 mmc_davinci_cmd_done(host, host->cmd);
1050 if (end_transfer)
1051 mmc_davinci_xfer_done(host, data);
1052 return IRQ_HANDLED;
1053}
1054
1055static int mmc_davinci_get_cd(struct mmc_host *mmc)
1056{
1057 struct platform_device *pdev = to_platform_device(mmc->parent);
1058 struct davinci_mmc_config *config = pdev->dev.platform_data;
1059
1060 if (!config || !config->get_cd)
1061 return -ENOSYS;
1062 return config->get_cd(pdev->id);
1063}
1064
1065static int mmc_davinci_get_ro(struct mmc_host *mmc)
1066{
1067 struct platform_device *pdev = to_platform_device(mmc->parent);
1068 struct davinci_mmc_config *config = pdev->dev.platform_data;
1069
1070 if (!config || !config->get_ro)
1071 return -ENOSYS;
1072 return config->get_ro(pdev->id);
1073}
1074
/*
 * Enable or disable SDIO card interrupt reporting.  When enabling while
 * DAT1 is already low, the card is already signalling: ack and forward
 * the interrupt immediately instead of arming the hardware.
 */
static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mmc_davinci_host *host = mmc_priv(mmc);

	if (enable) {
		if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) {
			/* interrupt already pending: ack and signal now */
			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
			mmc_signal_sdio_irq(host->mmc);
		} else {
			host->sdio_int = true;
			writel(readl(host->base + DAVINCI_SDIOIEN) |
			       SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN);
		}
	} else {
		host->sdio_int = false;
		writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN,
		       host->base + DAVINCI_SDIOIEN);
	}
}
1094
/* Host controller operations handed to the MMC core. */
static struct mmc_host_ops mmc_davinci_ops = {
	.request = mmc_davinci_request,
	.set_ios = mmc_davinci_set_ios,
	.get_cd = mmc_davinci_get_cd,
	.get_ro = mmc_davinci_get_ro,
	.enable_sdio_irq = mmc_davinci_enable_sdio_irq,
};
1102
1103
1104
1105#ifdef CONFIG_CPU_FREQ
1106static int mmc_davinci_cpufreq_transition(struct notifier_block *nb,
1107 unsigned long val, void *data)
1108{
1109 struct mmc_davinci_host *host;
1110 unsigned int mmc_pclk;
1111 struct mmc_host *mmc;
1112 unsigned long flags;
1113
1114 host = container_of(nb, struct mmc_davinci_host, freq_transition);
1115 mmc = host->mmc;
1116 mmc_pclk = clk_get_rate(host->clk);
1117
1118 if (val == CPUFREQ_POSTCHANGE) {
1119 spin_lock_irqsave(&mmc->lock, flags);
1120 host->mmc_input_clk = mmc_pclk;
1121 calculate_clk_divider(mmc, &mmc->ios);
1122 spin_unlock_irqrestore(&mmc->lock, flags);
1123 }
1124
1125 return 0;
1126}
1127
/* Register for cpufreq transitions so the divider tracks the input clock. */
static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
{
	host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition;

	return cpufreq_register_notifier(&host->freq_transition,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
1135
/* Undo mmc_davinci_cpufreq_register(). */
static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
{
	cpufreq_unregister_notifier(&host->freq_transition,
				    CPUFREQ_TRANSITION_NOTIFIER);
}
1141#else
/* CONFIG_CPU_FREQ disabled: nothing to register. */
static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
{
	return 0;
}
1146
/* CONFIG_CPU_FREQ disabled: nothing to deregister. */
static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
{
}
1150#endif
/*
 * Bring the controller to a known state: hold CMD/DAT in reset while
 * the clock and the maximum timeout registers are programmed, then
 * release the reset.
 */
static void __init init_mmcsd_host(struct mmc_davinci_host *host)
{

	mmc_davinci_reset_ctrl(host, 1);

	writel(0, host->base + DAVINCI_MMCCLK);
	writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);

	/* widest possible command and data timeouts */
	writel(0x1FFF, host->base + DAVINCI_MMCTOR);
	writel(0xFFFF, host->base + DAVINCI_MMCTOD);

	mmc_davinci_reset_ctrl(host, 0);
}
1164
/*
 * Platform-bus ID table.  Entry order must track the MMC_CTLR_VERSION_*
 * values: davinci_mmc_dt_ids below indexes this array by version.
 */
static struct platform_device_id davinci_mmc_devtype[] = {
	{
		.name = "dm6441-mmc",
		.driver_data = MMC_CTLR_VERSION_1,
	}, {
		.name = "da830-mmc",
		.driver_data = MMC_CTLR_VERSION_2,
	},
	{},
};
1175MODULE_DEVICE_TABLE(platform, davinci_mmc_devtype);
1176
/*
 * Device-tree match table.  .data points at the davinci_mmc_devtype
 * entry for the matching controller version (relies on that array being
 * indexed by MMC_CTLR_VERSION_*).
 */
static const struct of_device_id davinci_mmc_dt_ids[] = {
	{
		.compatible = "ti,dm6441-mmc",
		.data = &davinci_mmc_devtype[MMC_CTLR_VERSION_1],
	},
	{
		.compatible = "ti,da830-mmc",
		.data = &davinci_mmc_devtype[MMC_CTLR_VERSION_2],
	},
	{},
};
1188MODULE_DEVICE_TABLE(of, davinci_mmc_dt_ids);
1189
1190static struct davinci_mmc_config
1191 *mmc_parse_pdata(struct platform_device *pdev)
1192{
1193 struct device_node *np;
1194 struct davinci_mmc_config *pdata = pdev->dev.platform_data;
1195 const struct of_device_id *match =
1196 of_match_device(of_match_ptr(davinci_mmc_dt_ids), &pdev->dev);
1197 u32 data;
1198
1199 np = pdev->dev.of_node;
1200 if (!np)
1201 return pdata;
1202
1203 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1204 if (!pdata) {
1205 dev_err(&pdev->dev, "Failed to allocate memory for struct davinci_mmc_config\n");
1206 goto nodata;
1207 }
1208
1209 if (match)
1210 pdev->id_entry = match->data;
1211
1212 if (of_property_read_u32(np, "max-frequency", &pdata->max_freq))
1213 dev_info(&pdev->dev, "'max-frequency' property not specified, defaulting to 25MHz\n");
1214
1215 of_property_read_u32(np, "bus-width", &data);
1216 switch (data) {
1217 case 1:
1218 case 4:
1219 case 8:
1220 pdata->wires = data;
1221 break;
1222 default:
1223 pdata->wires = 1;
1224 dev_info(&pdev->dev, "Unsupported buswidth, defaulting to 1 bit\n");
1225 }
1226nodata:
1227 return pdata;
1228}
1229
1230static int __init davinci_mmcsd_probe(struct platform_device *pdev)
1231{
1232 struct davinci_mmc_config *pdata = NULL;
1233 struct mmc_davinci_host *host = NULL;
1234 struct mmc_host *mmc = NULL;
1235 struct resource *r, *mem = NULL;
1236 int ret = 0, irq = 0;
1237 size_t mem_size;
1238 const struct platform_device_id *id_entry;
1239
1240 pdata = mmc_parse_pdata(pdev);
1241 if (pdata == NULL) {
1242 dev_err(&pdev->dev, "Couldn't get platform data\n");
1243 return -ENOENT;
1244 }
1245
1246 ret = -ENODEV;
1247 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1248 irq = platform_get_irq(pdev, 0);
1249 if (!r || irq == NO_IRQ)
1250 goto out;
1251
1252 ret = -EBUSY;
1253 mem_size = resource_size(r);
1254 mem = request_mem_region(r->start, mem_size, pdev->name);
1255 if (!mem)
1256 goto out;
1257
1258 ret = -ENOMEM;
1259 mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev);
1260 if (!mmc)
1261 goto out;
1262
1263 host = mmc_priv(mmc);
1264 host->mmc = mmc;
1265
1266 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1267 if (!r)
1268 dev_warn(&pdev->dev, "RX DMA resource not specified\n");
1269 else
1270 host->rxdma = r->start;
1271
1272 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1273 if (!r)
1274 dev_warn(&pdev->dev, "TX DMA resource not specified\n");
1275 else
1276 host->txdma = r->start;
1277
1278 host->mem_res = mem;
1279 host->base = ioremap(mem->start, mem_size);
1280 if (!host->base)
1281 goto out;
1282
1283 ret = -ENXIO;
1284 host->clk = clk_get(&pdev->dev, "MMCSDCLK");
1285 if (IS_ERR(host->clk)) {
1286 ret = PTR_ERR(host->clk);
1287 goto out;
1288 }
1289 clk_enable(host->clk);
1290 host->mmc_input_clk = clk_get_rate(host->clk);
1291
1292 init_mmcsd_host(host);
1293
1294 if (pdata->nr_sg)
1295 host->nr_sg = pdata->nr_sg - 1;
1296
1297 if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
1298 host->nr_sg = MAX_NR_SG;
1299
1300 host->use_dma = use_dma;
1301 host->mmc_irq = irq;
1302 host->sdio_irq = platform_get_irq(pdev, 1);
1303
1304 if (host->use_dma && davinci_acquire_dma_channels(host) != 0)
1305 host->use_dma = 0;
1306
1307
1308 mmc->caps |= MMC_CAP_NEEDS_POLL;
1309 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
1310
1311 if (pdata && (pdata->wires == 4 || pdata->wires == 0))
1312 mmc->caps |= MMC_CAP_4_BIT_DATA;
1313
1314 if (pdata && (pdata->wires == 8))
1315 mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA);
1316
1317 id_entry = platform_get_device_id(pdev);
1318 if (id_entry)
1319 host->version = id_entry->driver_data;
1320
1321 mmc->ops = &mmc_davinci_ops;
1322 mmc->f_min = 312500;
1323 mmc->f_max = 25000000;
1324 if (pdata && pdata->max_freq)
1325 mmc->f_max = pdata->max_freq;
1326 if (pdata && pdata->caps)
1327 mmc->caps |= pdata->caps;
1328 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1329
1330
1331
1332
1333
1334 mmc->max_segs = MAX_NR_SG;
1335
1336
1337 mmc->max_seg_size = MAX_CCNT * rw_threshold;
1338
1339
1340 mmc->max_blk_size = 4095;
1341 mmc->max_blk_count = 65535;
1342 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1343
1344 dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs);
1345 dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size);
1346 dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size);
1347 dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size);
1348
1349 platform_set_drvdata(pdev, host);
1350
1351 ret = mmc_davinci_cpufreq_register(host);
1352 if (ret) {
1353 dev_err(&pdev->dev, "failed to register cpufreq\n");
1354 goto cpu_freq_fail;
1355 }
1356
1357 ret = mmc_add_host(mmc);
1358 if (ret < 0)
1359 goto out;
1360
1361 ret = request_irq(irq, mmc_davinci_irq, 0, mmc_hostname(mmc), host);
1362 if (ret)
1363 goto out;
1364
1365 if (host->sdio_irq >= 0) {
1366 ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0,
1367 mmc_hostname(mmc), host);
1368 if (!ret)
1369 mmc->caps |= MMC_CAP_SDIO_IRQ;
1370 }
1371
1372 rename_region(mem, mmc_hostname(mmc));
1373
1374 dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
1375 host->use_dma ? "DMA" : "PIO",
1376 (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);
1377
1378 return 0;
1379
1380out:
1381 mmc_davinci_cpufreq_deregister(host);
1382cpu_freq_fail:
1383 if (host) {
1384 davinci_release_dma_channels(host);
1385
1386 if (host->clk) {
1387 clk_disable(host->clk);
1388 clk_put(host->clk);
1389 }
1390
1391 if (host->base)
1392 iounmap(host->base);
1393 }
1394
1395 if (mmc)
1396 mmc_free_host(mmc);
1397
1398 if (mem)
1399 release_resource(mem);
1400
1401 dev_dbg(&pdev->dev, "probe err %d\n", ret);
1402
1403 return ret;
1404}
1405
1406static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
1407{
1408 struct mmc_davinci_host *host = platform_get_drvdata(pdev);
1409
1410 if (host) {
1411 mmc_davinci_cpufreq_deregister(host);
1412
1413 mmc_remove_host(host->mmc);
1414 free_irq(host->mmc_irq, host);
1415 if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
1416 free_irq(host->sdio_irq, host);
1417
1418 davinci_release_dma_channels(host);
1419
1420 clk_disable(host->clk);
1421 clk_put(host->clk);
1422
1423 iounmap(host->base);
1424
1425 release_resource(host->mem_res);
1426
1427 mmc_free_host(host->mmc);
1428 }
1429
1430 return 0;
1431}
1432
1433#ifdef CONFIG_PM
1434static int davinci_mmcsd_suspend(struct device *dev)
1435{
1436 struct platform_device *pdev = to_platform_device(dev);
1437 struct mmc_davinci_host *host = platform_get_drvdata(pdev);
1438 int ret;
1439
1440 ret = mmc_suspend_host(host->mmc);
1441 if (!ret) {
1442 writel(0, host->base + DAVINCI_MMCIM);
1443 mmc_davinci_reset_ctrl(host, 1);
1444 clk_disable(host->clk);
1445 host->suspended = 1;
1446 } else {
1447 host->suspended = 0;
1448 }
1449
1450 return ret;
1451}
1452
1453static int davinci_mmcsd_resume(struct device *dev)
1454{
1455 struct platform_device *pdev = to_platform_device(dev);
1456 struct mmc_davinci_host *host = platform_get_drvdata(pdev);
1457 int ret;
1458
1459 if (!host->suspended)
1460 return 0;
1461
1462 clk_enable(host->clk);
1463
1464 mmc_davinci_reset_ctrl(host, 0);
1465 ret = mmc_resume_host(host->mmc);
1466 if (!ret)
1467 host->suspended = 0;
1468
1469 return ret;
1470}
1471
/* System sleep callbacks; only built when CONFIG_PM is enabled. */
static const struct dev_pm_ops davinci_mmcsd_pm = {
	.suspend        = davinci_mmcsd_suspend,
	.resume         = davinci_mmcsd_resume,
};
1476
1477#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm)
1478#else
1479#define davinci_mmcsd_pm_ops NULL
1480#endif
1481
/*
 * Platform driver glue.  No .probe member is set here: the probe routine
 * is __init and is bound via module_platform_driver_probe() below.
 * .remove is wrapped in __exit_p() so it drops out of built-in kernels.
 */
static struct platform_driver davinci_mmcsd_driver = {
	.driver		= {
		.name	= "davinci_mmc",
		.owner	= THIS_MODULE,
		.pm	= davinci_mmcsd_pm_ops,
		.of_match_table = of_match_ptr(davinci_mmc_dt_ids),
	},
	.remove		= __exit_p(davinci_mmcsd_remove),
	.id_table	= davinci_mmc_devtype,
};
1492
/* One-shot registration helper: probe is __init and cannot be re-bound. */
module_platform_driver_probe(davinci_mmcsd_driver, davinci_mmcsd_probe);

MODULE_AUTHOR("Texas Instruments India");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller");
MODULE_ALIAS("platform:davinci_mmc");
1499
1500