/*
 * Atmel MultiMedia Card Interface driver
 *
 * Copyright (C) 2004-2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/types.h>
#include <linux/platform_data/atmel.h>

#include <linux/mmc/host.h>
#include <linux/mmc/sdio.h>

#include <mach/atmel-mci.h>
#include <linux/atmel-mci.h>
#include <linux/atmel_pdc.h>

#include <asm/io.h>
#include <asm/unaligned.h>

#include <mach/cpu.h>

#include "atmel-mci-regs.h"
46
#define ATMCI_DATA_ERROR_FLAGS	(ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE)
#define ATMCI_DMA_THRESHOLD	16

enum {
	EVENT_CMD_RDY = 0,
	EVENT_XFER_COMPLETE,
	EVENT_NOTBUSY,
	EVENT_DATA_ERROR,
};

enum atmel_mci_state {
	STATE_IDLE = 0,
	STATE_SENDING_CMD,
	STATE_DATA_XFER,
	STATE_WAITING_NOTBUSY,
	STATE_SENDING_STOP,
	STATE_END_REQUEST,
};

enum atmci_xfer_dir {
	XFER_RECEIVE = 0,
	XFER_TRANSMIT,
};

enum atmci_pdc_buf {
	PDC_FIRST_BUF = 0,
	PDC_SECOND_BUF,
};

struct atmel_mci_caps {
	bool	has_dma_conf_reg;
	bool	has_pdc;
	bool	has_cfg_reg;
	bool	has_cstor_reg;
	bool	has_highspeed;
	bool	has_rwproof;
	bool	has_odd_clk_div;
	bool	has_bad_data_ordering;
	bool	need_reset_after_xfer;
	bool	need_blksz_mul_4;
	bool	need_notbusy_for_read_ops;
};

struct atmel_mci_dma {
	struct dma_chan			*chan;
	struct dma_async_tx_descriptor	*data_desc;
};
94
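/*
 * struct atmel_mci - MMC controller state shared between all slots
 * @lock: Spinlock protecting the queue and associated data.
 * @regs: Pointer to MMIO registers.
 * @sg: Scatterlist entry currently being processed by PIO or PDC code.
 * @sg_len: Number of scatterlist entries left to process.
 * @pio_offset: Offset into the current scatterlist entry of the next
 *	byte to read or write through the data register.
 * @buffer: Bounce buffer used when the controller lacks the
 *	read/write-proof capability.
 * @buf_size: Size of the bounce buffer.
 * @buf_phys_addr: DMA address of the bounce buffer.
 * @cur_slot: The slot which is currently using the controller.
 * @mrq: The request currently being processed on @cur_slot,
 *	or NULL if the controller is idle.
 * @cmd: The command currently being sent to the card, or NULL.
 * @data: The data currently being transferred, or NULL.
 * @data_size: Number of bytes left to transfer (PDC mode).
 * @dma: DMA client state.
 * @data_chan: DMA channel being used for the current data transfer.
 * @dma_conf: Configuration for the DMA slave channel.
 * @cmd_status: Snapshot of SR taken upon completion of the current
 *	command.
 * @data_status: Snapshot of SR taken upon completion (or error) of
 *	the current data transfer.
 * @stop_cmdr: Value to be loaded into CMDR when the stop command is
 *	to be sent.
 * @tasklet: Tasklet running the request state machine.
 * @pending_events: Bitmask of events flagged by the interrupt handler
 *	to be processed by the tasklet.
 * @completed_events: Bitmask of events which the state machine has
 *	processed.
 * @state: Tasklet state.
 * @queue: List of slots waiting for access to the controller.
 * @need_clock_update: Update the clock rate before the next request.
 * @need_reset: Reset the controller before the next request.
 * @timer: Software timeout timer guarding against stuck requests.
 * @mode_reg: Cached value of the MR register.
 * @cfg_reg: Cached value of the CFG register.
 * @bus_hz: The rate of @mck in Hz, used for bus clock and timeout
 *	calculations.
 * @mapbase: Physical address of the MMIO registers.
 * @mck: The peripheral bus clock hooked up to the MMC controller.
 * @pdev: Platform device associated with the MMC controller.
 * @slot: Slots sharing this MMC controller.
 * @caps: MCI capabilities depending on the MCI version.
 * @prepare_data: Version-dependent hook to set up a data transfer.
 * @submit_data: Version-dependent hook to start a data transfer.
 * @stop_transfer: Version-dependent hook to stop a data transfer.
 *
 * @lock is a softirq-safe spinlock protecting @queue as well as
 * @cur_slot, @mrq and @state. These must always be updated
 * at the same time while holding @lock.
 */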
struct atmel_mci {
	spinlock_t		lock;
	void __iomem		*regs;

	struct scatterlist	*sg;
	unsigned int		sg_len;
	unsigned int		pio_offset;
	unsigned int		*buffer;
	unsigned int		buf_size;
	dma_addr_t		buf_phys_addr;

	struct atmel_mci_slot	*cur_slot;
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;
	unsigned int		data_size;

	struct atmel_mci_dma	dma;
	struct dma_chan		*data_chan;
	struct dma_slave_config	dma_conf;

	u32			cmd_status;
	u32			data_status;
	u32			stop_cmdr;

	struct tasklet_struct	tasklet;
	unsigned long		pending_events;
	unsigned long		completed_events;
	enum atmel_mci_state	state;
	struct list_head	queue;

	bool			need_clock_update;
	bool			need_reset;
	struct timer_list	timer;
	u32			mode_reg;
	u32			cfg_reg;
	unsigned long		bus_hz;
	unsigned long		mapbase;
	struct clk		*mck;
	struct platform_device	*pdev;

	struct atmel_mci_slot	*slot[ATMCI_MAX_NR_SLOTS];

	struct atmel_mci_caps	caps;

	u32 (*prepare_data)(struct atmel_mci *host, struct mmc_data *data);
	void (*submit_data)(struct atmel_mci *host, struct mmc_data *data);
	void (*stop_transfer)(struct atmel_mci *host);
};
225
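/*
 * struct atmel_mci_slot - MMC slot state
 * @mmc: The mmc_host representing this slot.
 * @host: The MMC controller this slot is using.
 * @sdc_reg: Value of SDCR to be written before using this slot.
 * @sdio_irq: SDIO irq mask for this slot.
 * @mrq: mmc_request currently being processed or waiting to be
 *	processed, or NULL when the slot is idle.
 * @queue_node: List node for placing this node in the @queue list of
 *	&struct atmel_mci.
 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
 * @flags: Random state bits associated with the slot.
 * @detect_pin: GPIO pin used for card detection, or negative if not
 *	available.
 * @wp_pin: GPIO pin used for card write protect sensing, or negative
 *	if not available.
 * @detect_is_active_high: The state of the detect pin when it is active.
 * @detect_timer: Timer used for debouncing @detect_pin interrupts.
 */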
struct atmel_mci_slot {
	struct mmc_host		*mmc;
	struct atmel_mci	*host;

	u32			sdc_reg;
	u32			sdio_irq;

	struct mmc_request	*mrq;
	struct list_head	queue_node;

	unsigned int		clock;
	unsigned long		flags;
#define ATMCI_CARD_PRESENT	0
#define ATMCI_CARD_NEED_INIT	1
#define ATMCI_SHUTDOWN		2
#define ATMCI_SUSPENDED		3

	int			detect_pin;
	int			wp_pin;
	bool			detect_is_active_high;

	struct timer_list	detect_timer;
};

#define atmci_test_and_clear_pending(host, event) \
	test_and_clear_bit(event, &host->pending_events)
#define atmci_set_completed(host, event) \
	set_bit(event, &host->completed_events)
#define atmci_set_pending(host, event) \
	set_bit(event, &host->pending_events)
275
276
277
278
279
280static int atmci_req_show(struct seq_file *s, void *v)
281{
282 struct atmel_mci_slot *slot = s->private;
283 struct mmc_request *mrq;
284 struct mmc_command *cmd;
285 struct mmc_command *stop;
286 struct mmc_data *data;
287
	/* Make sure we get a consistent snapshot */
289 spin_lock_bh(&slot->host->lock);
290 mrq = slot->mrq;
291
292 if (mrq) {
293 cmd = mrq->cmd;
294 data = mrq->data;
295 stop = mrq->stop;
296
297 if (cmd)
298 seq_printf(s,
299 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
300 cmd->opcode, cmd->arg, cmd->flags,
301 cmd->resp[0], cmd->resp[1], cmd->resp[2],
302 cmd->resp[3], cmd->error);
303 if (data)
304 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
305 data->bytes_xfered, data->blocks,
306 data->blksz, data->flags, data->error);
307 if (stop)
308 seq_printf(s,
309 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
310 stop->opcode, stop->arg, stop->flags,
311 stop->resp[0], stop->resp[1], stop->resp[2],
312 stop->resp[3], stop->error);
313 }
314
315 spin_unlock_bh(&slot->host->lock);
316
317 return 0;
318}
319
320static int atmci_req_open(struct inode *inode, struct file *file)
321{
322 return single_open(file, atmci_req_show, inode->i_private);
323}
324
325static const struct file_operations atmci_req_fops = {
326 .owner = THIS_MODULE,
327 .open = atmci_req_open,
328 .read = seq_read,
329 .llseek = seq_lseek,
330 .release = single_release,
331};
332
333static void atmci_show_status_reg(struct seq_file *s,
334 const char *regname, u32 value)
335{
336 static const char *sr_bit[] = {
337 [0] = "CMDRDY",
338 [1] = "RXRDY",
339 [2] = "TXRDY",
340 [3] = "BLKE",
341 [4] = "DTIP",
342 [5] = "NOTBUSY",
343 [6] = "ENDRX",
344 [7] = "ENDTX",
345 [8] = "SDIOIRQA",
346 [9] = "SDIOIRQB",
347 [12] = "SDIOWAIT",
348 [14] = "RXBUFF",
349 [15] = "TXBUFE",
350 [16] = "RINDE",
351 [17] = "RDIRE",
352 [18] = "RCRCE",
353 [19] = "RENDE",
354 [20] = "RTOE",
355 [21] = "DCRCE",
356 [22] = "DTOE",
357 [23] = "CSTOE",
358 [24] = "BLKOVRE",
359 [25] = "DMADONE",
360 [26] = "FIFOEMPTY",
361 [27] = "XFRDONE",
362 [30] = "OVRE",
363 [31] = "UNRE",
364 };
365 unsigned int i;
366
367 seq_printf(s, "%s:\t0x%08x", regname, value);
368 for (i = 0; i < ARRAY_SIZE(sr_bit); i++) {
369 if (value & (1 << i)) {
370 if (sr_bit[i])
371 seq_printf(s, " %s", sr_bit[i]);
372 else
373 seq_puts(s, " UNKNOWN");
374 }
375 }
376 seq_putc(s, '\n');
377}
378
379static int atmci_regs_show(struct seq_file *s, void *v)
380{
381 struct atmel_mci *host = s->private;
382 u32 *buf;
383
384 buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL);
385 if (!buf)
386 return -ENOMEM;
387
	/*
	 * Grab a more or less consistent snapshot. Note that we're
	 * not disabling interrupts, so IMR and SR may not be
	 * consistent with each other.
	 */
393 spin_lock_bh(&host->lock);
394 clk_enable(host->mck);
395 memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
396 clk_disable(host->mck);
397 spin_unlock_bh(&host->lock);
398
399 seq_printf(s, "MR:\t0x%08x%s%s ",
400 buf[ATMCI_MR / 4],
401 buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "",
402 buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : "");
403 if (host->caps.has_odd_clk_div)
404 seq_printf(s, "{CLKDIV,CLKODD}=%u\n",
405 ((buf[ATMCI_MR / 4] & 0xff) << 1)
406 | ((buf[ATMCI_MR / 4] >> 16) & 1));
407 else
408 seq_printf(s, "CLKDIV=%u\n",
409 (buf[ATMCI_MR / 4] & 0xff));
410 seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]);
411 seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]);
412 seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]);
413 seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n",
414 buf[ATMCI_BLKR / 4],
415 buf[ATMCI_BLKR / 4] & 0xffff,
416 (buf[ATMCI_BLKR / 4] >> 16) & 0xffff);
417 if (host->caps.has_cstor_reg)
418 seq_printf(s, "CSTOR:\t0x%08x\n", buf[ATMCI_CSTOR / 4]);
419
420
421
422 atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]);
423 atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]);
424
425 if (host->caps.has_dma_conf_reg) {
426 u32 val;
427
428 val = buf[ATMCI_DMA / 4];
429 seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n",
430 val, val & 3,
431 ((val >> 4) & 3) ?
432 1 << (((val >> 4) & 3) + 1) : 1,
433 val & ATMCI_DMAEN ? " DMAEN" : "");
434 }
435 if (host->caps.has_cfg_reg) {
436 u32 val;
437
438 val = buf[ATMCI_CFG / 4];
439 seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",
440 val,
441 val & ATMCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "",
442 val & ATMCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "",
443 val & ATMCI_CFG_HSMODE ? " HSMODE" : "",
444 val & ATMCI_CFG_LSYNC ? " LSYNC" : "");
445 }
446
447 kfree(buf);
448
449 return 0;
450}
451
452static int atmci_regs_open(struct inode *inode, struct file *file)
453{
454 return single_open(file, atmci_regs_show, inode->i_private);
455}
456
457static const struct file_operations atmci_regs_fops = {
458 .owner = THIS_MODULE,
459 .open = atmci_regs_open,
460 .read = seq_read,
461 .llseek = seq_lseek,
462 .release = single_release,
463};
464
465static void atmci_init_debugfs(struct atmel_mci_slot *slot)
466{
467 struct mmc_host *mmc = slot->mmc;
468 struct atmel_mci *host = slot->host;
469 struct dentry *root;
470 struct dentry *node;
471
472 root = mmc->debugfs_root;
473 if (!root)
474 return;
475
476 node = debugfs_create_file("regs", S_IRUSR, root, host,
477 &atmci_regs_fops);
478 if (IS_ERR(node))
479 return;
480 if (!node)
481 goto err;
482
483 node = debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
484 if (!node)
485 goto err;
486
487 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
488 if (!node)
489 goto err;
490
491 node = debugfs_create_x32("pending_events", S_IRUSR, root,
492 (u32 *)&host->pending_events);
493 if (!node)
494 goto err;
495
496 node = debugfs_create_x32("completed_events", S_IRUSR, root,
497 (u32 *)&host->completed_events);
498 if (!node)
499 goto err;
500
501 return;
502
503err:
504 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
505}
506
507#if defined(CONFIG_OF)
508static const struct of_device_id atmci_dt_ids[] = {
509 { .compatible = "atmel,hsmci" },
510 { }
511};
512
513MODULE_DEVICE_TABLE(of, atmci_dt_ids);
514
515static struct mci_platform_data*
516atmci_of_init(struct platform_device *pdev)
517{
518 struct device_node *np = pdev->dev.of_node;
519 struct device_node *cnp;
520 struct mci_platform_data *pdata;
521 u32 slot_id;
522
523 if (!np) {
524 dev_err(&pdev->dev, "device node not found\n");
525 return ERR_PTR(-EINVAL);
526 }
527
528 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
529 if (!pdata) {
530 dev_err(&pdev->dev, "could not allocate memory for pdata\n");
531 return ERR_PTR(-ENOMEM);
532 }
533
534 for_each_child_of_node(np, cnp) {
535 if (of_property_read_u32(cnp, "reg", &slot_id)) {
536 dev_warn(&pdev->dev, "reg property is missing for %s\n",
537 cnp->full_name);
538 continue;
539 }
540
541 if (slot_id >= ATMCI_MAX_NR_SLOTS) {
542 dev_warn(&pdev->dev, "can't have more than %d slots\n",
543 ATMCI_MAX_NR_SLOTS);
544 break;
545 }
546
547 if (of_property_read_u32(cnp, "bus-width",
548 &pdata->slot[slot_id].bus_width))
549 pdata->slot[slot_id].bus_width = 1;
550
551 pdata->slot[slot_id].detect_pin =
552 of_get_named_gpio(cnp, "cd-gpios", 0);
553
554 pdata->slot[slot_id].detect_is_active_high =
555 of_property_read_bool(cnp, "cd-inverted");
556
557 pdata->slot[slot_id].wp_pin =
558 of_get_named_gpio(cnp, "wp-gpios", 0);
559 }
560
561 return pdata;
562}
563#else
564static inline struct mci_platform_data*
565atmci_of_init(struct platform_device *dev)
566{
567 return ERR_PTR(-EINVAL);
568}
569#endif
570
571static inline unsigned int atmci_get_version(struct atmel_mci *host)
572{
573 return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
574}
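
/*
 * Helper used by atmci_prepare_data_dma() below to map a dmaengine
 * maxburst value (1, 4, 8, 16) onto the CHKSIZE field encoding
 * (0, 1, 2, 3). Its definition is missing here, so this is a
 * best-guess reconstruction of the usual helper.
 */
static unsigned int atmci_convert_chksize(unsigned int maxburst)
{
	if (maxburst > 1)
		return fls(maxburst) - 2;
	else
		return 0;
}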
575
576static void atmci_timeout_timer(unsigned long data)
577{
578 struct atmel_mci *host;
579
580 host = (struct atmel_mci *)data;
581
582 dev_dbg(&host->pdev->dev, "software timeout\n");
583
584 if (host->mrq->cmd->data) {
585 host->mrq->cmd->data->error = -ETIMEDOUT;
586 host->data = NULL;
587 } else {
588 host->mrq->cmd->error = -ETIMEDOUT;
589 host->cmd = NULL;
590 }
591 host->need_reset = 1;
592 host->state = STATE_END_REQUEST;
593 smp_wmb();
594 tasklet_schedule(&host->tasklet);
595}
596
597static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
598 unsigned int ns)
599{
	/*
	 * Work in microseconds rather than nanoseconds to keep the
	 * intermediate products from overflowing 32 bits; both
	 * divisions round up so the timeout is never shorter than
	 * requested.
	 */
	unsigned int us = DIV_ROUND_UP(ns, 1000);

	/* Maximum card clock frequency is host->bus_hz / 2 */
	return us * (DIV_ROUND_UP(host->bus_hz, 2000000));
608}
609
610static void atmci_set_timeout(struct atmel_mci *host,
611 struct atmel_mci_slot *slot, struct mmc_data *data)
612{
613 static unsigned dtomul_to_shift[] = {
614 0, 4, 7, 8, 10, 12, 16, 20
615 };
616 unsigned timeout;
617 unsigned dtocyc;
618 unsigned dtomul;
619
620 timeout = atmci_ns_to_clocks(host, data->timeout_ns)
621 + data->timeout_clks;
622
623 for (dtomul = 0; dtomul < 8; dtomul++) {
624 unsigned shift = dtomul_to_shift[dtomul];
625 dtocyc = (timeout + (1 << shift) - 1) >> shift;
626 if (dtocyc < 15)
627 break;
628 }
629
630 if (dtomul >= 8) {
631 dtomul = 7;
632 dtocyc = 15;
633 }
634
635 dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n",
636 dtocyc << dtomul_to_shift[dtomul]);
637 atmci_writel(host, ATMCI_DTOR, (ATMCI_DTOMUL(dtomul) | ATMCI_DTOCYC(dtocyc)));
638}
639
640
641
642
643static u32 atmci_prepare_command(struct mmc_host *mmc,
644 struct mmc_command *cmd)
645{
646 struct mmc_data *data;
647 u32 cmdr;
648
649 cmd->error = -EINPROGRESS;
650
651 cmdr = ATMCI_CMDR_CMDNB(cmd->opcode);
652
653 if (cmd->flags & MMC_RSP_PRESENT) {
654 if (cmd->flags & MMC_RSP_136)
655 cmdr |= ATMCI_CMDR_RSPTYP_136BIT;
656 else
657 cmdr |= ATMCI_CMDR_RSPTYP_48BIT;
658 }
659
	/*
	 * This should really be MAXLAT_5 for CMD2 and ACMD41, but
	 * it's too difficult to determine whether this is an ACMD or
	 * a regular MMC command, so we'll err on the side of caution
	 * and set the maximum latency.
	 */
665 cmdr |= ATMCI_CMDR_MAXLAT_64CYC;
666
667 if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN)
668 cmdr |= ATMCI_CMDR_OPDCMD;
669
670 data = cmd->data;
671 if (data) {
672 cmdr |= ATMCI_CMDR_START_XFER;
673
674 if (cmd->opcode == SD_IO_RW_EXTENDED) {
675 cmdr |= ATMCI_CMDR_SDIO_BLOCK;
676 } else {
677 if (data->flags & MMC_DATA_STREAM)
678 cmdr |= ATMCI_CMDR_STREAM;
679 else if (data->blocks > 1)
680 cmdr |= ATMCI_CMDR_MULTI_BLOCK;
681 else
682 cmdr |= ATMCI_CMDR_BLOCK;
683 }
684
685 if (data->flags & MMC_DATA_READ)
686 cmdr |= ATMCI_CMDR_TRDIR_READ;
687 }
688
689 return cmdr;
690}
691
692static void atmci_send_command(struct atmel_mci *host,
693 struct mmc_command *cmd, u32 cmd_flags)
694{
695 WARN_ON(host->cmd);
696 host->cmd = cmd;
697
698 dev_vdbg(&host->pdev->dev,
699 "start command: ARGR=0x%08x CMDR=0x%08x\n",
700 cmd->arg, cmd_flags);
701
702 atmci_writel(host, ATMCI_ARGR, cmd->arg);
703 atmci_writel(host, ATMCI_CMDR, cmd_flags);
704}
705
706static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
707{
708 dev_dbg(&host->pdev->dev, "send stop command\n");
709 atmci_send_command(host, data->stop, host->stop_cmdr);
710 atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
711}
712
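/*
 * Configure the pointer and counter registers for a single PDC buffer
 * (first or second, receive or transmit). Controllers without the
 * read/write proof capability go through the pre-allocated bounce
 * buffer instead of the scatterlist, and a remaining size that is not
 * a multiple of four bytes switches the PDC to byte mode via
 * ATMCI_MR_PDCFBYTE. host->data_size and host->sg are updated as
 * buffers are consumed.
 */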
717static void atmci_pdc_set_single_buf(struct atmel_mci *host,
718 enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
719{
720 u32 pointer_reg, counter_reg;
721 unsigned int buf_size;
722
723 if (dir == XFER_RECEIVE) {
724 pointer_reg = ATMEL_PDC_RPR;
725 counter_reg = ATMEL_PDC_RCR;
726 } else {
727 pointer_reg = ATMEL_PDC_TPR;
728 counter_reg = ATMEL_PDC_TCR;
729 }
730
731 if (buf_nb == PDC_SECOND_BUF) {
732 pointer_reg += ATMEL_PDC_SCND_BUF_OFF;
733 counter_reg += ATMEL_PDC_SCND_BUF_OFF;
734 }
735
736 if (!host->caps.has_rwproof) {
737 buf_size = host->buf_size;
738 atmci_writel(host, pointer_reg, host->buf_phys_addr);
739 } else {
740 buf_size = sg_dma_len(host->sg);
741 atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
742 }
743
744 if (host->data_size <= buf_size) {
745 if (host->data_size & 0x3) {
746
747 atmci_writel(host, counter_reg, host->data_size);
748 atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE);
749 } else {
750
751 atmci_writel(host, counter_reg, host->data_size / 4);
752 }
753 host->data_size = 0;
754 } else {
755
756 atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4);
757 host->data_size -= sg_dma_len(host->sg);
758 if (host->data_size)
759 host->sg = sg_next(host->sg);
760 }
761}
762
763
764
765
766
767
768static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir)
769{
770 atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF);
771 if (host->data_size)
772 atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF);
773}
774
775
776
777
778static void atmci_pdc_cleanup(struct atmel_mci *host)
779{
780 struct mmc_data *data = host->data;
781
782 if (data)
783 dma_unmap_sg(&host->pdev->dev,
784 data->sg, data->sg_len,
785 ((data->flags & MMC_DATA_WRITE)
786 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
787}
788
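/*
 * Called from the interrupt handler once a PDC transfer has finished:
 * stop the PDC, copy read data back from the bounce buffer on
 * controllers without read/write proof (byte-swapping each word first
 * when the data ordering is broken), unmap the scatterlist and hand
 * EVENT_XFER_COMPLETE over to the tasklet.
 */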
794static void atmci_pdc_complete(struct atmel_mci *host)
795{
796 int transfer_size = host->data->blocks * host->data->blksz;
797 int i;
798
799 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
800
801 if ((!host->caps.has_rwproof)
802 && (host->data->flags & MMC_DATA_READ)) {
803 if (host->caps.has_bad_data_ordering)
804 for (i = 0; i < transfer_size; i++)
805 host->buffer[i] = swab32(host->buffer[i]);
806 sg_copy_from_buffer(host->data->sg, host->data->sg_len,
807 host->buffer, transfer_size);
808 }
809
810 atmci_pdc_cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point trying
	 * to send the stop command or waiting for NBUSY in this case.
	 */
816 if (host->data) {
817 dev_dbg(&host->pdev->dev,
818 "(%s) set pending xfer complete\n", __func__);
819 atmci_set_pending(host, EVENT_XFER_COMPLETE);
820 tasklet_schedule(&host->tasklet);
821 }
822}
823
824static void atmci_dma_cleanup(struct atmel_mci *host)
825{
826 struct mmc_data *data = host->data;
827
828 if (data)
829 dma_unmap_sg(host->dma.chan->device->dev,
830 data->sg, data->sg_len,
831 ((data->flags & MMC_DATA_WRITE)
832 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
833}
834
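/*
 * DMA engine callback, invoked once the descriptor has been executed.
 * Disable the controller's DMA handshake (when the DMA configuration
 * register exists), unmap the scatterlist and let the tasklet finish
 * the data phase once NOTBUSY is seen.
 */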
838static void atmci_dma_complete(void *arg)
839{
840 struct atmel_mci *host = arg;
841 struct mmc_data *data = host->data;
842
843 dev_vdbg(&host->pdev->dev, "DMA complete\n");
844
845 if (host->caps.has_dma_conf_reg)
846
847 atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN);
848
849 atmci_dma_cleanup(host);
850
	/*
	 * If the card was removed, data will be NULL. No point trying
	 * to send the stop command or waiting for NBUSY in this case.
	 */
855 if (data) {
856 dev_dbg(&host->pdev->dev,
857 "(%s) set pending xfer complete\n", __func__);
858 atmci_set_pending(host, EVENT_XFER_COMPLETE);
859 tasklet_schedule(&host->tasklet);
860

		/*
		 * Regardless of what the documentation says, we have to
		 * wait for NOTBUSY before reporting the data phase as
		 * complete: the DMA callback only means the DMA engine
		 * is done, not that the controller has finished on the
		 * card side.
		 */
881 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
882 }
883}
884
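/*
 * Prepare a PIO transfer: returns a mask of interrupt flags to be
 * enabled after the whole request has been prepared.
 */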
889static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
890{
891 u32 iflags;
892
893 data->error = -EINPROGRESS;
894
895 host->sg = data->sg;
896 host->sg_len = data->sg_len;
897 host->data = data;
898 host->data_chan = NULL;
899
900 iflags = ATMCI_DATA_ERROR_FLAGS;

	/*
	 * Errata: MMC data write operation with less than 12
	 * bytes is impossible.
	 *
	 * Errata: MCI Transmit Data Register (TDR) FIFO
	 * corruption when length is not multiple of 4.
	 */
909 if (data->blocks * data->blksz < 12
910 || (data->blocks * data->blksz) & 3)
911 host->need_reset = true;
912
913 host->pio_offset = 0;
914 if (data->flags & MMC_DATA_READ)
915 iflags |= ATMCI_RXRDY;
916 else
917 iflags |= ATMCI_TXRDY;
918
919 return iflags;
920}
921
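/*
 * Prepare a PDC transfer: select PDC mode, program the block length,
 * map the scatterlist for DMA, copy write data into the bounce buffer
 * on controllers without read/write proof and pre-load both PDC
 * buffers. Returns the interrupt mask to enable once the command has
 * been sent.
 */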
928static u32
929atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
930{
931 u32 iflags, tmp;
932 unsigned int sg_len;
933 enum dma_data_direction dir;
934 int i;
935
936 data->error = -EINPROGRESS;
937
938 host->data = data;
939 host->sg = data->sg;
940 iflags = ATMCI_DATA_ERROR_FLAGS;
941
942
943 atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE);
944
945 if (data->flags & MMC_DATA_READ) {
946 dir = DMA_FROM_DEVICE;
947 iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
948 } else {
949 dir = DMA_TO_DEVICE;
950 iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
951 }
952
953
954 tmp = atmci_readl(host, ATMCI_MR);
955 tmp &= 0x0000ffff;
956 tmp |= ATMCI_BLKLEN(data->blksz);
957 atmci_writel(host, ATMCI_MR, tmp);
958
959
960 host->data_size = data->blocks * data->blksz;
961 sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir);
962
963 if ((!host->caps.has_rwproof)
964 && (host->data->flags & MMC_DATA_WRITE)) {
965 sg_copy_to_buffer(host->data->sg, host->data->sg_len,
966 host->buffer, host->data_size);
967 if (host->caps.has_bad_data_ordering)
968 for (i = 0; i < host->data_size; i++)
969 host->buffer[i] = swab32(host->buffer[i]);
970 }
971
972 if (host->data_size)
973 atmci_pdc_set_both_buf(host,
974 ((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT));
975
976 return iflags;
977}
978
979static u32
980atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
981{
982 struct dma_chan *chan;
983 struct dma_async_tx_descriptor *desc;
984 struct scatterlist *sg;
985 unsigned int i;
986 enum dma_data_direction direction;
987 enum dma_transfer_direction slave_dirn;
988 unsigned int sglen;
989 u32 maxburst;
990 u32 iflags;
991
992 data->error = -EINPROGRESS;
993
994 WARN_ON(host->data);
995 host->sg = NULL;
996 host->data = data;
997
998 iflags = ATMCI_DATA_ERROR_FLAGS;
999
1000
1001
1002
1003
1004
1005 if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
1006 return atmci_prepare_data(host, data);
1007 if (data->blksz & 3)
1008 return atmci_prepare_data(host, data);
1009
1010 for_each_sg(data->sg, sg, data->sg_len, i) {
1011 if (sg->offset & 3 || sg->length & 3)
1012 return atmci_prepare_data(host, data);
1013 }
1014
1015
1016 chan = host->dma.chan;
1017 if (chan)
1018 host->data_chan = chan;
1019
1020 if (!chan)
1021 return -ENODEV;
1022
1023 if (data->flags & MMC_DATA_READ) {
1024 direction = DMA_FROM_DEVICE;
1025 host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
1026 maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst);
1027 } else {
1028 direction = DMA_TO_DEVICE;
1029 host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
1030 maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst);
1031 }
1032
1033 if (host->caps.has_dma_conf_reg)
1034 atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) |
1035 ATMCI_DMAEN);
1036
1037 sglen = dma_map_sg(chan->device->dev, data->sg,
1038 data->sg_len, direction);
1039
1040 dmaengine_slave_config(chan, &host->dma_conf);
1041 desc = dmaengine_prep_slave_sg(chan,
1042 data->sg, sglen, slave_dirn,
1043 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1044 if (!desc)
1045 goto unmap_exit;
1046
1047 host->dma.data_desc = desc;
1048 desc->callback = atmci_dma_complete;
1049 desc->callback_param = host;
1050
1051 return iflags;
1052unmap_exit:
1053 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);
1054 return -ENOMEM;
1055}
1056
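/*
 * PIO case: there is nothing to kick off here; the transfer is driven
 * entirely by the RXRDY/TXRDY interrupts enabled by
 * atmci_prepare_data().
 */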
1057static void
1058atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
1059{
1060 return;
1061}
1062
1063
1064
1065
1066static void
1067atmci_submit_data_pdc(struct atmel_mci *host, struct mmc_data *data)
1068{
1069 if (data->flags & MMC_DATA_READ)
1070 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
1071 else
1072 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
1073}
1074
1075static void
1076atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
1077{
1078 struct dma_chan *chan = host->data_chan;
1079 struct dma_async_tx_descriptor *desc = host->dma.data_desc;
1080
1081 if (chan) {
1082 dmaengine_submit(desc);
1083 dma_async_issue_pending(chan);
1084 }
1085}
1086
1087static void atmci_stop_transfer(struct atmel_mci *host)
1088{
1089 dev_dbg(&host->pdev->dev,
1090 "(%s) set pending xfer complete\n", __func__);
1091 atmci_set_pending(host, EVENT_XFER_COMPLETE);
1092 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1093}
1094
1095
1096
1097
1098static void atmci_stop_transfer_pdc(struct atmel_mci *host)
1099{
1100 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
1101}
1102
1103static void atmci_stop_transfer_dma(struct atmel_mci *host)
1104{
1105 struct dma_chan *chan = host->data_chan;
1106
1107 if (chan) {
1108 dmaengine_terminate_all(chan);
1109 atmci_dma_cleanup(host);
1110 } else {
1111
1112 dev_dbg(&host->pdev->dev,
1113 "(%s) set pending xfer complete\n", __func__);
1114 atmci_set_pending(host, EVENT_XFER_COMPLETE);
1115 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1116 }
1117}
1118
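/*
 * Start a request on the given slot. The caller must hold host->lock.
 * The controller is reset first when the previous transfer left it in
 * a bad state or when the IP revision requires a reset between
 * transfers.
 */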
1123static void atmci_start_request(struct atmel_mci *host,
1124 struct atmel_mci_slot *slot)
1125{
1126 struct mmc_request *mrq;
1127 struct mmc_command *cmd;
1128 struct mmc_data *data;
1129 u32 iflags;
1130 u32 cmdflags;
1131
1132 mrq = slot->mrq;
1133 host->cur_slot = slot;
1134 host->mrq = mrq;
1135
1136 host->pending_events = 0;
1137 host->completed_events = 0;
1138 host->cmd_status = 0;
1139 host->data_status = 0;
1140
1141 dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode);
1142
1143 if (host->need_reset || host->caps.need_reset_after_xfer) {
1144 iflags = atmci_readl(host, ATMCI_IMR);
1145 iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
1146 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1147 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1148 atmci_writel(host, ATMCI_MR, host->mode_reg);
1149 if (host->caps.has_cfg_reg)
1150 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1151 atmci_writel(host, ATMCI_IER, iflags);
1152 host->need_reset = false;
1153 }
1154 atmci_writel(host, ATMCI_SDCR, slot->sdc_reg);
1155
1156 iflags = atmci_readl(host, ATMCI_IMR);
1157 if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
1158 dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
1159 iflags);
1160
1161 if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
		/* Send init sequence (74 clock cycles) */
1163 atmci_writel(host, ATMCI_CMDR, ATMCI_CMDR_SPCMD_INIT);
1164 while (!(atmci_readl(host, ATMCI_SR) & ATMCI_CMDRDY))
1165 cpu_relax();
1166 }
1167 iflags = 0;
1168 data = mrq->data;
1169 if (data) {
1170 atmci_set_timeout(host, slot, data);

		/* Must set block count/size before sending command */
1173 atmci_writel(host, ATMCI_BLKR, ATMCI_BCNT(data->blocks)
1174 | ATMCI_BLKLEN(data->blksz));
1175 dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
1176 ATMCI_BCNT(data->blocks) | ATMCI_BLKLEN(data->blksz));
1177
1178 iflags |= host->prepare_data(host, data);
1179 }
1180
1181 iflags |= ATMCI_CMDRDY;
1182 cmd = mrq->cmd;
1183 cmdflags = atmci_prepare_command(slot->mmc, cmd);
1184 atmci_send_command(host, cmd, cmdflags);
1185
1186 if (data)
1187 host->submit_data(host, data);
1188
1189 if (mrq->stop) {
1190 host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
1191 host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
1192 if (!(data->flags & MMC_DATA_WRITE))
1193 host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ;
1194 if (data->flags & MMC_DATA_STREAM)
1195 host->stop_cmdr |= ATMCI_CMDR_STREAM;
1196 else
1197 host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
1198 }
1199
	/*
	 * We could have enabled interrupts earlier, but I suspect
	 * that would open up a nice can of interesting race
	 * conditions (e.g. command and data complete, but stop
	 * not prepared yet.)
	 */
1206 atmci_writel(host, ATMCI_IER, iflags);
1207
1208 mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000));
1209}
1210
1211static void atmci_queue_request(struct atmel_mci *host,
1212 struct atmel_mci_slot *slot, struct mmc_request *mrq)
1213{
1214 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1215 host->state);
1216
1217 spin_lock_bh(&host->lock);
1218 slot->mrq = mrq;
1219 if (host->state == STATE_IDLE) {
1220 host->state = STATE_SENDING_CMD;
1221 atmci_start_request(host, slot);
1222 } else {
1223 dev_dbg(&host->pdev->dev, "queue request\n");
1224 list_add_tail(&slot->queue_node, &host->queue);
1225 }
1226 spin_unlock_bh(&host->lock);
1227}
1228
1229static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1230{
1231 struct atmel_mci_slot *slot = mmc_priv(mmc);
1232 struct atmel_mci *host = slot->host;
1233 struct mmc_data *data;
1234
1235 WARN_ON(slot->mrq);
1236 dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
1237
	/*
	 * We may "know" the card is gone even though there's still an
	 * electrical connection. If so, we really need to communicate
	 * this to the MMC core since there won't be any more
	 * interrupts as the card is completely removed. Otherwise,
	 * the MMC core might believe the card is still there even
	 * though the card was just removed very slowly.
	 */
1246 if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) {
1247 mrq->cmd->error = -ENOMEDIUM;
1248 mmc_request_done(mmc, mrq);
1249 return;
1250 }
1251
1252
	/* Reject multi-block transfers whose block size is not a multiple of 4. */
	data = mrq->data;
	if (data && data->blocks > 1 && data->blksz & 3) {
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}
1258
1259 atmci_queue_request(host, slot, mrq);
1260}
1261
1262static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1263{
1264 struct atmel_mci_slot *slot = mmc_priv(mmc);
1265 struct atmel_mci *host = slot->host;
1266 unsigned int i;
1267
1268 slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
1269 switch (ios->bus_width) {
1270 case MMC_BUS_WIDTH_1:
1271 slot->sdc_reg |= ATMCI_SDCBUS_1BIT;
1272 break;
1273 case MMC_BUS_WIDTH_4:
1274 slot->sdc_reg |= ATMCI_SDCBUS_4BIT;
1275 break;
1276 }
1277
1278 if (ios->clock) {
1279 unsigned int clock_min = ~0U;
1280 u32 clkdiv;
1281
1282 spin_lock_bh(&host->lock);
1283 if (!host->mode_reg) {
1284 clk_enable(host->mck);
1285 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1286 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1287 if (host->caps.has_cfg_reg)
1288 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1289 }
1290
		/*
		 * Use mirror of ios->clock to prevent race with mmc
		 * core ios update when finding the minimum.
		 */
1295 slot->clock = ios->clock;
1296 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1297 if (host->slot[i] && host->slot[i]->clock
1298 && host->slot[i]->clock < clock_min)
1299 clock_min = host->slot[i]->clock;
1300 }
1301
1302
1303 if (host->caps.has_odd_clk_div) {
1304 clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
1305 if (clkdiv > 511) {
1306 dev_warn(&mmc->class_dev,
1307 "clock %u too slow; using %lu\n",
1308 clock_min, host->bus_hz / (511 + 2));
1309 clkdiv = 511;
1310 }
1311 host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1)
1312 | ATMCI_MR_CLKODD(clkdiv & 1);
1313 } else {
1314 clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
1315 if (clkdiv > 255) {
1316 dev_warn(&mmc->class_dev,
1317 "clock %u too slow; using %lu\n",
1318 clock_min, host->bus_hz / (2 * 256));
1319 clkdiv = 255;
1320 }
1321 host->mode_reg = ATMCI_MR_CLKDIV(clkdiv);
1322 }
1323
		/*
		 * WRPROOF and RDPROOF prevent overruns/underruns by
		 * stopping the clock when the FIFO is full/empty.
		 * This state is not expected to last for long.
		 */
1329 if (host->caps.has_rwproof)
1330 host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF);
1331
1332 if (host->caps.has_cfg_reg) {
1333
1334 if (ios->timing == MMC_TIMING_SD_HS)
1335 host->cfg_reg |= ATMCI_CFG_HSMODE;
1336 else
1337 host->cfg_reg &= ~ATMCI_CFG_HSMODE;
1338 }
1339
1340 if (list_empty(&host->queue)) {
1341 atmci_writel(host, ATMCI_MR, host->mode_reg);
1342 if (host->caps.has_cfg_reg)
1343 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1344 } else {
1345 host->need_clock_update = true;
1346 }
1347
1348 spin_unlock_bh(&host->lock);
1349 } else {
1350 bool any_slot_active = false;
1351
1352 spin_lock_bh(&host->lock);
1353 slot->clock = 0;
1354 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1355 if (host->slot[i] && host->slot[i]->clock) {
1356 any_slot_active = true;
1357 break;
1358 }
1359 }
1360 if (!any_slot_active) {
1361 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
1362 if (host->mode_reg) {
1363 atmci_readl(host, ATMCI_MR);
1364 clk_disable(host->mck);
1365 }
1366 host->mode_reg = 0;
1367 }
1368 spin_unlock_bh(&host->lock);
1369 }
1370
1371 switch (ios->power_mode) {
1372 case MMC_POWER_UP:
1373 set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
1374 break;
1375 default:
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388 break;
1389 }
1390}
1391
1392static int atmci_get_ro(struct mmc_host *mmc)
1393{
1394 int read_only = -ENOSYS;
1395 struct atmel_mci_slot *slot = mmc_priv(mmc);
1396
1397 if (gpio_is_valid(slot->wp_pin)) {
1398 read_only = gpio_get_value(slot->wp_pin);
1399 dev_dbg(&mmc->class_dev, "card is %s\n",
1400 read_only ? "read-only" : "read-write");
1401 }
1402
1403 return read_only;
1404}
1405
1406static int atmci_get_cd(struct mmc_host *mmc)
1407{
1408 int present = -ENOSYS;
1409 struct atmel_mci_slot *slot = mmc_priv(mmc);
1410
1411 if (gpio_is_valid(slot->detect_pin)) {
1412 present = !(gpio_get_value(slot->detect_pin) ^
1413 slot->detect_is_active_high);
1414 dev_dbg(&mmc->class_dev, "card is %spresent\n",
1415 present ? "" : "not ");
1416 }
1417
1418 return present;
1419}
1420
1421static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1422{
1423 struct atmel_mci_slot *slot = mmc_priv(mmc);
1424 struct atmel_mci *host = slot->host;
1425
1426 if (enable)
1427 atmci_writel(host, ATMCI_IER, slot->sdio_irq);
1428 else
1429 atmci_writel(host, ATMCI_IDR, slot->sdio_irq);
1430}
1431
1432static const struct mmc_host_ops atmci_ops = {
1433 .request = atmci_request,
1434 .set_ios = atmci_set_ios,
1435 .get_ro = atmci_get_ro,
1436 .get_cd = atmci_get_cd,
1437 .enable_sdio_irq = atmci_enable_sdio_irq,
1438};
1439
1440
1441static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
1442 __releases(&host->lock)
1443 __acquires(&host->lock)
1444{
1445 struct atmel_mci_slot *slot = NULL;
1446 struct mmc_host *prev_mmc = host->cur_slot->mmc;
1447
1448 WARN_ON(host->cmd || host->data);
1449
	/*
	 * Update the MMC clock rate if necessary. This may be
	 * necessary if set_ios() is called when a different slot is
	 * busy transferring data.
	 */
1455 if (host->need_clock_update) {
1456 atmci_writel(host, ATMCI_MR, host->mode_reg);
1457 if (host->caps.has_cfg_reg)
1458 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1459 }
1460
1461 host->cur_slot->mrq = NULL;
1462 host->mrq = NULL;
1463 if (!list_empty(&host->queue)) {
1464 slot = list_entry(host->queue.next,
1465 struct atmel_mci_slot, queue_node);
1466 list_del(&slot->queue_node);
1467 dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
1468 mmc_hostname(slot->mmc));
1469 host->state = STATE_SENDING_CMD;
1470 atmci_start_request(host, slot);
1471 } else {
1472 dev_vdbg(&host->pdev->dev, "list empty\n");
1473 host->state = STATE_IDLE;
1474 }
1475
1476 del_timer(&host->timer);
1477
1478 spin_unlock(&host->lock);
1479 mmc_request_done(prev_mmc, mrq);
1480 spin_lock(&host->lock);
1481}
1482
1483static void atmci_command_complete(struct atmel_mci *host,
1484 struct mmc_command *cmd)
1485{
1486 u32 status = host->cmd_status;
1487
	/* Read the response from the card (up to 16 bytes) */
1489 cmd->resp[0] = atmci_readl(host, ATMCI_RSPR);
1490 cmd->resp[1] = atmci_readl(host, ATMCI_RSPR);
1491 cmd->resp[2] = atmci_readl(host, ATMCI_RSPR);
1492 cmd->resp[3] = atmci_readl(host, ATMCI_RSPR);
1493
1494 if (status & ATMCI_RTOE)
1495 cmd->error = -ETIMEDOUT;
1496 else if ((cmd->flags & MMC_RSP_CRC) && (status & ATMCI_RCRCE))
1497 cmd->error = -EILSEQ;
1498 else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
1499 cmd->error = -EIO;
1500 else if (host->mrq->data && (host->mrq->data->blksz & 3)) {
1501 if (host->caps.need_blksz_mul_4) {
1502 cmd->error = -EINVAL;
1503 host->need_reset = 1;
1504 }
1505 } else
1506 cmd->error = 0;
1507}
1508
1509static void atmci_detect_change(unsigned long data)
1510{
1511 struct atmel_mci_slot *slot = (struct atmel_mci_slot *)data;
1512 bool present;
1513 bool present_old;
1514
	/*
	 * atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before
	 * freeing the interrupt. We must not re-enable the interrupt
	 * if it has been freed, and if we're shutting down, it
	 * doesn't really matter whether the card is present or not.
	 */
1521 smp_rmb();
1522 if (test_bit(ATMCI_SHUTDOWN, &slot->flags))
1523 return;
1524
1525 enable_irq(gpio_to_irq(slot->detect_pin));
1526 present = !(gpio_get_value(slot->detect_pin) ^
1527 slot->detect_is_active_high);
1528 present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);
1529
1530 dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
1531 present, present_old);
1532
1533 if (present != present_old) {
1534 struct atmel_mci *host = slot->host;
1535 struct mmc_request *mrq;
1536
1537 dev_dbg(&slot->mmc->class_dev, "card %s\n",
1538 present ? "inserted" : "removed");
1539
1540 spin_lock(&host->lock);
1541
1542 if (!present)
1543 clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
1544 else
1545 set_bit(ATMCI_CARD_PRESENT, &slot->flags);
1546
1547
1548 mrq = slot->mrq;
1549 if (mrq) {
1550 if (mrq == host->mrq) {
1551
1552
1553
1554
1555 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1556 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1557 atmci_writel(host, ATMCI_MR, host->mode_reg);
1558 if (host->caps.has_cfg_reg)
1559 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1560
1561 host->data = NULL;
1562 host->cmd = NULL;
1563
1564 switch (host->state) {
1565 case STATE_IDLE:
1566 break;
1567 case STATE_SENDING_CMD:
1568 mrq->cmd->error = -ENOMEDIUM;
1569 if (mrq->data)
1570 host->stop_transfer(host);
1571 break;
1572 case STATE_DATA_XFER:
1573 mrq->data->error = -ENOMEDIUM;
1574 host->stop_transfer(host);
1575 break;
1576 case STATE_WAITING_NOTBUSY:
1577 mrq->data->error = -ENOMEDIUM;
1578 break;
1579 case STATE_SENDING_STOP:
1580 mrq->stop->error = -ENOMEDIUM;
1581 break;
1582 case STATE_END_REQUEST:
1583 break;
1584 }
1585
1586 atmci_request_end(host, mrq);
1587 } else {
1588 list_del(&slot->queue_node);
1589 mrq->cmd->error = -ENOMEDIUM;
1590 if (mrq->data)
1591 mrq->data->error = -ENOMEDIUM;
1592 if (mrq->stop)
1593 mrq->stop->error = -ENOMEDIUM;
1594
1595 spin_unlock(&host->lock);
1596 mmc_request_done(slot->mmc, mrq);
1597 spin_lock(&host->lock);
1598 }
1599 }
1600 spin_unlock(&host->lock);
1601
1602 mmc_detect_change(slot->mmc, 0);
1603 }
1604}
1605
1606static void atmci_tasklet_func(unsigned long priv)
1607{
1608 struct atmel_mci *host = (struct atmel_mci *)priv;
1609 struct mmc_request *mrq = host->mrq;
1610 struct mmc_data *data = host->data;
1611 enum atmel_mci_state state = host->state;
1612 enum atmel_mci_state prev_state;
1613 u32 status;
1614
1615 spin_lock(&host->lock);
1616
1617 state = host->state;
1618
1619 dev_vdbg(&host->pdev->dev,
1620 "tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
1621 state, host->pending_events, host->completed_events,
1622 atmci_readl(host, ATMCI_IMR));
1623
1624 do {
1625 prev_state = state;
1626 dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state);
1627
1628 switch (state) {
1629 case STATE_IDLE:
1630 break;
1631
1632 case STATE_SENDING_CMD:
1633
1634
1635
1636
1637
1638
1639 dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
1640 if (!atmci_test_and_clear_pending(host,
1641 EVENT_CMD_RDY))
1642 break;
1643
1644 dev_dbg(&host->pdev->dev, "set completed cmd ready\n");
1645 host->cmd = NULL;
1646 atmci_set_completed(host, EVENT_CMD_RDY);
1647 atmci_command_complete(host, mrq->cmd);
1648 if (mrq->data) {
1649 dev_dbg(&host->pdev->dev,
1650 "command with data transfer");
1651
1652
1653
1654
1655 if (mrq->cmd->error) {
1656 host->stop_transfer(host);
1657 host->data = NULL;
1658 atmci_writel(host, ATMCI_IDR,
1659 ATMCI_TXRDY | ATMCI_RXRDY
1660 | ATMCI_DATA_ERROR_FLAGS);
1661 state = STATE_END_REQUEST;
1662 } else
1663 state = STATE_DATA_XFER;
1664 } else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
1665 dev_dbg(&host->pdev->dev,
1666 "command response need waiting notbusy");
1667 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1668 state = STATE_WAITING_NOTBUSY;
1669 } else
1670 state = STATE_END_REQUEST;
1671
1672 break;
1673
1674 case STATE_DATA_XFER:
1675 if (atmci_test_and_clear_pending(host,
1676 EVENT_DATA_ERROR)) {
1677 dev_dbg(&host->pdev->dev, "set completed data error\n");
1678 atmci_set_completed(host, EVENT_DATA_ERROR);
1679 state = STATE_END_REQUEST;
1680 break;
1681 }
1682
1683
1684
1685
1686
1687
1688
1689
1690 dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");
1691 if (!atmci_test_and_clear_pending(host,
1692 EVENT_XFER_COMPLETE))
1693 break;
1694
1695 dev_dbg(&host->pdev->dev,
1696 "(%s) set completed xfer complete\n",
1697 __func__);
1698 atmci_set_completed(host, EVENT_XFER_COMPLETE);
1699
1700 if (host->caps.need_notbusy_for_read_ops ||
1701 (host->data->flags & MMC_DATA_WRITE)) {
1702 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1703 state = STATE_WAITING_NOTBUSY;
1704 } else if (host->mrq->stop) {
1705 atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
1706 atmci_send_stop_cmd(host, data);
1707 state = STATE_SENDING_STOP;
1708 } else {
1709 host->data = NULL;
1710 data->bytes_xfered = data->blocks * data->blksz;
1711 data->error = 0;
1712 state = STATE_END_REQUEST;
1713 }
1714 break;
1715
1716 case STATE_WAITING_NOTBUSY:
1717
1718
1719
1720
1721
1722
1723 dev_dbg(&host->pdev->dev, "FSM: not busy?\n");
1724 if (!atmci_test_and_clear_pending(host,
1725 EVENT_NOTBUSY))
1726 break;
1727
1728 dev_dbg(&host->pdev->dev, "set completed not busy\n");
1729 atmci_set_completed(host, EVENT_NOTBUSY);
1730
1731 if (host->data) {
1732
1733
1734
1735
1736
1737 if (host->mrq->stop) {
1738 atmci_writel(host, ATMCI_IER,
1739 ATMCI_CMDRDY);
1740 atmci_send_stop_cmd(host, data);
1741 state = STATE_SENDING_STOP;
1742 } else {
1743 host->data = NULL;
1744 data->bytes_xfered = data->blocks
1745 * data->blksz;
1746 data->error = 0;
1747 state = STATE_END_REQUEST;
1748 }
1749 } else
1750 state = STATE_END_REQUEST;
1751 break;
1752
1753 case STATE_SENDING_STOP:
1754
1755
1756
1757
1758
1759
1760 dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
1761 if (!atmci_test_and_clear_pending(host,
1762 EVENT_CMD_RDY))
1763 break;
1764
1765 dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
1766 host->cmd = NULL;
1767 data->bytes_xfered = data->blocks * data->blksz;
1768 data->error = 0;
1769 atmci_command_complete(host, mrq->stop);
1770 if (mrq->stop->error) {
1771 host->stop_transfer(host);
1772 atmci_writel(host, ATMCI_IDR,
1773 ATMCI_TXRDY | ATMCI_RXRDY
1774 | ATMCI_DATA_ERROR_FLAGS);
1775 state = STATE_END_REQUEST;
1776 } else {
1777 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1778 state = STATE_WAITING_NOTBUSY;
1779 }
1780 host->data = NULL;
1781 break;
1782
1783 case STATE_END_REQUEST:
1784 atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY
1785 | ATMCI_DATA_ERROR_FLAGS);
1786 status = host->data_status;
1787 if (unlikely(status)) {
1788 host->stop_transfer(host);
1789 host->data = NULL;
1790 if (status & ATMCI_DTOE) {
1791 data->error = -ETIMEDOUT;
1792 } else if (status & ATMCI_DCRCE) {
1793 data->error = -EILSEQ;
1794 } else {
1795 data->error = -EIO;
1796 }
1797 }
1798
1799 atmci_request_end(host, host->mrq);
1800 state = STATE_IDLE;
1801 break;
1802 }
1803 } while (state != prev_state);
1804
1805 host->state = state;
1806
1807 spin_unlock(&host->lock);
1808}
1809
1810static void atmci_read_data_pio(struct atmel_mci *host)
1811{
1812 struct scatterlist *sg = host->sg;
1813 void *buf = sg_virt(sg);
1814 unsigned int offset = host->pio_offset;
1815 struct mmc_data *data = host->data;
1816 u32 value;
1817 u32 status;
1818 unsigned int nbytes = 0;
1819
1820 do {
1821 value = atmci_readl(host, ATMCI_RDR);
1822 if (likely(offset + 4 <= sg->length)) {
1823 put_unaligned(value, (u32 *)(buf + offset));
1824
1825 offset += 4;
1826 nbytes += 4;
1827
1828 if (offset == sg->length) {
1829 flush_dcache_page(sg_page(sg));
1830 host->sg = sg = sg_next(sg);
1831 host->sg_len--;
1832 if (!sg || !host->sg_len)
1833 goto done;
1834
1835 offset = 0;
1836 buf = sg_virt(sg);
1837 }
1838 } else {
1839 unsigned int remaining = sg->length - offset;
1840 memcpy(buf + offset, &value, remaining);
1841 nbytes += remaining;
1842
1843 flush_dcache_page(sg_page(sg));
1844 host->sg = sg = sg_next(sg);
1845 host->sg_len--;
1846 if (!sg || !host->sg_len)
1847 goto done;
1848
1849 offset = 4 - remaining;
1850 buf = sg_virt(sg);
1851 memcpy(buf, (u8 *)&value + remaining, offset);
1852 nbytes += offset;
1853 }
1854
1855 status = atmci_readl(host, ATMCI_SR);
1856 if (status & ATMCI_DATA_ERROR_FLAGS) {
1857 atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_RXRDY
1858 | ATMCI_DATA_ERROR_FLAGS));
1859 host->data_status = status;
1860 data->bytes_xfered += nbytes;
1861 return;
1862 }
1863 } while (status & ATMCI_RXRDY);
1864
1865 host->pio_offset = offset;
1866 data->bytes_xfered += nbytes;
1867
1868 return;
1869
1870done:
1871 atmci_writel(host, ATMCI_IDR, ATMCI_RXRDY);
1872 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1873 data->bytes_xfered += nbytes;
1874 smp_wmb();
1875 atmci_set_pending(host, EVENT_XFER_COMPLETE);
1876}
1877
1878static void atmci_write_data_pio(struct atmel_mci *host)
1879{
1880 struct scatterlist *sg = host->sg;
1881 void *buf = sg_virt(sg);
1882 unsigned int offset = host->pio_offset;
1883 struct mmc_data *data = host->data;
1884 u32 value;
1885 u32 status;
1886 unsigned int nbytes = 0;
1887
1888 do {
1889 if (likely(offset + 4 <= sg->length)) {
1890 value = get_unaligned((u32 *)(buf + offset));
1891 atmci_writel(host, ATMCI_TDR, value);
1892
1893 offset += 4;
1894 nbytes += 4;
1895 if (offset == sg->length) {
1896 host->sg = sg = sg_next(sg);
1897 host->sg_len--;
1898 if (!sg || !host->sg_len)
1899 goto done;
1900
1901 offset = 0;
1902 buf = sg_virt(sg);
1903 }
1904 } else {
1905 unsigned int remaining = sg->length - offset;
1906
1907 value = 0;
1908 memcpy(&value, buf + offset, remaining);
1909 nbytes += remaining;
1910
1911 host->sg = sg = sg_next(sg);
1912 host->sg_len--;
1913 if (!sg || !host->sg_len) {
1914 atmci_writel(host, ATMCI_TDR, value);
1915 goto done;
1916 }
1917
1918 offset = 4 - remaining;
1919 buf = sg_virt(sg);
1920 memcpy((u8 *)&value + remaining, buf, offset);
1921 atmci_writel(host, ATMCI_TDR, value);
1922 nbytes += offset;
1923 }
1924
1925 status = atmci_readl(host, ATMCI_SR);
1926 if (status & ATMCI_DATA_ERROR_FLAGS) {
1927 atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_TXRDY
1928 | ATMCI_DATA_ERROR_FLAGS));
1929 host->data_status = status;
1930 data->bytes_xfered += nbytes;
1931 return;
1932 }
1933 } while (status & ATMCI_TXRDY);
1934
1935 host->pio_offset = offset;
1936 data->bytes_xfered += nbytes;
1937
1938 return;
1939
1940done:
1941 atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY);
1942 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1943 data->bytes_xfered += nbytes;
1944 smp_wmb();
1945 atmci_set_pending(host, EVENT_XFER_COMPLETE);
1946}
1947
1948static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
1949{
1950 int i;
1951
1952 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1953 struct atmel_mci_slot *slot = host->slot[i];
1954 if (slot && (status & slot->sdio_irq)) {
1955 mmc_signal_sdio_irq(slot->mmc);
1956 }
1957 }
1958}
1959
1960
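/*
 * Interrupt top half: mask the sources that fired, record the status
 * for the tasklet and flag the corresponding events. PIO data and PDC
 * buffer switching are handled directly here to keep up with the
 * hardware; everything else is deferred to the tasklet.
 */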
1961static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1962{
1963 struct atmel_mci *host = dev_id;
1964 u32 status, mask, pending;
1965 unsigned int pass_count = 0;
1966
1967 do {
1968 status = atmci_readl(host, ATMCI_SR);
1969 mask = atmci_readl(host, ATMCI_IMR);
1970 pending = status & mask;
1971 if (!pending)
1972 break;
1973
1974 if (pending & ATMCI_DATA_ERROR_FLAGS) {
1975 dev_dbg(&host->pdev->dev, "IRQ: data error\n");
1976 atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
1977 | ATMCI_RXRDY | ATMCI_TXRDY
1978 | ATMCI_ENDRX | ATMCI_ENDTX
1979 | ATMCI_RXBUFF | ATMCI_TXBUFE);
1980
1981 host->data_status = status;
1982 dev_dbg(&host->pdev->dev, "set pending data error\n");
1983 smp_wmb();
1984 atmci_set_pending(host, EVENT_DATA_ERROR);
1985 tasklet_schedule(&host->tasklet);
1986 }
1987
1988 if (pending & ATMCI_TXBUFE) {
1989 dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n");
1990 atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
1991 atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
1992
1993
1994
1995
1996
1997 if (host->data_size) {
1998 atmci_pdc_set_both_buf(host, XFER_TRANSMIT);
1999 atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
2000 atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE);
2001 } else {
2002 atmci_pdc_complete(host);
2003 }
2004 } else if (pending & ATMCI_ENDTX) {
2005 dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n");
2006 atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
2007
2008 if (host->data_size) {
2009 atmci_pdc_set_single_buf(host,
2010 XFER_TRANSMIT, PDC_SECOND_BUF);
2011 atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
2012 }
2013 }
2014
2015 if (pending & ATMCI_RXBUFF) {
2016 dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n");
2017 atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
2018 atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
2019
2020
2021
2022
2023
2024 if (host->data_size) {
2025 atmci_pdc_set_both_buf(host, XFER_RECEIVE);
2026 atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
2027 atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF);
2028 } else {
2029 atmci_pdc_complete(host);
2030 }
2031 } else if (pending & ATMCI_ENDRX) {
2032 dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n");
2033 atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
2034
2035 if (host->data_size) {
2036 atmci_pdc_set_single_buf(host,
2037 XFER_RECEIVE, PDC_SECOND_BUF);
2038 atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
2039 }
2040 }
2041
2042
2043
2044
2045
2046
2047
2048 if (pending & ATMCI_BLKE) {
2049 dev_dbg(&host->pdev->dev, "IRQ: blke\n");
2050 atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
2051 smp_wmb();
2052 dev_dbg(&host->pdev->dev, "set pending notbusy\n");
2053 atmci_set_pending(host, EVENT_NOTBUSY);
2054 tasklet_schedule(&host->tasklet);
2055 }
2056
2057 if (pending & ATMCI_NOTBUSY) {
2058 dev_dbg(&host->pdev->dev, "IRQ: not_busy\n");
2059 atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
2060 smp_wmb();
2061 dev_dbg(&host->pdev->dev, "set pending notbusy\n");
2062 atmci_set_pending(host, EVENT_NOTBUSY);
2063 tasklet_schedule(&host->tasklet);
2064 }
2065
2066 if (pending & ATMCI_RXRDY)
2067 atmci_read_data_pio(host);
2068 if (pending & ATMCI_TXRDY)
2069 atmci_write_data_pio(host);
2070
2071 if (pending & ATMCI_CMDRDY) {
2072 dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n");
2073 atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
2074 host->cmd_status = status;
2075 smp_wmb();
2076 dev_dbg(&host->pdev->dev, "set pending cmd rdy\n");
2077 atmci_set_pending(host, EVENT_CMD_RDY);
2078 tasklet_schedule(&host->tasklet);
2079 }
2080
2081 if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
2082 atmci_sdio_interrupt(host, status);
2083
2084 } while (pass_count++ < 5);
2085
2086 return pass_count ? IRQ_HANDLED : IRQ_NONE;
2087}
2088
2089static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
2090{
2091 struct atmel_mci_slot *slot = dev_id;
2092
2093
2094
2095
2096
2097
2098 disable_irq_nosync(irq);
2099 mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20));
2100
2101 return IRQ_HANDLED;
2102}
2103
2104static int __init atmci_init_slot(struct atmel_mci *host,
2105 struct mci_slot_pdata *slot_data, unsigned int id,
2106 u32 sdc_reg, u32 sdio_irq)
2107{
2108 struct mmc_host *mmc;
2109 struct atmel_mci_slot *slot;
2110
2111 mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
2112 if (!mmc)
2113 return -ENOMEM;
2114
2115 slot = mmc_priv(mmc);
2116 slot->mmc = mmc;
2117 slot->host = host;
2118 slot->detect_pin = slot_data->detect_pin;
2119 slot->wp_pin = slot_data->wp_pin;
2120 slot->detect_is_active_high = slot_data->detect_is_active_high;
2121 slot->sdc_reg = sdc_reg;
2122 slot->sdio_irq = sdio_irq;
2123
2124 dev_dbg(&mmc->class_dev,
2125 "slot[%u]: bus_width=%u, detect_pin=%d, "
2126 "detect_is_active_high=%s, wp_pin=%d\n",
2127 id, slot_data->bus_width, slot_data->detect_pin,
2128 slot_data->detect_is_active_high ? "true" : "false",
2129 slot_data->wp_pin);
2130
2131 mmc->ops = &atmci_ops;
2132 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
2133 mmc->f_max = host->bus_hz / 2;
2134 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2135 if (sdio_irq)
2136 mmc->caps |= MMC_CAP_SDIO_IRQ;
2137 if (host->caps.has_highspeed)
2138 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
2139
	/*
	 * Without the read/write proof capability, it is strongly suggested
	 * to use only one bit for data to prevent FIFO underruns and
	 * overruns which will corrupt data.
	 */
2144 if ((slot_data->bus_width >= 4) && host->caps.has_rwproof)
2145 mmc->caps |= MMC_CAP_4_BIT_DATA;
2146
2147 if (atmci_get_version(host) < 0x200) {
2148 mmc->max_segs = 256;
2149 mmc->max_blk_size = 4095;
2150 mmc->max_blk_count = 256;
2151 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2152 mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs;
2153 } else {
2154 mmc->max_segs = 64;
2155 mmc->max_req_size = 32768 * 512;
2156 mmc->max_blk_size = 32768;
2157 mmc->max_blk_count = 512;
2158 }
2159
2160
2161 set_bit(ATMCI_CARD_PRESENT, &slot->flags);
2162 if (gpio_is_valid(slot->detect_pin)) {
2163 if (gpio_request(slot->detect_pin, "mmc_detect")) {
2164 dev_dbg(&mmc->class_dev, "no detect pin available\n");
2165 slot->detect_pin = -EBUSY;
2166 } else if (gpio_get_value(slot->detect_pin) ^
2167 slot->detect_is_active_high) {
2168 clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
2169 }
2170 }
2171
2172 if (!gpio_is_valid(slot->detect_pin))
2173 mmc->caps |= MMC_CAP_NEEDS_POLL;
2174
2175 if (gpio_is_valid(slot->wp_pin)) {
2176 if (gpio_request(slot->wp_pin, "mmc_wp")) {
2177 dev_dbg(&mmc->class_dev, "no WP pin available\n");
2178 slot->wp_pin = -EBUSY;
2179 }
2180 }
2181
2182 host->slot[id] = slot;
2183 mmc_add_host(mmc);
2184
2185 if (gpio_is_valid(slot->detect_pin)) {
2186 int ret;
2187
2188 setup_timer(&slot->detect_timer, atmci_detect_change,
2189 (unsigned long)slot);
2190
2191 ret = request_irq(gpio_to_irq(slot->detect_pin),
2192 atmci_detect_interrupt,
2193 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
2194 "mmc-detect", slot);
2195 if (ret) {
2196 dev_dbg(&mmc->class_dev,
2197 "could not request IRQ %d for detect pin\n",
2198 gpio_to_irq(slot->detect_pin));
2199 gpio_free(slot->detect_pin);
2200 slot->detect_pin = -EBUSY;
2201 }
2202 }
2203
2204 atmci_init_debugfs(slot);
2205
2206 return 0;
2207}
2208
2209static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
2210 unsigned int id)
2211{
2212
2213
2214 set_bit(ATMCI_SHUTDOWN, &slot->flags);
2215 smp_wmb();
2216
2217 mmc_remove_host(slot->mmc);
2218
2219 if (gpio_is_valid(slot->detect_pin)) {
2220 int pin = slot->detect_pin;
2221
2222 free_irq(gpio_to_irq(pin), slot);
2223 del_timer_sync(&slot->detect_timer);
2224 gpio_free(pin);
2225 }
2226 if (gpio_is_valid(slot->wp_pin))
2227 gpio_free(slot->wp_pin);
2228
2229 slot->host->slot[id] = NULL;
2230 mmc_free_host(slot->mmc);
2231}
2232
2233static bool atmci_filter(struct dma_chan *chan, void *pdata)
2234{
2235 struct mci_platform_data *sl_pdata = pdata;
2236 struct mci_dma_data *sl;
2237
2238 if (!sl_pdata)
2239 return false;
2240
2241 sl = sl_pdata->dma_slave;
2242 if (sl && find_slave_dev(sl) == chan->device->dev) {
2243 chan->private = slave_data_ptr(sl);
2244 return true;
2245 } else {
2246 return false;
2247 }
2248}
2249
2250static bool atmci_configure_dma(struct atmel_mci *host)
2251{
2252 struct mci_platform_data *pdata;
2253 dma_cap_mask_t mask;
2254
2255 if (host == NULL)
2256 return false;
2257
2258 pdata = host->pdev->dev.platform_data;
2259
2260 dma_cap_zero(mask);
2261 dma_cap_set(DMA_SLAVE, mask);
2262
2263 host->dma.chan = dma_request_slave_channel_compat(mask, atmci_filter, pdata,
2264 &host->pdev->dev, "rxtx");
2265 if (!host->dma.chan) {
2266 dev_warn(&host->pdev->dev, "no DMA channel available\n");
2267 return false;
2268 } else {
2269 dev_info(&host->pdev->dev,
2270 "using %s for DMA transfers\n",
2271 dma_chan_name(host->dma.chan));
2272
2273 host->dma_conf.src_addr = host->mapbase + ATMCI_RDR;
2274 host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2275 host->dma_conf.src_maxburst = 1;
2276 host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR;
2277 host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2278 host->dma_conf.dst_maxburst = 1;
2279 host->dma_conf.device_fc = false;
2280 return true;
2281 }
2282}
2283
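/*
 * Derive the controller capabilities from the hardware version
 * register. Newer IP revisions add features (DMA configuration
 * register, CFG/CSTOR registers, high speed, odd clock divider) while
 * dropping the PDC, so the switch below intentionally falls through
 * from newest to oldest revision.
 */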
2289static void __init atmci_get_cap(struct atmel_mci *host)
2290{
2291 unsigned int version;
2292
2293 version = atmci_get_version(host);
2294 dev_info(&host->pdev->dev,
2295 "version: 0x%x\n", version);
2296
2297 host->caps.has_dma_conf_reg = 0;
2298 host->caps.has_pdc = ATMCI_PDC_CONNECTED;
2299 host->caps.has_cfg_reg = 0;
2300 host->caps.has_cstor_reg = 0;
2301 host->caps.has_highspeed = 0;
2302 host->caps.has_rwproof = 0;
2303 host->caps.has_odd_clk_div = 0;
2304 host->caps.has_bad_data_ordering = 1;
2305 host->caps.need_reset_after_xfer = 1;
2306 host->caps.need_blksz_mul_4 = 1;
2307 host->caps.need_notbusy_for_read_ops = 0;
2308
2309
2310 switch (version & 0xf00) {
2311 case 0x500:
2312 host->caps.has_odd_clk_div = 1;
2313 case 0x400:
2314 case 0x300:
2315 host->caps.has_dma_conf_reg = 1;
2316 host->caps.has_pdc = 0;
2317 host->caps.has_cfg_reg = 1;
2318 host->caps.has_cstor_reg = 1;
2319 host->caps.has_highspeed = 1;
2320 case 0x200:
2321 host->caps.has_rwproof = 1;
2322 host->caps.need_blksz_mul_4 = 0;
2323 host->caps.need_notbusy_for_read_ops = 1;
2324 case 0x100:
2325 host->caps.has_bad_data_ordering = 0;
2326 host->caps.need_reset_after_xfer = 0;
2327 case 0x0:
2328 break;
2329 default:
2330 host->caps.has_pdc = 0;
2331 dev_warn(&host->pdev->dev,
2332 "Unmanaged mci version, set minimum capabilities\n");
2333 break;
2334 }
2335}
2336
2337static int __init atmci_probe(struct platform_device *pdev)
2338{
2339 struct mci_platform_data *pdata;
2340 struct atmel_mci *host;
2341 struct resource *regs;
2342 unsigned int nr_slots;
2343 int irq;
2344 int ret;
2345
2346 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2347 if (!regs)
2348 return -ENXIO;
2349 pdata = pdev->dev.platform_data;
2350 if (!pdata) {
2351 pdata = atmci_of_init(pdev);
2352 if (IS_ERR(pdata)) {
2353 dev_err(&pdev->dev, "platform data not available\n");
2354 return PTR_ERR(pdata);
2355 }
2356 }
2357
2358 irq = platform_get_irq(pdev, 0);
2359 if (irq < 0)
2360 return irq;
2361
2362 host = kzalloc(sizeof(struct atmel_mci), GFP_KERNEL);
2363 if (!host)
2364 return -ENOMEM;
2365
2366 host->pdev = pdev;
2367 spin_lock_init(&host->lock);
2368 INIT_LIST_HEAD(&host->queue);
2369
2370 host->mck = clk_get(&pdev->dev, "mci_clk");
2371 if (IS_ERR(host->mck)) {
2372 ret = PTR_ERR(host->mck);
2373 goto err_clk_get;
2374 }
2375
2376 ret = -ENOMEM;
2377 host->regs = ioremap(regs->start, resource_size(regs));
2378 if (!host->regs)
2379 goto err_ioremap;
2380
2381 clk_enable(host->mck);
2382 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
2383 host->bus_hz = clk_get_rate(host->mck);
2384 clk_disable(host->mck);
2385
2386 host->mapbase = regs->start;
2387
2388 tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host);
2389
2390 ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host);
2391 if (ret)
2392 goto err_request_irq;
2393
2394
2395 atmci_get_cap(host);
2396 if (atmci_configure_dma(host)) {
2397 host->prepare_data = &atmci_prepare_data_dma;
2398 host->submit_data = &atmci_submit_data_dma;
2399 host->stop_transfer = &atmci_stop_transfer_dma;
2400 } else if (host->caps.has_pdc) {
2401 dev_info(&pdev->dev, "using PDC\n");
2402 host->prepare_data = &atmci_prepare_data_pdc;
2403 host->submit_data = &atmci_submit_data_pdc;
2404 host->stop_transfer = &atmci_stop_transfer_pdc;
2405 } else {
2406 dev_info(&pdev->dev, "using PIO\n");
2407 host->prepare_data = &atmci_prepare_data;
2408 host->submit_data = &atmci_submit_data;
2409 host->stop_transfer = &atmci_stop_transfer;
2410 }
2411
2412 platform_set_drvdata(pdev, host);
2413
2414 setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
2415
2416
2417 nr_slots = 0;
2418 ret = -ENODEV;
2419 if (pdata->slot[0].bus_width) {
2420 ret = atmci_init_slot(host, &pdata->slot[0],
2421 0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
2422 if (!ret) {
2423 nr_slots++;
2424 host->buf_size = host->slot[0]->mmc->max_req_size;
2425 }
2426 }
2427 if (pdata->slot[1].bus_width) {
2428 ret = atmci_init_slot(host, &pdata->slot[1],
2429 1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
2430 if (!ret) {
2431 nr_slots++;
2432 if (host->slot[1]->mmc->max_req_size > host->buf_size)
2433 host->buf_size =
2434 host->slot[1]->mmc->max_req_size;
2435 }
2436 }
2437
2438 if (!nr_slots) {
2439 dev_err(&pdev->dev, "init failed: no slot defined\n");
2440 goto err_init_slot;
2441 }
2442
2443 if (!host->caps.has_rwproof) {
2444 host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
2445 &host->buf_phys_addr,
2446 GFP_KERNEL);
2447 if (!host->buffer) {
2448 ret = -ENOMEM;
2449 dev_err(&pdev->dev, "buffer allocation failed\n");
2450 goto err_init_slot;
2451 }
2452 }
2453
2454 dev_info(&pdev->dev,
2455 "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
2456 host->mapbase, irq, nr_slots);
2457
2458 return 0;
2459
2460err_init_slot:
2461 if (host->dma.chan)
2462 dma_release_channel(host->dma.chan);
2463 free_irq(irq, host);
2464err_request_irq:
2465 iounmap(host->regs);
2466err_ioremap:
2467 clk_put(host->mck);
2468err_clk_get:
2469 kfree(host);
2470 return ret;
2471}
2472
2473static int __exit atmci_remove(struct platform_device *pdev)
2474{
2475 struct atmel_mci *host = platform_get_drvdata(pdev);
2476 unsigned int i;
2477
2478 platform_set_drvdata(pdev, NULL);
2479
2480 if (host->buffer)
2481 dma_free_coherent(&pdev->dev, host->buf_size,
2482 host->buffer, host->buf_phys_addr);
2483
2484 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2485 if (host->slot[i])
2486 atmci_cleanup_slot(host->slot[i], i);
2487 }
2488
2489 clk_enable(host->mck);
2490 atmci_writel(host, ATMCI_IDR, ~0UL);
2491 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
2492 atmci_readl(host, ATMCI_SR);
2493 clk_disable(host->mck);
2494
2495 if (host->dma.chan)
2496 dma_release_channel(host->dma.chan);
2497
2498 free_irq(platform_get_irq(pdev, 0), host);
2499 iounmap(host->regs);
2500
2501 clk_put(host->mck);
2502 kfree(host);
2503
2504 return 0;
2505}
2506
2507#ifdef CONFIG_PM
2508static int atmci_suspend(struct device *dev)
2509{
2510 struct atmel_mci *host = dev_get_drvdata(dev);
2511 int i;
2512
2513 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2514 struct atmel_mci_slot *slot = host->slot[i];
2515 int ret;
2516
2517 if (!slot)
2518 continue;
2519 ret = mmc_suspend_host(slot->mmc);
2520 if (ret < 0) {
2521 while (--i >= 0) {
2522 slot = host->slot[i];
2523 if (slot
2524 && test_bit(ATMCI_SUSPENDED, &slot->flags)) {
2525 mmc_resume_host(host->slot[i]->mmc);
2526 clear_bit(ATMCI_SUSPENDED, &slot->flags);
2527 }
2528 }
2529 return ret;
2530 } else {
2531 set_bit(ATMCI_SUSPENDED, &slot->flags);
2532 }
2533 }
2534
2535 return 0;
2536}
2537
2538static int atmci_resume(struct device *dev)
2539{
2540 struct atmel_mci *host = dev_get_drvdata(dev);
2541 int i;
2542 int ret = 0;
2543
2544 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2545 struct atmel_mci_slot *slot = host->slot[i];
2546 int err;
2547
2548 slot = host->slot[i];
2549 if (!slot)
2550 continue;
2551 if (!test_bit(ATMCI_SUSPENDED, &slot->flags))
2552 continue;
2553 err = mmc_resume_host(slot->mmc);
2554 if (err < 0)
2555 ret = err;
2556 else
2557 clear_bit(ATMCI_SUSPENDED, &slot->flags);
2558 }
2559
2560 return ret;
2561}
2562static SIMPLE_DEV_PM_OPS(atmci_pm, atmci_suspend, atmci_resume);
2563#define ATMCI_PM_OPS (&atmci_pm)
2564#else
2565#define ATMCI_PM_OPS NULL
2566#endif
2567
2568static struct platform_driver atmci_driver = {
2569 .remove = __exit_p(atmci_remove),
2570 .driver = {
2571 .name = "atmel_mci",
2572 .pm = ATMCI_PM_OPS,
2573 .of_match_table = of_match_ptr(atmci_dt_ids),
2574 },
2575};
2576
2577static int __init atmci_init(void)
2578{
2579 return platform_driver_probe(&atmci_driver, atmci_probe);
2580}
2581
2582static void __exit atmci_exit(void)
2583{
2584 platform_driver_unregister(&atmci_driver);
2585}
2586
2587late_initcall(atmci_init);
2588module_exit(atmci_exit);
2589
2590MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
2591MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
2592MODULE_LICENSE("GPL v2");
2593