1
2
3
4
5
6
7
8
9
10#include <linux/blkdev.h>
11#include <linux/clk.h>
12#include <linux/debugfs.h>
13#include <linux/device.h>
14#include <linux/dmaengine.h>
15#include <linux/dma-mapping.h>
16#include <linux/err.h>
17#include <linux/gpio.h>
18#include <linux/init.h>
19#include <linux/interrupt.h>
20#include <linux/ioport.h>
21#include <linux/module.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/of_gpio.h>
25#include <linux/platform_device.h>
26#include <linux/scatterlist.h>
27#include <linux/seq_file.h>
28#include <linux/slab.h>
29#include <linux/stat.h>
30#include <linux/types.h>
31#include <linux/platform_data/atmel.h>
32
33#include <linux/mmc/host.h>
34#include <linux/mmc/sdio.h>
35
36#include <mach/atmel-mci.h>
37#include <linux/atmel-mci.h>
38#include <linux/atmel_pdc.h>
39
40#include <asm/io.h>
41#include <asm/unaligned.h>
42
43#include "atmel-mci-regs.h"
44
/* Status-register bits that indicate a failed data transfer */
#define ATMCI_DATA_ERROR_FLAGS	(ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE)
/* Transfers shorter than this many bytes are done with PIO, not DMA */
#define ATMCI_DMA_THRESHOLD	16
47
/*
 * Software event bits kept in host->pending_events and
 * host->completed_events (manipulated with set_bit()/test_and_clear_bit()
 * via the atmci_set_pending()/atmci_test_and_clear_pending() macros).
 */
enum {
	EVENT_CMD_RDY = 0,	/* command finished (CMDRDY seen) */
	EVENT_XFER_COMPLETE,	/* data transfer finished */
	EVENT_NOTBUSY,		/* card released busy (NOTBUSY seen) */
	EVENT_DATA_ERROR,	/* one of ATMCI_DATA_ERROR_FLAGS raised */
};
54
/*
 * State machine for the request-processing tasklet; see host->state.
 * A request moves IDLE -> SENDING_CMD -> (DATA_XFER -> WAITING_NOTBUSY
 * -> SENDING_STOP) -> END_REQUEST and back to IDLE.
 */
enum atmel_mci_state {
	STATE_IDLE = 0,		/* no request in progress */
	STATE_SENDING_CMD,	/* command sent, waiting for CMDRDY */
	STATE_DATA_XFER,	/* data phase in progress */
	STATE_WAITING_NOTBUSY,	/* waiting for the card to release busy */
	STATE_SENDING_STOP,	/* stop command sent */
	STATE_END_REQUEST,	/* finishing/cleaning up the request */
};
63
/* Direction of a PDC transfer, selects RPR/RCR vs TPR/TCR registers */
enum atmci_xfer_dir {
	XFER_RECEIVE = 0,
	XFER_TRANSMIT,
};
68
/* Which of the two chained PDC buffers to program (second uses the
 * ATMEL_PDC_SCND_BUF_OFF register offsets) */
enum atmci_pdc_buf {
	PDC_FIRST_BUF = 0,
	PDC_SECOND_BUF,
};
73
/*
 * Per-IP-revision capability/quirk flags, filled in from the hardware
 * version register. Used throughout the driver to select code paths.
 */
struct atmel_mci_caps {
	bool has_dma_conf_reg;		/* ATMCI_DMA configuration register */
	bool has_pdc;			/* PDC (peripheral DMA controller) */
	bool has_cfg_reg;		/* ATMCI_CFG register */
	bool has_cstor_reg;		/* ATMCI_CSTOR register */
	bool has_highspeed;		/* high-speed mode support */
	bool has_rwproof;		/* MR RDPROOF/WRPROOF bits */
	bool has_odd_clk_div;		/* MR CLKODD bit (9-bit divider) */
	bool has_bad_data_ordering;	/* data needs swab32() fix-up */
	bool need_reset_after_xfer;	/* SWRST before every request */
	bool need_blksz_mul_4;		/* block size must be word-aligned */
	bool need_notbusy_for_read_ops;	/* wait NOTBUSY even after reads */
};
87
/* dmaengine channel and the in-flight descriptor for the data phase */
struct atmel_mci_dma {
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *data_desc;
};
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
/**
 * struct atmel_mci - MMC controller state shared between all slots
 * @lock: spinlock protecting the queue and associated data
 * @regs: pointer to MMIO registers
 * @sg: scatterlist entry currently being processed by PIO or PDC code
 * @sg_len: number of entries left in the scatterlist (PIO path)
 * @pio_offset: offset into the current scatterlist entry (PIO path)
 * @buffer: bounce buffer used when the controller lacks RDPROOF/WRPROOF
 * @buf_size: size of @buffer in bytes
 * @buf_phys_addr: DMA (physical) address of @buffer
 * @cur_slot: slot whose request is currently being processed
 * @mrq: the request currently being processed
 * @cmd: command currently in flight, NULL otherwise
 * @data: data transfer currently in flight, NULL otherwise
 * @data_size: remaining bytes to transfer in the current data phase (PDC)
 * @dma: DMA channel/descriptor used for the data transfer
 * @data_chan: DMA channel actually in use for this transfer, or NULL
 * @dma_conf: slave configuration passed to dmaengine_slave_config()
 * @cmd_status: snapshot of SR taken when the command completed
 * @data_status: snapshot of SR taken when a data event fired
 * @stop_cmdr: pre-computed CMDR value for the stop command, if any
 * @tasklet: tasklet running the request state machine
 * @pending_events: EVENT_* bits set by IRQ/DMA callbacks,
 *	consumed by the tasklet
 * @completed_events: EVENT_* bits already handled by the tasklet
 * @state: current state of the request state machine
 * @queue: list of slots waiting while another slot's request runs
 * @need_clock_update: MR/CFG update deferred until the controller is idle
 * @need_reset: controller must be software-reset before the next request
 * @timer: software timeout watchdog for the current request
 * @mode_reg: cached value of the MR register
 * @cfg_reg: cached value of the CFG register
 * @bus_hz: frequency of the peripheral (master) clock in Hz
 * @mapbase: physical base address of the MMIO region
 * @mck: the peripheral clock
 * @pdev: the platform device backing this controller
 * @slot: per-slot state, indexed by slot id
 * @caps: IP-revision-dependent capabilities and quirks
 * @prepare_data: per-transfer-mode (PIO/PDC/DMA) data setup hook;
 *	returns the interrupt flags to enable
 * @submit_data: hook that actually starts the data transfer
 * @stop_transfer: hook that aborts an in-progress data transfer
 *
 * Fields used by the state machine are generally protected by @lock;
 * @pending_events/@completed_events are manipulated with atomic bitops.
 */
struct atmel_mci {
	spinlock_t lock;
	void __iomem *regs;

	struct scatterlist *sg;
	unsigned int sg_len;
	unsigned int pio_offset;
	unsigned int *buffer;
	unsigned int buf_size;
	dma_addr_t buf_phys_addr;

	struct atmel_mci_slot *cur_slot;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_data *data;
	unsigned int data_size;

	struct atmel_mci_dma dma;
	struct dma_chan *data_chan;
	struct dma_slave_config dma_conf;

	u32 cmd_status;
	u32 data_status;
	u32 stop_cmdr;

	struct tasklet_struct tasklet;
	unsigned long pending_events;
	unsigned long completed_events;
	enum atmel_mci_state state;
	struct list_head queue;

	bool need_clock_update;
	bool need_reset;
	struct timer_list timer;
	u32 mode_reg;
	u32 cfg_reg;
	unsigned long bus_hz;
	unsigned long mapbase;
	struct clk *mck;
	struct platform_device *pdev;

	struct atmel_mci_slot *slot[ATMCI_MAX_NR_SLOTS];

	struct atmel_mci_caps caps;

	u32 (*prepare_data)(struct atmel_mci *host, struct mmc_data *data);
	void (*submit_data)(struct atmel_mci *host, struct mmc_data *data);
	void (*stop_transfer)(struct atmel_mci *host);
};
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
/**
 * struct atmel_mci_slot - per-slot state
 * @mmc: the mmc_host representing this slot
 * @host: the controller this slot belongs to
 * @sdc_reg: value of SDCR (slot select + bus width) for this slot
 * @sdio_irq: interrupt flag (SDIOIRQA/SDIOIRQB) used for SDIO IRQs
 * @mrq: request queued for this slot, NULL when idle
 * @queue_node: list node for host->queue while waiting for the controller
 * @clock: requested clock rate in Hz, 0 when the slot is powered down
 * @flags: ATMCI_* slot state bits (see below)
 * @detect_pin: card-detect GPIO, or an invalid GPIO number if unused
 * @wp_pin: write-protect GPIO, or an invalid GPIO number if unused
 * @detect_is_active_high: polarity of the card-detect GPIO
 * @detect_timer: debounce timer for the card-detect interrupt
 */
struct atmel_mci_slot {
	struct mmc_host *mmc;
	struct atmel_mci *host;

	u32 sdc_reg;
	u32 sdio_irq;

	struct mmc_request *mrq;
	struct list_head queue_node;

	unsigned int clock;
	unsigned long flags;
#define ATMCI_CARD_PRESENT	0	/* card believed to be inserted */
#define ATMCI_CARD_NEED_INIT	1	/* send 74-clock init sequence */
#define ATMCI_SHUTDOWN		2	/* slot is being removed */
#define ATMCI_SUSPENDED		3	/* slot is suspended */

	int detect_pin;
	int wp_pin;
	bool detect_is_active_high;

	struct timer_list detect_timer;
};
266
/*
 * Atomic helpers for the EVENT_* bits in host->pending_events and
 * host->completed_events. "pending" bits are set from interrupt/DMA
 * context and consumed by the tasklet.
 */
#define atmci_test_and_clear_pending(host, event)		\
	test_and_clear_bit(event, &host->pending_events)
#define atmci_set_completed(host, event)			\
	set_bit(event, &host->completed_events)
#define atmci_set_pending(host, event)				\
	set_bit(event, &host->pending_events)
273
274
275
276
277
278static int atmci_req_show(struct seq_file *s, void *v)
279{
280 struct atmel_mci_slot *slot = s->private;
281 struct mmc_request *mrq;
282 struct mmc_command *cmd;
283 struct mmc_command *stop;
284 struct mmc_data *data;
285
286
287 spin_lock_bh(&slot->host->lock);
288 mrq = slot->mrq;
289
290 if (mrq) {
291 cmd = mrq->cmd;
292 data = mrq->data;
293 stop = mrq->stop;
294
295 if (cmd)
296 seq_printf(s,
297 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
298 cmd->opcode, cmd->arg, cmd->flags,
299 cmd->resp[0], cmd->resp[1], cmd->resp[2],
300 cmd->resp[3], cmd->error);
301 if (data)
302 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
303 data->bytes_xfered, data->blocks,
304 data->blksz, data->flags, data->error);
305 if (stop)
306 seq_printf(s,
307 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
308 stop->opcode, stop->arg, stop->flags,
309 stop->resp[0], stop->resp[1], stop->resp[2],
310 stop->resp[3], stop->error);
311 }
312
313 spin_unlock_bh(&slot->host->lock);
314
315 return 0;
316}
317
318static int atmci_req_open(struct inode *inode, struct file *file)
319{
320 return single_open(file, atmci_req_show, inode->i_private);
321}
322
/* File operations for the per-slot debugfs "req" file */
static const struct file_operations atmci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= atmci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
330
/*
 * Pretty-print a status-type register (SR or IMR): raw hex value
 * followed by the symbolic name of every bit that is set. Bits without
 * an entry in the table print as "UNKNOWN".
 */
static void atmci_show_status_reg(struct seq_file *s,
		const char *regname, u32 value)
{
	/* Names of the SR/IMR bits, indexed by bit position */
	static const char *sr_bit[] = {
		[0]	= "CMDRDY",
		[1]	= "RXRDY",
		[2]	= "TXRDY",
		[3]	= "BLKE",
		[4]	= "DTIP",
		[5]	= "NOTBUSY",
		[6]	= "ENDRX",
		[7]	= "ENDTX",
		[8]	= "SDIOIRQA",
		[9]	= "SDIOIRQB",
		[12]	= "SDIOWAIT",
		[14]	= "RXBUFF",
		[15]	= "TXBUFE",
		[16]	= "RINDE",
		[17]	= "RDIRE",
		[18]	= "RCRCE",
		[19]	= "RENDE",
		[20]	= "RTOE",
		[21]	= "DCRCE",
		[22]	= "DTOE",
		[23]	= "CSTOE",
		[24]	= "BLKOVRE",
		[25]	= "DMADONE",
		[26]	= "FIFOEMPTY",
		[27]	= "XFRDONE",
		[30]	= "OVRE",
		[31]	= "UNRE",
	};
	unsigned int i;

	seq_printf(s, "%s:\t0x%08x", regname, value);
	for (i = 0; i < ARRAY_SIZE(sr_bit); i++) {
		if (value & (1 << i)) {
			if (sr_bit[i])
				seq_printf(s, " %s", sr_bit[i]);
			else
				seq_puts(s, " UNKNOWN");
		}
	}
	seq_putc(s, '\n');
}
376
/* debugfs "regs" file: snapshot and decode the controller registers. */
static int atmci_regs_show(struct seq_file *s, void *v)
{
	struct atmel_mci *host = s->private;
	u32 *buf;

	buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * Grab a more or less consistent snapshot. Note that we're not
	 * disabling interrupts, so IMR and SR may not be consistent
	 * with each other.
	 */
	spin_lock_bh(&host->lock);
	clk_enable(host->mck);
	memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
	clk_disable(host->mck);
	spin_unlock_bh(&host->lock);

	/* Decode MR: proof bits plus the clock divider layout */
	seq_printf(s, "MR:\t0x%08x%s%s ",
			buf[ATMCI_MR / 4],
			buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "",
			buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : "");
	if (host->caps.has_odd_clk_div)
		/* 9-bit divider: CLKDIV in bits 0-7, CLKODD in bit 16 */
		seq_printf(s, "{CLKDIV,CLKODD}=%u\n",
				((buf[ATMCI_MR / 4] & 0xff) << 1)
				| ((buf[ATMCI_MR / 4] >> 16) & 1));
	else
		seq_printf(s, "CLKDIV=%u\n",
				(buf[ATMCI_MR / 4] & 0xff));
	seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]);
	seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]);
	seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]);
	seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n",
			buf[ATMCI_BLKR / 4],
			buf[ATMCI_BLKR / 4] & 0xffff,
			(buf[ATMCI_BLKR / 4] >> 16) & 0xffff);
	if (host->caps.has_cstor_reg)
		seq_printf(s, "CSTOR:\t0x%08x\n", buf[ATMCI_CSTOR / 4]);

	/* Don't read RSPR and RDR; it will consume the data there */

	atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]);
	atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]);

	if (host->caps.has_dma_conf_reg) {
		u32 val;

		val = buf[ATMCI_DMA / 4];
		seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n",
				val, val & 3,
				((val >> 4) & 3) ?
					1 << (((val >> 4) & 3) + 1) : 1,
				val & ATMCI_DMAEN ? " DMAEN" : "");
	}
	if (host->caps.has_cfg_reg) {
		u32 val;

		val = buf[ATMCI_CFG / 4];
		seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",
				val,
				val & ATMCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "",
				val & ATMCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "",
				val & ATMCI_CFG_HSMODE ? " HSMODE" : "",
				val & ATMCI_CFG_LSYNC ? " LSYNC" : "");
	}

	kfree(buf);

	return 0;
}
449
450static int atmci_regs_open(struct inode *inode, struct file *file)
451{
452 return single_open(file, atmci_regs_show, inode->i_private);
453}
454
/* File operations for the per-controller debugfs "regs" file */
static const struct file_operations atmci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= atmci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
462
/*
 * Create the debugfs files for one slot under the mmc core's debugfs
 * directory. Failures are reported but otherwise non-fatal.
 */
static void atmci_init_debugfs(struct atmel_mci_slot *slot)
{
	struct mmc_host *mmc = slot->mmc;
	struct atmel_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
			&atmci_regs_fops);
	/*
	 * An ERR_PTR means debugfs is not available at all, so bail out
	 * silently; a NULL return is an actual creation failure.
	 */
	if (IS_ERR(node))
		return;
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
	if (!node)
		goto err;

	/* Expose the state machine and event masks for debugging */
	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				     (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				     (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
504
505#if defined(CONFIG_OF)
/* Device-tree match table: binds to "atmel,hsmci" nodes */
static const struct of_device_id atmci_dt_ids[] = {
	{ .compatible = "atmel,hsmci" },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmci_dt_ids);
512
513static struct mci_platform_data*
514atmci_of_init(struct platform_device *pdev)
515{
516 struct device_node *np = pdev->dev.of_node;
517 struct device_node *cnp;
518 struct mci_platform_data *pdata;
519 u32 slot_id;
520
521 if (!np) {
522 dev_err(&pdev->dev, "device node not found\n");
523 return ERR_PTR(-EINVAL);
524 }
525
526 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
527 if (!pdata) {
528 dev_err(&pdev->dev, "could not allocate memory for pdata\n");
529 return ERR_PTR(-ENOMEM);
530 }
531
532 for_each_child_of_node(np, cnp) {
533 if (of_property_read_u32(cnp, "reg", &slot_id)) {
534 dev_warn(&pdev->dev, "reg property is missing for %s\n",
535 cnp->full_name);
536 continue;
537 }
538
539 if (slot_id >= ATMCI_MAX_NR_SLOTS) {
540 dev_warn(&pdev->dev, "can't have more than %d slots\n",
541 ATMCI_MAX_NR_SLOTS);
542 break;
543 }
544
545 if (of_property_read_u32(cnp, "bus-width",
546 &pdata->slot[slot_id].bus_width))
547 pdata->slot[slot_id].bus_width = 1;
548
549 pdata->slot[slot_id].detect_pin =
550 of_get_named_gpio(cnp, "cd-gpios", 0);
551
552 pdata->slot[slot_id].detect_is_active_high =
553 of_property_read_bool(cnp, "cd-inverted");
554
555 pdata->slot[slot_id].wp_pin =
556 of_get_named_gpio(cnp, "wp-gpios", 0);
557 }
558
559 return pdata;
560}
561#else
/* Stub when CONFIG_OF is disabled: DT probing is unsupported. */
static inline struct mci_platform_data*
atmci_of_init(struct platform_device *dev)
{
	return ERR_PTR(-EINVAL);
}
567#endif
568
569static inline unsigned int atmci_get_version(struct atmel_mci *host)
570{
571 return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
572}
573
/*
 * Software watchdog for a stuck request (armed for 2s in
 * atmci_start_request): fail the in-flight command or data phase with
 * -ETIMEDOUT, force a controller reset, and kick the tasklet to finish
 * the request.
 *
 * NOTE(review): host->mrq is accessed without taking host->lock here;
 * presumably safe because the timer is deleted before mrq is cleared —
 * verify against atmci_request_end().
 */
static void atmci_timeout_timer(unsigned long data)
{
	struct atmel_mci *host;

	host = (struct atmel_mci *)data;

	dev_dbg(&host->pdev->dev, "software timeout\n");

	if (host->mrq->cmd->data) {
		host->mrq->cmd->data->error = -ETIMEDOUT;
		host->data = NULL;
	} else {
		host->mrq->cmd->error = -ETIMEDOUT;
		host->cmd = NULL;
	}
	host->need_reset = 1;
	host->state = STATE_END_REQUEST;
	/* Make the state/error updates visible before the tasklet runs */
	smp_wmb();
	tasklet_schedule(&host->tasklet);
}
594
/*
 * Convert a timeout in nanoseconds to a number of MCI clock cycles.
 */
static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
					unsigned int ns)
{
	/*
	 * It is easier here to use us instead of ns for the timeout,
	 * it prevents from overflows during calculation.
	 */
	unsigned int us = DIV_ROUND_UP(ns, 1000);

	/* Maximum clock frequency is host->bus_hz/2 */
	return us * (DIV_ROUND_UP(host->bus_hz, 2000000));
}
607
/*
 * Program the data timeout register (DTOR) for a transfer. The timeout
 * is encoded as DTOCYC (0-15 cycles) scaled by one of eight DTOMUL
 * multipliers; pick the smallest multiplier that can represent the
 * requested timeout, clamping at the maximum if it still doesn't fit.
 */
static void atmci_set_timeout(struct atmel_mci *host,
		struct atmel_mci_slot *slot, struct mmc_data *data)
{
	/* Shift corresponding to each DTOMUL multiplier value */
	static unsigned	dtomul_to_shift[] = {
		0, 4, 7, 8, 10, 12, 16, 20
	};
	unsigned	timeout;
	unsigned	dtocyc;
	unsigned	dtomul;

	timeout = atmci_ns_to_clocks(host, data->timeout_ns)
		+ data->timeout_clks;

	for (dtomul = 0; dtomul < 8; dtomul++) {
		unsigned shift = dtomul_to_shift[dtomul];
		dtocyc = (timeout + (1 << shift) - 1) >> shift;
		if (dtocyc < 15)
			break;
	}

	/* Timeout too large to encode: use the maximum the hardware allows */
	if (dtomul >= 8) {
		dtomul = 7;
		dtocyc = 15;
	}

	dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n",
			dtocyc << dtomul_to_shift[dtomul]);
	atmci_writel(host, ATMCI_DTOR, (ATMCI_DTOMUL(dtomul) | ATMCI_DTOCYC(dtocyc)));
}
637
638
639
640
/*
 * Return the CMDR value (command index, response type, transfer flags)
 * for the given command. Also marks the command as in progress.
 */
static u32 atmci_prepare_command(struct mmc_host *mmc,
				 struct mmc_command *cmd)
{
	struct mmc_data	*data;
	u32		cmdr;

	cmd->error = -EINPROGRESS;

	cmdr = ATMCI_CMDR_CMDNB(cmd->opcode);

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			cmdr |= ATMCI_CMDR_RSPTYP_136BIT;
		else
			cmdr |= ATMCI_CMDR_RSPTYP_48BIT;
	}

	/*
	 * This should really be MAXLAT_5 for CMD2 and ACMD41, but
	 * it's too difficult to determine whether this is an ACMD or
	 * not. Better make it 64.
	 */
	cmdr |= ATMCI_CMDR_MAXLAT_64CYC;

	if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= ATMCI_CMDR_OPDCMD;

	data = cmd->data;
	if (data) {
		cmdr |= ATMCI_CMDR_START_XFER;

		if (cmd->opcode == SD_IO_RW_EXTENDED) {
			cmdr |= ATMCI_CMDR_SDIO_BLOCK;
		} else {
			if (data->flags & MMC_DATA_STREAM)
				cmdr |= ATMCI_CMDR_STREAM;
			else if (data->blocks > 1)
				cmdr |= ATMCI_CMDR_MULTI_BLOCK;
			else
				cmdr |= ATMCI_CMDR_BLOCK;
		}

		if (data->flags & MMC_DATA_READ)
			cmdr |= ATMCI_CMDR_TRDIR_READ;
	}

	return cmdr;
}
689
/*
 * Start a command on the controller. ARGR must be written before CMDR,
 * since writing CMDR triggers the command.
 */
static void atmci_send_command(struct atmel_mci *host,
		struct mmc_command *cmd, u32 cmd_flags)
{
	/* Only one command may be in flight at a time */
	WARN_ON(host->cmd);
	host->cmd = cmd;

	dev_vdbg(&host->pdev->dev,
			"start command: ARGR=0x%08x CMDR=0x%08x\n",
			cmd->arg, cmd_flags);

	atmci_writel(host, ATMCI_ARGR, cmd->arg);
	atmci_writel(host, ATMCI_CMDR, cmd_flags);
}
703
/* Send the pre-computed stop command and watch for its completion. */
static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
{
	dev_dbg(&host->pdev->dev, "send stop command\n");
	atmci_send_command(host, data->stop, host->stop_cmdr);
	atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
}
710
711
712
713
714
/*
 * Configure given PDC buffer taking care of alignment issues.
 * Programs one PDC pointer/counter register pair (first or second
 * buffer, RX or TX side) and updates host->data_size / host->sg
 * bookkeeping accordingly.
 */
static void atmci_pdc_set_single_buf(struct atmel_mci *host,
		enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
{
	u32 pointer_reg, counter_reg;
	unsigned int buf_size;

	if (dir == XFER_RECEIVE) {
		pointer_reg = ATMEL_PDC_RPR;
		counter_reg = ATMEL_PDC_RCR;
	} else {
		pointer_reg = ATMEL_PDC_TPR;
		counter_reg = ATMEL_PDC_TCR;
	}

	/* The second buffer uses the "next" register pair */
	if (buf_nb == PDC_SECOND_BUF) {
		pointer_reg += ATMEL_PDC_SCND_BUF_OFF;
		counter_reg += ATMEL_PDC_SCND_BUF_OFF;
	}

	if (!host->caps.has_rwproof) {
		/* Without RDPROOF/WRPROOF, transfer via the bounce buffer */
		buf_size = host->buf_size;
		atmci_writel(host, pointer_reg, host->buf_phys_addr);
	} else {
		buf_size = sg_dma_len(host->sg);
		atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
	}

	if (host->data_size <= buf_size) {
		if (host->data_size & 0x3) {
			/* If size is different from modulo 4, transfer bytes */
			atmci_writel(host, counter_reg, host->data_size);
			atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE);
		} else {
			/* Else transfer 32-bits words */
			atmci_writel(host, counter_reg, host->data_size / 4);
		}
		host->data_size = 0;
	} else {
		/* We assume the size of a page is 32-bits aligned */
		atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4);
		host->data_size -= sg_dma_len(host->sg);
		if (host->data_size)
			host->sg = sg_next(host->sg);
	}
}
760
761
762
763
764
765
766static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir)
767{
768 atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF);
769 if (host->data_size)
770 atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF);
771}
772
773
774
775
776static void atmci_pdc_cleanup(struct atmel_mci *host)
777{
778 struct mmc_data *data = host->data;
779
780 if (data)
781 dma_unmap_sg(&host->pdev->dev,
782 data->sg, data->sg_len,
783 ((data->flags & MMC_DATA_WRITE)
784 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
785}
786
787
788
789
790
791
792static void atmci_pdc_complete(struct atmel_mci *host)
793{
794 int transfer_size = host->data->blocks * host->data->blksz;
795 int i;
796
797 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
798
799 if ((!host->caps.has_rwproof)
800 && (host->data->flags & MMC_DATA_READ)) {
801 if (host->caps.has_bad_data_ordering)
802 for (i = 0; i < transfer_size; i++)
803 host->buffer[i] = swab32(host->buffer[i]);
804 sg_copy_from_buffer(host->data->sg, host->data->sg_len,
805 host->buffer, transfer_size);
806 }
807
808 atmci_pdc_cleanup(host);
809
810
811
812
813
814 if (host->data) {
815 dev_dbg(&host->pdev->dev,
816 "(%s) set pending xfer complete\n", __func__);
817 atmci_set_pending(host, EVENT_XFER_COMPLETE);
818 tasklet_schedule(&host->tasklet);
819 }
820}
821
822static void atmci_dma_cleanup(struct atmel_mci *host)
823{
824 struct mmc_data *data = host->data;
825
826 if (data)
827 dma_unmap_sg(host->dma.chan->device->dev,
828 data->sg, data->sg_len,
829 ((data->flags & MMC_DATA_WRITE)
830 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
831}
832
833
834
835
/*
 * dmaengine completion callback: the DMA engine has finished moving
 * data. Disable the DMA handshake, unmap buffers and arrange for the
 * tasklet to finish the data phase.
 */
static void atmci_dma_complete(void *arg)
{
	struct atmel_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(&host->pdev->dev, "DMA complete\n");

	if (host->caps.has_dma_conf_reg)
		/* Disable DMA hardware handshaking on MCI */
		atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN);

	atmci_dma_cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point trying
	 * to send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		dev_dbg(&host->pdev->dev,
		        "(%s) set pending xfer complete\n", __func__);
		atmci_set_pending(host, EVENT_XFER_COMPLETE);
		tasklet_schedule(&host->tasklet);

		/*
		 * Regardless of what the documentation says, we have
		 * to wait for NOTBUSY even after block read
		 * operations.
		 *
		 * When the DMA transfer is complete, the controller
		 * may still be reading the CRC from the card, i.e.
		 * the data transfer is still in progress and we
		 * haven't seen all the potential error bits yet.
		 *
		 * The interrupt handler will schedule a different
		 * tasklet to finish things up when the data transfer
		 * is completely done.
		 *
		 * We may not complete the mmc request here anyway
		 * because the mmc layer may call back and cause us to
		 * violate the "don't submit new operations when the
		 * queue is still full" rule.
		 */
		atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
	}
}
882
883
884
885
886
/*
 * Prepare a PIO data transfer: record the scatterlist state and return
 * the interrupt flags (data errors plus RXRDY or TXRDY) to enable.
 */
static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
{
	u32 iflags;

	data->error = -EINPROGRESS;

	host->sg = data->sg;
	host->sg_len = data->sg_len;
	host->data = data;
	host->data_chan = NULL;

	iflags = ATMCI_DATA_ERROR_FLAGS;

	/*
	 * Errata: MMC data write operation with less than 12
	 * bytes is impaired on Atmel MCI.
	 *
	 * Errata: MCI Transmit Data Register (TDR) FIFO
	 * corruption when length is not multiple of 4.
	 *
	 * Work around both by forcing a controller reset after
	 * such a transfer.
	 */
	if (data->blocks * data->blksz < 12
			|| (data->blocks * data->blksz) & 3)
		host->need_reset = true;

	host->pio_offset = 0;
	if (data->flags & MMC_DATA_READ)
		iflags |= ATMCI_RXRDY;
	else
		iflags |= ATMCI_TXRDY;

	return iflags;
}
919
920
921
922
923
924
925
926static u32
927atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
928{
929 u32 iflags, tmp;
930 unsigned int sg_len;
931 enum dma_data_direction dir;
932 int i;
933
934 data->error = -EINPROGRESS;
935
936 host->data = data;
937 host->sg = data->sg;
938 iflags = ATMCI_DATA_ERROR_FLAGS;
939
940
941 atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE);
942
943 if (data->flags & MMC_DATA_READ) {
944 dir = DMA_FROM_DEVICE;
945 iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
946 } else {
947 dir = DMA_TO_DEVICE;
948 iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
949 }
950
951
952 tmp = atmci_readl(host, ATMCI_MR);
953 tmp &= 0x0000ffff;
954 tmp |= ATMCI_BLKLEN(data->blksz);
955 atmci_writel(host, ATMCI_MR, tmp);
956
957
958 host->data_size = data->blocks * data->blksz;
959 sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir);
960
961 if ((!host->caps.has_rwproof)
962 && (host->data->flags & MMC_DATA_WRITE)) {
963 sg_copy_to_buffer(host->data->sg, host->data->sg_len,
964 host->buffer, host->data_size);
965 if (host->caps.has_bad_data_ordering)
966 for (i = 0; i < host->data_size; i++)
967 host->buffer[i] = swab32(host->buffer[i]);
968 }
969
970 if (host->data_size)
971 atmci_pdc_set_both_buf(host,
972 ((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT));
973
974 return iflags;
975}
976
/*
 * Prepare a dmaengine data transfer. Falls back to PIO
 * (atmci_prepare_data) for transfers that are short or not word
 * aligned. Returns the interrupt flags to enable.
 *
 * NOTE(review): the error paths return -ENODEV/-ENOMEM through a u32
 * that the caller ORs into its interrupt mask — presumably never hit in
 * practice once a channel exists; verify against atmci_start_request().
 */
static u32
atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
{
	struct dma_chan			*chan;
	struct dma_async_tx_descriptor	*desc;
	struct scatterlist		*sg;
	unsigned int			i;
	enum dma_data_direction		direction;
	enum dma_transfer_direction	slave_dirn;
	unsigned int			sglen;
	u32				maxburst;
	u32 iflags;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	iflags = ATMCI_DATA_ERROR_FLAGS;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
		return atmci_prepare_data(host, data);
	if (data->blksz & 3)
		return atmci_prepare_data(host, data);

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return atmci_prepare_data(host, data);
	}

	/* If we don't have a channel, we can't do DMA */
	chan = host->dma.chan;
	if (chan)
		host->data_chan = chan;

	if (!chan)
		return -ENODEV;

	if (data->flags & MMC_DATA_READ) {
		direction = DMA_FROM_DEVICE;
		host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
		maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst);
	} else {
		direction = DMA_TO_DEVICE;
		host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
		maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst);
	}

	/* Enable hardware handshaking between MCI and the DMA controller */
	if (host->caps.has_dma_conf_reg)
		atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) |
			ATMCI_DMAEN);

	sglen = dma_map_sg(chan->device->dev, data->sg,
			data->sg_len, direction);

	dmaengine_slave_config(chan, &host->dma_conf);
	desc = dmaengine_prep_slave_sg(chan,
			data->sg, sglen, slave_dirn,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	host->dma.data_desc = desc;
	desc->callback = atmci_dma_complete;
	desc->callback_param = host;

	return iflags;
unmap_exit:
	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);
	return -ENOMEM;
}
1054
/*
 * PIO submit hook: transfers are driven from the interrupt handler via
 * RXRDY/TXRDY, so nothing needs to be started here.
 */
static void
atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
{
	/* intentionally empty */
}
1060
1061
1062
1063
1064static void
1065atmci_submit_data_pdc(struct atmel_mci *host, struct mmc_data *data)
1066{
1067 if (data->flags & MMC_DATA_READ)
1068 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
1069 else
1070 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
1071}
1072
1073static void
1074atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
1075{
1076 struct dma_chan *chan = host->data_chan;
1077 struct dma_async_tx_descriptor *desc = host->dma.data_desc;
1078
1079 if (chan) {
1080 dmaengine_submit(desc);
1081 dma_async_issue_pending(chan);
1082 }
1083}
1084
/*
 * PIO stop hook: mark the transfer complete and wait for the card to
 * release busy before finishing the request.
 */
static void atmci_stop_transfer(struct atmel_mci *host)
{
	dev_dbg(&host->pdev->dev,
	        "(%s) set pending xfer complete\n", __func__);
	atmci_set_pending(host, EVENT_XFER_COMPLETE);
	atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
1092
1093
1094
1095
/* PDC stop hook: disable both PDC transfer directions. */
static void atmci_stop_transfer_pdc(struct atmel_mci *host)
{
	atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
}
1100
1101static void atmci_stop_transfer_dma(struct atmel_mci *host)
1102{
1103 struct dma_chan *chan = host->data_chan;
1104
1105 if (chan) {
1106 dmaengine_terminate_all(chan);
1107 atmci_dma_cleanup(host);
1108 } else {
1109
1110 dev_dbg(&host->pdev->dev,
1111 "(%s) set pending xfer complete\n", __func__);
1112 atmci_set_pending(host, EVENT_XFER_COMPLETE);
1113 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1114 }
1115}
1116
1117
1118
1119
1120
/*
 * Start the given request on the controller. Must be called with
 * host->lock held and the controller idle (state already set to
 * STATE_SENDING_CMD by the caller).
 */
static void atmci_start_request(struct atmel_mci *host,
		struct atmel_mci_slot *slot)
{
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;
	u32			iflags;
	u32			cmdflags;

	mrq = slot->mrq;
	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;

	dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode);

	/*
	 * Software-reset the controller if required, preserving the SDIO
	 * interrupt enables and restoring the cached MR/CFG values.
	 */
	if (host->need_reset || host->caps.need_reset_after_xfer) {
		iflags = atmci_readl(host, ATMCI_IMR);
		iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
		atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
		atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
		atmci_writel(host, ATMCI_MR, host->mode_reg);
		if (host->caps.has_cfg_reg)
			atmci_writel(host, ATMCI_CFG, host->cfg_reg);
		atmci_writel(host, ATMCI_IER, iflags);
		host->need_reset = false;
	}
	/* Select the slot and bus width */
	atmci_writel(host, ATMCI_SDCR, slot->sdc_reg);

	iflags = atmci_readl(host, ATMCI_IMR);
	if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
		dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
				iflags);

	if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
		/* Send init sequence (74 clock cycles) */
		atmci_writel(host, ATMCI_CMDR, ATMCI_CMDR_SPCMD_INIT);
		while (!(atmci_readl(host, ATMCI_SR) & ATMCI_CMDRDY))
			cpu_relax();
	}
	iflags = 0;
	data = mrq->data;
	if (data) {
		atmci_set_timeout(host, slot, data);

		/* Must set block count/size before sending command */
		atmci_writel(host, ATMCI_BLKR, ATMCI_BCNT(data->blocks)
				| ATMCI_BLKLEN(data->blksz));
		dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
			ATMCI_BCNT(data->blocks) | ATMCI_BLKLEN(data->blksz));

		iflags |= host->prepare_data(host, data);
	}

	iflags |= ATMCI_CMDRDY;
	cmd = mrq->cmd;
	cmdflags = atmci_prepare_command(slot->mmc, cmd);
	atmci_send_command(host, cmd, cmdflags);

	if (data)
		host->submit_data(host, data);

	/* Pre-compute the stop command so it can be sent from the tasklet */
	if (mrq->stop) {
		host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
		host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
		if (!(data->flags & MMC_DATA_WRITE))
			host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ;
		if (data->flags & MMC_DATA_STREAM)
			host->stop_cmdr |= ATMCI_CMDR_STREAM;
		else
			host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
	}

	/*
	 * We could have enabled interrupts earlier, but I suspect
	 * that would open up a nice can of interesting race
	 * conditions (e.g. command and data complete, but stop not
	 * prepared yet.)
	 */
	atmci_writel(host, ATMCI_IER, iflags);

	/* Arm the software timeout watchdog */
	mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000));
}
1208
1209static void atmci_queue_request(struct atmel_mci *host,
1210 struct atmel_mci_slot *slot, struct mmc_request *mrq)
1211{
1212 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1213 host->state);
1214
1215 spin_lock_bh(&host->lock);
1216 slot->mrq = mrq;
1217 if (host->state == STATE_IDLE) {
1218 host->state = STATE_SENDING_CMD;
1219 atmci_start_request(host, slot);
1220 } else {
1221 dev_dbg(&host->pdev->dev, "queue request\n");
1222 list_add_tail(&slot->queue_node, &host->queue);
1223 }
1224 spin_unlock_bh(&host->lock);
1225}
1226
1227static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1228{
1229 struct atmel_mci_slot *slot = mmc_priv(mmc);
1230 struct atmel_mci *host = slot->host;
1231 struct mmc_data *data;
1232
1233 WARN_ON(slot->mrq);
1234 dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244 if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) {
1245 mrq->cmd->error = -ENOMEDIUM;
1246 mmc_request_done(mmc, mrq);
1247 return;
1248 }
1249
1250
1251 data = mrq->data;
1252 if (data && data->blocks > 1 && data->blksz & 3) {
1253 mrq->cmd->error = -EINVAL;
1254 mmc_request_done(mmc, mrq);
1255 }
1256
1257 atmci_queue_request(host, slot, mrq);
1258}
1259
/*
 * mmc_host_ops.set_ios: apply bus width, clock rate and power mode.
 * The controller clock is shared by all slots, so the divider is
 * derived from the slowest clock requested by any active slot.
 */
static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct atmel_mci_slot	*slot = mmc_priv(mmc);
	struct atmel_mci	*host = slot->host;
	unsigned int		i;

	slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		slot->sdc_reg |= ATMCI_SDCBUS_1BIT;
		break;
	case MMC_BUS_WIDTH_4:
		slot->sdc_reg |= ATMCI_SDCBUS_4BIT;
		break;
	}

	if (ios->clock) {
		unsigned int clock_min = ~0U;
		u32 clkdiv;

		spin_lock_bh(&host->lock);
		/* mode_reg == 0 means the controller clock is off */
		if (!host->mode_reg) {
			clk_enable(host->mck);
			atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
			atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
			if (host->caps.has_cfg_reg)
				atmci_writel(host, ATMCI_CFG, host->cfg_reg);
		}

		/*
		 * Use mirror of ios->clock to prevent race with mmc
		 * core ios update when finding the minimum slot clock.
		 */
		slot->clock = ios->clock;
		for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
			if (host->slot[i] && host->slot[i]->clock
					&& host->slot[i]->clock < clock_min)
				clock_min = host->slot[i]->clock;
		}

		/* Calculate clock divider */
		if (host->caps.has_odd_clk_div) {
			/* 9-bit divider: fCLK = fMCK / (clkdiv + 2) */
			clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
			if (clkdiv > 511) {
				dev_warn(&mmc->class_dev,
				         "clock %u too slow; using %lu\n",
				         clock_min, host->bus_hz / (511 + 2));
				clkdiv = 511;
			}
			host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1)
			                 | ATMCI_MR_CLKODD(clkdiv & 1);
		} else {
			/* 8-bit divider: fCLK = fMCK / (2 * (clkdiv + 1)) */
			clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
			if (clkdiv > 255) {
				dev_warn(&mmc->class_dev,
				         "clock %u too slow; using %lu\n",
				         clock_min, host->bus_hz / (2 * 256));
				clkdiv = 255;
			}
			host->mode_reg = ATMCI_MR_CLKDIV(clkdiv);
		}

		/*
		 * WRPROOF and RDPROOF prevent overruns/underruns by
		 * stopping the clock when the FIFO is full/empty.
		 * This state is not expected to last for long.
		 */
		if (host->caps.has_rwproof)
			host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF);

		if (host->caps.has_cfg_reg) {
			/* setup High Speed mode in relation with card capacity */
			if (ios->timing == MMC_TIMING_SD_HS)
				host->cfg_reg |= ATMCI_CFG_HSMODE;
			else
				host->cfg_reg &= ~ATMCI_CFG_HSMODE;
		}

		if (list_empty(&host->queue)) {
			atmci_writel(host, ATMCI_MR, host->mode_reg);
			if (host->caps.has_cfg_reg)
				atmci_writel(host, ATMCI_CFG, host->cfg_reg);
		} else {
			/* Defer the update until the controller is idle */
			host->need_clock_update = true;
		}

		spin_unlock_bh(&host->lock);
	} else {
		bool any_slot_active = false;

		spin_lock_bh(&host->lock);
		slot->clock = 0;
		for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
			if (host->slot[i] && host->slot[i]->clock) {
				any_slot_active = true;
				break;
			}
		}
		/* Gate the controller clock once no slot needs it */
		if (!any_slot_active) {
			atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
			if (host->mode_reg) {
				atmci_readl(host, ATMCI_MR);
				clk_disable(host->mck);
			}
			host->mode_reg = 0;
		}
		spin_unlock_bh(&host->lock);
	}

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
		break;
	default:
		/*
		 * No power control implemented for the other modes;
		 * presumably the boards this driver targets keep MMC
		 * power always on — TODO confirm before adding control.
		 */
		break;
	}
}
1389
1390static int atmci_get_ro(struct mmc_host *mmc)
1391{
1392 int read_only = -ENOSYS;
1393 struct atmel_mci_slot *slot = mmc_priv(mmc);
1394
1395 if (gpio_is_valid(slot->wp_pin)) {
1396 read_only = gpio_get_value(slot->wp_pin);
1397 dev_dbg(&mmc->class_dev, "card is %s\n",
1398 read_only ? "read-only" : "read-write");
1399 }
1400
1401 return read_only;
1402}
1403
1404static int atmci_get_cd(struct mmc_host *mmc)
1405{
1406 int present = -ENOSYS;
1407 struct atmel_mci_slot *slot = mmc_priv(mmc);
1408
1409 if (gpio_is_valid(slot->detect_pin)) {
1410 present = !(gpio_get_value(slot->detect_pin) ^
1411 slot->detect_is_active_high);
1412 dev_dbg(&mmc->class_dev, "card is %spresent\n",
1413 present ? "" : "not ");
1414 }
1415
1416 return present;
1417}
1418
1419static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1420{
1421 struct atmel_mci_slot *slot = mmc_priv(mmc);
1422 struct atmel_mci *host = slot->host;
1423
1424 if (enable)
1425 atmci_writel(host, ATMCI_IER, slot->sdio_irq);
1426 else
1427 atmci_writel(host, ATMCI_IDR, slot->sdio_irq);
1428}
1429
/* mmc_host_ops hooks; one instance shared by every slot on this controller. */
static const struct mmc_host_ops atmci_ops = {
	.request = atmci_request,
	.set_ios = atmci_set_ios,
	.get_ro = atmci_get_ro,
	.get_cd = atmci_get_cd,
	.enable_sdio_irq = atmci_enable_sdio_irq,
};
1437
1438
/*
 * Finish the current request: apply a deferred clock/config update,
 * hand the completed mrq back to the MMC core, and either start the
 * next queued slot's request or drop to STATE_IDLE.
 *
 * Called with host->lock held; the lock is released around
 * mmc_request_done() and re-acquired afterwards (see the sparse
 * annotations below).
 */
static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct atmel_mci_slot *slot = NULL;
	struct mmc_host *prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	/*
	 * Apply a mode/config register update that set_ios() deferred
	 * because a transfer was still in progress (need_clock_update
	 * is set on the "list not empty" path of atmci_set_ios()).
	 */
	if (host->need_clock_update) {
		atmci_writel(host, ATMCI_MR, host->mode_reg);
		if (host->caps.has_cfg_reg)
			atmci_writel(host, ATMCI_CFG, host->cfg_reg);
	}

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		/* Another slot is waiting: dequeue it and start its request. */
		slot = list_entry(host->queue.next,
				struct atmel_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
				mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		atmci_start_request(host, slot);
	} else {
		dev_vdbg(&host->pdev->dev, "list empty\n");
		host->state = STATE_IDLE;
	}

	/* The request completed: cancel its timeout watchdog. */
	del_timer(&host->timer);

	/*
	 * Drop the lock while notifying the core; mmc_request_done()
	 * may call back into this driver (e.g. to queue a new request).
	 */
	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
1480
/*
 * Latch the command response from the controller and translate the
 * status flags captured at CMDRDY time (host->cmd_status) into an
 * errno for the MMC core.
 */
static void atmci_command_complete(struct atmel_mci *host,
			struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	/* Read the (up to 128-bit) response from the RSPR FIFO. */
	cmd->resp[0] = atmci_readl(host, ATMCI_RSPR);
	cmd->resp[1] = atmci_readl(host, ATMCI_RSPR);
	cmd->resp[2] = atmci_readl(host, ATMCI_RSPR);
	cmd->resp[3] = atmci_readl(host, ATMCI_RSPR);

	if (status & ATMCI_RTOE)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & ATMCI_RCRCE))
		cmd->error = -EILSEQ;
	else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
		cmd->error = -EIO;
	else if (host->mrq->data && (host->mrq->data->blksz & 3)) {
		/*
		 * Controllers with need_blksz_mul_4 cannot handle block
		 * sizes that are not a multiple of 4 bytes; reject the
		 * request and force a reset before the next transfer.
		 */
		if (host->caps.need_blksz_mul_4) {
			cmd->error = -EINVAL;
			host->need_reset = 1;
		}
	} else
		cmd->error = 0;
}
1506
/*
 * Debounced card-detect handler (runs from slot->detect_timer).
 *
 * Re-enables the card-detect GPIO interrupt masked by
 * atmci_detect_interrupt(), samples the new card-present state and,
 * on a change, aborts any request in flight for this slot before
 * notifying the MMC core via mmc_detect_change().
 */
static void atmci_detect_change(unsigned long data)
{
	struct atmel_mci_slot *slot = (struct atmel_mci_slot *)data;
	bool present;
	bool present_old;

	/*
	 * atmci_cleanup_slot() sets ATMCI_SHUTDOWN before freeing the
	 * detect IRQ, so we must not re-enable an interrupt that may
	 * already be gone. The read barrier pairs with the smp_wmb()
	 * in atmci_cleanup_slot().
	 */
	smp_rmb();
	if (test_bit(ATMCI_SHUTDOWN, &slot->flags))
		return;

	enable_irq(gpio_to_irq(slot->detect_pin));
	present = !(gpio_get_value(slot->detect_pin) ^
			slot->detect_is_active_high);
	present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);

	dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
			present, present_old);

	if (present != present_old) {
		struct atmel_mci *host = slot->host;
		struct mmc_request *mrq;

		dev_dbg(&slot->mmc->class_dev, "card %s\n",
			present ? "inserted" : "removed");

		spin_lock(&host->lock);

		if (!present)
			clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
		else
			set_bit(ATMCI_CARD_PRESENT, &slot->flags);

		/* Abort any request that was in flight for this slot. */
		mrq = slot->mrq;
		if (mrq) {
			if (mrq == host->mrq) {
				/*
				 * The request is currently running on
				 * the hardware: reset the controller to
				 * cancel it, then restore the mode and
				 * config registers lost by the reset.
				 */
				atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
				atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
				atmci_writel(host, ATMCI_MR, host->mode_reg);
				if (host->caps.has_cfg_reg)
					atmci_writel(host, ATMCI_CFG, host->cfg_reg);

				host->data = NULL;
				host->cmd = NULL;

				/* Fail whichever stage the FSM was in. */
				switch (host->state) {
				case STATE_IDLE:
					break;
				case STATE_SENDING_CMD:
					mrq->cmd->error = -ENOMEDIUM;
					if (mrq->data)
						host->stop_transfer(host);
					break;
				case STATE_DATA_XFER:
					mrq->data->error = -ENOMEDIUM;
					host->stop_transfer(host);
					break;
				case STATE_WAITING_NOTBUSY:
					mrq->data->error = -ENOMEDIUM;
					break;
				case STATE_SENDING_STOP:
					mrq->stop->error = -ENOMEDIUM;
					break;
				case STATE_END_REQUEST:
					break;
				}

				atmci_request_end(host, mrq);
			} else {
				/* Request was still queued: fail it directly. */
				list_del(&slot->queue_node);
				mrq->cmd->error = -ENOMEDIUM;
				if (mrq->data)
					mrq->data->error = -ENOMEDIUM;
				if (mrq->stop)
					mrq->stop->error = -ENOMEDIUM;

				spin_unlock(&host->lock);
				mmc_request_done(slot->mmc, mrq);
				spin_lock(&host->lock);
			}
		}
		spin_unlock(&host->lock);

		mmc_detect_change(slot->mmc, 0);
	}
}
1603
1604static void atmci_tasklet_func(unsigned long priv)
1605{
1606 struct atmel_mci *host = (struct atmel_mci *)priv;
1607 struct mmc_request *mrq = host->mrq;
1608 struct mmc_data *data = host->data;
1609 enum atmel_mci_state state = host->state;
1610 enum atmel_mci_state prev_state;
1611 u32 status;
1612
1613 spin_lock(&host->lock);
1614
1615 state = host->state;
1616
1617 dev_vdbg(&host->pdev->dev,
1618 "tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
1619 state, host->pending_events, host->completed_events,
1620 atmci_readl(host, ATMCI_IMR));
1621
1622 do {
1623 prev_state = state;
1624 dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state);
1625
1626 switch (state) {
1627 case STATE_IDLE:
1628 break;
1629
1630 case STATE_SENDING_CMD:
1631
1632
1633
1634
1635
1636
1637 dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
1638 if (!atmci_test_and_clear_pending(host,
1639 EVENT_CMD_RDY))
1640 break;
1641
1642 dev_dbg(&host->pdev->dev, "set completed cmd ready\n");
1643 host->cmd = NULL;
1644 atmci_set_completed(host, EVENT_CMD_RDY);
1645 atmci_command_complete(host, mrq->cmd);
1646 if (mrq->data) {
1647 dev_dbg(&host->pdev->dev,
1648 "command with data transfer");
1649
1650
1651
1652
1653 if (mrq->cmd->error) {
1654 host->stop_transfer(host);
1655 host->data = NULL;
1656 atmci_writel(host, ATMCI_IDR,
1657 ATMCI_TXRDY | ATMCI_RXRDY
1658 | ATMCI_DATA_ERROR_FLAGS);
1659 state = STATE_END_REQUEST;
1660 } else
1661 state = STATE_DATA_XFER;
1662 } else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
1663 dev_dbg(&host->pdev->dev,
1664 "command response need waiting notbusy");
1665 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1666 state = STATE_WAITING_NOTBUSY;
1667 } else
1668 state = STATE_END_REQUEST;
1669
1670 break;
1671
1672 case STATE_DATA_XFER:
1673 if (atmci_test_and_clear_pending(host,
1674 EVENT_DATA_ERROR)) {
1675 dev_dbg(&host->pdev->dev, "set completed data error\n");
1676 atmci_set_completed(host, EVENT_DATA_ERROR);
1677 state = STATE_END_REQUEST;
1678 break;
1679 }
1680
1681
1682
1683
1684
1685
1686
1687
1688 dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");
1689 if (!atmci_test_and_clear_pending(host,
1690 EVENT_XFER_COMPLETE))
1691 break;
1692
1693 dev_dbg(&host->pdev->dev,
1694 "(%s) set completed xfer complete\n",
1695 __func__);
1696 atmci_set_completed(host, EVENT_XFER_COMPLETE);
1697
1698 if (host->caps.need_notbusy_for_read_ops ||
1699 (host->data->flags & MMC_DATA_WRITE)) {
1700 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1701 state = STATE_WAITING_NOTBUSY;
1702 } else if (host->mrq->stop) {
1703 atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
1704 atmci_send_stop_cmd(host, data);
1705 state = STATE_SENDING_STOP;
1706 } else {
1707 host->data = NULL;
1708 data->bytes_xfered = data->blocks * data->blksz;
1709 data->error = 0;
1710 state = STATE_END_REQUEST;
1711 }
1712 break;
1713
1714 case STATE_WAITING_NOTBUSY:
1715
1716
1717
1718
1719
1720
1721 dev_dbg(&host->pdev->dev, "FSM: not busy?\n");
1722 if (!atmci_test_and_clear_pending(host,
1723 EVENT_NOTBUSY))
1724 break;
1725
1726 dev_dbg(&host->pdev->dev, "set completed not busy\n");
1727 atmci_set_completed(host, EVENT_NOTBUSY);
1728
1729 if (host->data) {
1730
1731
1732
1733
1734
1735 if (host->mrq->stop) {
1736 atmci_writel(host, ATMCI_IER,
1737 ATMCI_CMDRDY);
1738 atmci_send_stop_cmd(host, data);
1739 state = STATE_SENDING_STOP;
1740 } else {
1741 host->data = NULL;
1742 data->bytes_xfered = data->blocks
1743 * data->blksz;
1744 data->error = 0;
1745 state = STATE_END_REQUEST;
1746 }
1747 } else
1748 state = STATE_END_REQUEST;
1749 break;
1750
1751 case STATE_SENDING_STOP:
1752
1753
1754
1755
1756
1757
1758 dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
1759 if (!atmci_test_and_clear_pending(host,
1760 EVENT_CMD_RDY))
1761 break;
1762
1763 dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
1764 host->cmd = NULL;
1765 data->bytes_xfered = data->blocks * data->blksz;
1766 data->error = 0;
1767 atmci_command_complete(host, mrq->stop);
1768 if (mrq->stop->error) {
1769 host->stop_transfer(host);
1770 atmci_writel(host, ATMCI_IDR,
1771 ATMCI_TXRDY | ATMCI_RXRDY
1772 | ATMCI_DATA_ERROR_FLAGS);
1773 state = STATE_END_REQUEST;
1774 } else {
1775 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1776 state = STATE_WAITING_NOTBUSY;
1777 }
1778 host->data = NULL;
1779 break;
1780
1781 case STATE_END_REQUEST:
1782 atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY
1783 | ATMCI_DATA_ERROR_FLAGS);
1784 status = host->data_status;
1785 if (unlikely(status)) {
1786 host->stop_transfer(host);
1787 host->data = NULL;
1788 if (status & ATMCI_DTOE) {
1789 data->error = -ETIMEDOUT;
1790 } else if (status & ATMCI_DCRCE) {
1791 data->error = -EILSEQ;
1792 } else {
1793 data->error = -EIO;
1794 }
1795 }
1796
1797 atmci_request_end(host, host->mrq);
1798 state = STATE_IDLE;
1799 break;
1800 }
1801 } while (state != prev_state);
1802
1803 host->state = state;
1804
1805 spin_unlock(&host->lock);
1806}
1807
/*
 * PIO receive path: drain the RX data register into the current
 * scatterlist entry, one 32-bit word at a time, for as long as RXRDY
 * stays set. A word that straddles a scatterlist boundary is split
 * across the two entries.
 */
static void atmci_read_data_pio(struct atmel_mci *host)
{
	struct scatterlist *sg = host->sg;
	void *buf = sg_virt(sg);
	unsigned int offset = host->pio_offset;
	struct mmc_data *data = host->data;
	u32 value;
	u32 status;
	unsigned int nbytes = 0;

	do {
		value = atmci_readl(host, ATMCI_RDR);
		if (likely(offset + 4 <= sg->length)) {
			/* Whole word fits in the current sg entry. */
			put_unaligned(value, (u32 *)(buf + offset));

			offset += 4;
			nbytes += 4;

			if (offset == sg->length) {
				/* Entry filled: move on to the next one. */
				flush_dcache_page(sg_page(sg));
				host->sg = sg = sg_next(sg);
				host->sg_len--;
				if (!sg || !host->sg_len)
					goto done;

				offset = 0;
				buf = sg_virt(sg);
			}
		} else {
			/*
			 * The word straddles an sg boundary: store the
			 * leading bytes here and the remainder at the
			 * start of the next entry.
			 */
			unsigned int remaining = sg->length - offset;
			memcpy(buf + offset, &value, remaining);
			nbytes += remaining;

			flush_dcache_page(sg_page(sg));
			host->sg = sg = sg_next(sg);
			host->sg_len--;
			if (!sg || !host->sg_len)
				goto done;

			offset = 4 - remaining;
			buf = sg_virt(sg);
			memcpy(buf, (u8 *)&value + remaining, offset);
			nbytes += offset;
		}

		/* On any data error, record the status for the tasklet
		 * and stop the PIO loop immediately. */
		status = atmci_readl(host, ATMCI_SR);
		if (status & ATMCI_DATA_ERROR_FLAGS) {
			atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_RXRDY
				| ATMCI_DATA_ERROR_FLAGS));
			host->data_status = status;
			data->bytes_xfered += nbytes;
			return;
		}
	} while (status & ATMCI_RXRDY);

	/* FIFO drained but transfer unfinished: save our position. */
	host->pio_offset = offset;
	data->bytes_xfered += nbytes;

	return;

done:
	/* Scatterlist fully consumed: stop RXRDY interrupts, wait for
	 * NOTBUSY, and signal transfer completion to the tasklet. */
	atmci_writel(host, ATMCI_IDR, ATMCI_RXRDY);
	atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
	data->bytes_xfered += nbytes;
	smp_wmb();
	atmci_set_pending(host, EVENT_XFER_COMPLETE);
}
1875
/*
 * PIO transmit path: feed the TX data register from the current
 * scatterlist entry, one 32-bit word at a time, while TXRDY stays
 * set. Mirrors atmci_read_data_pio() including the handling of words
 * that straddle a scatterlist boundary.
 */
static void atmci_write_data_pio(struct atmel_mci *host)
{
	struct scatterlist *sg = host->sg;
	void *buf = sg_virt(sg);
	unsigned int offset = host->pio_offset;
	struct mmc_data *data = host->data;
	u32 value;
	u32 status;
	unsigned int nbytes = 0;

	do {
		if (likely(offset + 4 <= sg->length)) {
			/* Whole word available in the current sg entry. */
			value = get_unaligned((u32 *)(buf + offset));
			atmci_writel(host, ATMCI_TDR, value);

			offset += 4;
			nbytes += 4;
			if (offset == sg->length) {
				host->sg = sg = sg_next(sg);
				host->sg_len--;
				if (!sg || !host->sg_len)
					goto done;

				offset = 0;
				buf = sg_virt(sg);
			}
		} else {
			/*
			 * Word straddles an sg boundary: assemble it
			 * from the tail of this entry and the head of
			 * the next (zero-padded if this is the last).
			 */
			unsigned int remaining = sg->length - offset;

			value = 0;
			memcpy(&value, buf + offset, remaining);
			nbytes += remaining;

			host->sg = sg = sg_next(sg);
			host->sg_len--;
			if (!sg || !host->sg_len) {
				atmci_writel(host, ATMCI_TDR, value);
				goto done;
			}

			offset = 4 - remaining;
			buf = sg_virt(sg);
			memcpy((u8 *)&value + remaining, buf, offset);
			atmci_writel(host, ATMCI_TDR, value);
			nbytes += offset;
		}

		/* On any data error, record the status for the tasklet
		 * and stop the PIO loop immediately. */
		status = atmci_readl(host, ATMCI_SR);
		if (status & ATMCI_DATA_ERROR_FLAGS) {
			atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_TXRDY
				| ATMCI_DATA_ERROR_FLAGS));
			host->data_status = status;
			data->bytes_xfered += nbytes;
			return;
		}
	} while (status & ATMCI_TXRDY);

	/* FIFO full but transfer unfinished: save our position. */
	host->pio_offset = offset;
	data->bytes_xfered += nbytes;

	return;

done:
	/* Scatterlist fully consumed: stop TXRDY interrupts, wait for
	 * NOTBUSY, and signal transfer completion to the tasklet. */
	atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY);
	atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
	data->bytes_xfered += nbytes;
	smp_wmb();
	atmci_set_pending(host, EVENT_XFER_COMPLETE);
}
1945
1946static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
1947{
1948 int i;
1949
1950 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1951 struct atmel_mci_slot *slot = host->slot[i];
1952 if (slot && (status & slot->sdio_irq)) {
1953 mmc_signal_sdio_irq(slot->mmc);
1954 }
1955 }
1956}
1957
1958
/*
 * Top-half interrupt handler.
 *
 * Reads SR masked by IMR; each pending cause is acknowledged by
 * disabling its interrupt and, where the heavy lifting belongs in the
 * bottom half, recorded with atmci_set_pending() before scheduling
 * the tasklet. Loops for up to 5 extra passes to catch causes that
 * assert while earlier ones are being serviced.
 */
static irqreturn_t atmci_interrupt(int irq, void *dev_id)
{
	struct atmel_mci *host = dev_id;
	u32 status, mask, pending;
	unsigned int pass_count = 0;

	do {
		status = atmci_readl(host, ATMCI_SR);
		mask = atmci_readl(host, ATMCI_IMR);
		pending = status & mask;
		if (!pending)
			break;

		if (pending & ATMCI_DATA_ERROR_FLAGS) {
			dev_dbg(&host->pdev->dev, "IRQ: data error\n");
			/* Silence the whole data path; the tasklet will
			 * handle the error from host->data_status. */
			atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
					| ATMCI_RXRDY | ATMCI_TXRDY
					| ATMCI_ENDRX | ATMCI_ENDTX
					| ATMCI_RXBUFF | ATMCI_TXBUFE);

			host->data_status = status;
			dev_dbg(&host->pdev->dev, "set pending data error\n");
			/* Publish data_status before the event bit. */
			smp_wmb();
			atmci_set_pending(host, EVENT_DATA_ERROR);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & ATMCI_TXBUFE) {
			dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n");
			atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
			atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
			/*
			 * Both PDC buffers drained: reload both if more
			 * data remains, otherwise finish the transfer.
			 */
			if (host->data_size) {
				atmci_pdc_set_both_buf(host, XFER_TRANSMIT);
				atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
				atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE);
			} else {
				atmci_pdc_complete(host);
			}
		} else if (pending & ATMCI_ENDTX) {
			dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n");
			atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);

			/* First TX buffer done: queue the next chunk. */
			if (host->data_size) {
				atmci_pdc_set_single_buf(host,
						XFER_TRANSMIT, PDC_SECOND_BUF);
				atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
			}
		}

		if (pending & ATMCI_RXBUFF) {
			dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n");
			atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
			atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
			/*
			 * Both PDC buffers full: reload both if more
			 * data remains, otherwise finish the transfer.
			 */
			if (host->data_size) {
				atmci_pdc_set_both_buf(host, XFER_RECEIVE);
				atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
				atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF);
			} else {
				atmci_pdc_complete(host);
			}
		} else if (pending & ATMCI_ENDRX) {
			dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n");
			atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);

			/* First RX buffer done: queue the next chunk. */
			if (host->data_size) {
				atmci_pdc_set_single_buf(host,
						XFER_RECEIVE, PDC_SECOND_BUF);
				atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
			}
		}

		/*
		 * NOTE(review): BLKE is mapped onto EVENT_NOTBUSY here,
		 * apparently a workaround for controllers where the
		 * NOTBUSY signal is unreliable — confirm against the
		 * IP errata.
		 */
		if (pending & ATMCI_BLKE) {
			dev_dbg(&host->pdev->dev, "IRQ: blke\n");
			atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
			smp_wmb();
			dev_dbg(&host->pdev->dev, "set pending notbusy\n");
			atmci_set_pending(host, EVENT_NOTBUSY);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & ATMCI_NOTBUSY) {
			dev_dbg(&host->pdev->dev, "IRQ: not_busy\n");
			atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
			smp_wmb();
			dev_dbg(&host->pdev->dev, "set pending notbusy\n");
			atmci_set_pending(host, EVENT_NOTBUSY);
			tasklet_schedule(&host->tasklet);
		}

		/* PIO fast paths are handled entirely in the top half. */
		if (pending & ATMCI_RXRDY)
			atmci_read_data_pio(host);
		if (pending & ATMCI_TXRDY)
			atmci_write_data_pio(host);

		if (pending & ATMCI_CMDRDY) {
			dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n");
			atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
			host->cmd_status = status;
			/* Publish cmd_status before the event bit. */
			smp_wmb();
			dev_dbg(&host->pdev->dev, "set pending cmd rdy\n");
			atmci_set_pending(host, EVENT_CMD_RDY);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
			atmci_sdio_interrupt(host, status);

	} while (pass_count++ < 5);

	/*
	 * NOTE(review): the do/while increments pass_count at least
	 * once, so this always returns IRQ_HANDLED, even when nothing
	 * was pending on entry.
	 */
	return pass_count ? IRQ_HANDLED : IRQ_NONE;
}
2086
2087static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
2088{
2089 struct atmel_mci_slot *slot = dev_id;
2090
2091
2092
2093
2094
2095
2096 disable_irq_nosync(irq);
2097 mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20));
2098
2099 return IRQ_HANDLED;
2100}
2101
/*
 * Allocate, configure and register one mmc_host for slot 'id'.
 *
 * Sets up the clock range, bus-width/highspeed capabilities, per-IP-
 * version transfer limits, and the optional card-detect and write-
 * protect GPIOs. GPIO failures degrade the slot (polling / no WP)
 * rather than failing the probe.
 *
 * Returns 0 on success, or -ENOMEM if mmc_alloc_host() fails.
 */
static int __init atmci_init_slot(struct atmel_mci *host,
		struct mci_slot_pdata *slot_data, unsigned int id,
		u32 sdc_reg, u32 sdio_irq)
{
	struct mmc_host *mmc;
	struct atmel_mci_slot *slot;

	mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->mmc = mmc;
	slot->host = host;
	slot->detect_pin = slot_data->detect_pin;
	slot->wp_pin = slot_data->wp_pin;
	slot->detect_is_active_high = slot_data->detect_is_active_high;
	slot->sdc_reg = sdc_reg;
	slot->sdio_irq = sdio_irq;

	dev_dbg(&mmc->class_dev,
		"slot[%u]: bus_width=%u, detect_pin=%d, "
		"detect_is_active_high=%s, wp_pin=%d\n",
		id, slot_data->bus_width, slot_data->detect_pin,
		slot_data->detect_is_active_high ? "true" : "false",
		slot_data->wp_pin);

	mmc->ops = &atmci_ops;
	/* Clock range derived from the bus clock and divider limits. */
	mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
	mmc->f_max = host->bus_hz / 2;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	if (sdio_irq)
		mmc->caps |= MMC_CAP_SDIO_IRQ;
	if (host->caps.has_highspeed)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;
	/*
	 * 4-bit data is advertised only together with the read/write
	 * proof feature — presumably narrow transfers are the only
	 * safe mode without it; TODO confirm against the datasheet.
	 */
	if ((slot_data->bus_width >= 4) && host->caps.has_rwproof)
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	/* Transfer-size limits differ between IP revisions. */
	if (atmci_get_version(host) < 0x200) {
		mmc->max_segs = 256;
		mmc->max_blk_size = 4095;
		mmc->max_blk_count = 256;
		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs;
	} else {
		mmc->max_segs = 64;
		mmc->max_req_size = 32768 * 512;
		mmc->max_blk_size = 32768;
		mmc->max_blk_count = 512;
	}

	/* Assume a card is present until the detect GPIO says otherwise. */
	set_bit(ATMCI_CARD_PRESENT, &slot->flags);
	if (gpio_is_valid(slot->detect_pin)) {
		if (gpio_request(slot->detect_pin, "mmc_detect")) {
			dev_dbg(&mmc->class_dev, "no detect pin available\n");
			slot->detect_pin = -EBUSY;
		} else if (gpio_get_value(slot->detect_pin) ^
				slot->detect_is_active_high) {
			clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
		}
	}

	/* Without a detect GPIO the MMC core must poll for card changes. */
	if (!gpio_is_valid(slot->detect_pin))
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	if (gpio_is_valid(slot->wp_pin)) {
		if (gpio_request(slot->wp_pin, "mmc_wp")) {
			dev_dbg(&mmc->class_dev, "no WP pin available\n");
			slot->wp_pin = -EBUSY;
		}
	}

	host->slot[id] = slot;
	mmc_add_host(mmc);

	if (gpio_is_valid(slot->detect_pin)) {
		int ret;

		/* Debounce timer armed by atmci_detect_interrupt(). */
		setup_timer(&slot->detect_timer, atmci_detect_change,
				(unsigned long)slot);

		ret = request_irq(gpio_to_irq(slot->detect_pin),
				atmci_detect_interrupt,
				IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
				"mmc-detect", slot);
		if (ret) {
			dev_dbg(&mmc->class_dev,
				"could not request IRQ %d for detect pin\n",
				gpio_to_irq(slot->detect_pin));
			/* Give up on the detect pin; mark it busy. */
			gpio_free(slot->detect_pin);
			slot->detect_pin = -EBUSY;
		}
	}

	atmci_init_debugfs(slot);

	return 0;
}
2206
/*
 * Unregister and release one slot: flag shutdown (so the detect timer
 * won't re-enable the soon-to-be-freed IRQ), remove the mmc_host, free
 * the GPIOs and their IRQ/timer, then release the host structure.
 */
static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
		unsigned int id)
{
	/*
	 * Must be visible before the IRQ is freed; pairs with the
	 * smp_rmb() in atmci_detect_change().
	 */
	set_bit(ATMCI_SHUTDOWN, &slot->flags);
	smp_wmb();

	mmc_remove_host(slot->mmc);

	if (gpio_is_valid(slot->detect_pin)) {
		int pin = slot->detect_pin;

		free_irq(gpio_to_irq(pin), slot);
		del_timer_sync(&slot->detect_timer);
		gpio_free(pin);
	}
	if (gpio_is_valid(slot->wp_pin))
		gpio_free(slot->wp_pin);

	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}
2230
2231static bool atmci_filter(struct dma_chan *chan, void *pdata)
2232{
2233 struct mci_platform_data *sl_pdata = pdata;
2234 struct mci_dma_data *sl;
2235
2236 if (!sl_pdata)
2237 return false;
2238
2239 sl = sl_pdata->dma_slave;
2240 if (sl && find_slave_dev(sl) == chan->device->dev) {
2241 chan->private = slave_data_ptr(sl);
2242 return true;
2243 } else {
2244 return false;
2245 }
2246}
2247
2248static bool atmci_configure_dma(struct atmel_mci *host)
2249{
2250 struct mci_platform_data *pdata;
2251 dma_cap_mask_t mask;
2252
2253 if (host == NULL)
2254 return false;
2255
2256 pdata = host->pdev->dev.platform_data;
2257
2258 dma_cap_zero(mask);
2259 dma_cap_set(DMA_SLAVE, mask);
2260
2261 host->dma.chan = dma_request_slave_channel_compat(mask, atmci_filter, pdata,
2262 &host->pdev->dev, "rxtx");
2263 if (!host->dma.chan) {
2264 dev_warn(&host->pdev->dev, "no DMA channel available\n");
2265 return false;
2266 } else {
2267 dev_info(&host->pdev->dev,
2268 "using %s for DMA transfers\n",
2269 dma_chan_name(host->dma.chan));
2270
2271 host->dma_conf.src_addr = host->mapbase + ATMCI_RDR;
2272 host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2273 host->dma_conf.src_maxburst = 1;
2274 host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR;
2275 host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2276 host->dma_conf.dst_maxburst = 1;
2277 host->dma_conf.device_fc = false;
2278 return true;
2279 }
2280}
2281
2282
2283
2284
2285
2286
/*
 * Derive the controller capability set from the hardware version
 * register. Starts from the most restricted feature set (quirks on,
 * features off); each case in the switch deliberately falls through
 * so newer IP revisions also inherit everything older ones gained.
 */
static void __init atmci_get_cap(struct atmel_mci *host)
{
	unsigned int version;

	version = atmci_get_version(host);
	dev_info(&host->pdev->dev,
			"version: 0x%x\n", version);

	/* Baseline: minimal features, all quirk workarounds enabled. */
	host->caps.has_dma_conf_reg = 0;
	host->caps.has_pdc = ATMCI_PDC_CONNECTED;
	host->caps.has_cfg_reg = 0;
	host->caps.has_cstor_reg = 0;
	host->caps.has_highspeed = 0;
	host->caps.has_rwproof = 0;
	host->caps.has_odd_clk_div = 0;
	host->caps.has_bad_data_ordering = 1;
	host->caps.need_reset_after_xfer = 1;
	host->caps.need_blksz_mul_4 = 1;
	host->caps.need_notbusy_for_read_ops = 0;

	/* Keep only the major version number. */
	switch (version & 0xf00) {
	case 0x500:
		host->caps.has_odd_clk_div = 1;
		/* fall through */
	case 0x400:
	case 0x300:
		host->caps.has_dma_conf_reg = 1;
		host->caps.has_pdc = 0;
		host->caps.has_cfg_reg = 1;
		host->caps.has_cstor_reg = 1;
		host->caps.has_highspeed = 1;
		/* fall through */
	case 0x200:
		host->caps.has_rwproof = 1;
		host->caps.need_blksz_mul_4 = 0;
		host->caps.need_notbusy_for_read_ops = 1;
		/* fall through */
	case 0x100:
		host->caps.has_bad_data_ordering = 0;
		host->caps.need_reset_after_xfer = 0;
		/* fall through */
	case 0x0:
		break;
	default:
		host->caps.has_pdc = 0;
		dev_warn(&host->pdev->dev,
				"Unmanaged mci version, set minimum capabilities\n");
		break;
	}
}
2334
/*
 * Probe: map the controller, set up clock/IRQ/tasklet, pick the best
 * transfer method (dmaengine > PDC > PIO), register up to two slots,
 * and allocate a bounce buffer for controllers lacking the read/write
 * proof feature. Unwinds via the goto chain on failure.
 */
static int __init atmci_probe(struct platform_device *pdev)
{
	struct mci_platform_data *pdata;
	struct atmel_mci *host;
	struct resource *regs;
	unsigned int nr_slots;
	int irq;
	int ret;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs)
		return -ENXIO;
	/* Platform data may come from the board file or, failing that,
	 * be built from the device tree. */
	pdata = pdev->dev.platform_data;
	if (!pdata) {
		pdata = atmci_of_init(pdev);
		if (IS_ERR(pdata)) {
			dev_err(&pdev->dev, "platform data not available\n");
			return PTR_ERR(pdata);
		}
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	host = kzalloc(sizeof(struct atmel_mci), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->pdev = pdev;
	spin_lock_init(&host->lock);
	INIT_LIST_HEAD(&host->queue);

	host->mck = clk_get(&pdev->dev, "mci_clk");
	if (IS_ERR(host->mck)) {
		ret = PTR_ERR(host->mck);
		goto err_clk_get;
	}

	ret = -ENOMEM;
	host->regs = ioremap(regs->start, resource_size(regs));
	if (!host->regs)
		goto err_ioremap;

	/* Reset the controller and read the bus clock rate; the clock
	 * is only enabled while registers are touched. */
	clk_enable(host->mck);
	atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
	host->bus_hz = clk_get_rate(host->mck);
	clk_disable(host->mck);

	host->mapbase = regs->start;

	tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host);

	ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host);
	if (ret)
		goto err_request_irq;

	/* Read capabilities, then select the transfer backend. */
	atmci_get_cap(host);
	if (atmci_configure_dma(host)) {
		host->prepare_data = &atmci_prepare_data_dma;
		host->submit_data = &atmci_submit_data_dma;
		host->stop_transfer = &atmci_stop_transfer_dma;
	} else if (host->caps.has_pdc) {
		dev_info(&pdev->dev, "using PDC\n");
		host->prepare_data = &atmci_prepare_data_pdc;
		host->submit_data = &atmci_submit_data_pdc;
		host->stop_transfer = &atmci_stop_transfer_pdc;
	} else {
		dev_info(&pdev->dev, "using PIO\n");
		host->prepare_data = &atmci_prepare_data;
		host->submit_data = &atmci_submit_data;
		host->stop_transfer = &atmci_stop_transfer;
	}

	platform_set_drvdata(pdev, host);

	setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);

	/* Register each slot present in the platform data; buf_size
	 * tracks the largest per-slot request size for the bounce
	 * buffer below. */
	nr_slots = 0;
	ret = -ENODEV;
	if (pdata->slot[0].bus_width) {
		ret = atmci_init_slot(host, &pdata->slot[0],
				0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
		if (!ret) {
			nr_slots++;
			host->buf_size = host->slot[0]->mmc->max_req_size;
		}
	}
	if (pdata->slot[1].bus_width) {
		ret = atmci_init_slot(host, &pdata->slot[1],
				1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
		if (!ret) {
			nr_slots++;
			if (host->slot[1]->mmc->max_req_size > host->buf_size)
				host->buf_size =
					host->slot[1]->mmc->max_req_size;
		}
	}

	if (!nr_slots) {
		dev_err(&pdev->dev, "init failed: no slot defined\n");
		goto err_init_slot;
	}

	/* Controllers without read/write proof use a coherent bounce
	 * buffer sized for the largest possible request. */
	if (!host->caps.has_rwproof) {
		host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
		                                  &host->buf_phys_addr,
						  GFP_KERNEL);
		if (!host->buffer) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "buffer allocation failed\n");
			goto err_init_slot;
		}
	}

	dev_info(&pdev->dev,
			"Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
			host->mapbase, irq, nr_slots);

	return 0;

err_init_slot:
	if (host->dma.chan)
		dma_release_channel(host->dma.chan);
	free_irq(irq, host);
err_request_irq:
	iounmap(host->regs);
err_ioremap:
	clk_put(host->mck);
err_clk_get:
	kfree(host);
	return ret;
}
2470
/*
 * Remove: free the bounce buffer, tear down every slot, quiesce the
 * hardware, then release DMA, IRQ, MMIO and clock resources in the
 * reverse order of their acquisition in atmci_probe().
 */
static int __exit atmci_remove(struct platform_device *pdev)
{
	struct atmel_mci *host = platform_get_drvdata(pdev);
	unsigned int i;

	if (host->buffer)
		dma_free_coherent(&pdev->dev, host->buf_size,
		                  host->buffer, host->buf_phys_addr);

	for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
		if (host->slot[i])
			atmci_cleanup_slot(host->slot[i], i);
	}

	/* The clock must be running to touch the registers: disable
	 * all interrupts, disable the controller, flush SR. */
	clk_enable(host->mck);
	atmci_writel(host, ATMCI_IDR, ~0UL);
	atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
	atmci_readl(host, ATMCI_SR);
	clk_disable(host->mck);

	if (host->dma.chan)
		dma_release_channel(host->dma.chan);

	free_irq(platform_get_irq(pdev, 0), host);
	iounmap(host->regs);

	clk_put(host->mck);
	kfree(host);

	return 0;
}
2502
2503#ifdef CONFIG_PM_SLEEP
/*
 * System-sleep suspend: suspend each registered slot's mmc_host,
 * marking it ATMCI_SUSPENDED on success. If any slot fails, walk
 * backwards resuming the slots already suspended so the system is
 * left consistent, and propagate the error.
 */
static int atmci_suspend(struct device *dev)
{
	struct atmel_mci *host = dev_get_drvdata(dev);
	int i;

	for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
		struct atmel_mci_slot *slot = host->slot[i];
		int ret;

		if (!slot)
			continue;
		ret = mmc_suspend_host(slot->mmc);
		if (ret < 0) {
			/* Unwind: resume everything suspended so far. */
			while (--i >= 0) {
				slot = host->slot[i];
				if (slot
				&& test_bit(ATMCI_SUSPENDED, &slot->flags)) {
					mmc_resume_host(host->slot[i]->mmc);
					clear_bit(ATMCI_SUSPENDED, &slot->flags);
				}
			}
			return ret;
		} else {
			set_bit(ATMCI_SUSPENDED, &slot->flags);
		}
	}

	return 0;
}
2533
2534static int atmci_resume(struct device *dev)
2535{
2536 struct atmel_mci *host = dev_get_drvdata(dev);
2537 int i;
2538 int ret = 0;
2539
2540 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2541 struct atmel_mci_slot *slot = host->slot[i];
2542 int err;
2543
2544 slot = host->slot[i];
2545 if (!slot)
2546 continue;
2547 if (!test_bit(ATMCI_SUSPENDED, &slot->flags))
2548 continue;
2549 err = mmc_resume_host(slot->mmc);
2550 if (err < 0)
2551 ret = err;
2552 else
2553 clear_bit(ATMCI_SUSPENDED, &slot->flags);
2554 }
2555
2556 return ret;
2557}
2558#endif
2559
/* Wire the sleep hooks into dev_pm_ops (empty when CONFIG_PM_SLEEP is off). */
static SIMPLE_DEV_PM_OPS(atmci_pm, atmci_suspend, atmci_resume);
2561
/* Platform driver; .probe is supplied to platform_driver_probe() below. */
static struct platform_driver atmci_driver = {
	.remove = __exit_p(atmci_remove),
	.driver = {
		.name = "atmel_mci",
		.pm = &atmci_pm,
		.of_match_table = of_match_ptr(atmci_dt_ids),
	},
};
2570
/* Module init: register the driver and bind atmci_probe() (non-hotplug). */
static int __init atmci_init(void)
{
	return platform_driver_probe(&atmci_driver, atmci_probe);
}
2575
/* Module exit: unregister the platform driver. */
static void __exit atmci_exit(void)
{
	platform_driver_unregister(&atmci_driver);
}
2580
/* late_initcall: presumably so providers (DMA, GPIO) are registered
 * first — TODO confirm the exact ordering dependency. */
late_initcall(atmci_init);
module_exit(atmci_exit);

MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");
2587