1
2
3
4
5
6
7
8
9
10#include <linux/blkdev.h>
11#include <linux/clk.h>
12#include <linux/debugfs.h>
13#include <linux/device.h>
14#include <linux/dmaengine.h>
15#include <linux/dma-mapping.h>
16#include <linux/err.h>
17#include <linux/gpio.h>
18#include <linux/init.h>
19#include <linux/interrupt.h>
20#include <linux/ioport.h>
21#include <linux/module.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/of_gpio.h>
25#include <linux/platform_device.h>
26#include <linux/scatterlist.h>
27#include <linux/seq_file.h>
28#include <linux/slab.h>
29#include <linux/stat.h>
30#include <linux/types.h>
31#include <linux/platform_data/atmel.h>
32
33#include <linux/mmc/host.h>
34#include <linux/mmc/sdio.h>
35
36#include <mach/atmel-mci.h>
37#include <linux/atmel-mci.h>
38#include <linux/atmel_pdc.h>
39
40#include <asm/io.h>
41#include <asm/unaligned.h>
42
43#include <mach/cpu.h>
44
45#include "atmel-mci-regs.h"
46
/* Status bits that indicate a failed data transfer (CRC, timeout, overrun, underrun). */
#define ATMCI_DATA_ERROR_FLAGS (ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE)
/* Transfers below this many bytes skip DMA; see atmci_prepare_data_dma(). */
#define ATMCI_DMA_THRESHOLD 16
49
/*
 * Transfer-progress events recorded as bit numbers in
 * host->pending_events and host->completed_events (see the
 * atmci_set_pending()/atmci_set_completed() macros below).
 */
enum {
	EVENT_CMD_RDY = 0,
	EVENT_XFER_COMPLETE,
	EVENT_NOTBUSY,
	EVENT_DATA_ERROR,
};
56
/*
 * Host state machine, driven by the tasklet. STATE_IDLE means no request
 * in flight; atmci_queue_request() moves to STATE_SENDING_CMD, and
 * STATE_END_REQUEST is entered on completion or error (see
 * atmci_timeout_timer()).
 */
enum atmel_mci_state {
	STATE_IDLE = 0,
	STATE_SENDING_CMD,
	STATE_DATA_XFER,
	STATE_WAITING_NOTBUSY,
	STATE_SENDING_STOP,
	STATE_END_REQUEST,
};
65
/* PDC transfer direction; selects RPR/RCR vs TPR/TCR in atmci_pdc_set_single_buf(). */
enum atmci_xfer_dir {
	XFER_RECEIVE = 0,
	XFER_TRANSMIT,
};
70
/* Which of the two chained PDC buffers to program (second uses ATMEL_PDC_SCND_BUF_OFF). */
enum atmci_pdc_buf {
	PDC_FIRST_BUF = 0,
	PDC_SECOND_BUF,
};
75
/*
 * Per-IP-revision capability/quirk flags, consulted throughout the driver
 * to adapt register programming to the controller version.
 */
struct atmel_mci_caps {
	bool has_dma_conf_reg;		/* ATMCI_DMA register exists */
	bool has_pdc;			/* PDC (peripheral DMA controller) available */
	bool has_cfg_reg;		/* ATMCI_CFG register exists */
	bool has_cstor_reg;		/* ATMCI_CSTOR register exists */
	bool has_highspeed;		/* supports high-speed mode -- TODO confirm, not used in this chunk */
	bool has_rwproof;		/* MR RDPROOF/WRPROOF bits usable; else bounce buffer needed */
	bool has_odd_clk_div;		/* MR has CLKODD bit for odd clock dividers */
	bool has_bad_data_ordering;	/* data needs swab32() fixup on bounce-buffer path */
	bool need_reset_after_xfer;	/* controller must be SWRST between requests */
	bool need_blksz_mul_4;		/* presumably blksz must be a multiple of 4 -- verify against caller */
	bool need_notbusy_for_read_ops;	/* presumably wait NOTBUSY even on reads -- verify against caller */
};
89
/* dmaengine state: the slave channel and the currently prepared descriptor. */
struct atmel_mci_dma {
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *data_desc;
};
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
/*
 * struct atmel_mci - MMC controller state shared between all slots.
 *
 * @lock protects the request queue, state machine and clock/mode updates
 * (taken with spin_lock_bh from process context, plain spin_lock in the
 * tasklet path; see atmci_request_end()).
 */
struct atmel_mci {
	spinlock_t lock;
	void __iomem *regs;		/* mapped controller registers */

	/* PIO / PDC scatterlist walking state */
	struct scatterlist *sg;		/* current sg entry */
	unsigned int sg_len;
	unsigned int pio_offset;	/* byte offset into current sg entry for PIO */
	unsigned int *buffer;		/* bounce buffer used when !caps.has_rwproof */
	unsigned int buf_size;
	dma_addr_t buf_phys_addr;	/* DMA address of @buffer */

	/* Current request */
	struct atmel_mci_slot *cur_slot;
	struct mmc_request *mrq;
	struct mmc_command *cmd;	/* command currently executing, or NULL */
	struct mmc_data *data;		/* data transfer in progress, or NULL */
	unsigned int data_size;		/* bytes left to program into the PDC */

	struct atmel_mci_dma dma;	/* dmaengine channel + descriptor */
	struct dma_chan *data_chan;	/* channel in use for the current transfer */
	struct dma_slave_config dma_conf;

	u32 cmd_status;			/* SR snapshot for command completion */
	u32 data_status;		/* SR snapshot for data completion */
	u32 stop_cmdr;			/* pre-built CMDR value for the stop command */

	struct tasklet_struct tasklet;	/* bottom half driving the state machine */
	unsigned long pending_events;	/* EVENT_* bits set by IRQ/callbacks */
	unsigned long completed_events;	/* EVENT_* bits consumed by the tasklet */
	enum atmel_mci_state state;
	struct list_head queue;		/* slots waiting for the controller */

	bool need_clock_update;		/* MR/CFG update deferred until queue drains */
	bool need_reset;		/* SWRST required before next request */
	struct timer_list timer;	/* software timeout (atmci_timeout_timer) */
	u32 mode_reg;			/* cached ATMCI_MR value; 0 => controller off */
	u32 cfg_reg;			/* cached ATMCI_CFG value */
	unsigned long bus_hz;		/* frequency of the peripheral clock */
	unsigned long mapbase;
	struct clk *mck;		/* peripheral clock */
	struct platform_device *pdev;

	struct atmel_mci_slot *slot[ATMCI_MAX_NR_SLOTS];

	struct atmel_mci_caps caps;	/* IP-revision quirks */

	/* Transfer-mode ops, selected once per probe (PIO, PDC or dmaengine) */
	u32 (*prepare_data)(struct atmel_mci *host, struct mmc_data *data);
	void (*submit_data)(struct atmel_mci *host, struct mmc_data *data);
	void (*stop_transfer)(struct atmel_mci *host);
};
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
/*
 * struct atmel_mci_slot - per-slot state (one mmc_host per slot; up to
 * ATMCI_MAX_NR_SLOTS slots share one controller).
 */
struct atmel_mci_slot {
	struct mmc_host *mmc;
	struct atmel_mci *host;		/* back-pointer to the shared controller */

	u32 sdc_reg;			/* SDCR value selecting this slot/bus width */
	u32 sdio_irq;			/* SDIOIRQA/B bit for this slot */

	struct mmc_request *mrq;	/* request owned by this slot, or NULL */
	struct list_head queue_node;	/* link in host->queue while waiting */

	unsigned int clock;		/* requested clock rate; 0 = slot inactive */
	unsigned long flags;		/* ATMCI_* bit flags below */
#define ATMCI_CARD_PRESENT 0
#define ATMCI_CARD_NEED_INIT 1
#define ATMCI_SHUTDOWN 2
#define ATMCI_SUSPENDED 3

	int detect_pin;			/* card-detect GPIO, or invalid */
	int wp_pin;			/* write-protect GPIO, or invalid */
	bool detect_is_active_high;

	struct timer_list detect_timer;	/* debounce for card detect */
};
268
/* Atomically consume a pending EVENT_* bit (used by the tasklet). */
#define atmci_test_and_clear_pending(host, event) \
	test_and_clear_bit(event, &host->pending_events)
/* Record that an EVENT_* has been handled. */
#define atmci_set_completed(host, event) \
	set_bit(event, &host->completed_events)
/* Flag an EVENT_* for the tasklet (called from IRQ/DMA callbacks). */
#define atmci_set_pending(host, event) \
	set_bit(event, &host->pending_events)
275
276
277
278
279
280static int atmci_req_show(struct seq_file *s, void *v)
281{
282 struct atmel_mci_slot *slot = s->private;
283 struct mmc_request *mrq;
284 struct mmc_command *cmd;
285 struct mmc_command *stop;
286 struct mmc_data *data;
287
288
289 spin_lock_bh(&slot->host->lock);
290 mrq = slot->mrq;
291
292 if (mrq) {
293 cmd = mrq->cmd;
294 data = mrq->data;
295 stop = mrq->stop;
296
297 if (cmd)
298 seq_printf(s,
299 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
300 cmd->opcode, cmd->arg, cmd->flags,
301 cmd->resp[0], cmd->resp[1], cmd->resp[2],
302 cmd->resp[3], cmd->error);
303 if (data)
304 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
305 data->bytes_xfered, data->blocks,
306 data->blksz, data->flags, data->error);
307 if (stop)
308 seq_printf(s,
309 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
310 stop->opcode, stop->arg, stop->flags,
311 stop->resp[0], stop->resp[1], stop->resp[2],
312 stop->resp[3], stop->error);
313 }
314
315 spin_unlock_bh(&slot->host->lock);
316
317 return 0;
318}
319
/* seq_file open hook for the per-slot "req" debugfs file. */
static int atmci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, atmci_req_show, inode->i_private);
}
324
/* File operations for the per-slot "req" debugfs file. */
static const struct file_operations atmci_req_fops = {
	.owner = THIS_MODULE,
	.open = atmci_req_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
332
333static void atmci_show_status_reg(struct seq_file *s,
334 const char *regname, u32 value)
335{
336 static const char *sr_bit[] = {
337 [0] = "CMDRDY",
338 [1] = "RXRDY",
339 [2] = "TXRDY",
340 [3] = "BLKE",
341 [4] = "DTIP",
342 [5] = "NOTBUSY",
343 [6] = "ENDRX",
344 [7] = "ENDTX",
345 [8] = "SDIOIRQA",
346 [9] = "SDIOIRQB",
347 [12] = "SDIOWAIT",
348 [14] = "RXBUFF",
349 [15] = "TXBUFE",
350 [16] = "RINDE",
351 [17] = "RDIRE",
352 [18] = "RCRCE",
353 [19] = "RENDE",
354 [20] = "RTOE",
355 [21] = "DCRCE",
356 [22] = "DTOE",
357 [23] = "CSTOE",
358 [24] = "BLKOVRE",
359 [25] = "DMADONE",
360 [26] = "FIFOEMPTY",
361 [27] = "XFRDONE",
362 [30] = "OVRE",
363 [31] = "UNRE",
364 };
365 unsigned int i;
366
367 seq_printf(s, "%s:\t0x%08x", regname, value);
368 for (i = 0; i < ARRAY_SIZE(sr_bit); i++) {
369 if (value & (1 << i)) {
370 if (sr_bit[i])
371 seq_printf(s, " %s", sr_bit[i]);
372 else
373 seq_puts(s, " UNKNOWN");
374 }
375 }
376 seq_putc(s, '\n');
377}
378
/*
 * debugfs "regs" file: snapshot and decode the controller registers.
 * The registers are copied out in one go under the host lock with the
 * peripheral clock enabled, then decoded from the copy after unlocking,
 * so the dump is internally consistent.
 */
static int atmci_regs_show(struct seq_file *s, void *v)
{
	struct atmel_mci *host = s->private;
	u32 *buf;

	buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Snapshot under the lock; decoding below needs no locking. */
	spin_lock_bh(&host->lock);
	clk_enable(host->mck);
	memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
	clk_disable(host->mck);
	spin_unlock_bh(&host->lock);

	seq_printf(s, "MR:\t0x%08x%s%s ",
			buf[ATMCI_MR / 4],
			buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "",
			buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : "");
	if (host->caps.has_odd_clk_div)
		/* 9-bit divider: CLKDIV in bits 7:0, CLKODD in bit 16. */
		seq_printf(s, "{CLKDIV,CLKODD}=%u\n",
				((buf[ATMCI_MR / 4] & 0xff) << 1)
				| ((buf[ATMCI_MR / 4] >> 16) & 1));
	else
		seq_printf(s, "CLKDIV=%u\n",
				(buf[ATMCI_MR / 4] & 0xff));
	seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]);
	seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]);
	seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]);
	seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n",
			buf[ATMCI_BLKR / 4],
			buf[ATMCI_BLKR / 4] & 0xffff,
			(buf[ATMCI_BLKR / 4] >> 16) & 0xffff);
	if (host->caps.has_cstor_reg)
		seq_printf(s, "CSTOR:\t0x%08x\n", buf[ATMCI_CSTOR / 4]);

	/* RSPR and RDR are not dumped: reading them consumes data. */

	atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]);
	atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]);

	if (host->caps.has_dma_conf_reg) {
		u32 val;

		val = buf[ATMCI_DMA / 4];
		seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n",
				val, val & 3,
				((val >> 4) & 3) ?
					1 << (((val >> 4) & 3) + 1) : 1,
				val & ATMCI_DMAEN ? " DMAEN" : "");
	}
	if (host->caps.has_cfg_reg) {
		u32 val;

		val = buf[ATMCI_CFG / 4];
		seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",
				val,
				val & ATMCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "",
				val & ATMCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "",
				val & ATMCI_CFG_HSMODE ? " HSMODE" : "",
				val & ATMCI_CFG_LSYNC ? " LSYNC" : "");
	}

	kfree(buf);

	return 0;
}
451
/* seq_file open hook for the per-host "regs" debugfs file. */
static int atmci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, atmci_regs_show, inode->i_private);
}
456
/* File operations for the per-host "regs" debugfs file. */
static const struct file_operations atmci_regs_fops = {
	.owner = THIS_MODULE,
	.open = atmci_regs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
464
465static void atmci_init_debugfs(struct atmel_mci_slot *slot)
466{
467 struct mmc_host *mmc = slot->mmc;
468 struct atmel_mci *host = slot->host;
469 struct dentry *root;
470 struct dentry *node;
471
472 root = mmc->debugfs_root;
473 if (!root)
474 return;
475
476 node = debugfs_create_file("regs", S_IRUSR, root, host,
477 &atmci_regs_fops);
478 if (IS_ERR(node))
479 return;
480 if (!node)
481 goto err;
482
483 node = debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
484 if (!node)
485 goto err;
486
487 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
488 if (!node)
489 goto err;
490
491 node = debugfs_create_x32("pending_events", S_IRUSR, root,
492 (u32 *)&host->pending_events);
493 if (!node)
494 goto err;
495
496 node = debugfs_create_x32("completed_events", S_IRUSR, root,
497 (u32 *)&host->completed_events);
498 if (!node)
499 goto err;
500
501 return;
502
503err:
504 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
505}
506
507#if defined(CONFIG_OF)
/* Device-tree match table for the "atmel,hsmci" compatible. */
static const struct of_device_id atmci_dt_ids[] = {
	{ .compatible = "atmel,hsmci" },
	{ }
};

MODULE_DEVICE_TABLE(of, atmci_dt_ids);
514
515static struct mci_platform_data*
516atmci_of_init(struct platform_device *pdev)
517{
518 struct device_node *np = pdev->dev.of_node;
519 struct device_node *cnp;
520 struct mci_platform_data *pdata;
521 u32 slot_id;
522
523 if (!np) {
524 dev_err(&pdev->dev, "device node not found\n");
525 return ERR_PTR(-EINVAL);
526 }
527
528 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
529 if (!pdata) {
530 dev_err(&pdev->dev, "could not allocate memory for pdata\n");
531 return ERR_PTR(-ENOMEM);
532 }
533
534 for_each_child_of_node(np, cnp) {
535 if (of_property_read_u32(cnp, "reg", &slot_id)) {
536 dev_warn(&pdev->dev, "reg property is missing for %s\n",
537 cnp->full_name);
538 continue;
539 }
540
541 if (slot_id >= ATMCI_MAX_NR_SLOTS) {
542 dev_warn(&pdev->dev, "can't have more than %d slots\n",
543 ATMCI_MAX_NR_SLOTS);
544 break;
545 }
546
547 if (of_property_read_u32(cnp, "bus-width",
548 &pdata->slot[slot_id].bus_width))
549 pdata->slot[slot_id].bus_width = 1;
550
551 pdata->slot[slot_id].detect_pin =
552 of_get_named_gpio(cnp, "cd-gpios", 0);
553
554 pdata->slot[slot_id].detect_is_active_high =
555 of_property_read_bool(cnp, "cd-inverted");
556
557 pdata->slot[slot_id].wp_pin =
558 of_get_named_gpio(cnp, "wp-gpios", 0);
559 }
560
561 return pdata;
562}
563#else
/* !CONFIG_OF stub: DT-based platform data is unavailable. */
static inline struct mci_platform_data*
atmci_of_init(struct platform_device *dev)
{
	return ERR_PTR(-EINVAL);
}
569#endif
570
/* Read the IP version field (low 12 bits of the VERSION register). */
static inline unsigned int atmci_get_version(struct atmel_mci *host)
{
	return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
}
575
/*
 * Software timeout (armed in atmci_start_request with a 2 s deadline):
 * fail the stuck command or data transfer with -ETIMEDOUT, request a
 * controller reset, and kick the tasklet to finish the request.
 */
static void atmci_timeout_timer(unsigned long data)
{
	struct atmel_mci *host;

	host = (struct atmel_mci *)data;

	dev_dbg(&host->pdev->dev, "software timeout\n");

	if (host->mrq->cmd->data) {
		host->mrq->cmd->data->error = -ETIMEDOUT;
		host->data = NULL;
		/*
		 * Only stop the hardware transfer if one is actually in
		 * flight; in other states there is nothing to abort.
		 */
		if (host->state == STATE_DATA_XFER)
			host->stop_transfer(host);
	} else {
		host->mrq->cmd->error = -ETIMEDOUT;
		host->cmd = NULL;
	}
	host->need_reset = 1;
	host->state = STATE_END_REQUEST;
	/* Make the state/error updates visible before the tasklet runs. */
	smp_wmb();
	tasklet_schedule(&host->tasklet);
}
603
604static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
605 unsigned int ns)
606{
607
608
609
610
611 unsigned int us = DIV_ROUND_UP(ns, 1000);
612
613
614 return us * (DIV_ROUND_UP(host->bus_hz, 2000000));
615}
616
/*
 * Program the data timeout register (DTOR) for the given transfer.
 * DTOR expresses the timeout as DTOCYC << shift(DTOMUL); pick the
 * smallest multiplier whose 4-bit cycle count (< 15) still covers the
 * requested timeout, clamping to the hardware maximum (15 << 20).
 */
static void atmci_set_timeout(struct atmel_mci *host,
		struct atmel_mci_slot *slot, struct mmc_data *data)
{
	/* Shift amount corresponding to each DTOMUL encoding. */
	static unsigned dtomul_to_shift[] = {
		0, 4, 7, 8, 10, 12, 16, 20
	};
	unsigned timeout;
	unsigned dtocyc;
	unsigned dtomul;

	timeout = atmci_ns_to_clocks(host, data->timeout_ns)
		+ data->timeout_clks;

	for (dtomul = 0; dtomul < 8; dtomul++) {
		unsigned shift = dtomul_to_shift[dtomul];
		/* Round up so the programmed timeout is never shorter. */
		dtocyc = (timeout + (1 << shift) - 1) >> shift;
		if (dtocyc < 15)
			break;
	}

	/* No multiplier fits: use the longest timeout the hardware allows. */
	if (dtomul >= 8) {
		dtomul = 7;
		dtocyc = 15;
	}

	dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n",
			dtocyc << dtomul_to_shift[dtomul]);
	atmci_writel(host, ATMCI_DTOR, (ATMCI_DTOMUL(dtomul) | ATMCI_DTOCYC(dtocyc)));
}
646
647
648
649
/*
 * Build the CMDR register value for a command: opcode, response type,
 * max latency, open-drain mode and (if a data transfer is attached) the
 * transfer type and direction. Marks the command in-progress.
 */
static u32 atmci_prepare_command(struct mmc_host *mmc,
		struct mmc_command *cmd)
{
	struct mmc_data *data;
	u32 cmdr;

	cmd->error = -EINPROGRESS;

	cmdr = ATMCI_CMDR_CMDNB(cmd->opcode);

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			cmdr |= ATMCI_CMDR_RSPTYP_136BIT;
		else
			cmdr |= ATMCI_CMDR_RSPTYP_48BIT;
	}

	/* Always use the maximum response latency. */
	cmdr |= ATMCI_CMDR_MAXLAT_64CYC;

	if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= ATMCI_CMDR_OPDCMD;

	data = cmd->data;
	if (data) {
		cmdr |= ATMCI_CMDR_START_XFER;

		if (cmd->opcode == SD_IO_RW_EXTENDED) {
			/* SDIO block/byte transfers use a dedicated type. */
			cmdr |= ATMCI_CMDR_SDIO_BLOCK;
		} else {
			if (data->flags & MMC_DATA_STREAM)
				cmdr |= ATMCI_CMDR_STREAM;
			else if (data->blocks > 1)
				cmdr |= ATMCI_CMDR_MULTI_BLOCK;
			else
				cmdr |= ATMCI_CMDR_BLOCK;
		}

		if (data->flags & MMC_DATA_READ)
			cmdr |= ATMCI_CMDR_TRDIR_READ;
	}

	return cmdr;
}
698
/*
 * Issue a command to the controller. ARGR must be written before CMDR:
 * writing CMDR is what starts the command.
 */
static void atmci_send_command(struct atmel_mci *host,
		struct mmc_command *cmd, u32 cmd_flags)
{
	WARN_ON(host->cmd);	/* only one command may be in flight */
	host->cmd = cmd;

	dev_vdbg(&host->pdev->dev,
			"start command: ARGR=0x%08x CMDR=0x%08x\n",
			cmd->arg, cmd_flags);

	atmci_writel(host, ATMCI_ARGR, cmd->arg);
	atmci_writel(host, ATMCI_CMDR, cmd_flags);
}
712
/* Send the pre-built stop command (host->stop_cmdr) and watch for CMDRDY. */
static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
{
	dev_dbg(&host->pdev->dev, "send stop command\n");
	atmci_send_command(host, data->stop, host->stop_cmdr);
	atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
}
719
720
721
722
723
/*
 * Program one PDC buffer (pointer + counter registers) for the given
 * direction. Without rwproof the bounce buffer is used; otherwise the
 * current scatterlist entry is programmed directly. The counter is in
 * 32-bit words unless the remaining size is not word-aligned, in which
 * case PDCFBYTE switches the PDC to byte counting.
 */
static void atmci_pdc_set_single_buf(struct atmel_mci *host,
		enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
{
	u32 pointer_reg, counter_reg;
	unsigned int buf_size;

	if (dir == XFER_RECEIVE) {
		pointer_reg = ATMEL_PDC_RPR;
		counter_reg = ATMEL_PDC_RCR;
	} else {
		pointer_reg = ATMEL_PDC_TPR;
		counter_reg = ATMEL_PDC_TCR;
	}

	/* The second buffer's registers live at a fixed offset. */
	if (buf_nb == PDC_SECOND_BUF) {
		pointer_reg += ATMEL_PDC_SCND_BUF_OFF;
		counter_reg += ATMEL_PDC_SCND_BUF_OFF;
	}

	if (!host->caps.has_rwproof) {
		buf_size = host->buf_size;
		atmci_writel(host, pointer_reg, host->buf_phys_addr);
	} else {
		buf_size = sg_dma_len(host->sg);
		atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
	}

	if (host->data_size <= buf_size) {
		if (host->data_size & 0x3) {
			/* Not word-aligned: count bytes via PDCFBYTE. */
			atmci_writel(host, counter_reg, host->data_size);
			atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE);
		} else {
			/* Word-aligned: counter is in 32-bit words. */
			atmci_writel(host, counter_reg, host->data_size / 4);
		}
		host->data_size = 0;
	} else {
		/* More data than this buffer: consume one sg entry. */
		atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4);
		host->data_size -= sg_dma_len(host->sg);
		if (host->data_size)
			host->sg = sg_next(host->sg);
	}
}
769
770
771
772
773
774
/*
 * Program the first PDC buffer and, if data remains after it, the
 * chained second buffer as well.
 */
static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir)
{
	atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF);
	if (host->data_size)
		atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF);
}
781
782
783
784
785static void atmci_pdc_cleanup(struct atmel_mci *host)
786{
787 struct mmc_data *data = host->data;
788
789 if (data)
790 dma_unmap_sg(&host->pdev->dev,
791 data->sg, data->sg_len,
792 ((data->flags & MMC_DATA_WRITE)
793 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
794}
795
796
797
798
799
800
/*
 * PDC transfer finished: disable both PDC directions, copy a read back
 * out of the bounce buffer (with byte-swap fixup on affected IP), unmap,
 * and hand EVENT_XFER_COMPLETE to the tasklet.
 */
static void atmci_pdc_complete(struct atmel_mci *host)
{
	int transfer_size = host->data->blocks * host->data->blksz;
	int i;

	atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

	if ((!host->caps.has_rwproof)
	    && (host->data->flags & MMC_DATA_READ)) {
		if (host->caps.has_bad_data_ordering)
			/*
			 * NOTE(review): transfer_size is in bytes but
			 * host->buffer is indexed as u32 here — this looks
			 * like a 4x over-iteration; verify against the
			 * buffer allocation size before relying on it.
			 */
			for (i = 0; i < transfer_size; i++)
				host->buffer[i] = swab32(host->buffer[i]);
		sg_copy_from_buffer(host->data->sg, host->data->sg_len,
		                    host->buffer, transfer_size);
	}

	atmci_pdc_cleanup(host);

	/* Let the tasklet finish the request. */
	if (host->data) {
		dev_dbg(&host->pdev->dev,
		        "(%s) set pending xfer complete\n", __func__);
		atmci_set_pending(host, EVENT_XFER_COMPLETE);
		tasklet_schedule(&host->tasklet);
	}
}
830
831static void atmci_dma_cleanup(struct atmel_mci *host)
832{
833 struct mmc_data *data = host->data;
834
835 if (data)
836 dma_unmap_sg(host->dma.chan->device->dev,
837 data->sg, data->sg_len,
838 ((data->flags & MMC_DATA_WRITE)
839 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
840}
841
842
843
844
/*
 * dmaengine completion callback: disable hardware DMA handshaking where
 * present, unmap the buffers, flag EVENT_XFER_COMPLETE for the tasklet
 * and arm the NOTBUSY interrupt so request completion waits for the
 * card to go idle.
 */
static void atmci_dma_complete(void *arg)
{
	struct atmel_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(&host->pdev->dev, "DMA complete\n");

	if (host->caps.has_dma_conf_reg)
		/* Disable DMA hardware handshaking on MCI */
		atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN);

	atmci_dma_cleanup(host);

	/*
	 * data may have been cleared by the error handler in the
	 * meantime; only complete if it is still set.
	 */
	if (data) {
		dev_dbg(&host->pdev->dev,
		        "(%s) set pending xfer complete\n", __func__);
		atmci_set_pending(host, EVENT_XFER_COMPLETE);
		tasklet_schedule(&host->tasklet);

		/*
		 * The DMA controller signalled completion, but the card
		 * may still be busy programming; wait for NOTBUSY before
		 * declaring the whole transfer done.
		 */
		atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
	}
}
891
892
893
894
895
/*
 * Prepare a PIO data transfer: record the scatterlist walking state and
 * return the interrupt flags to enable (RXRDY or TXRDY plus the data
 * error flags). Also the fallback path when DMA cannot be used.
 */
static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
{
	u32 iflags;

	data->error = -EINPROGRESS;

	host->sg = data->sg;
	host->sg_len = data->sg_len;
	host->data = data;
	host->data_chan = NULL;

	iflags = ATMCI_DATA_ERROR_FLAGS;

	/*
	 * Quirk workaround: schedule a controller reset after very short
	 * or non-word-multiple transfers, which this IP does not handle
	 * cleanly.
	 */
	if (data->blocks * data->blksz < 12
			|| (data->blocks * data->blksz) & 3)
		host->need_reset = true;

	host->pio_offset = 0;
	if (data->flags & MMC_DATA_READ)
		iflags |= ATMCI_RXRDY;
	else
		iflags |= ATMCI_TXRDY;

	return iflags;
}
928
929
930
931
932
933
934
/*
 * Prepare a PDC data transfer: enable PDC mode, set the block length in
 * MR, map the scatterlist, stage writes through the bounce buffer when
 * rwproof is absent (with byte-swap fixup on affected IP), and program
 * the PDC buffer registers. Returns the interrupt flags to enable.
 */
static u32
atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
{
	u32 iflags, tmp;
	unsigned int sg_len;
	enum dma_data_direction dir;
	int i;

	data->error = -EINPROGRESS;

	host->data = data;
	host->sg = data->sg;
	iflags = ATMCI_DATA_ERROR_FLAGS;

	/* Enable pdc mode */
	atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE);

	if (data->flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;
		iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
	} else {
		dir = DMA_TO_DEVICE;
		iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
	}

	/* Set BLKLEN in the upper half of MR, preserving the lower half. */
	tmp = atmci_readl(host, ATMCI_MR);
	tmp &= 0x0000ffff;
	tmp |= ATMCI_BLKLEN(data->blksz);
	atmci_writel(host, ATMCI_MR, tmp);

	/* Configure PDC */
	host->data_size = data->blocks * data->blksz;
	sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir);

	if ((!host->caps.has_rwproof)
	    && (host->data->flags & MMC_DATA_WRITE)) {
		sg_copy_to_buffer(host->data->sg, host->data->sg_len,
		                  host->buffer, host->data_size);
		if (host->caps.has_bad_data_ordering)
			/*
			 * NOTE(review): data_size is in bytes but buffer is
			 * indexed as u32 — looks like a 4x over-iteration;
			 * verify against the buffer allocation size.
			 */
			for (i = 0; i < host->data_size; i++)
				host->buffer[i] = swab32(host->buffer[i]);
	}

	if (host->data_size)
		atmci_pdc_set_both_buf(host,
			((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT));

	return iflags;
}
985
/*
 * Prepare a dmaengine data transfer. Small or unaligned transfers fall
 * back to PIO via atmci_prepare_data(). On success the descriptor is
 * stored in host->dma.data_desc (submitted later by
 * atmci_submit_data_dma()) and the interrupt flags to enable are
 * returned.
 *
 * NOTE(review): the error paths return -ENODEV/-ENOMEM through a u32
 * return type; verify the caller distinguishes these from valid iflags.
 */
static u32
atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
{
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sg;
	unsigned int i;
	enum dma_data_direction direction;
	enum dma_transfer_direction slave_dirn;
	unsigned int sglen;
	u32 maxburst;
	u32 iflags;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	iflags = ATMCI_DATA_ERROR_FLAGS;

	/*
	 * DMA setup costs more than it saves for short transfers, and the
	 * engine needs word-aligned lengths and sg entries; fall back to
	 * PIO otherwise.
	 */
	if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
		return atmci_prepare_data(host, data);
	if (data->blksz & 3)
		return atmci_prepare_data(host, data);

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return atmci_prepare_data(host, data);
	}

	/* If we don't have a channel, we can't do DMA */
	chan = host->dma.chan;
	if (chan)
		host->data_chan = chan;

	if (!chan)
		return -ENODEV;

	if (data->flags & MMC_DATA_READ) {
		direction = DMA_FROM_DEVICE;
		host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
		maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst);
	} else {
		direction = DMA_TO_DEVICE;
		host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
		maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst);
	}

	if (host->caps.has_dma_conf_reg)
		/* Enable hardware handshaking with the chosen chunk size. */
		atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) |
			ATMCI_DMAEN);

	sglen = dma_map_sg(chan->device->dev, data->sg,
			data->sg_len, direction);

	dmaengine_slave_config(chan, &host->dma_conf);
	desc = dmaengine_prep_slave_sg(chan,
			data->sg, sglen, slave_dirn,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	host->dma.data_desc = desc;
	desc->callback = atmci_dma_complete;
	desc->callback_param = host;

	return iflags;
unmap_exit:
	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);
	return -ENOMEM;
}
1063
/*
 * PIO variant of submit_data: intentionally a no-op — no extra action is
 * needed here to start this transfer type.
 */
static void
atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
{
}
1069
1070
1071
1072
1073static void
1074atmci_submit_data_pdc(struct atmel_mci *host, struct mmc_data *data)
1075{
1076 if (data->flags & MMC_DATA_READ)
1077 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
1078 else
1079 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
1080}
1081
1082static void
1083atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
1084{
1085 struct dma_chan *chan = host->data_chan;
1086 struct dma_async_tx_descriptor *desc = host->dma.data_desc;
1087
1088 if (chan) {
1089 dmaengine_submit(desc);
1090 dma_async_issue_pending(chan);
1091 }
1092}
1093
/*
 * PIO variant of stop_transfer: flag the transfer complete and wait for
 * NOTBUSY before finishing the request.
 */
static void atmci_stop_transfer(struct atmel_mci *host)
{
	dev_dbg(&host->pdev->dev,
	        "(%s) set pending xfer complete\n", __func__);
	atmci_set_pending(host, EVENT_XFER_COMPLETE);
	atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
1101
1102
1103
1104
/* PDC variant of stop_transfer: disable both PDC transfer directions. */
static void atmci_stop_transfer_pdc(struct atmel_mci *host)
{
	atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
}
1109
1110static void atmci_stop_transfer_dma(struct atmel_mci *host)
1111{
1112 struct dma_chan *chan = host->data_chan;
1113
1114 if (chan) {
1115 dmaengine_terminate_all(chan);
1116 atmci_dma_cleanup(host);
1117 } else {
1118
1119 dev_dbg(&host->pdev->dev,
1120 "(%s) set pending xfer complete\n", __func__);
1121 atmci_set_pending(host, EVENT_XFER_COMPLETE);
1122 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1123 }
1124}
1125
1126
1127
1128
1129
/*
 * Start a queued request on the controller. Must be called with
 * host->lock held and host->state already set to STATE_SENDING_CMD.
 * Resets the controller first when required, programs timeout and block
 * registers, prepares and submits the data transfer, sends the command,
 * pre-builds the stop command, and arms a 2 s software timeout.
 */
static void atmci_start_request(struct atmel_mci *host,
		struct atmel_mci_slot *slot)
{
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_data *data;
	u32 iflags;
	u32 cmdflags;

	mrq = slot->mrq;
	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;

	dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode);

	if (host->need_reset || host->caps.need_reset_after_xfer) {
		/* Preserve the SDIO interrupt enables across the reset. */
		iflags = atmci_readl(host, ATMCI_IMR);
		iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
		atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
		atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
		atmci_writel(host, ATMCI_MR, host->mode_reg);
		if (host->caps.has_cfg_reg)
			atmci_writel(host, ATMCI_CFG, host->cfg_reg);
		atmci_writel(host, ATMCI_IER, iflags);
		host->need_reset = false;
	}
	atmci_writel(host, ATMCI_SDCR, slot->sdc_reg);

	/* Anything other than SDIO interrupts enabled here is unexpected. */
	iflags = atmci_readl(host, ATMCI_IMR);
	if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
		dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
				iflags);

	if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
		/* Send the 74-clock initialization sequence and busy-wait for it. */
		atmci_writel(host, ATMCI_CMDR, ATMCI_CMDR_SPCMD_INIT);
		while (!(atmci_readl(host, ATMCI_SR) & ATMCI_CMDRDY))
			cpu_relax();
	}
	iflags = 0;
	data = mrq->data;
	if (data) {
		atmci_set_timeout(host, slot, data);

		/* Must be set before the transfer-specific prepare below. */
		atmci_writel(host, ATMCI_BLKR, ATMCI_BCNT(data->blocks)
				| ATMCI_BLKLEN(data->blksz));
		dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
			ATMCI_BCNT(data->blocks) | ATMCI_BLKLEN(data->blksz));

		iflags |= host->prepare_data(host, data);
	}

	iflags |= ATMCI_CMDRDY;
	cmd = mrq->cmd;
	cmdflags = atmci_prepare_command(slot->mmc, cmd);
	atmci_send_command(host, cmd, cmdflags);

	if (data)
		host->submit_data(host, data);

	/* Pre-build the stop command so it can be issued immediately later. */
	if (mrq->stop) {
		host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
		host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
		if (!(data->flags & MMC_DATA_WRITE))
			host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ;
		if (data->flags & MMC_DATA_STREAM)
			host->stop_cmdr |= ATMCI_CMDR_STREAM;
		else
			host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
	}

	/* Enable interrupts last, after everything is set up. */
	atmci_writel(host, ATMCI_IER, iflags);

	/* Software timeout in case the hardware never signals. */
	mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000));
}
1217
/*
 * Hand a request to the controller: start it immediately if the host is
 * idle, otherwise put the slot on the host queue for atmci_request_end()
 * to pick up.
 */
static void atmci_queue_request(struct atmel_mci *host,
		struct atmel_mci_slot *slot, struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
			host->state);

	spin_lock_bh(&host->lock);
	slot->mrq = mrq;
	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		atmci_start_request(host, slot);
	} else {
		dev_dbg(&host->pdev->dev, "queue request\n");
		list_add_tail(&slot->queue_node, &host->queue);
	}
	spin_unlock_bh(&host->lock);
}
1235
1236static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1237{
1238 struct atmel_mci_slot *slot = mmc_priv(mmc);
1239 struct atmel_mci *host = slot->host;
1240 struct mmc_data *data;
1241
1242 WARN_ON(slot->mrq);
1243 dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253 if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) {
1254 mrq->cmd->error = -ENOMEDIUM;
1255 mmc_request_done(mmc, mrq);
1256 return;
1257 }
1258
1259
1260 data = mrq->data;
1261 if (data && data->blocks > 1 && data->blksz & 3) {
1262 mrq->cmd->error = -EINVAL;
1263 mmc_request_done(mmc, mrq);
1264 }
1265
1266 atmci_queue_request(host, slot, mrq);
1267}
1268
/*
 * mmc_host_ops.set_ios: apply bus width, clock rate, timing and power
 * mode. The clock divider is shared between slots, so the slowest
 * requested rate wins; MR/CFG updates are deferred (need_clock_update)
 * while requests are queued. When the last active slot turns its clock
 * off, the controller and peripheral clock are shut down.
 */
static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct atmel_mci_slot *slot = mmc_priv(mmc);
	struct atmel_mci *host = slot->host;
	unsigned int i;

	slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		slot->sdc_reg |= ATMCI_SDCBUS_1BIT;
		break;
	case MMC_BUS_WIDTH_4:
		slot->sdc_reg |= ATMCI_SDCBUS_4BIT;
		break;
	}

	if (ios->clock) {
		unsigned int clock_min = ~0U;
		u32 clkdiv;

		spin_lock_bh(&host->lock);
		if (!host->mode_reg) {
			/* mode_reg == 0 means the controller is off: bring it up. */
			clk_enable(host->mck);
			atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
			atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
			if (host->caps.has_cfg_reg)
				atmci_writel(host, ATMCI_CFG, host->cfg_reg);
		}

		/*
		 * The divider is shared; use the lowest rate any active
		 * slot has requested so no slot is overclocked.
		 */
		slot->clock = ios->clock;
		for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
			if (host->slot[i] && host->slot[i]->clock
					&& host->slot[i]->clock < clock_min)
				clock_min = host->slot[i]->clock;
		}

		/* Calculate clock divider */
		if (host->caps.has_odd_clk_div) {
			/* 9-bit divider: f = bus_hz / (clkdiv + 2) */
			clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
			if (clkdiv > 511) {
				dev_warn(&mmc->class_dev,
				         "clock %u too slow; using %lu\n",
				         clock_min, host->bus_hz / (511 + 2));
				clkdiv = 511;
			}
			host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1)
			                 | ATMCI_MR_CLKODD(clkdiv & 1);
		} else {
			/* 8-bit divider: f = bus_hz / (2 * (clkdiv + 1)) */
			clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
			if (clkdiv > 255) {
				dev_warn(&mmc->class_dev,
				         "clock %u too slow; using %lu\n",
				         clock_min, host->bus_hz / (2 * 256));
				clkdiv = 255;
			}
			host->mode_reg = ATMCI_MR_CLKDIV(clkdiv);
		}

		/*
		 * WRPROOF and RDPROOF prevent overruns/underruns by
		 * stopping the clock when the FIFO is full/empty.
		 */
		if (host->caps.has_rwproof)
			host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF);

		if (host->caps.has_cfg_reg) {
			/* setup High Speed mode in relation with card capacity */
			if (ios->timing == MMC_TIMING_SD_HS)
				host->cfg_reg |= ATMCI_CFG_HSMODE;
			else
				host->cfg_reg &= ~ATMCI_CFG_HSMODE;
		}

		if (list_empty(&host->queue)) {
			atmci_writel(host, ATMCI_MR, host->mode_reg);
			if (host->caps.has_cfg_reg)
				atmci_writel(host, ATMCI_CFG, host->cfg_reg);
		} else {
			/* Requests pending: defer to atmci_request_end(). */
			host->need_clock_update = true;
		}

		spin_unlock_bh(&host->lock);
	} else {
		bool any_slot_active = false;

		spin_lock_bh(&host->lock);
		slot->clock = 0;
		for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
			if (host->slot[i] && host->slot[i]->clock) {
				any_slot_active = true;
				break;
			}
		}
		if (!any_slot_active) {
			atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
			if (host->mode_reg) {
				/* Read back to flush the write before gating the clock. */
				atmci_readl(host, ATMCI_MR);
				clk_disable(host->mck);
			}
			host->mode_reg = 0;
		}
		spin_unlock_bh(&host->lock);
	}

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
		break;
	default:
		/*
		 * No power-switching support here; other power modes
		 * need no action.
		 */
		break;
	}
}
1398
1399static int atmci_get_ro(struct mmc_host *mmc)
1400{
1401 int read_only = -ENOSYS;
1402 struct atmel_mci_slot *slot = mmc_priv(mmc);
1403
1404 if (gpio_is_valid(slot->wp_pin)) {
1405 read_only = gpio_get_value(slot->wp_pin);
1406 dev_dbg(&mmc->class_dev, "card is %s\n",
1407 read_only ? "read-only" : "read-write");
1408 }
1409
1410 return read_only;
1411}
1412
1413static int atmci_get_cd(struct mmc_host *mmc)
1414{
1415 int present = -ENOSYS;
1416 struct atmel_mci_slot *slot = mmc_priv(mmc);
1417
1418 if (gpio_is_valid(slot->detect_pin)) {
1419 present = !(gpio_get_value(slot->detect_pin) ^
1420 slot->detect_is_active_high);
1421 dev_dbg(&mmc->class_dev, "card is %spresent\n",
1422 present ? "" : "not ");
1423 }
1424
1425 return present;
1426}
1427
1428static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1429{
1430 struct atmel_mci_slot *slot = mmc_priv(mmc);
1431 struct atmel_mci *host = slot->host;
1432
1433 if (enable)
1434 atmci_writel(host, ATMCI_IER, slot->sdio_irq);
1435 else
1436 atmci_writel(host, ATMCI_IDR, slot->sdio_irq);
1437}
1438
/* Entry points the MMC core uses to drive this controller. */
static const struct mmc_host_ops atmci_ops = {
	.request = atmci_request,
	.set_ios = atmci_set_ios,
	.get_ro = atmci_get_ro,
	.get_cd = atmci_get_cd,
	.enable_sdio_irq = atmci_enable_sdio_irq,
};
1446
1447
/*
 * Complete the current request and, if more requests are queued, start
 * the next one.  Must be called with host->lock held; the lock is
 * dropped around mmc_request_done() since that may call back into the
 * driver (see the __releases/__acquires annotations).
 */
static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct atmel_mci_slot *slot = NULL;
	struct mmc_host *prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	/*
	 * Update the MMC clock rate if necessary.  This may be needed
	 * if set_ios() was called while this slot was busy: set_ios()
	 * only sets need_clock_update in that case instead of writing
	 * MR/CFG directly.
	 */
	if (host->need_clock_update) {
		atmci_writel(host, ATMCI_MR, host->mode_reg);
		if (host->caps.has_cfg_reg)
			atmci_writel(host, ATMCI_CFG, host->cfg_reg);
	}

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		/* Dequeue the next waiting slot and kick off its request. */
		slot = list_entry(host->queue.next,
				struct atmel_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
				mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		atmci_start_request(host, slot);
	} else {
		dev_vdbg(&host->pdev->dev, "list empty\n");
		host->state = STATE_IDLE;
	}

	/* Request finished: disarm the request timeout timer. */
	del_timer(&host->timer);

	/* Drop the lock while notifying the core of completion. */
	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
1489
/*
 * Latch the command response from the controller and translate the
 * status saved by the interrupt handler (host->cmd_status) into a
 * Linux error code on cmd->error.
 */
static void atmci_command_complete(struct atmel_mci *host,
			struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	/* Read the response from the card (up to 16 bytes). */
	cmd->resp[0] = atmci_readl(host, ATMCI_RSPR);
	cmd->resp[1] = atmci_readl(host, ATMCI_RSPR);
	cmd->resp[2] = atmci_readl(host, ATMCI_RSPR);
	cmd->resp[3] = atmci_readl(host, ATMCI_RSPR);

	if (status & ATMCI_RTOE)
		cmd->error = -ETIMEDOUT;	/* response timeout */
	else if ((cmd->flags & MMC_RSP_CRC) && (status & ATMCI_RCRCE))
		cmd->error = -EILSEQ;		/* response CRC error */
	else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
		cmd->error = -EIO;		/* index/direction/end-bit error */
	else if (host->mrq->data && (host->mrq->data->blksz & 3)) {
		/*
		 * Block sizes not a multiple of 4 are only rejected on
		 * controllers with the need_blksz_mul_4 quirk, which
		 * also need a reset afterwards.
		 *
		 * NOTE(review): when need_blksz_mul_4 is clear,
		 * cmd->error is left untouched in this branch rather
		 * than being set to 0 — presumably it is zeroed by the
		 * core before submission; confirm.
		 */
		if (host->caps.need_blksz_mul_4) {
			cmd->error = -EINVAL;
			host->need_reset = 1;
		}
	} else
		cmd->error = 0;
}
1515
/*
 * Card-detect debounce timer callback.
 *
 * Armed by atmci_detect_interrupt() (which disabled its own IRQ);
 * re-enables the IRQ, samples the detect pin, and on a real change
 * aborts any request in flight for this slot and tells the MMC core.
 */
static void atmci_detect_change(unsigned long data)
{
	struct atmel_mci_slot *slot = (struct atmel_mci_slot *)data;
	bool present;
	bool present_old;

	/*
	 * atmci_cleanup_slot() sets ATMCI_SHUTDOWN (followed by an
	 * smp_wmb()) before freeing the detect IRQ.  We must not
	 * re-enable an IRQ that has already been freed, so bail out
	 * early during shutdown.
	 */
	smp_rmb();
	if (test_bit(ATMCI_SHUTDOWN, &slot->flags))
		return;

	enable_irq(gpio_to_irq(slot->detect_pin));
	present = !(gpio_get_value(slot->detect_pin) ^
			slot->detect_is_active_high);
	present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);

	dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
			present, present_old);

	if (present != present_old) {
		struct atmel_mci *host = slot->host;
		struct mmc_request *mrq;

		dev_dbg(&slot->mmc->class_dev, "card %s\n",
			present ? "inserted" : "removed");

		spin_lock(&host->lock);

		if (!present)
			clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
		else
			set_bit(ATMCI_CARD_PRESENT, &slot->flags);

		/* Abort any request for this slot, active or queued. */
		mrq = slot->mrq;
		if (mrq) {
			if (mrq == host->mrq) {
				/*
				 * The request is currently running:
				 * software-reset the controller to
				 * terminate any ongoing command or data
				 * transfer, then restore mode/config.
				 */
				atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
				atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
				atmci_writel(host, ATMCI_MR, host->mode_reg);
				if (host->caps.has_cfg_reg)
					atmci_writel(host, ATMCI_CFG, host->cfg_reg);

				host->data = NULL;
				host->cmd = NULL;

				/* Fail the phase that was in progress. */
				switch (host->state) {
				case STATE_IDLE:
					break;
				case STATE_SENDING_CMD:
					mrq->cmd->error = -ENOMEDIUM;
					if (mrq->data)
						host->stop_transfer(host);
					break;
				case STATE_DATA_XFER:
					mrq->data->error = -ENOMEDIUM;
					host->stop_transfer(host);
					break;
				case STATE_WAITING_NOTBUSY:
					mrq->data->error = -ENOMEDIUM;
					break;
				case STATE_SENDING_STOP:
					mrq->stop->error = -ENOMEDIUM;
					break;
				case STATE_END_REQUEST:
					break;
				}

				atmci_request_end(host, mrq);
			} else {
				/*
				 * The request was only queued, not yet
				 * started: fail it directly without
				 * touching the hardware.
				 */
				list_del(&slot->queue_node);
				mrq->cmd->error = -ENOMEDIUM;
				if (mrq->data)
					mrq->data->error = -ENOMEDIUM;
				if (mrq->stop)
					mrq->stop->error = -ENOMEDIUM;

				/* Drop the lock around the core callback. */
				spin_unlock(&host->lock);
				mmc_request_done(slot->mmc, mrq);
				spin_lock(&host->lock);
			}
		}
		spin_unlock(&host->lock);

		mmc_detect_change(slot->mmc, 0);
	}
}
1612
/*
 * Request state machine, run in tasklet (softirq) context.
 *
 * The interrupt handler records hardware status and posts events in
 * host->pending_events; this function consumes those events and walks
 * host->state until no further transition is possible, then stores the
 * resulting state back.  Runs entirely under host->lock.
 */
static void atmci_tasklet_func(unsigned long priv)
{
	struct atmel_mci *host = (struct atmel_mci *)priv;
	struct mmc_request *mrq = host->mrq;
	struct mmc_data *data = host->data;
	enum atmel_mci_state state = host->state;
	enum atmel_mci_state prev_state;
	u32 status;

	spin_lock(&host->lock);

	/* Re-read the state now that we hold the lock. */
	state = host->state;

	dev_vdbg(&host->pdev->dev,
		"tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
		state, host->pending_events, host->completed_events,
		atmci_readl(host, ATMCI_IMR));

	do {
		prev_state = state;
		dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state);

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			/*
			 * The command has been sent; wait for the
			 * EVENT_CMD_RDY event posted from the CMDRDY
			 * interrupt before completing it.
			 */
			dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
			if (!atmci_test_and_clear_pending(host,
						EVENT_CMD_RDY))
				break;

			dev_dbg(&host->pdev->dev, "set completed cmd ready\n");
			host->cmd = NULL;
			atmci_set_completed(host, EVENT_CMD_RDY);
			atmci_command_complete(host, mrq->cmd);
			if (mrq->data) {
				dev_dbg(&host->pdev->dev,
				        "command with data transfer");
				/*
				 * If the command failed, abort the data
				 * phase and mask the data interrupts;
				 * otherwise wait for the transfer.
				 */
				if (mrq->cmd->error) {
					host->stop_transfer(host);
					host->data = NULL;
					atmci_writel(host, ATMCI_IDR,
					        ATMCI_TXRDY | ATMCI_RXRDY
					        | ATMCI_DATA_ERROR_FLAGS);
					state = STATE_END_REQUEST;
				} else
					state = STATE_DATA_XFER;
			} else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
				/* R1b-type response: card may signal busy. */
				dev_dbg(&host->pdev->dev,
					"command response need waiting notbusy");
				atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
				state = STATE_WAITING_NOTBUSY;
			} else
				state = STATE_END_REQUEST;

			break;

		case STATE_DATA_XFER:
			/* A data error aborts the request immediately. */
			if (atmci_test_and_clear_pending(host,
						EVENT_DATA_ERROR)) {
				dev_dbg(&host->pdev->dev, "set completed data error\n");
				atmci_set_completed(host, EVENT_DATA_ERROR);
				state = STATE_END_REQUEST;
				break;
			}

			/*
			 * Wait for the transfer itself to finish;
			 * EVENT_XFER_COMPLETE is posted by whichever
			 * data path is active (PIO, PDC or DMA).
			 */
			dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");
			if (!atmci_test_and_clear_pending(host,
						EVENT_XFER_COMPLETE))
				break;

			dev_dbg(&host->pdev->dev,
			        "(%s) set completed xfer complete\n",
				__func__);
			atmci_set_completed(host, EVENT_XFER_COMPLETE);

			if (host->caps.need_notbusy_for_read_ops ||
			    (host->data->flags & MMC_DATA_WRITE)) {
				/* Writes (and reads, on some IP) must wait for NOTBUSY. */
				atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
				state = STATE_WAITING_NOTBUSY;
			} else if (host->mrq->stop) {
				/* Multi-block read: send the stop command. */
				atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
				atmci_send_stop_cmd(host, data);
				state = STATE_SENDING_STOP;
			} else {
				host->data = NULL;
				data->bytes_xfered = data->blocks * data->blksz;
				data->error = 0;
				state = STATE_END_REQUEST;
			}
			break;

		case STATE_WAITING_NOTBUSY:
			/*
			 * Wait for EVENT_NOTBUSY, posted from the
			 * NOTBUSY (or BLKE) interrupt when the card
			 * releases the busy signal.
			 */
			dev_dbg(&host->pdev->dev, "FSM: not busy?\n");
			if (!atmci_test_and_clear_pending(host,
						EVENT_NOTBUSY))
				break;

			dev_dbg(&host->pdev->dev, "set completed not busy\n");
			atmci_set_completed(host, EVENT_NOTBUSY);

			if (host->data) {
				/*
				 * A data transfer just went not-busy:
				 * either send the stop command or finish
				 * the request here.
				 */
				if (host->mrq->stop) {
					atmci_writel(host, ATMCI_IER,
					             ATMCI_CMDRDY);
					atmci_send_stop_cmd(host, data);
					state = STATE_SENDING_STOP;
				} else {
					host->data = NULL;
					data->bytes_xfered = data->blocks
					                     * data->blksz;
					data->error = 0;
					state = STATE_END_REQUEST;
				}
			} else
				state = STATE_END_REQUEST;
			break;

		case STATE_SENDING_STOP:
			/*
			 * The stop command has been sent; wait for its
			 * response (EVENT_CMD_RDY again).
			 */
			dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
			if (!atmci_test_and_clear_pending(host,
						EVENT_CMD_RDY))
				break;

			dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
			host->cmd = NULL;
			data->bytes_xfered = data->blocks * data->blksz;
			data->error = 0;
			atmci_command_complete(host, mrq->stop);
			if (mrq->stop->error) {
				host->stop_transfer(host);
				atmci_writel(host, ATMCI_IDR,
				        ATMCI_TXRDY | ATMCI_RXRDY
				        | ATMCI_DATA_ERROR_FLAGS);
				state = STATE_END_REQUEST;
			} else {
				atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
				state = STATE_WAITING_NOTBUSY;
			}
			host->data = NULL;
			break;

		case STATE_END_REQUEST:
			/* Mask data interrupts and report any latched data error. */
			atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY
			                   | ATMCI_DATA_ERROR_FLAGS);
			status = host->data_status;
			if (unlikely(status)) {
				host->stop_transfer(host);
				host->data = NULL;
				if (data) {
					if (status & ATMCI_DTOE) {
						data->error = -ETIMEDOUT;
					} else if (status & ATMCI_DCRCE) {
						data->error = -EILSEQ;
					} else {
						data->error = -EIO;
					}
				}
			}

			atmci_request_end(host, host->mrq);
			state = STATE_IDLE;
			break;
		}
	} while (state != prev_state);

	host->state = state;

	spin_unlock(&host->lock);
}
1818
/*
 * PIO read path: drain the receive data register into the scatterlist,
 * one 32-bit word at a time, handling words that straddle a
 * scatterlist entry boundary.  Called from the interrupt handler while
 * RXRDY is pending.
 */
static void atmci_read_data_pio(struct atmel_mci *host)
{
	struct scatterlist *sg = host->sg;
	void *buf = sg_virt(sg);
	unsigned int offset = host->pio_offset;
	struct mmc_data *data = host->data;
	u32 value;
	u32 status;
	unsigned int nbytes = 0;

	do {
		value = atmci_readl(host, ATMCI_RDR);
		if (likely(offset + 4 <= sg->length)) {
			/* Whole word fits in the current sg entry. */
			put_unaligned(value, (u32 *)(buf + offset));

			offset += 4;
			nbytes += 4;

			if (offset == sg->length) {
				/* Entry full: flush and advance to the next. */
				flush_dcache_page(sg_page(sg));
				host->sg = sg = sg_next(sg);
				host->sg_len--;
				if (!sg || !host->sg_len)
					goto done;

				offset = 0;
				buf = sg_virt(sg);
			}
		} else {
			/* Word straddles an sg boundary: split the copy. */
			unsigned int remaining = sg->length - offset;
			memcpy(buf + offset, &value, remaining);
			nbytes += remaining;

			flush_dcache_page(sg_page(sg));
			host->sg = sg = sg_next(sg);
			host->sg_len--;
			if (!sg || !host->sg_len)
				goto done;

			/* Copy the leftover bytes into the next entry. */
			offset = 4 - remaining;
			buf = sg_virt(sg);
			memcpy(buf, (u8 *)&value + remaining, offset);
			nbytes += offset;
		}

		status = atmci_readl(host, ATMCI_SR);
		if (status & ATMCI_DATA_ERROR_FLAGS) {
			/* Save the status for the tasklet; stop PIO now. */
			atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_RXRDY
						| ATMCI_DATA_ERROR_FLAGS));
			host->data_status = status;
			data->bytes_xfered += nbytes;
			return;
		}
	} while (status & ATMCI_RXRDY);

	/* FIFO empty for now: remember where we stopped. */
	host->pio_offset = offset;
	data->bytes_xfered += nbytes;

	return;

done:
	/* All sg entries consumed: wait for NOTBUSY, signal completion. */
	atmci_writel(host, ATMCI_IDR, ATMCI_RXRDY);
	atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
	data->bytes_xfered += nbytes;
	/* Order the bytes_xfered update before the event bit. */
	smp_wmb();
	atmci_set_pending(host, EVENT_XFER_COMPLETE);
}
1886
/*
 * PIO write path: feed the transmit data register from the scatterlist,
 * one 32-bit word at a time, assembling words that straddle a
 * scatterlist entry boundary.  Called from the interrupt handler while
 * TXRDY is pending.
 */
static void atmci_write_data_pio(struct atmel_mci *host)
{
	struct scatterlist *sg = host->sg;
	void *buf = sg_virt(sg);
	unsigned int offset = host->pio_offset;
	struct mmc_data *data = host->data;
	u32 value;
	u32 status;
	unsigned int nbytes = 0;

	do {
		if (likely(offset + 4 <= sg->length)) {
			/* Whole word available in the current sg entry. */
			value = get_unaligned((u32 *)(buf + offset));
			atmci_writel(host, ATMCI_TDR, value);

			offset += 4;
			nbytes += 4;
			if (offset == sg->length) {
				host->sg = sg = sg_next(sg);
				host->sg_len--;
				if (!sg || !host->sg_len)
					goto done;

				offset = 0;
				buf = sg_virt(sg);
			}
		} else {
			/* Word straddles an sg boundary: gather from both entries. */
			unsigned int remaining = sg->length - offset;

			value = 0;
			memcpy(&value, buf + offset, remaining);
			nbytes += remaining;

			host->sg = sg = sg_next(sg);
			host->sg_len--;
			if (!sg || !host->sg_len) {
				/* Last (partial) word: push what we have. */
				atmci_writel(host, ATMCI_TDR, value);
				goto done;
			}

			offset = 4 - remaining;
			buf = sg_virt(sg);
			memcpy((u8 *)&value + remaining, buf, offset);
			atmci_writel(host, ATMCI_TDR, value);
			nbytes += offset;
		}

		status = atmci_readl(host, ATMCI_SR);
		if (status & ATMCI_DATA_ERROR_FLAGS) {
			/* Save the status for the tasklet; stop PIO now. */
			atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_TXRDY
						| ATMCI_DATA_ERROR_FLAGS));
			host->data_status = status;
			data->bytes_xfered += nbytes;
			return;
		}
	} while (status & ATMCI_TXRDY);

	/* FIFO full for now: remember where we stopped. */
	host->pio_offset = offset;
	data->bytes_xfered += nbytes;

	return;

done:
	/* All data queued: wait for NOTBUSY and signal completion. */
	atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY);
	atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
	data->bytes_xfered += nbytes;
	/* Order the bytes_xfered update before the event bit. */
	smp_wmb();
	atmci_set_pending(host, EVENT_XFER_COMPLETE);
}
1956
1957static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
1958{
1959 int i;
1960
1961 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1962 struct atmel_mci_slot *slot = host->slot[i];
1963 if (slot && (status & slot->sdio_irq)) {
1964 mmc_signal_sdio_irq(slot->mmc);
1965 }
1966 }
1967}
1968
1969
/*
 * Top-half interrupt handler.
 *
 * Reads status, masks each serviced source, records the raw status for
 * the tasklet (cmd_status/data_status) and posts events; the actual
 * state-machine work happens in atmci_tasklet_func().  Loops up to a
 * few passes to pick up interrupts raised while servicing earlier ones.
 */
static irqreturn_t atmci_interrupt(int irq, void *dev_id)
{
	struct atmel_mci *host = dev_id;
	u32 status, mask, pending;
	unsigned int pass_count = 0;

	do {
		status = atmci_readl(host, ATMCI_SR);
		mask = atmci_readl(host, ATMCI_IMR);
		pending = status & mask;
		if (!pending)
			break;

		if (pending & ATMCI_DATA_ERROR_FLAGS) {
			dev_dbg(&host->pdev->dev, "IRQ: data error\n");
			/* Mask all data-path sources; the tasklet aborts. */
			atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
					| ATMCI_RXRDY | ATMCI_TXRDY
					| ATMCI_ENDRX | ATMCI_ENDTX
					| ATMCI_RXBUFF | ATMCI_TXBUFE);

			host->data_status = status;
			dev_dbg(&host->pdev->dev, "set pending data error\n");
			/* Publish data_status before the event bit. */
			smp_wmb();
			atmci_set_pending(host, EVENT_DATA_ERROR);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & ATMCI_TXBUFE) {
			dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n");
			atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
			atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
			/*
			 * Both PDC transmit buffers have drained.  If
			 * data remains, reprogram both buffers and
			 * re-arm the interrupts; otherwise the PDC
			 * transfer is complete.
			 */
			if (host->data_size) {
				atmci_pdc_set_both_buf(host, XFER_TRANSMIT);
				atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
				atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE);
			} else {
				atmci_pdc_complete(host);
			}
		} else if (pending & ATMCI_ENDTX) {
			dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n");
			atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);

			/* First buffer done: queue the next chunk, if any. */
			if (host->data_size) {
				atmci_pdc_set_single_buf(host,
						XFER_TRANSMIT, PDC_SECOND_BUF);
				atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
			}
		}

		if (pending & ATMCI_RXBUFF) {
			dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n");
			atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
			atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
			/*
			 * Both PDC receive buffers are full.  If data
			 * remains, reprogram both buffers and re-arm
			 * the interrupts; otherwise the PDC transfer
			 * is complete.
			 */
			if (host->data_size) {
				atmci_pdc_set_both_buf(host, XFER_RECEIVE);
				atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
				atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF);
			} else {
				atmci_pdc_complete(host);
			}
		} else if (pending & ATMCI_ENDRX) {
			dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n");
			atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);

			/* First buffer done: queue the next chunk, if any. */
			if (host->data_size) {
				atmci_pdc_set_single_buf(host,
						XFER_RECEIVE, PDC_SECOND_BUF);
				atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
			}
		}

		/*
		 * Block-transfer end (BLKE) is forwarded to the state
		 * machine as a not-busy event, the same as NOTBUSY
		 * below.
		 */
		if (pending & ATMCI_BLKE) {
			dev_dbg(&host->pdev->dev, "IRQ: blke\n");
			atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
			smp_wmb();
			dev_dbg(&host->pdev->dev, "set pending notbusy\n");
			atmci_set_pending(host, EVENT_NOTBUSY);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & ATMCI_NOTBUSY) {
			dev_dbg(&host->pdev->dev, "IRQ: not_busy\n");
			atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
			smp_wmb();
			dev_dbg(&host->pdev->dev, "set pending notbusy\n");
			atmci_set_pending(host, EVENT_NOTBUSY);
			tasklet_schedule(&host->tasklet);
		}

		/* PIO data path: drain/fill the FIFO in IRQ context. */
		if (pending & ATMCI_RXRDY)
			atmci_read_data_pio(host);
		if (pending & ATMCI_TXRDY)
			atmci_write_data_pio(host);

		if (pending & ATMCI_CMDRDY) {
			dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n");
			atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
			host->cmd_status = status;
			/* Publish cmd_status before the event bit. */
			smp_wmb();
			dev_dbg(&host->pdev->dev, "set pending cmd rdy\n");
			atmci_set_pending(host, EVENT_CMD_RDY);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
			atmci_sdio_interrupt(host, status);

	} while (pass_count++ < 5);

	/* pass_count stays 0 only if nothing was pending on the first pass. */
	return pass_count ? IRQ_HANDLED : IRQ_NONE;
}
2097
/*
 * Card-detect GPIO interrupt: mask the IRQ (it is re-enabled from the
 * debounce timer callback) and arm a 20 ms debounce timer.
 */
static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
{
	struct atmel_mci_slot *slot = dev_id;

	/*
	 * Disable this interrupt until the pin has stabilized; the
	 * card state is sampled later in atmci_detect_change().
	 * mod_timer() is used (rather than add_timer) because the
	 * timer may already be pending from a previous bounce.
	 */
	disable_irq_nosync(irq);
	mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20));

	return IRQ_HANDLED;
}
2112
/*
 * Set up and register one MMC slot: allocate the mmc_host, fill in
 * capabilities from slot platform data and controller caps, claim the
 * detect/WP GPIOs and, when possible, a debounced card-detect IRQ.
 *
 * Returns 0 on success or -ENOMEM if the host allocation fails.
 */
static int __init atmci_init_slot(struct atmel_mci *host,
		struct mci_slot_pdata *slot_data, unsigned int id,
		u32 sdc_reg, u32 sdio_irq)
{
	struct mmc_host *mmc;
	struct atmel_mci_slot *slot;

	mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->mmc = mmc;
	slot->host = host;
	slot->detect_pin = slot_data->detect_pin;
	slot->wp_pin = slot_data->wp_pin;
	slot->detect_is_active_high = slot_data->detect_is_active_high;
	slot->sdc_reg = sdc_reg;
	slot->sdio_irq = sdio_irq;

	dev_dbg(&mmc->class_dev,
		"slot[%u]: bus_width=%u, detect_pin=%d, "
		"detect_is_active_high=%s, wp_pin=%d\n",
		id, slot_data->bus_width, slot_data->detect_pin,
		slot_data->detect_is_active_high ? "true" : "false",
		slot_data->wp_pin);

	mmc->ops = &atmci_ops;
	mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
	mmc->f_max = host->bus_hz / 2;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	if (sdio_irq)
		mmc->caps |= MMC_CAP_SDIO_IRQ;
	if (host->caps.has_highspeed)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;
	/*
	 * Only advertise 4-bit mode when the controller has the
	 * read/write-proof capability.  NOTE(review): presumably this
	 * avoids FIFO under-/overruns corrupting data in wide-bus
	 * mode on older IP — confirm against the datasheet.
	 */
	if ((slot_data->bus_width >= 4) && host->caps.has_rwproof)
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	/* Transfer-size limits differ between IP versions. */
	if (atmci_get_version(host) < 0x200) {
		mmc->max_segs = 256;
		mmc->max_blk_size = 4095;
		mmc->max_blk_count = 256;
		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs;
	} else {
		mmc->max_segs = 64;
		mmc->max_req_size = 32768 * 512;
		mmc->max_blk_size = 32768;
		mmc->max_blk_count = 512;
	}

	/* Assume the card is present initially, then check the pin. */
	set_bit(ATMCI_CARD_PRESENT, &slot->flags);
	if (gpio_is_valid(slot->detect_pin)) {
		if (gpio_request(slot->detect_pin, "mmc_detect")) {
			dev_dbg(&mmc->class_dev, "no detect pin available\n");
			slot->detect_pin = -EBUSY;
		} else if (gpio_get_value(slot->detect_pin) ^
				slot->detect_is_active_high) {
			clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
		}
	}

	/* No usable detect pin: the core must poll for card changes. */
	if (!gpio_is_valid(slot->detect_pin))
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	if (gpio_is_valid(slot->wp_pin)) {
		if (gpio_request(slot->wp_pin, "mmc_wp")) {
			dev_dbg(&mmc->class_dev, "no WP pin available\n");
			slot->wp_pin = -EBUSY;
		}
	}

	host->slot[id] = slot;
	mmc_add_host(mmc);

	if (gpio_is_valid(slot->detect_pin)) {
		int ret;

		/* Debounce timer, armed from atmci_detect_interrupt(). */
		setup_timer(&slot->detect_timer, atmci_detect_change,
				(unsigned long)slot);

		ret = request_irq(gpio_to_irq(slot->detect_pin),
				atmci_detect_interrupt,
				IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
				"mmc-detect", slot);
		if (ret) {
			dev_dbg(&mmc->class_dev,
				"could not request IRQ %d for detect pin\n",
				gpio_to_irq(slot->detect_pin));
			gpio_free(slot->detect_pin);
			slot->detect_pin = -EBUSY;
		}
	}

	atmci_init_debugfs(slot);

	return 0;
}
2217
/*
 * Unregister and free one slot; counterpart of atmci_init_slot().
 * The card-detect machinery must be quiesced before its IRQ is freed.
 */
static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
		unsigned int id)
{
	/*
	 * Set ATMCI_SHUTDOWN (paired with the smp_rmb() in
	 * atmci_detect_change()) so a late debounce-timer run will
	 * not re-enable the detect IRQ after it has been freed below.
	 */
	set_bit(ATMCI_SHUTDOWN, &slot->flags);
	smp_wmb();

	mmc_remove_host(slot->mmc);

	if (gpio_is_valid(slot->detect_pin)) {
		int pin = slot->detect_pin;

		/* Free the IRQ first so the timer cannot be re-armed. */
		free_irq(gpio_to_irq(pin), slot);
		del_timer_sync(&slot->detect_timer);
		gpio_free(pin);
	}
	if (gpio_is_valid(slot->wp_pin))
		gpio_free(slot->wp_pin);

	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}
2241
2242static bool atmci_filter(struct dma_chan *chan, void *pdata)
2243{
2244 struct mci_platform_data *sl_pdata = pdata;
2245 struct mci_dma_data *sl;
2246
2247 if (!sl_pdata)
2248 return false;
2249
2250 sl = sl_pdata->dma_slave;
2251 if (sl && find_slave_dev(sl) == chan->device->dev) {
2252 chan->private = slave_data_ptr(sl);
2253 return true;
2254 } else {
2255 return false;
2256 }
2257}
2258
2259static bool atmci_configure_dma(struct atmel_mci *host)
2260{
2261 struct mci_platform_data *pdata;
2262 dma_cap_mask_t mask;
2263
2264 if (host == NULL)
2265 return false;
2266
2267 pdata = host->pdev->dev.platform_data;
2268
2269 dma_cap_zero(mask);
2270 dma_cap_set(DMA_SLAVE, mask);
2271
2272 host->dma.chan = dma_request_slave_channel_compat(mask, atmci_filter, pdata,
2273 &host->pdev->dev, "rxtx");
2274 if (!host->dma.chan) {
2275 dev_warn(&host->pdev->dev, "no DMA channel available\n");
2276 return false;
2277 } else {
2278 dev_info(&host->pdev->dev,
2279 "using %s for DMA transfers\n",
2280 dma_chan_name(host->dma.chan));
2281
2282 host->dma_conf.src_addr = host->mapbase + ATMCI_RDR;
2283 host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2284 host->dma_conf.src_maxburst = 1;
2285 host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR;
2286 host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2287 host->dma_conf.dst_maxburst = 1;
2288 host->dma_conf.device_fc = false;
2289 return true;
2290 }
2291}
2292
2293
2294
2295
2296
2297
/*
 * Determine the controller capability set from the IP version register.
 *
 * Capabilities start at the most restricted (oldest IP) defaults; the
 * switch below cascades deliberately so each newer major version also
 * inherits everything enabled for the older ones.
 */
static void __init atmci_get_cap(struct atmel_mci *host)
{
	unsigned int version;

	version = atmci_get_version(host);
	dev_info(&host->pdev->dev,
			"version: 0x%x\n", version);

	/* Oldest-IP defaults; newer versions override below. */
	host->caps.has_dma_conf_reg = 0;
	host->caps.has_pdc = ATMCI_PDC_CONNECTED;
	host->caps.has_cfg_reg = 0;
	host->caps.has_cstor_reg = 0;
	host->caps.has_highspeed = 0;
	host->caps.has_rwproof = 0;
	host->caps.has_odd_clk_div = 0;
	host->caps.has_bad_data_ordering = 1;
	host->caps.need_reset_after_xfer = 1;
	host->caps.need_blksz_mul_4 = 1;
	host->caps.need_notbusy_for_read_ops = 0;

	/* Keep only the major version number. */
	switch (version & 0xf00) {
	case 0x500:
		host->caps.has_odd_clk_div = 1;
		/* fall through */
	case 0x400:
	case 0x300:
		host->caps.has_dma_conf_reg = 1;
		host->caps.has_pdc = 0;
		host->caps.has_cfg_reg = 1;
		host->caps.has_cstor_reg = 1;
		host->caps.has_highspeed = 1;
		/* fall through */
	case 0x200:
		host->caps.has_rwproof = 1;
		host->caps.need_blksz_mul_4 = 0;
		host->caps.need_notbusy_for_read_ops = 1;
		/* fall through */
	case 0x100:
		host->caps.has_bad_data_ordering = 0;
		host->caps.need_reset_after_xfer = 0;
		/* fall through */
	case 0x0:
		break;
	default:
		host->caps.has_pdc = 0;
		dev_warn(&host->pdev->dev,
				"Unmanaged mci version, set minimum capabilities\n");
		break;
	}
}
2345
/*
 * Probe: map the controller, acquire clock and IRQ, select the data
 * transfer method (DMA, then PDC, then PIO as fallbacks) and register
 * the slots described by platform data / device tree.
 */
static int __init atmci_probe(struct platform_device *pdev)
{
	struct mci_platform_data *pdata;
	struct atmel_mci *host;
	struct resource *regs;
	unsigned int nr_slots;
	int irq;
	int ret;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs)
		return -ENXIO;
	/* Platform data may come from the board file or from DT. */
	pdata = pdev->dev.platform_data;
	if (!pdata) {
		pdata = atmci_of_init(pdev);
		if (IS_ERR(pdata)) {
			dev_err(&pdev->dev, "platform data not available\n");
			return PTR_ERR(pdata);
		}
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	host = kzalloc(sizeof(struct atmel_mci), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->pdev = pdev;
	spin_lock_init(&host->lock);
	INIT_LIST_HEAD(&host->queue);

	host->mck = clk_get(&pdev->dev, "mci_clk");
	if (IS_ERR(host->mck)) {
		ret = PTR_ERR(host->mck);
		goto err_clk_get;
	}

	ret = -ENOMEM;
	host->regs = ioremap(regs->start, resource_size(regs));
	if (!host->regs)
		goto err_ioremap;

	/* Reset the controller and record the bus clock rate. */
	clk_enable(host->mck);
	atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
	host->bus_hz = clk_get_rate(host->mck);
	clk_disable(host->mck);

	host->mapbase = regs->start;

	tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host);

	ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host);
	if (ret)
		goto err_request_irq;

	/* Get MCI capabilities and set operations accordingly. */
	atmci_get_cap(host);
	if (atmci_configure_dma(host)) {
		host->prepare_data = &atmci_prepare_data_dma;
		host->submit_data = &atmci_submit_data_dma;
		host->stop_transfer = &atmci_stop_transfer_dma;
	} else if (host->caps.has_pdc) {
		dev_info(&pdev->dev, "using PDC\n");
		host->prepare_data = &atmci_prepare_data_pdc;
		host->submit_data = &atmci_submit_data_pdc;
		host->stop_transfer = &atmci_stop_transfer_pdc;
	} else {
		dev_info(&pdev->dev, "using PIO\n");
		host->prepare_data = &atmci_prepare_data;
		host->submit_data = &atmci_submit_data;
		host->stop_transfer = &atmci_stop_transfer;
	}

	platform_set_drvdata(pdev, host);

	/* Watchdog for requests that never complete. */
	setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);

	/* We need at least one slot to succeed. */
	nr_slots = 0;
	ret = -ENODEV;
	if (pdata->slot[0].bus_width) {
		ret = atmci_init_slot(host, &pdata->slot[0],
				0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
		if (!ret) {
			nr_slots++;
			host->buf_size = host->slot[0]->mmc->max_req_size;
		}
	}
	if (pdata->slot[1].bus_width) {
		ret = atmci_init_slot(host, &pdata->slot[1],
				1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
		if (!ret) {
			nr_slots++;
			/* buf_size must cover the largest request of any slot. */
			if (host->slot[1]->mmc->max_req_size > host->buf_size)
				host->buf_size =
					host->slot[1]->mmc->max_req_size;
		}
	}

	if (!nr_slots) {
		dev_err(&pdev->dev, "init failed: no slot defined\n");
		goto err_init_slot;
	}

	/*
	 * Controllers without read/write proof get a coherent buffer
	 * sized for the largest possible request.  NOTE(review):
	 * presumably used as a bounce buffer by the data-preparation
	 * paths — confirm against prepare_data implementations.
	 */
	if (!host->caps.has_rwproof) {
		host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
		                                  &host->buf_phys_addr,
		                                  GFP_KERNEL);
		if (!host->buffer) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "buffer allocation failed\n");
			goto err_init_slot;
		}
	}

	dev_info(&pdev->dev,
			"Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
			host->mapbase, irq, nr_slots);

	return 0;

err_init_slot:
	if (host->dma.chan)
		dma_release_channel(host->dma.chan);
	free_irq(irq, host);
err_request_irq:
	iounmap(host->regs);
err_ioremap:
	clk_put(host->mck);
err_clk_get:
	kfree(host);
	return ret;
}
2481
/* Tear down everything set up in atmci_probe(), in reverse order. */
static int __exit atmci_remove(struct platform_device *pdev)
{
	struct atmel_mci *host = platform_get_drvdata(pdev);
	unsigned int i;

	platform_set_drvdata(pdev, NULL);

	if (host->buffer)
		dma_free_coherent(&pdev->dev, host->buf_size,
		                  host->buffer, host->buf_phys_addr);

	for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
		if (host->slot[i])
			atmci_cleanup_slot(host->slot[i], i);
	}

	/* Mask all interrupts, disable the controller, flush status. */
	clk_enable(host->mck);
	atmci_writel(host, ATMCI_IDR, ~0UL);
	atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
	atmci_readl(host, ATMCI_SR);
	clk_disable(host->mck);

	if (host->dma.chan)
		dma_release_channel(host->dma.chan);

	free_irq(platform_get_irq(pdev, 0), host);
	iounmap(host->regs);

	clk_put(host->mck);
	kfree(host);

	return 0;
}
2515
2516#ifdef CONFIG_PM
/*
 * Suspend every registered slot.  If any slot fails to suspend, the
 * slots already suspended are resumed again and the error is returned
 * so the system suspend aborts cleanly.
 */
static int atmci_suspend(struct device *dev)
{
	struct atmel_mci *host = dev_get_drvdata(dev);
	int i;

	for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
		struct atmel_mci_slot *slot = host->slot[i];
		int ret;

		if (!slot)
			continue;
		ret = mmc_suspend_host(slot->mmc);
		if (ret < 0) {
			/* Roll back: resume everything suspended so far. */
			while (--i >= 0) {
				slot = host->slot[i];
				if (slot
				&& test_bit(ATMCI_SUSPENDED, &slot->flags)) {
					mmc_resume_host(host->slot[i]->mmc);
					clear_bit(ATMCI_SUSPENDED, &slot->flags);
				}
			}
			return ret;
		} else {
			set_bit(ATMCI_SUSPENDED, &slot->flags);
		}
	}

	return 0;
}
2546
2547static int atmci_resume(struct device *dev)
2548{
2549 struct atmel_mci *host = dev_get_drvdata(dev);
2550 int i;
2551 int ret = 0;
2552
2553 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2554 struct atmel_mci_slot *slot = host->slot[i];
2555 int err;
2556
2557 slot = host->slot[i];
2558 if (!slot)
2559 continue;
2560 if (!test_bit(ATMCI_SUSPENDED, &slot->flags))
2561 continue;
2562 err = mmc_resume_host(slot->mmc);
2563 if (err < 0)
2564 ret = err;
2565 else
2566 clear_bit(ATMCI_SUSPENDED, &slot->flags);
2567 }
2568
2569 return ret;
2570}
2571static SIMPLE_DEV_PM_OPS(atmci_pm, atmci_suspend, atmci_resume);
2572#define ATMCI_PM_OPS (&atmci_pm)
2573#else
2574#define ATMCI_PM_OPS NULL
2575#endif
2576
/* Platform driver glue; .probe is supplied via platform_driver_probe(). */
static struct platform_driver atmci_driver = {
	.remove = __exit_p(atmci_remove),
	.driver = {
		.name = "atmel_mci",
		.pm = ATMCI_PM_OPS,
		.of_match_table = of_match_ptr(atmci_dt_ids),
	},
};
2585
/*
 * Module init.  Registered via late_initcall() below — NOTE(review):
 * presumably so provider drivers (DMA, clocks) have probed first;
 * confirm.
 */
static int __init atmci_init(void)
{
	return platform_driver_probe(&atmci_driver, atmci_probe);
}
2590
/* Module unload: unregister the platform driver. */
static void __exit atmci_exit(void)
{
	platform_driver_unregister(&atmci_driver);
}
2595
2596late_initcall(atmci_init);
2597module_exit(atmci_exit);
2598
2599MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
2600MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
2601MODULE_LICENSE("GPL v2");
2602