/*
 * Atmel MultiMedia Card Interface driver
 *
 * Copyright (C) 2004-2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/types.h>
#include <linux/platform_data/atmel.h>

#include <linux/mmc/host.h>
#include <linux/mmc/sdio.h>

#include <mach/atmel-mci.h>
#include <linux/atmel-mci.h>
#include <linux/atmel_pdc.h>

#include <asm/io.h>
#include <asm/unaligned.h>

#include "atmel-mci-regs.h"
44
45#define ATMCI_DATA_ERROR_FLAGS (ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE)
46#define ATMCI_DMA_THRESHOLD 16
47
48enum {
49 EVENT_CMD_RDY = 0,
50 EVENT_XFER_COMPLETE,
51 EVENT_NOTBUSY,
52 EVENT_DATA_ERROR,
53};
54
55enum atmel_mci_state {
56 STATE_IDLE = 0,
57 STATE_SENDING_CMD,
58 STATE_DATA_XFER,
59 STATE_WAITING_NOTBUSY,
60 STATE_SENDING_STOP,
61 STATE_END_REQUEST,
62};
63
64enum atmci_xfer_dir {
65 XFER_RECEIVE = 0,
66 XFER_TRANSMIT,
67};
68
69enum atmci_pdc_buf {
70 PDC_FIRST_BUF = 0,
71 PDC_SECOND_BUF,
72};
73
74struct atmel_mci_caps {
75 bool has_dma_conf_reg;
76 bool has_pdc;
77 bool has_cfg_reg;
78 bool has_cstor_reg;
79 bool has_highspeed;
80 bool has_rwproof;
81 bool has_odd_clk_div;
82 bool has_bad_data_ordering;
83 bool need_reset_after_xfer;
84 bool need_blksz_mul_4;
85 bool need_notbusy_for_read_ops;
86};
87
88struct atmel_mci_dma {
89 struct dma_chan *chan;
90 struct dma_async_tx_descriptor *data_desc;
91};
92
/*
 * struct atmel_mci - MMC controller state shared between all slots
 * @lock: Spinlock protecting the queue and associated data.
 * @regs: Pointer to MMIO registers.
 * @sg: Scatterlist entry currently being processed by PIO or PDC code.
 * @sg_len: Number of entries left to process in @sg.
 * @pio_offset: Offset into the current scatterlist entry.
 * @buffer: Bounce buffer used if the controller lacks the r/w proof
 *	capability; the whole transfer then goes through this single buffer.
 * @buf_size: Size of the bounce buffer.
 * @buf_phys_addr: Physical (DMA) address of the bounce buffer, needed by
 *	the PDC.
 * @cur_slot: The slot which is currently using the controller.
 * @mrq: The request currently being processed on @cur_slot,
 *	or NULL if the controller is idle.
 * @cmd: The command currently being sent to the card, or NULL.
 * @data: The data currently being transferred, or NULL if no data
 *	transfer is in progress.
 * @data_size: Just data->blocks * data->blksz.
 * @dma: DMA client state.
 * @data_chan: DMA channel being used for the current data transfer.
 * @dma_conf: DMA slave configuration for the data channel.
 * @cmd_status: Snapshot of SR taken upon completion of the current
 *	command. Only valid when EVENT_CMD_RDY is pending.
 * @data_status: Snapshot of SR taken upon completion of the current
 *	data transfer. Only valid when EVENT_DATA_ERROR is pending.
 * @stop_cmdr: Value to be loaded into CMDR when the stop command is
 *	to be sent.
 * @tasklet: Tasklet running the request state machine.
 * @pending_events: Bitmask of events flagged by the interrupt handler
 *	to be processed by the tasklet.
 * @completed_events: Bitmask of events which the state machine has
 *	processed.
 * @state: Tasklet state.
 * @queue: List of slots waiting for access to the controller.
 * @need_clock_update: Update the clock rate before the next request.
 * @need_reset: Reset controller before next request.
 * @timer: Software timeout timer used as a watchdog when the hardware
 *	data timeout cannot be relied upon.
 * @mode_reg: Value of the MR register.
 * @cfg_reg: Value of the CFG register.
 * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
 *	rate and timeout calculations.
 * @mapbase: Physical address of the MMIO registers.
 * @mck: The peripheral bus clock hooked up to the MMC controller.
 * @pdev: Platform device associated with the MMC controller.
 * @slot: Slots sharing this MMC controller.
 * @caps: MCI capabilities depending on the MCI version.
 * @prepare_data: Version-specific hook to set up a data transfer.
 * @submit_data: Version-specific hook to start a data transfer.
 * @stop_transfer: Version-specific hook to stop a data transfer.
 *
 * Locking
 * =======
 *
 * @lock is a softirq-safe spinlock protecting @queue as well as
 * @cur_slot, @mrq and @state. These must always be updated
 * at the same time while holding @lock.
 *
 * @lock also protects mode_reg and need_clock_update since these are
 * used to synchronize mode register updates with the queue
 * processing.
 *
 * The @mrq field of struct atmel_mci_slot is also protected by @lock,
 * and must always be written at the same time as the slot is added to
 * @queue.
 *
 * @pending_events and @completed_events are accessed using atomic bit
 * operations, so they don't need any locking.
 *
 * None of the fields touched by the interrupt handler need any
 * locking. However, ordering is important: before EVENT_DATA_ERROR or
 * EVENT_XFER_COMPLETE is flagged, all data-related fields must have
 * been updated so that the tasklet sees a consistent view.
 */
174struct atmel_mci {
175 spinlock_t lock;
176 void __iomem *regs;
177
178 struct scatterlist *sg;
179 unsigned int sg_len;
180 unsigned int pio_offset;
181 unsigned int *buffer;
182 unsigned int buf_size;
183 dma_addr_t buf_phys_addr;
184
185 struct atmel_mci_slot *cur_slot;
186 struct mmc_request *mrq;
187 struct mmc_command *cmd;
188 struct mmc_data *data;
189 unsigned int data_size;
190
191 struct atmel_mci_dma dma;
192 struct dma_chan *data_chan;
193 struct dma_slave_config dma_conf;
194
195 u32 cmd_status;
196 u32 data_status;
197 u32 stop_cmdr;
198
199 struct tasklet_struct tasklet;
200 unsigned long pending_events;
201 unsigned long completed_events;
202 enum atmel_mci_state state;
203 struct list_head queue;
204
205 bool need_clock_update;
206 bool need_reset;
207 struct timer_list timer;
208 u32 mode_reg;
209 u32 cfg_reg;
210 unsigned long bus_hz;
211 unsigned long mapbase;
212 struct clk *mck;
213 struct platform_device *pdev;
214
215 struct atmel_mci_slot *slot[ATMCI_MAX_NR_SLOTS];
216
217 struct atmel_mci_caps caps;
218
219 u32 (*prepare_data)(struct atmel_mci *host, struct mmc_data *data);
220 void (*submit_data)(struct atmel_mci *host, struct mmc_data *data);
221 void (*stop_transfer)(struct atmel_mci *host);
222};
223
/*
 * struct atmel_mci_slot - MMC slot state
 * @mmc: The mmc_host representing this slot.
 * @host: The MMC controller this slot is using.
 * @sdc_reg: Value of SDCR to be written before using this slot.
 * @sdio_irq: SDIO irq mask for this slot.
 * @mrq: mmc_request currently being processed or waiting to be
 *	processed, or NULL when the slot is idle.
 * @queue_node: List node for placing this node in the @queue list of
 *	&struct atmel_mci.
 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
 * @flags: Random state bits associated with the slot.
 * @detect_pin: GPIO pin used for card detection, or negative if not
 *	available.
 * @wp_pin: GPIO pin used for card write protection, or negative if not
 *	available.
 * @detect_is_active_high: The state of the detect pin when it is active.
 * @detect_timer: Timer used for debouncing @detect_pin interrupts.
 */
243struct atmel_mci_slot {
244 struct mmc_host *mmc;
245 struct atmel_mci *host;
246
247 u32 sdc_reg;
248 u32 sdio_irq;
249
250 struct mmc_request *mrq;
251 struct list_head queue_node;
252
253 unsigned int clock;
254 unsigned long flags;
255#define ATMCI_CARD_PRESENT 0
256#define ATMCI_CARD_NEED_INIT 1
257#define ATMCI_SHUTDOWN 2
258
259 int detect_pin;
260 int wp_pin;
261 bool detect_is_active_high;
262
263 struct timer_list detect_timer;
264};
265
266#define atmci_test_and_clear_pending(host, event) \
267 test_and_clear_bit(event, &host->pending_events)
268#define atmci_set_completed(host, event) \
269 set_bit(event, &host->completed_events)
270#define atmci_set_pending(host, event) \
271 set_bit(event, &host->pending_events)
272
/*
 * The debugfs stuff below is mostly optimized away when
 * CONFIG_DEBUG_FS is not set.
 */
277static int atmci_req_show(struct seq_file *s, void *v)
278{
279 struct atmel_mci_slot *slot = s->private;
280 struct mmc_request *mrq;
281 struct mmc_command *cmd;
282 struct mmc_command *stop;
283 struct mmc_data *data;
284
	/* Make sure we get a consistent snapshot */
286 spin_lock_bh(&slot->host->lock);
287 mrq = slot->mrq;
288
289 if (mrq) {
290 cmd = mrq->cmd;
291 data = mrq->data;
292 stop = mrq->stop;
293
294 if (cmd)
295 seq_printf(s,
296 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
297 cmd->opcode, cmd->arg, cmd->flags,
298 cmd->resp[0], cmd->resp[1], cmd->resp[2],
299 cmd->resp[3], cmd->error);
300 if (data)
301 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
302 data->bytes_xfered, data->blocks,
303 data->blksz, data->flags, data->error);
304 if (stop)
305 seq_printf(s,
306 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
307 stop->opcode, stop->arg, stop->flags,
308 stop->resp[0], stop->resp[1], stop->resp[2],
309 stop->resp[3], stop->error);
310 }
311
312 spin_unlock_bh(&slot->host->lock);
313
314 return 0;
315}
316
317static int atmci_req_open(struct inode *inode, struct file *file)
318{
319 return single_open(file, atmci_req_show, inode->i_private);
320}
321
322static const struct file_operations atmci_req_fops = {
323 .owner = THIS_MODULE,
324 .open = atmci_req_open,
325 .read = seq_read,
326 .llseek = seq_lseek,
327 .release = single_release,
328};
329
330static void atmci_show_status_reg(struct seq_file *s,
331 const char *regname, u32 value)
332{
333 static const char *sr_bit[] = {
334 [0] = "CMDRDY",
335 [1] = "RXRDY",
336 [2] = "TXRDY",
337 [3] = "BLKE",
338 [4] = "DTIP",
339 [5] = "NOTBUSY",
340 [6] = "ENDRX",
341 [7] = "ENDTX",
342 [8] = "SDIOIRQA",
343 [9] = "SDIOIRQB",
344 [12] = "SDIOWAIT",
345 [14] = "RXBUFF",
346 [15] = "TXBUFE",
347 [16] = "RINDE",
348 [17] = "RDIRE",
349 [18] = "RCRCE",
350 [19] = "RENDE",
351 [20] = "RTOE",
352 [21] = "DCRCE",
353 [22] = "DTOE",
354 [23] = "CSTOE",
355 [24] = "BLKOVRE",
356 [25] = "DMADONE",
357 [26] = "FIFOEMPTY",
358 [27] = "XFRDONE",
359 [30] = "OVRE",
360 [31] = "UNRE",
361 };
362 unsigned int i;
363
364 seq_printf(s, "%s:\t0x%08x", regname, value);
365 for (i = 0; i < ARRAY_SIZE(sr_bit); i++) {
366 if (value & (1 << i)) {
367 if (sr_bit[i])
368 seq_printf(s, " %s", sr_bit[i]);
369 else
370 seq_puts(s, " UNKNOWN");
371 }
372 }
373 seq_putc(s, '\n');
374}
375
376static int atmci_regs_show(struct seq_file *s, void *v)
377{
378 struct atmel_mci *host = s->private;
379 u32 *buf;
380 int ret = 0;
381
382
383 buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL);
384 if (!buf)
385 return -ENOMEM;

	/*
	 * Grab a more or less consistent snapshot. Note that we're
	 * not disabling interrupts, so IMR and SR may not be
	 * consistent with each other on the same card.
	 */
392 ret = clk_prepare_enable(host->mck);
393 if (ret)
394 goto out;
395
396 spin_lock_bh(&host->lock);
397 memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
398 spin_unlock_bh(&host->lock);
399
400 clk_disable_unprepare(host->mck);
401
402 seq_printf(s, "MR:\t0x%08x%s%s ",
403 buf[ATMCI_MR / 4],
404 buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "",
405 buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : "");
406 if (host->caps.has_odd_clk_div)
407 seq_printf(s, "{CLKDIV,CLKODD}=%u\n",
408 ((buf[ATMCI_MR / 4] & 0xff) << 1)
409 | ((buf[ATMCI_MR / 4] >> 16) & 1));
410 else
411 seq_printf(s, "CLKDIV=%u\n",
412 (buf[ATMCI_MR / 4] & 0xff));
413 seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]);
414 seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]);
415 seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]);
416 seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n",
417 buf[ATMCI_BLKR / 4],
418 buf[ATMCI_BLKR / 4] & 0xffff,
419 (buf[ATMCI_BLKR / 4] >> 16) & 0xffff);
420 if (host->caps.has_cstor_reg)
421 seq_printf(s, "CSTOR:\t0x%08x\n", buf[ATMCI_CSTOR / 4]);

	/* Don't read RSPR and RDR; it will consume the data there */

425 atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]);
426 atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]);
427
428 if (host->caps.has_dma_conf_reg) {
429 u32 val;
430
431 val = buf[ATMCI_DMA / 4];
432 seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n",
433 val, val & 3,
434 ((val >> 4) & 3) ?
435 1 << (((val >> 4) & 3) + 1) : 1,
436 val & ATMCI_DMAEN ? " DMAEN" : "");
437 }
438 if (host->caps.has_cfg_reg) {
439 u32 val;
440
441 val = buf[ATMCI_CFG / 4];
442 seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",
443 val,
444 val & ATMCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "",
445 val & ATMCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "",
446 val & ATMCI_CFG_HSMODE ? " HSMODE" : "",
447 val & ATMCI_CFG_LSYNC ? " LSYNC" : "");
448 }
449
450out:
451 kfree(buf);
452
453 return ret;
454}
455
456static int atmci_regs_open(struct inode *inode, struct file *file)
457{
458 return single_open(file, atmci_regs_show, inode->i_private);
459}
460
461static const struct file_operations atmci_regs_fops = {
462 .owner = THIS_MODULE,
463 .open = atmci_regs_open,
464 .read = seq_read,
465 .llseek = seq_lseek,
466 .release = single_release,
467};
468
469static void atmci_init_debugfs(struct atmel_mci_slot *slot)
470{
471 struct mmc_host *mmc = slot->mmc;
472 struct atmel_mci *host = slot->host;
473 struct dentry *root;
474 struct dentry *node;
475
476 root = mmc->debugfs_root;
477 if (!root)
478 return;
479
480 node = debugfs_create_file("regs", S_IRUSR, root, host,
481 &atmci_regs_fops);
482 if (IS_ERR(node))
483 return;
484 if (!node)
485 goto err;
486
487 node = debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
488 if (!node)
489 goto err;
490
491 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
492 if (!node)
493 goto err;
494
495 node = debugfs_create_x32("pending_events", S_IRUSR, root,
496 (u32 *)&host->pending_events);
497 if (!node)
498 goto err;
499
500 node = debugfs_create_x32("completed_events", S_IRUSR, root,
501 (u32 *)&host->completed_events);
502 if (!node)
503 goto err;
504
505 return;
506
507err:
508 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
509}
510
511#if defined(CONFIG_OF)
512static const struct of_device_id atmci_dt_ids[] = {
513 { .compatible = "atmel,hsmci" },
514 { }
515};
516
517MODULE_DEVICE_TABLE(of, atmci_dt_ids);
518
519static struct mci_platform_data*
520atmci_of_init(struct platform_device *pdev)
521{
522 struct device_node *np = pdev->dev.of_node;
523 struct device_node *cnp;
524 struct mci_platform_data *pdata;
525 u32 slot_id;
526
527 if (!np) {
528 dev_err(&pdev->dev, "device node not found\n");
529 return ERR_PTR(-EINVAL);
530 }
531
532 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
533 if (!pdata) {
534 dev_err(&pdev->dev, "could not allocate memory for pdata\n");
535 return ERR_PTR(-ENOMEM);
536 }
537
538 for_each_child_of_node(np, cnp) {
539 if (of_property_read_u32(cnp, "reg", &slot_id)) {
540 dev_warn(&pdev->dev, "reg property is missing for %s\n",
541 cnp->full_name);
542 continue;
543 }
544
545 if (slot_id >= ATMCI_MAX_NR_SLOTS) {
546 dev_warn(&pdev->dev, "can't have more than %d slots\n",
547 ATMCI_MAX_NR_SLOTS);
548 break;
549 }
550
551 if (of_property_read_u32(cnp, "bus-width",
552 &pdata->slot[slot_id].bus_width))
553 pdata->slot[slot_id].bus_width = 1;
554
555 pdata->slot[slot_id].detect_pin =
556 of_get_named_gpio(cnp, "cd-gpios", 0);
557
558 pdata->slot[slot_id].detect_is_active_high =
559 of_property_read_bool(cnp, "cd-inverted");
560
561 pdata->slot[slot_id].wp_pin =
562 of_get_named_gpio(cnp, "wp-gpios", 0);
563 }
564
565 return pdata;
566}
567#else
568static inline struct mci_platform_data*
569atmci_of_init(struct platform_device *dev)
570{
571 return ERR_PTR(-EINVAL);
572}
573#endif
574
575static inline unsigned int atmci_get_version(struct atmel_mci *host)
576{
577 return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
578}
579
580static void atmci_timeout_timer(unsigned long data)
581{
582 struct atmel_mci *host;
583
584 host = (struct atmel_mci *)data;
585
586 dev_dbg(&host->pdev->dev, "software timeout\n");
587
588 if (host->mrq->cmd->data) {
589 host->mrq->cmd->data->error = -ETIMEDOUT;
590 host->data = NULL;
		/*
		 * With some SDIO modules, sometimes DMA transfer hangs. If
		 * stop_transfer() is not called then the DMA request is not
		 * removed, following ones are queued and never computed.
		 */
596 if (host->state == STATE_DATA_XFER)
597 host->stop_transfer(host);
598 } else {
599 host->mrq->cmd->error = -ETIMEDOUT;
600 host->cmd = NULL;
601 }
602 host->need_reset = 1;
603 host->state = STATE_END_REQUEST;
604 smp_wmb();
605 tasklet_schedule(&host->tasklet);
606}
607
608static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
609 unsigned int ns)
610{
	/*
	 * It is easier here to use us instead of ns for the timeout,
	 * it prevents from overflows during calculation.
	 */
615 unsigned int us = DIV_ROUND_UP(ns, 1000);

	/* Maximum clock frequency is host->bus_hz/2 */
618 return us * (DIV_ROUND_UP(host->bus_hz, 2000000));
619}
620
621static void atmci_set_timeout(struct atmel_mci *host,
622 struct atmel_mci_slot *slot, struct mmc_data *data)
623{
624 static unsigned dtomul_to_shift[] = {
625 0, 4, 7, 8, 10, 12, 16, 20
626 };
627 unsigned timeout;
628 unsigned dtocyc;
629 unsigned dtomul;
630
631 timeout = atmci_ns_to_clocks(host, data->timeout_ns)
632 + data->timeout_clks;
633
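	/*
	 * Find the smallest DTOMUL so that the timeout fits in the
	 * 4-bit DTOCYC field, i.e. timeout <= dtocyc << shift with
	 * dtocyc < 15. For example, a 250000-clock timeout picks
	 * dtomul = 6 (shift 16), dtocyc = DIV_ROUND_UP(250000, 65536) = 4.
	 */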
634 for (dtomul = 0; dtomul < 8; dtomul++) {
635 unsigned shift = dtomul_to_shift[dtomul];
636 dtocyc = (timeout + (1 << shift) - 1) >> shift;
637 if (dtocyc < 15)
638 break;
639 }
640
641 if (dtomul >= 8) {
642 dtomul = 7;
643 dtocyc = 15;
644 }
645
646 dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n",
647 dtocyc << dtomul_to_shift[dtomul]);
648 atmci_writel(host, ATMCI_DTOR, (ATMCI_DTOMUL(dtomul) | ATMCI_DTOCYC(dtocyc)));
649}
650
/*
 * Build the CMDR value (command index, response type, transfer flags)
 * for the given command.
 */
654static u32 atmci_prepare_command(struct mmc_host *mmc,
655 struct mmc_command *cmd)
656{
657 struct mmc_data *data;
658 u32 cmdr;
659
660 cmd->error = -EINPROGRESS;
661
662 cmdr = ATMCI_CMDR_CMDNB(cmd->opcode);
663
664 if (cmd->flags & MMC_RSP_PRESENT) {
665 if (cmd->flags & MMC_RSP_136)
666 cmdr |= ATMCI_CMDR_RSPTYP_136BIT;
667 else
668 cmdr |= ATMCI_CMDR_RSPTYP_48BIT;
669 }
670
	/*
	 * This should really be MAXLAT_5 for CMD2 and ACMD41, but
	 * it's too difficult to determine whether this is an ACMD41
	 * or just a plain MMC command without introducing terrible
	 * layering violations like detecting SD versions.
	 */
676 cmdr |= ATMCI_CMDR_MAXLAT_64CYC;
677
678 if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN)
679 cmdr |= ATMCI_CMDR_OPDCMD;
680
681 data = cmd->data;
682 if (data) {
683 cmdr |= ATMCI_CMDR_START_XFER;
684
685 if (cmd->opcode == SD_IO_RW_EXTENDED) {
686 cmdr |= ATMCI_CMDR_SDIO_BLOCK;
687 } else {
688 if (data->flags & MMC_DATA_STREAM)
689 cmdr |= ATMCI_CMDR_STREAM;
690 else if (data->blocks > 1)
691 cmdr |= ATMCI_CMDR_MULTI_BLOCK;
692 else
693 cmdr |= ATMCI_CMDR_BLOCK;
694 }
695
696 if (data->flags & MMC_DATA_READ)
697 cmdr |= ATMCI_CMDR_TRDIR_READ;
698 }
699
700 return cmdr;
701}
702
703static void atmci_send_command(struct atmel_mci *host,
704 struct mmc_command *cmd, u32 cmd_flags)
705{
706 WARN_ON(host->cmd);
707 host->cmd = cmd;
708
709 dev_vdbg(&host->pdev->dev,
710 "start command: ARGR=0x%08x CMDR=0x%08x\n",
711 cmd->arg, cmd_flags);
712
713 atmci_writel(host, ATMCI_ARGR, cmd->arg);
714 atmci_writel(host, ATMCI_CMDR, cmd_flags);
715}
716
717static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
718{
719 dev_dbg(&host->pdev->dev, "send stop command\n");
720 atmci_send_command(host, data->stop, host->stop_cmdr);
721 atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
722}
723
/*
 * Configure given PDC buffer taking care of alignment issues.
 * Update host->data_size and host->sg.
 */
728static void atmci_pdc_set_single_buf(struct atmel_mci *host,
729 enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
730{
731 u32 pointer_reg, counter_reg;
732 unsigned int buf_size;
733
734 if (dir == XFER_RECEIVE) {
735 pointer_reg = ATMEL_PDC_RPR;
736 counter_reg = ATMEL_PDC_RCR;
737 } else {
738 pointer_reg = ATMEL_PDC_TPR;
739 counter_reg = ATMEL_PDC_TCR;
740 }
741
742 if (buf_nb == PDC_SECOND_BUF) {
743 pointer_reg += ATMEL_PDC_SCND_BUF_OFF;
744 counter_reg += ATMEL_PDC_SCND_BUF_OFF;
745 }
746
747 if (!host->caps.has_rwproof) {
748 buf_size = host->buf_size;
749 atmci_writel(host, pointer_reg, host->buf_phys_addr);
750 } else {
751 buf_size = sg_dma_len(host->sg);
752 atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
753 }
754
755 if (host->data_size <= buf_size) {
756 if (host->data_size & 0x3) {
757
758 atmci_writel(host, counter_reg, host->data_size);
759 atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE);
760 } else {
761
762 atmci_writel(host, counter_reg, host->data_size / 4);
763 }
764 host->data_size = 0;
765 } else {
766
767 atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4);
768 host->data_size -= sg_dma_len(host->sg);
769 if (host->data_size)
770 host->sg = sg_next(host->sg);
771 }
772}
773
/*
 * Configure PDC buffers according to the data size, i.e. configuring one or
 * two buffers. Don't use this function if you want to configure only the
 * second buffer. In this case, use atmci_pdc_set_single_buf.
 */
779static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir)
780{
781 atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF);
782 if (host->data_size)
783 atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF);
784}
785
/*
 * Unmap sg lists, called when transfer is finished.
 */
789static void atmci_pdc_cleanup(struct atmel_mci *host)
790{
791 struct mmc_data *data = host->data;
792
793 if (data)
794 dma_unmap_sg(&host->pdev->dev,
795 data->sg, data->sg_len,
796 ((data->flags & MMC_DATA_WRITE)
797 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
798}
799
/*
 * Called from the interrupt handler once the last PDC buffer has been
 * transferred: disable PDC, copy data back from the bounce buffer if it
 * was used, unmap the scatterlist and flag EVENT_XFER_COMPLETE so the
 * tasklet can advance the state machine.
 */
805static void atmci_pdc_complete(struct atmel_mci *host)
806{
807 int transfer_size = host->data->blocks * host->data->blksz;
808 int i;
809
810 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
811
812 if ((!host->caps.has_rwproof)
813 && (host->data->flags & MMC_DATA_READ)) {
814 if (host->caps.has_bad_data_ordering)
815 for (i = 0; i < transfer_size; i++)
816 host->buffer[i] = swab32(host->buffer[i]);
817 sg_copy_from_buffer(host->data->sg, host->data->sg_len,
818 host->buffer, transfer_size);
819 }
820
821 atmci_pdc_cleanup(host);
822
	/*
	 * If the card was removed, data will be NULL. No point trying
	 * to send the stop command or waiting for NBUSY in this case.
	 */
827 if (host->data) {
828 dev_dbg(&host->pdev->dev,
829 "(%s) set pending xfer complete\n", __func__);
830 atmci_set_pending(host, EVENT_XFER_COMPLETE);
831 tasklet_schedule(&host->tasklet);
832 }
833}
834
835static void atmci_dma_cleanup(struct atmel_mci *host)
836{
837 struct mmc_data *data = host->data;
838
839 if (data)
840 dma_unmap_sg(host->dma.chan->device->dev,
841 data->sg, data->sg_len,
842 ((data->flags & MMC_DATA_WRITE)
843 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
844}
845
/*
 * This function is called by the DMA driver from tasklet context.
 */
849static void atmci_dma_complete(void *arg)
850{
851 struct atmel_mci *host = arg;
852 struct mmc_data *data = host->data;
853
854 dev_vdbg(&host->pdev->dev, "DMA complete\n");
855
856 if (host->caps.has_dma_conf_reg)
		/* Disable DMA hardware handshaking on MCI */
858 atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN);
859
860 atmci_dma_cleanup(host);
861
	/*
	 * If the card was removed, data will be NULL. No point trying
	 * to send the stop command or waiting for NBUSY in this case.
	 */
866 if (data) {
867 dev_dbg(&host->pdev->dev,
868 "(%s) set pending xfer complete\n", __func__);
869 atmci_set_pending(host, EVENT_XFER_COMPLETE);
870 tasklet_schedule(&host->tasklet);

		/*
		 * Regardless of what the documentation says, we have
		 * to wait for NOTBUSY even after block read
		 * operations.
		 *
		 * When the DMA transfer is complete, the controller
		 * may still be reading the CRC from the card, i.e.
		 * the data transfer is still in progress and we
		 * haven't seen all the potential error bits yet.
		 *
		 * The interrupt handler will schedule a different
		 * tasklet to finish things up when the data transfer
		 * is completely done.
		 *
		 * We may not complete the mmc request here anyway
		 * because the mmc layer may call back and cause us to
		 * violate the "don't submit new operations when the
		 * queue is busy" rule.
		 */
892 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
893 }
894}
895
/*
 * Returns a mask of interrupt flags to be enabled after the whole
 * request has been prepared.
 */
900static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
901{
902 u32 iflags;
903
904 data->error = -EINPROGRESS;
905
906 host->sg = data->sg;
907 host->sg_len = data->sg_len;
908 host->data = data;
909 host->data_chan = NULL;
910
911 iflags = ATMCI_DATA_ERROR_FLAGS;

	/*
	 * Errata: MMC data write operation with less than 12
	 * bytes is impossible.
	 *
	 * Errata: MCI Transmit Data Register (TDR) FIFO
	 * corruption when length is not multiple of 4.
	 */
920 if (data->blocks * data->blksz < 12
921 || (data->blocks * data->blksz) & 3)
922 host->need_reset = true;
923
924 host->pio_offset = 0;
925 if (data->flags & MMC_DATA_READ)
926 iflags |= ATMCI_RXRDY;
927 else
928 iflags |= ATMCI_TXRDY;
929
930 return iflags;
931}
932
/*
 * Set up a PDC-driven transfer: enable PDC mode, program the block
 * length, map the scatterlist and, when the controller lacks the
 * r/w proof feature, bounce write data through host->buffer.
 * Returns the interrupt flags to enable for this transfer.
 */
939static u32
940atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
941{
942 u32 iflags, tmp;
943 unsigned int sg_len;
944 enum dma_data_direction dir;
945 int i;
946
947 data->error = -EINPROGRESS;
948
949 host->data = data;
950 host->sg = data->sg;
951 iflags = ATMCI_DATA_ERROR_FLAGS;
952
953
954 atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE);
955
956 if (data->flags & MMC_DATA_READ) {
957 dir = DMA_FROM_DEVICE;
958 iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
959 } else {
960 dir = DMA_TO_DEVICE;
961 iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
962 }
963
964
965 tmp = atmci_readl(host, ATMCI_MR);
966 tmp &= 0x0000ffff;
967 tmp |= ATMCI_BLKLEN(data->blksz);
968 atmci_writel(host, ATMCI_MR, tmp);
969
970
971 host->data_size = data->blocks * data->blksz;
972 sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir);
973
974 if ((!host->caps.has_rwproof)
975 && (host->data->flags & MMC_DATA_WRITE)) {
976 sg_copy_to_buffer(host->data->sg, host->data->sg_len,
977 host->buffer, host->data_size);
978 if (host->caps.has_bad_data_ordering)
979 for (i = 0; i < host->data_size; i++)
980 host->buffer[i] = swab32(host->buffer[i]);
981 }
982
983 if (host->data_size)
984 atmci_pdc_set_both_buf(host,
985 ((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT));
986
987 return iflags;
988}
989
990static u32
991atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
992{
993 struct dma_chan *chan;
994 struct dma_async_tx_descriptor *desc;
995 struct scatterlist *sg;
996 unsigned int i;
997 enum dma_data_direction direction;
998 enum dma_transfer_direction slave_dirn;
999 unsigned int sglen;
1000 u32 maxburst;
1001 u32 iflags;
1002
1003 data->error = -EINPROGRESS;
1004
1005 WARN_ON(host->data);
1006 host->sg = NULL;
1007 host->data = data;
1008
1009 iflags = ATMCI_DATA_ERROR_FLAGS;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
1016 if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
1017 return atmci_prepare_data(host, data);
1018 if (data->blksz & 3)
1019 return atmci_prepare_data(host, data);
1020
1021 for_each_sg(data->sg, sg, data->sg_len, i) {
1022 if (sg->offset & 3 || sg->length & 3)
1023 return atmci_prepare_data(host, data);
1024 }
1025
	/* If we don't have a channel, we can't do DMA */
1027 chan = host->dma.chan;
1028 if (chan)
1029 host->data_chan = chan;
1030
1031 if (!chan)
1032 return -ENODEV;
1033
1034 if (data->flags & MMC_DATA_READ) {
1035 direction = DMA_FROM_DEVICE;
1036 host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
1037 maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst);
1038 } else {
1039 direction = DMA_TO_DEVICE;
1040 host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
1041 maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst);
1042 }
1043
1044 if (host->caps.has_dma_conf_reg)
1045 atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) |
1046 ATMCI_DMAEN);
1047
1048 sglen = dma_map_sg(chan->device->dev, data->sg,
1049 data->sg_len, direction);
1050
1051 dmaengine_slave_config(chan, &host->dma_conf);
1052 desc = dmaengine_prep_slave_sg(chan,
1053 data->sg, sglen, slave_dirn,
1054 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1055 if (!desc)
1056 goto unmap_exit;
1057
1058 host->dma.data_desc = desc;
1059 desc->callback = atmci_dma_complete;
1060 desc->callback_param = host;
1061
1062 return iflags;
1063unmap_exit:
1064 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);
1065 return -ENOMEM;
1066}
1067
1068static void
1069atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
1070{
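	/*
	 * PIO transfers are driven entirely from the RXRDY/TXRDY
	 * interrupt handlers, so there is nothing to start here.
	 */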
1071 return;
1072}
1073
/*
 * Start PDC according to transfer direction.
 */
1077static void
1078atmci_submit_data_pdc(struct atmel_mci *host, struct mmc_data *data)
1079{
1080 if (data->flags & MMC_DATA_READ)
1081 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
1082 else
1083 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
1084}
1085
1086static void
1087atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
1088{
1089 struct dma_chan *chan = host->data_chan;
1090 struct dma_async_tx_descriptor *desc = host->dma.data_desc;
1091
1092 if (chan) {
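		/* Descriptor was set up in atmci_prepare_data_dma() */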
1093 dmaengine_submit(desc);
1094 dma_async_issue_pending(chan);
1095 }
1096}
1097
1098static void atmci_stop_transfer(struct atmel_mci *host)
1099{
1100 dev_dbg(&host->pdev->dev,
1101 "(%s) set pending xfer complete\n", __func__);
1102 atmci_set_pending(host, EVENT_XFER_COMPLETE);
1103 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1104}
1105
/*
 * Stop data transfer because error(s) occurred.
 */
1109static void atmci_stop_transfer_pdc(struct atmel_mci *host)
1110{
1111 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
1112}
1113
1114static void atmci_stop_transfer_dma(struct atmel_mci *host)
1115{
1116 struct dma_chan *chan = host->data_chan;
1117
1118 if (chan) {
1119 dmaengine_terminate_all(chan);
1120 atmci_dma_cleanup(host);
1121 } else {
		/* Data transfer was stopped by the interrupt handler */
1123 dev_dbg(&host->pdev->dev,
1124 "(%s) set pending xfer complete\n", __func__);
1125 atmci_set_pending(host, EVENT_XFER_COMPLETE);
1126 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1127 }
1128}
1129
/*
 * Start a request: reset the controller if needed, configure the
 * slot, prepare the data transfer (if any) and send the command.
 */
1134static void atmci_start_request(struct atmel_mci *host,
1135 struct atmel_mci_slot *slot)
1136{
1137 struct mmc_request *mrq;
1138 struct mmc_command *cmd;
1139 struct mmc_data *data;
1140 u32 iflags;
1141 u32 cmdflags;
1142
1143 mrq = slot->mrq;
1144 host->cur_slot = slot;
1145 host->mrq = mrq;
1146
1147 host->pending_events = 0;
1148 host->completed_events = 0;
1149 host->cmd_status = 0;
1150 host->data_status = 0;
1151
1152 dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode);
1153
1154 if (host->need_reset || host->caps.need_reset_after_xfer) {
1155 iflags = atmci_readl(host, ATMCI_IMR);
1156 iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
1157 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1158 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1159 atmci_writel(host, ATMCI_MR, host->mode_reg);
1160 if (host->caps.has_cfg_reg)
1161 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1162 atmci_writel(host, ATMCI_IER, iflags);
1163 host->need_reset = false;
1164 }
1165 atmci_writel(host, ATMCI_SDCR, slot->sdc_reg);
1166
1167 iflags = atmci_readl(host, ATMCI_IMR);
1168 if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
1169 dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
1170 iflags);
1171
1172 if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
		/* Send init sequence (74 clock cycles) */
1174 atmci_writel(host, ATMCI_CMDR, ATMCI_CMDR_SPCMD_INIT);
1175 while (!(atmci_readl(host, ATMCI_SR) & ATMCI_CMDRDY))
1176 cpu_relax();
1177 }
1178 iflags = 0;
1179 data = mrq->data;
1180 if (data) {
1181 atmci_set_timeout(host, slot, data);
1182
		/* Must set block count/size before sending command */
1184 atmci_writel(host, ATMCI_BLKR, ATMCI_BCNT(data->blocks)
1185 | ATMCI_BLKLEN(data->blksz));
1186 dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
1187 ATMCI_BCNT(data->blocks) | ATMCI_BLKLEN(data->blksz));
1188
1189 iflags |= host->prepare_data(host, data);
1190 }
1191
1192 iflags |= ATMCI_CMDRDY;
1193 cmd = mrq->cmd;
1194 cmdflags = atmci_prepare_command(slot->mmc, cmd);

	/*
	 * DMA transfer should be started before sending the command to avoid
	 * unexpected errors especially for read operations in SDIO mode.
	 * Unfortunately, in PDC mode, command has to be sent before starting
	 * the transfer.
	 */
1202 if (host->submit_data != &atmci_submit_data_dma)
1203 atmci_send_command(host, cmd, cmdflags);
1204
1205 if (data)
1206 host->submit_data(host, data);
1207
1208 if (host->submit_data == &atmci_submit_data_dma)
1209 atmci_send_command(host, cmd, cmdflags);
1210
1211 if (mrq->stop) {
1212 host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
1213 host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
1214 if (!(data->flags & MMC_DATA_WRITE))
1215 host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ;
1216 if (data->flags & MMC_DATA_STREAM)
1217 host->stop_cmdr |= ATMCI_CMDR_STREAM;
1218 else
1219 host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
1220 }
1221
1222
1223
1224
1225
1226
1227
1228 atmci_writel(host, ATMCI_IER, iflags);
1229
1230 mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000));
1231}
1232
1233static void atmci_queue_request(struct atmel_mci *host,
1234 struct atmel_mci_slot *slot, struct mmc_request *mrq)
1235{
1236 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1237 host->state);
1238
1239 spin_lock_bh(&host->lock);
1240 slot->mrq = mrq;
1241 if (host->state == STATE_IDLE) {
1242 host->state = STATE_SENDING_CMD;
1243 atmci_start_request(host, slot);
1244 } else {
1245 dev_dbg(&host->pdev->dev, "queue request\n");
1246 list_add_tail(&slot->queue_node, &host->queue);
1247 }
1248 spin_unlock_bh(&host->lock);
1249}
1250
1251static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1252{
1253 struct atmel_mci_slot *slot = mmc_priv(mmc);
1254 struct atmel_mci *host = slot->host;
1255 struct mmc_data *data;
1256
1257 WARN_ON(slot->mrq);
1258 dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
1259
	/*
	 * We may "know" the card is gone even though there's still an
	 * electrical connection. If so, we really need to communicate
	 * this to the MMC core since there won't be any more
	 * interrupts as the card is completely removed. Otherwise,
	 * the MMC core might believe the card is still there even
	 * though the card was just removed very slowly.
	 */
1268 if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) {
1269 mrq->cmd->error = -ENOMEDIUM;
1270 mmc_request_done(mmc, mrq);
1271 return;
1272 }
1273
1274
1275 data = mrq->data;
	if (data && data->blocks > 1 && data->blksz & 3) {
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}
1280
1281 atmci_queue_request(host, slot, mrq);
1282}
1283
1284static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1285{
1286 struct atmel_mci_slot *slot = mmc_priv(mmc);
1287 struct atmel_mci *host = slot->host;
1288 unsigned int i;
1289 bool unprepare_clk;
1290
1291 slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
1292 switch (ios->bus_width) {
1293 case MMC_BUS_WIDTH_1:
1294 slot->sdc_reg |= ATMCI_SDCBUS_1BIT;
1295 break;
1296 case MMC_BUS_WIDTH_4:
1297 slot->sdc_reg |= ATMCI_SDCBUS_4BIT;
1298 break;
1299 }
1300
1301 if (ios->clock) {
1302 unsigned int clock_min = ~0U;
1303 u32 clkdiv;
1304
1305 clk_prepare(host->mck);
1306 unprepare_clk = true;
1307
1308 spin_lock_bh(&host->lock);
1309 if (!host->mode_reg) {
1310 clk_enable(host->mck);
1311 unprepare_clk = false;
1312 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1313 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1314 if (host->caps.has_cfg_reg)
1315 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1316 }
1317
		/*
		 * Use a mirror of ios->clock to prevent races with the
		 * mmc core ios update when finding the minimum slot
		 * clock value.
		 */
1322 slot->clock = ios->clock;
1323 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1324 if (host->slot[i] && host->slot[i]->clock
1325 && host->slot[i]->clock < clock_min)
1326 clock_min = host->slot[i]->clock;
1327 }
1328
1329
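		/*
		 * Calculate clock divider: without the odd-divider feature
		 * the MCI clock is bus_hz / (2 * (CLKDIV + 1)); with it the
		 * effective divider is clkdiv + 2, CLKODD holding the LSB.
		 * For example, bus_hz = 132 MHz and clock_min = 25 MHz give
		 * clkdiv = DIV_ROUND_UP(132000000, 2 * 25000000) - 1 = 2,
		 * i.e. an actual MCI clock of 132 MHz / 6 = 22 MHz.
		 */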
1330 if (host->caps.has_odd_clk_div) {
1331 clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
1332 if (clkdiv > 511) {
1333 dev_warn(&mmc->class_dev,
1334 "clock %u too slow; using %lu\n",
1335 clock_min, host->bus_hz / (511 + 2));
1336 clkdiv = 511;
1337 }
1338 host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1)
1339 | ATMCI_MR_CLKODD(clkdiv & 1);
1340 } else {
1341 clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
1342 if (clkdiv > 255) {
1343 dev_warn(&mmc->class_dev,
1344 "clock %u too slow; using %lu\n",
1345 clock_min, host->bus_hz / (2 * 256));
1346 clkdiv = 255;
1347 }
1348 host->mode_reg = ATMCI_MR_CLKDIV(clkdiv);
1349 }

		/*
		 * WRPROOF and RDPROOF prevent overruns and underruns by
		 * stopping the clock when the FIFO is full/empty. This state
		 * is not expected to last for long.
		 */
1356 if (host->caps.has_rwproof)
1357 host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF);
1358
1359 if (host->caps.has_cfg_reg) {
1360
1361 if (ios->timing == MMC_TIMING_SD_HS)
1362 host->cfg_reg |= ATMCI_CFG_HSMODE;
1363 else
1364 host->cfg_reg &= ~ATMCI_CFG_HSMODE;
1365 }
1366
1367 if (list_empty(&host->queue)) {
1368 atmci_writel(host, ATMCI_MR, host->mode_reg);
1369 if (host->caps.has_cfg_reg)
1370 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1371 } else {
1372 host->need_clock_update = true;
1373 }
1374
1375 spin_unlock_bh(&host->lock);
1376 } else {
1377 bool any_slot_active = false;
1378
1379 unprepare_clk = false;
1380
1381 spin_lock_bh(&host->lock);
1382 slot->clock = 0;
1383 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1384 if (host->slot[i] && host->slot[i]->clock) {
1385 any_slot_active = true;
1386 break;
1387 }
1388 }
1389 if (!any_slot_active) {
1390 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
1391 if (host->mode_reg) {
1392 atmci_readl(host, ATMCI_MR);
1393 clk_disable(host->mck);
1394 unprepare_clk = true;
1395 }
1396 host->mode_reg = 0;
1397 }
1398 spin_unlock_bh(&host->lock);
1399 }
1400
1401 if (unprepare_clk)
1402 clk_unprepare(host->mck);
1403
1404 switch (ios->power_mode) {
1405 case MMC_POWER_OFF:
1406 if (!IS_ERR(mmc->supply.vmmc))
1407 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1408 break;
1409 case MMC_POWER_UP:
1410 set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
1411 if (!IS_ERR(mmc->supply.vmmc))
1412 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
1413 break;
1414 default:
		/*
		 * TODO: None of the currently available AVR32-based
		 * boards allow MMC power to be turned off. Implement
		 * power control when this can be tested properly.
		 *
		 * We also need to hook this into the clock management
		 * somehow so that newly inserted cards aren't
		 * defeated by a half-initialized controller.
		 */
1427 break;
1428 }
1429}
1430
1431static int atmci_get_ro(struct mmc_host *mmc)
1432{
1433 int read_only = -ENOSYS;
1434 struct atmel_mci_slot *slot = mmc_priv(mmc);
1435
1436 if (gpio_is_valid(slot->wp_pin)) {
1437 read_only = gpio_get_value(slot->wp_pin);
1438 dev_dbg(&mmc->class_dev, "card is %s\n",
1439 read_only ? "read-only" : "read-write");
1440 }
1441
1442 return read_only;
1443}
1444
1445static int atmci_get_cd(struct mmc_host *mmc)
1446{
1447 int present = -ENOSYS;
1448 struct atmel_mci_slot *slot = mmc_priv(mmc);
1449
1450 if (gpio_is_valid(slot->detect_pin)) {
1451 present = !(gpio_get_value(slot->detect_pin) ^
1452 slot->detect_is_active_high);
1453 dev_dbg(&mmc->class_dev, "card is %spresent\n",
1454 present ? "" : "not ");
1455 }
1456
1457 return present;
1458}
1459
1460static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1461{
1462 struct atmel_mci_slot *slot = mmc_priv(mmc);
1463 struct atmel_mci *host = slot->host;
1464
1465 if (enable)
1466 atmci_writel(host, ATMCI_IER, slot->sdio_irq);
1467 else
1468 atmci_writel(host, ATMCI_IDR, slot->sdio_irq);
1469}
1470
1471static const struct mmc_host_ops atmci_ops = {
1472 .request = atmci_request,
1473 .set_ios = atmci_set_ios,
1474 .get_ro = atmci_get_ro,
1475 .get_cd = atmci_get_cd,
1476 .enable_sdio_irq = atmci_enable_sdio_irq,
1477};
1478
1479
1480static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
1481 __releases(&host->lock)
1482 __acquires(&host->lock)
1483{
1484 struct atmel_mci_slot *slot = NULL;
1485 struct mmc_host *prev_mmc = host->cur_slot->mmc;
1486
1487 WARN_ON(host->cmd || host->data);
1488
	/*
	 * Update the MMC clock rate if necessary. This may be
	 * necessary if set_ios() is called when a different slot is
	 * busy transferring data.
	 */
1494 if (host->need_clock_update) {
1495 atmci_writel(host, ATMCI_MR, host->mode_reg);
1496 if (host->caps.has_cfg_reg)
1497 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1498 }
1499
1500 host->cur_slot->mrq = NULL;
1501 host->mrq = NULL;
1502 if (!list_empty(&host->queue)) {
1503 slot = list_entry(host->queue.next,
1504 struct atmel_mci_slot, queue_node);
1505 list_del(&slot->queue_node);
1506 dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
1507 mmc_hostname(slot->mmc));
1508 host->state = STATE_SENDING_CMD;
1509 atmci_start_request(host, slot);
1510 } else {
1511 dev_vdbg(&host->pdev->dev, "list empty\n");
1512 host->state = STATE_IDLE;
1513 }
1514
1515 del_timer(&host->timer);
1516
1517 spin_unlock(&host->lock);
1518 mmc_request_done(prev_mmc, mrq);
1519 spin_lock(&host->lock);
1520}
1521
1522static void atmci_command_complete(struct atmel_mci *host,
1523 struct mmc_command *cmd)
1524{
1525 u32 status = host->cmd_status;
1526
	/* Read the response from the card (up to 16 bytes) */
1528 cmd->resp[0] = atmci_readl(host, ATMCI_RSPR);
1529 cmd->resp[1] = atmci_readl(host, ATMCI_RSPR);
1530 cmd->resp[2] = atmci_readl(host, ATMCI_RSPR);
1531 cmd->resp[3] = atmci_readl(host, ATMCI_RSPR);
1532
1533 if (status & ATMCI_RTOE)
1534 cmd->error = -ETIMEDOUT;
1535 else if ((cmd->flags & MMC_RSP_CRC) && (status & ATMCI_RCRCE))
1536 cmd->error = -EILSEQ;
1537 else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
1538 cmd->error = -EIO;
1539 else if (host->mrq->data && (host->mrq->data->blksz & 3)) {
1540 if (host->caps.need_blksz_mul_4) {
1541 cmd->error = -EINVAL;
1542 host->need_reset = 1;
1543 }
1544 } else
1545 cmd->error = 0;
1546}
1547
1548static void atmci_detect_change(unsigned long data)
1549{
1550 struct atmel_mci_slot *slot = (struct atmel_mci_slot *)data;
1551 bool present;
1552 bool present_old;
1553
	/*
	 * atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before
	 * freeing the interrupt. We must not re-enable the interrupt
	 * if it has been freed, and if we're shutting down, it
	 * doesn't really matter whether the card is present or not.
	 */
1560 smp_rmb();
1561 if (test_bit(ATMCI_SHUTDOWN, &slot->flags))
1562 return;
1563
1564 enable_irq(gpio_to_irq(slot->detect_pin));
1565 present = !(gpio_get_value(slot->detect_pin) ^
1566 slot->detect_is_active_high);
1567 present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);
1568
1569 dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
1570 present, present_old);
1571
1572 if (present != present_old) {
1573 struct atmel_mci *host = slot->host;
1574 struct mmc_request *mrq;
1575
1576 dev_dbg(&slot->mmc->class_dev, "card %s\n",
1577 present ? "inserted" : "removed");
1578
1579 spin_lock(&host->lock);
1580
1581 if (!present)
1582 clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
1583 else
1584 set_bit(ATMCI_CARD_PRESENT, &slot->flags);
1585
		/* Clean up queue if present */
1587 mrq = slot->mrq;
1588 if (mrq) {
1589 if (mrq == host->mrq) {
				/*
				 * Reset controller to terminate any ongoing
				 * commands or data transfers.
				 */
1594 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1595 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1596 atmci_writel(host, ATMCI_MR, host->mode_reg);
1597 if (host->caps.has_cfg_reg)
1598 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1599
1600 host->data = NULL;
1601 host->cmd = NULL;
1602
1603 switch (host->state) {
1604 case STATE_IDLE:
1605 break;
1606 case STATE_SENDING_CMD:
1607 mrq->cmd->error = -ENOMEDIUM;
1608 if (mrq->data)
1609 host->stop_transfer(host);
1610 break;
1611 case STATE_DATA_XFER:
1612 mrq->data->error = -ENOMEDIUM;
1613 host->stop_transfer(host);
1614 break;
1615 case STATE_WAITING_NOTBUSY:
1616 mrq->data->error = -ENOMEDIUM;
1617 break;
1618 case STATE_SENDING_STOP:
1619 mrq->stop->error = -ENOMEDIUM;
1620 break;
1621 case STATE_END_REQUEST:
1622 break;
1623 }
1624
1625 atmci_request_end(host, mrq);
1626 } else {
1627 list_del(&slot->queue_node);
1628 mrq->cmd->error = -ENOMEDIUM;
1629 if (mrq->data)
1630 mrq->data->error = -ENOMEDIUM;
1631 if (mrq->stop)
1632 mrq->stop->error = -ENOMEDIUM;
1633
1634 spin_unlock(&host->lock);
1635 mmc_request_done(slot->mmc, mrq);
1636 spin_lock(&host->lock);
1637 }
1638 }
1639 spin_unlock(&host->lock);
1640
1641 mmc_detect_change(slot->mmc, 0);
1642 }
1643}
1644
1645static void atmci_tasklet_func(unsigned long priv)
1646{
1647 struct atmel_mci *host = (struct atmel_mci *)priv;
1648 struct mmc_request *mrq = host->mrq;
1649 struct mmc_data *data = host->data;
1650 enum atmel_mci_state state = host->state;
1651 enum atmel_mci_state prev_state;
1652 u32 status;
1653
1654 spin_lock(&host->lock);
1655
1656 state = host->state;
1657
1658 dev_vdbg(&host->pdev->dev,
1659 "tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
1660 state, host->pending_events, host->completed_events,
1661 atmci_readl(host, ATMCI_IMR));
1662
1663 do {
1664 prev_state = state;
1665 dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state);
1666
1667 switch (state) {
1668 case STATE_IDLE:
1669 break;
1670
1671 case STATE_SENDING_CMD:
			/*
			 * Command has been sent, we are waiting for command
			 * ready. Then we have three next states possible:
			 * END_REQUEST by default, WAITING_NOTBUSY if it's a
			 * command needing it or DATA_XFER if there is data.
			 */
1678 dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
1679 if (!atmci_test_and_clear_pending(host,
1680 EVENT_CMD_RDY))
1681 break;
1682
1683 dev_dbg(&host->pdev->dev, "set completed cmd ready\n");
1684 host->cmd = NULL;
1685 atmci_set_completed(host, EVENT_CMD_RDY);
1686 atmci_command_complete(host, mrq->cmd);
1687 if (mrq->data) {
1688 dev_dbg(&host->pdev->dev,
1689 "command with data transfer");
				/*
				 * If there is a command error don't start
				 * data transfer.
				 */
1694 if (mrq->cmd->error) {
1695 host->stop_transfer(host);
1696 host->data = NULL;
1697 atmci_writel(host, ATMCI_IDR,
1698 ATMCI_TXRDY | ATMCI_RXRDY
1699 | ATMCI_DATA_ERROR_FLAGS);
1700 state = STATE_END_REQUEST;
1701 } else
1702 state = STATE_DATA_XFER;
1703 } else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
1704 dev_dbg(&host->pdev->dev,
1705 "command response need waiting notbusy");
1706 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1707 state = STATE_WAITING_NOTBUSY;
1708 } else
1709 state = STATE_END_REQUEST;
1710
1711 break;
1712
1713 case STATE_DATA_XFER:
1714 if (atmci_test_and_clear_pending(host,
1715 EVENT_DATA_ERROR)) {
1716 dev_dbg(&host->pdev->dev, "set completed data error\n");
1717 atmci_set_completed(host, EVENT_DATA_ERROR);
1718 state = STATE_END_REQUEST;
1719 break;
1720 }

			/*
			 * A data transfer is in progress. The event expected
			 * to move to the next state depends on the data
			 * transfer mode (PDC or DMA). Once the transfer is
			 * done we can move to WAITING_NOTBUSY in the write
			 * case, or directly to SENDING_STOP in the read case.
			 */
1729 dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");
1730 if (!atmci_test_and_clear_pending(host,
1731 EVENT_XFER_COMPLETE))
1732 break;
1733
1734 dev_dbg(&host->pdev->dev,
1735 "(%s) set completed xfer complete\n",
1736 __func__);
1737 atmci_set_completed(host, EVENT_XFER_COMPLETE);
1738
1739 if (host->caps.need_notbusy_for_read_ops ||
1740 (host->data->flags & MMC_DATA_WRITE)) {
1741 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1742 state = STATE_WAITING_NOTBUSY;
1743 } else if (host->mrq->stop) {
1744 atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
1745 atmci_send_stop_cmd(host, data);
1746 state = STATE_SENDING_STOP;
1747 } else {
1748 host->data = NULL;
1749 data->bytes_xfered = data->blocks * data->blksz;
1750 data->error = 0;
1751 state = STATE_END_REQUEST;
1752 }
1753 break;
1754
1755 case STATE_WAITING_NOTBUSY:
			/*
			 * We can be in this state for two reasons: a command
			 * requiring waiting for the not busy signal (stop
			 * command included) or a write operation. In the
			 * latter case, we need to send a stop command.
			 */
1762 dev_dbg(&host->pdev->dev, "FSM: not busy?\n");
1763 if (!atmci_test_and_clear_pending(host,
1764 EVENT_NOTBUSY))
1765 break;
1766
1767 dev_dbg(&host->pdev->dev, "set completed not busy\n");
1768 atmci_set_completed(host, EVENT_NOTBUSY);
1769
1770 if (host->data) {
				/*
				 * For some commands such as CMD53, even if
				 * there is data transfer, there is no stop
				 * command to send.
				 */
1776 if (host->mrq->stop) {
1777 atmci_writel(host, ATMCI_IER,
1778 ATMCI_CMDRDY);
1779 atmci_send_stop_cmd(host, data);
1780 state = STATE_SENDING_STOP;
1781 } else {
1782 host->data = NULL;
1783 data->bytes_xfered = data->blocks
1784 * data->blksz;
1785 data->error = 0;
1786 state = STATE_END_REQUEST;
1787 }
1788 } else
1789 state = STATE_END_REQUEST;
1790 break;
1791
1792 case STATE_SENDING_STOP:
			/*
			 * In this state, it is important to set host->data to
			 * NULL (which is tested in the waiting notbusy irq)
			 * in order to prevent from sending the stop command
			 * twice.
			 */
1799 dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
1800 if (!atmci_test_and_clear_pending(host,
1801 EVENT_CMD_RDY))
1802 break;
1803
1804 dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
1805 host->cmd = NULL;
1806 data->bytes_xfered = data->blocks * data->blksz;
1807 data->error = 0;
1808 atmci_command_complete(host, mrq->stop);
1809 if (mrq->stop->error) {
1810 host->stop_transfer(host);
1811 atmci_writel(host, ATMCI_IDR,
1812 ATMCI_TXRDY | ATMCI_RXRDY
1813 | ATMCI_DATA_ERROR_FLAGS);
1814 state = STATE_END_REQUEST;
1815 } else {
1816 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1817 state = STATE_WAITING_NOTBUSY;
1818 }
1819 host->data = NULL;
1820 break;
1821
1822 case STATE_END_REQUEST:
1823 atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY
1824 | ATMCI_DATA_ERROR_FLAGS);
1825 status = host->data_status;
1826 if (unlikely(status)) {
1827 host->stop_transfer(host);
1828 host->data = NULL;
1829 if (data) {
1830 if (status & ATMCI_DTOE) {
1831 data->error = -ETIMEDOUT;
1832 } else if (status & ATMCI_DCRCE) {
1833 data->error = -EILSEQ;
1834 } else {
1835 data->error = -EIO;
1836 }
1837 }
1838 }
1839
1840 atmci_request_end(host, host->mrq);
1841 state = STATE_IDLE;
1842 break;
1843 }
1844 } while (state != prev_state);
1845
1846 host->state = state;
1847
1848 spin_unlock(&host->lock);
1849}
1850
1851static void atmci_read_data_pio(struct atmel_mci *host)
1852{
1853 struct scatterlist *sg = host->sg;
1854 void *buf = sg_virt(sg);
1855 unsigned int offset = host->pio_offset;
1856 struct mmc_data *data = host->data;
1857 u32 value;
1858 u32 status;
1859 unsigned int nbytes = 0;
1860
1861 do {
1862 value = atmci_readl(host, ATMCI_RDR);
1863 if (likely(offset + 4 <= sg->length)) {
1864 put_unaligned(value, (u32 *)(buf + offset));
1865
1866 offset += 4;
1867 nbytes += 4;
1868
1869 if (offset == sg->length) {
1870 flush_dcache_page(sg_page(sg));
1871 host->sg = sg = sg_next(sg);
1872 host->sg_len--;
1873 if (!sg || !host->sg_len)
1874 goto done;
1875
1876 offset = 0;
1877 buf = sg_virt(sg);
1878 }
1879 } else {
1880 unsigned int remaining = sg->length - offset;
1881 memcpy(buf + offset, &value, remaining);
1882 nbytes += remaining;
1883
1884 flush_dcache_page(sg_page(sg));
1885 host->sg = sg = sg_next(sg);
1886 host->sg_len--;
1887 if (!sg || !host->sg_len)
1888 goto done;
1889
1890 offset = 4 - remaining;
1891 buf = sg_virt(sg);
1892 memcpy(buf, (u8 *)&value + remaining, offset);
1893 nbytes += offset;
1894 }
1895
1896 status = atmci_readl(host, ATMCI_SR);
1897 if (status & ATMCI_DATA_ERROR_FLAGS) {
1898 atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_RXRDY
1899 | ATMCI_DATA_ERROR_FLAGS));
1900 host->data_status = status;
1901 data->bytes_xfered += nbytes;
1902 return;
1903 }
1904 } while (status & ATMCI_RXRDY);
1905
1906 host->pio_offset = offset;
1907 data->bytes_xfered += nbytes;
1908
1909 return;
1910
1911done:
1912 atmci_writel(host, ATMCI_IDR, ATMCI_RXRDY);
1913 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1914 data->bytes_xfered += nbytes;
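	/* Order the data updates before flagging the transfer complete */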
1915 smp_wmb();
1916 atmci_set_pending(host, EVENT_XFER_COMPLETE);
1917}
1918
1919static void atmci_write_data_pio(struct atmel_mci *host)
1920{
1921 struct scatterlist *sg = host->sg;
1922 void *buf = sg_virt(sg);
1923 unsigned int offset = host->pio_offset;
1924 struct mmc_data *data = host->data;
1925 u32 value;
1926 u32 status;
1927 unsigned int nbytes = 0;
1928
1929 do {
1930 if (likely(offset + 4 <= sg->length)) {
1931 value = get_unaligned((u32 *)(buf + offset));
1932 atmci_writel(host, ATMCI_TDR, value);
1933
1934 offset += 4;
1935 nbytes += 4;
1936 if (offset == sg->length) {
1937 host->sg = sg = sg_next(sg);
1938 host->sg_len--;
1939 if (!sg || !host->sg_len)
1940 goto done;
1941
1942 offset = 0;
1943 buf = sg_virt(sg);
1944 }
1945 } else {
1946 unsigned int remaining = sg->length - offset;
1947
1948 value = 0;
1949 memcpy(&value, buf + offset, remaining);
1950 nbytes += remaining;
1951
1952 host->sg = sg = sg_next(sg);
1953 host->sg_len--;
1954 if (!sg || !host->sg_len) {
1955 atmci_writel(host, ATMCI_TDR, value);
1956 goto done;
1957 }
1958
1959 offset = 4 - remaining;
1960 buf = sg_virt(sg);
1961 memcpy((u8 *)&value + remaining, buf, offset);
1962 atmci_writel(host, ATMCI_TDR, value);
1963 nbytes += offset;
1964 }
1965
1966 status = atmci_readl(host, ATMCI_SR);
1967 if (status & ATMCI_DATA_ERROR_FLAGS) {
1968 atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_TXRDY
1969 | ATMCI_DATA_ERROR_FLAGS));
1970 host->data_status = status;
1971 data->bytes_xfered += nbytes;
1972 return;
1973 }
1974 } while (status & ATMCI_TXRDY);
1975
1976 host->pio_offset = offset;
1977 data->bytes_xfered += nbytes;
1978
1979 return;
1980
1981done:
1982 atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY);
1983 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1984 data->bytes_xfered += nbytes;
1985 smp_wmb();
1986 atmci_set_pending(host, EVENT_XFER_COMPLETE);
1987}
1988
1989static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
1990{
1991 int i;
1992
1993 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1994 struct atmel_mci_slot *slot = host->slot[i];
1995 if (slot && (status & slot->sdio_irq)) {
1996 mmc_signal_sdio_irq(slot->mmc);
1997 }
1998 }
1999}
2000
2001
2002static irqreturn_t atmci_interrupt(int irq, void *dev_id)
2003{
2004 struct atmel_mci *host = dev_id;
2005 u32 status, mask, pending;
2006 unsigned int pass_count = 0;
2007
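	/*
	 * Loop a bounded number of times so a stuck interrupt source
	 * cannot keep us in the handler forever; IRQ_NONE is returned
	 * only if nothing was pending on the very first pass.
	 */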
2008 do {
2009 status = atmci_readl(host, ATMCI_SR);
2010 mask = atmci_readl(host, ATMCI_IMR);
2011 pending = status & mask;
2012 if (!pending)
2013 break;
2014
2015 if (pending & ATMCI_DATA_ERROR_FLAGS) {
2016 dev_dbg(&host->pdev->dev, "IRQ: data error\n");
2017 atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
2018 | ATMCI_RXRDY | ATMCI_TXRDY
2019 | ATMCI_ENDRX | ATMCI_ENDTX
2020 | ATMCI_RXBUFF | ATMCI_TXBUFE);
2021
2022 host->data_status = status;
2023 dev_dbg(&host->pdev->dev, "set pending data error\n");
2024 smp_wmb();
2025 atmci_set_pending(host, EVENT_DATA_ERROR);
2026 tasklet_schedule(&host->tasklet);
2027 }
2028
2029 if (pending & ATMCI_TXBUFE) {
2030 dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n");
2031 atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
2032 atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
			/*
			 * We can receive this interrupt before having
			 * configured the second PDC buffer, so we need to
			 * reconfigure first and second buffers again.
			 */
2038 if (host->data_size) {
2039 atmci_pdc_set_both_buf(host, XFER_TRANSMIT);
2040 atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
2041 atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE);
2042 } else {
2043 atmci_pdc_complete(host);
2044 }
2045 } else if (pending & ATMCI_ENDTX) {
2046 dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n");
2047 atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
2048
2049 if (host->data_size) {
2050 atmci_pdc_set_single_buf(host,
2051 XFER_TRANSMIT, PDC_SECOND_BUF);
2052 atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
2053 }
2054 }
2055
2056 if (pending & ATMCI_RXBUFF) {
2057 dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n");
2058 atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
2059 atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
2060
			/*
			 * Same as for TXBUFE: this interrupt can arrive before
			 * the second PDC buffer has been configured, so set up
			 * both buffers again if data remains.
			 */
2065 if (host->data_size) {
2066 atmci_pdc_set_both_buf(host, XFER_RECEIVE);
2067 atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
2068 atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF);
2069 } else {
2070 atmci_pdc_complete(host);
2071 }
2072 } else if (pending & ATMCI_ENDRX) {
2073 dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n");
2074 atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
2075
2076 if (host->data_size) {
2077 atmci_pdc_set_single_buf(host,
2078 XFER_RECEIVE, PDC_SECOND_BUF);
2079 atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
2080 }
2081 }
2082
		/*
		 * First mci IPs, so mainly the ones having pdc, have some
		 * issues with the notbusy signal. You can't get it after
		 * data transmission if you have not sent a stop command.
		 * The appropriate workaround is to use the BLKE signal.
		 */
2089 if (pending & ATMCI_BLKE) {
2090 dev_dbg(&host->pdev->dev, "IRQ: blke\n");
2091 atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
2092 smp_wmb();
2093 dev_dbg(&host->pdev->dev, "set pending notbusy\n");
2094 atmci_set_pending(host, EVENT_NOTBUSY);
2095 tasklet_schedule(&host->tasklet);
2096 }
2097
2098 if (pending & ATMCI_NOTBUSY) {
2099 dev_dbg(&host->pdev->dev, "IRQ: not_busy\n");
2100 atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
2101 smp_wmb();
2102 dev_dbg(&host->pdev->dev, "set pending notbusy\n");
2103 atmci_set_pending(host, EVENT_NOTBUSY);
2104 tasklet_schedule(&host->tasklet);
2105 }
2106
2107 if (pending & ATMCI_RXRDY)
2108 atmci_read_data_pio(host);
2109 if (pending & ATMCI_TXRDY)
2110 atmci_write_data_pio(host);
2111
2112 if (pending & ATMCI_CMDRDY) {
2113 dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n");
2114 atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
2115 host->cmd_status = status;
2116 smp_wmb();
2117 dev_dbg(&host->pdev->dev, "set pending cmd rdy\n");
2118 atmci_set_pending(host, EVENT_CMD_RDY);
2119 tasklet_schedule(&host->tasklet);
2120 }
2121
2122 if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
2123 atmci_sdio_interrupt(host, status);
2124
2125 } while (pass_count++ < 5);
2126
2127 return pass_count ? IRQ_HANDLED : IRQ_NONE;
2128}
2129
2130static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
2131{
2132 struct atmel_mci_slot *slot = dev_id;
2133
	/*
	 * Disable interrupts until the pin has stabilized and check
	 * the state then. Use mod_timer() since we may be in the
	 * middle of the timer routine when this interrupt triggers.
	 */
2139 disable_irq_nosync(irq);
2140 mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20));
2141
2142 return IRQ_HANDLED;
2143}
2144
2145static int __init atmci_init_slot(struct atmel_mci *host,
2146 struct mci_slot_pdata *slot_data, unsigned int id,
2147 u32 sdc_reg, u32 sdio_irq)
2148{
2149 struct mmc_host *mmc;
2150 struct atmel_mci_slot *slot;
2151
2152 mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
2153 if (!mmc)
2154 return -ENOMEM;
2155
2156 slot = mmc_priv(mmc);
2157 slot->mmc = mmc;
2158 slot->host = host;
2159 slot->detect_pin = slot_data->detect_pin;
2160 slot->wp_pin = slot_data->wp_pin;
2161 slot->detect_is_active_high = slot_data->detect_is_active_high;
2162 slot->sdc_reg = sdc_reg;
2163 slot->sdio_irq = sdio_irq;
2164
2165 dev_dbg(&mmc->class_dev,
2166 "slot[%u]: bus_width=%u, detect_pin=%d, "
2167 "detect_is_active_high=%s, wp_pin=%d\n",
2168 id, slot_data->bus_width, slot_data->detect_pin,
2169 slot_data->detect_is_active_high ? "true" : "false",
2170 slot_data->wp_pin);
2171
2172 mmc->ops = &atmci_ops;
2173 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
2174 mmc->f_max = host->bus_hz / 2;
2175 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2176 if (sdio_irq)
2177 mmc->caps |= MMC_CAP_SDIO_IRQ;
2178 if (host->caps.has_highspeed)
2179 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
	/*
	 * Without the read/write proof capability, it is strongly suggested
	 * to use only one bit for data to prevent fifo underruns and overruns
	 * which will corrupt data.
	 */
2185 if ((slot_data->bus_width >= 4) && host->caps.has_rwproof)
2186 mmc->caps |= MMC_CAP_4_BIT_DATA;
2187
2188 if (atmci_get_version(host) < 0x200) {
2189 mmc->max_segs = 256;
2190 mmc->max_blk_size = 4095;
2191 mmc->max_blk_count = 256;
2192 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2193 mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs;
2194 } else {
2195 mmc->max_segs = 64;
2196 mmc->max_req_size = 32768 * 512;
2197 mmc->max_blk_size = 32768;
2198 mmc->max_blk_count = 512;
2199 }
2200
	/* Assume card is present initially */
2202 set_bit(ATMCI_CARD_PRESENT, &slot->flags);
2203 if (gpio_is_valid(slot->detect_pin)) {
2204 if (gpio_request(slot->detect_pin, "mmc_detect")) {
2205 dev_dbg(&mmc->class_dev, "no detect pin available\n");
2206 slot->detect_pin = -EBUSY;
2207 } else if (gpio_get_value(slot->detect_pin) ^
2208 slot->detect_is_active_high) {
2209 clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
2210 }
2211 }
2212
2213 if (!gpio_is_valid(slot->detect_pin))
2214 mmc->caps |= MMC_CAP_NEEDS_POLL;
2215
2216 if (gpio_is_valid(slot->wp_pin)) {
2217 if (gpio_request(slot->wp_pin, "mmc_wp")) {
2218 dev_dbg(&mmc->class_dev, "no WP pin available\n");
2219 slot->wp_pin = -EBUSY;
2220 }
2221 }
2222
2223 host->slot[id] = slot;
2224 mmc_regulator_get_supply(mmc);
2225 mmc_add_host(mmc);
2226
2227 if (gpio_is_valid(slot->detect_pin)) {
2228 int ret;
2229
2230 setup_timer(&slot->detect_timer, atmci_detect_change,
2231 (unsigned long)slot);
2232
2233 ret = request_irq(gpio_to_irq(slot->detect_pin),
2234 atmci_detect_interrupt,
2235 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
2236 "mmc-detect", slot);
2237 if (ret) {
2238 dev_dbg(&mmc->class_dev,
2239 "could not request IRQ %d for detect pin\n",
2240 gpio_to_irq(slot->detect_pin));
2241 gpio_free(slot->detect_pin);
2242 slot->detect_pin = -EBUSY;
2243 }
2244 }
2245
2246 atmci_init_debugfs(slot);
2247
2248 return 0;
2249}
2250
2251static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
2252 unsigned int id)
2253{
2254
2255
2256 set_bit(ATMCI_SHUTDOWN, &slot->flags);
2257 smp_wmb();
2258
2259 mmc_remove_host(slot->mmc);
2260
2261 if (gpio_is_valid(slot->detect_pin)) {
2262 int pin = slot->detect_pin;
2263
2264 free_irq(gpio_to_irq(pin), slot);
2265 del_timer_sync(&slot->detect_timer);
2266 gpio_free(pin);
2267 }
2268 if (gpio_is_valid(slot->wp_pin))
2269 gpio_free(slot->wp_pin);
2270
2271 slot->host->slot[id] = NULL;
2272 mmc_free_host(slot->mmc);
2273}
2274
2275static bool atmci_filter(struct dma_chan *chan, void *pdata)
2276{
2277 struct mci_platform_data *sl_pdata = pdata;
2278 struct mci_dma_data *sl;
2279
2280 if (!sl_pdata)
2281 return false;
2282
2283 sl = sl_pdata->dma_slave;
2284 if (sl && find_slave_dev(sl) == chan->device->dev) {
2285 chan->private = slave_data_ptr(sl);
2286 return true;
2287 } else {
2288 return false;
2289 }
2290}
2291
2292static bool atmci_configure_dma(struct atmel_mci *host)
2293{
2294 struct mci_platform_data *pdata;
2295 dma_cap_mask_t mask;
2296
2297 if (host == NULL)
2298 return false;
2299
2300 pdata = host->pdev->dev.platform_data;
2301
2302 dma_cap_zero(mask);
2303 dma_cap_set(DMA_SLAVE, mask);
2304
2305 host->dma.chan = dma_request_slave_channel_compat(mask, atmci_filter, pdata,
2306 &host->pdev->dev, "rxtx");
2307 if (!host->dma.chan) {
2308 dev_warn(&host->pdev->dev, "no DMA channel available\n");
2309 return false;
2310 } else {
2311 dev_info(&host->pdev->dev,
2312 "using %s for DMA transfers\n",
2313 dma_chan_name(host->dma.chan));
2314
2315 host->dma_conf.src_addr = host->mapbase + ATMCI_RDR;
2316 host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2317 host->dma_conf.src_maxburst = 1;
2318 host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR;
2319 host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2320 host->dma_conf.dst_maxburst = 1;
2321 host->dma_conf.device_fc = false;
2322 return true;
2323 }
2324}
2325
/*
 * HSMCI (High Speed MCI) module is not fully compatible with MCI module.
 * HSMCI provides DMA support and a new config register but no more supports
 * PDC.
 */
2331static void __init atmci_get_cap(struct atmel_mci *host)
2332{
2333 unsigned int version;
2334
2335 version = atmci_get_version(host);
2336 dev_info(&host->pdev->dev,
2337 "version: 0x%x\n", version);
2338
2339 host->caps.has_dma_conf_reg = 0;
2340 host->caps.has_pdc = ATMCI_PDC_CONNECTED;
2341 host->caps.has_cfg_reg = 0;
2342 host->caps.has_cstor_reg = 0;
2343 host->caps.has_highspeed = 0;
2344 host->caps.has_rwproof = 0;
2345 host->caps.has_odd_clk_div = 0;
2346 host->caps.has_bad_data_ordering = 1;
2347 host->caps.need_reset_after_xfer = 1;
2348 host->caps.need_blksz_mul_4 = 1;
2349 host->caps.need_notbusy_for_read_ops = 0;
2350
	/* keep only major version number */
2352 switch (version & 0xf00) {
2353 case 0x500:
2354 host->caps.has_odd_clk_div = 1;
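		/* fall through: capabilities are cumulative */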
2355 case 0x400:
2356 case 0x300:
2357 host->caps.has_dma_conf_reg = 1;
2358 host->caps.has_pdc = 0;
2359 host->caps.has_cfg_reg = 1;
2360 host->caps.has_cstor_reg = 1;
2361 host->caps.has_highspeed = 1;
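		/* fall through */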
2362 case 0x200:
2363 host->caps.has_rwproof = 1;
2364 host->caps.need_blksz_mul_4 = 0;
2365 host->caps.need_notbusy_for_read_ops = 1;
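		/* fall through */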
2366 case 0x100:
2367 host->caps.has_bad_data_ordering = 0;
2368 host->caps.need_reset_after_xfer = 0;
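		/* fall through */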
2369 case 0x0:
2370 break;
2371 default:
2372 host->caps.has_pdc = 0;
2373 dev_warn(&host->pdev->dev,
2374 "Unmanaged mci version, set minimum capabilities\n");
2375 break;
2376 }
2377}
2378
2379static int __init atmci_probe(struct platform_device *pdev)
2380{
2381 struct mci_platform_data *pdata;
2382 struct atmel_mci *host;
2383 struct resource *regs;
2384 unsigned int nr_slots;
2385 int irq;
2386 int ret;
2387
2388 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2389 if (!regs)
2390 return -ENXIO;
2391 pdata = pdev->dev.platform_data;
2392 if (!pdata) {
2393 pdata = atmci_of_init(pdev);
2394 if (IS_ERR(pdata)) {
2395 dev_err(&pdev->dev, "platform data not available\n");
2396 return PTR_ERR(pdata);
2397 }
2398 }
2399
2400 irq = platform_get_irq(pdev, 0);
2401 if (irq < 0)
2402 return irq;
2403
2404 host = kzalloc(sizeof(struct atmel_mci), GFP_KERNEL);
2405 if (!host)
2406 return -ENOMEM;
2407
2408 host->pdev = pdev;
2409 spin_lock_init(&host->lock);
2410 INIT_LIST_HEAD(&host->queue);
2411
2412 host->mck = clk_get(&pdev->dev, "mci_clk");
2413 if (IS_ERR(host->mck)) {
2414 ret = PTR_ERR(host->mck);
2415 goto err_clk_get;
2416 }
2417
2418 ret = -ENOMEM;
2419 host->regs = ioremap(regs->start, resource_size(regs));
2420 if (!host->regs)
2421 goto err_ioremap;
2422
2423 ret = clk_prepare_enable(host->mck);
2424 if (ret)
2425 goto err_request_irq;
2426 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
2427 host->bus_hz = clk_get_rate(host->mck);
2428 clk_disable_unprepare(host->mck);
2429
2430 host->mapbase = regs->start;
2431
2432 tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host);
2433
2434 ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host);
2435 if (ret)
2436 goto err_request_irq;
2437
	/* Get MCI capabilities and set operations according to it */
2439 atmci_get_cap(host);
2440 if (atmci_configure_dma(host)) {
2441 host->prepare_data = &atmci_prepare_data_dma;
2442 host->submit_data = &atmci_submit_data_dma;
2443 host->stop_transfer = &atmci_stop_transfer_dma;
2444 } else if (host->caps.has_pdc) {
2445 dev_info(&pdev->dev, "using PDC\n");
2446 host->prepare_data = &atmci_prepare_data_pdc;
2447 host->submit_data = &atmci_submit_data_pdc;
2448 host->stop_transfer = &atmci_stop_transfer_pdc;
2449 } else {
2450 dev_info(&pdev->dev, "using PIO\n");
2451 host->prepare_data = &atmci_prepare_data;
2452 host->submit_data = &atmci_submit_data;
2453 host->stop_transfer = &atmci_stop_transfer;
2454 }
2455
2456 platform_set_drvdata(pdev, host);
2457
2458 setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
2459
	/* We need at least one slot to succeed */
2461 nr_slots = 0;
2462 ret = -ENODEV;
2463 if (pdata->slot[0].bus_width) {
2464 ret = atmci_init_slot(host, &pdata->slot[0],
2465 0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
2466 if (!ret) {
2467 nr_slots++;
2468 host->buf_size = host->slot[0]->mmc->max_req_size;
2469 }
2470 }
2471 if (pdata->slot[1].bus_width) {
2472 ret = atmci_init_slot(host, &pdata->slot[1],
2473 1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
2474 if (!ret) {
2475 nr_slots++;
2476 if (host->slot[1]->mmc->max_req_size > host->buf_size)
2477 host->buf_size =
2478 host->slot[1]->mmc->max_req_size;
2479 }
2480 }
2481
2482 if (!nr_slots) {
2483 dev_err(&pdev->dev, "init failed: no slot defined\n");
2484 goto err_init_slot;
2485 }
2486
2487 if (!host->caps.has_rwproof) {
2488 host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
2489 &host->buf_phys_addr,
2490 GFP_KERNEL);
2491 if (!host->buffer) {
2492 ret = -ENOMEM;
2493 dev_err(&pdev->dev, "buffer allocation failed\n");
2494 goto err_init_slot;
2495 }
2496 }
2497
2498 dev_info(&pdev->dev,
2499 "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
2500 host->mapbase, irq, nr_slots);
2501
2502 return 0;
2503
2504err_init_slot:
2505 if (host->dma.chan)
2506 dma_release_channel(host->dma.chan);
2507 free_irq(irq, host);
2508err_request_irq:
2509 iounmap(host->regs);
2510err_ioremap:
2511 clk_put(host->mck);
2512err_clk_get:
2513 kfree(host);
2514 return ret;
2515}
2516
2517static int __exit atmci_remove(struct platform_device *pdev)
2518{
2519 struct atmel_mci *host = platform_get_drvdata(pdev);
2520 unsigned int i;
2521
2522 if (host->buffer)
2523 dma_free_coherent(&pdev->dev, host->buf_size,
2524 host->buffer, host->buf_phys_addr);
2525
2526 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2527 if (host->slot[i])
2528 atmci_cleanup_slot(host->slot[i], i);
2529 }
2530
2531 clk_prepare_enable(host->mck);
2532 atmci_writel(host, ATMCI_IDR, ~0UL);
2533 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
2534 atmci_readl(host, ATMCI_SR);
2535 clk_disable_unprepare(host->mck);
2536
2537 if (host->dma.chan)
2538 dma_release_channel(host->dma.chan);
2539
2540 free_irq(platform_get_irq(pdev, 0), host);
2541 iounmap(host->regs);
2542
2543 clk_put(host->mck);
2544 kfree(host);
2545
2546 return 0;
2547}
2548
2549static struct platform_driver atmci_driver = {
2550 .remove = __exit_p(atmci_remove),
2551 .driver = {
2552 .name = "atmel_mci",
2553 .of_match_table = of_match_ptr(atmci_dt_ids),
2554 },
2555};
2556
2557static int __init atmci_init(void)
2558{
2559 return platform_driver_probe(&atmci_driver, atmci_probe);
2560}
2561
2562static void __exit atmci_exit(void)
2563{
2564 platform_driver_unregister(&atmci_driver);
2565}
2566
2567late_initcall(atmci_init);
2568module_exit(atmci_exit);
2569
2570MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
2571MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
2572MODULE_LICENSE("GPL v2");
2573