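/*
 * Atmel MultiMedia Card Interface (MCI/HSMCI) driver
 */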
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/types.h>
#include <linux/platform_data/mmc-atmel-mci.h>

#include <linux/mmc/host.h>
#include <linux/mmc/sdio.h>

#include <linux/atmel-mci.h>
#include <linux/atmel_pdc.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/consumer.h>

#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/unaligned.h>

#include "atmel-mci-regs.h"

#define AUTOSUSPEND_DELAY	50

#define ATMCI_DATA_ERROR_FLAGS	(ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE)
#define ATMCI_DMA_THRESHOLD	16

enum {
	EVENT_CMD_RDY = 0,
	EVENT_XFER_COMPLETE,
	EVENT_NOTBUSY,
	EVENT_DATA_ERROR,
};

enum atmel_mci_state {
	STATE_IDLE = 0,
	STATE_SENDING_CMD,
	STATE_DATA_XFER,
	STATE_WAITING_NOTBUSY,
	STATE_SENDING_STOP,
	STATE_END_REQUEST,
};

enum atmci_xfer_dir {
	XFER_RECEIVE = 0,
	XFER_TRANSMIT,
};

enum atmci_pdc_buf {
	PDC_FIRST_BUF = 0,
	PDC_SECOND_BUF,
};

struct atmel_mci_caps {
	bool has_dma_conf_reg;
	bool has_pdc;
	bool has_cfg_reg;
	bool has_cstor_reg;
	bool has_highspeed;
	bool has_rwproof;
	bool has_odd_clk_div;
	bool has_bad_data_ordering;
	bool need_reset_after_xfer;
	bool need_blksz_mul_4;
	bool need_notbusy_for_read_ops;
};

struct atmel_mci_dma {
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *data_desc;
};
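
/*
 * struct atmel_mci - MMC controller state shared between all slots
 * @lock: Softirq-safe spinlock protecting @queue, the per-slot requests
 *	and the host state machine shared with the tasklet.
 * @regs: Pointer to the MMIO register area.
 * @sg, @sg_len, @pio_offset: Current position in the scatterlist for
 *	PIO and PDC transfers.
 * @buffer, @buf_size, @buf_phys_addr: Bounce buffer used for PDC
 *	transfers on controllers without read/write proof.
 * @cur_slot: Slot currently owning the controller.
 * @mrq, @cmd, @data: Request, command and data transfer currently being
 *	processed, if any.
 * @data_size: Number of bytes of the current transfer still to be
 *	programmed into the PDC.
 * @dma, @data_chan, @dma_conf: dmaengine state for the current transfer.
 * @cmd_status, @data_status: Snapshots of SR taken when the command or
 *	the data transfer completed (or failed).
 * @stop_cmdr: Value to be written to CMDR in order to send a STOP command.
 * @tasklet, @pending_events, @completed_events, @state: Software state
 *	machine driven from the interrupt handler.
 * @queue: List of slots waiting for the controller to become available.
 * @need_clock_update, @need_reset: Deferred reconfiguration flags,
 *	applied when the current request finishes or the next one starts.
 * @timer: Software timeout for the current request.
 * @mode_reg, @cfg_reg: Cached values of the MR and CFG registers.
 * @bus_hz: Rate of the peripheral clock @mck feeding the controller.
 * @mapbase: Physical base address of the MMIO registers.
 * @pdev: Platform device this controller belongs to.
 * @slot: Per-slot state, indexed by slot number.
 * @caps: Capabilities of this controller revision.
 * @prepare_data, @submit_data, @stop_transfer: Transfer backend hooks
 *	(PIO, PDC or dmaengine) selected at probe time.
 */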
struct atmel_mci {
	spinlock_t lock;
	void __iomem *regs;

	struct scatterlist *sg;
	unsigned int sg_len;
	unsigned int pio_offset;
	unsigned int *buffer;
	unsigned int buf_size;
	dma_addr_t buf_phys_addr;

	struct atmel_mci_slot *cur_slot;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_data *data;
	unsigned int data_size;

	struct atmel_mci_dma dma;
	struct dma_chan *data_chan;
	struct dma_slave_config dma_conf;

	u32 cmd_status;
	u32 data_status;
	u32 stop_cmdr;

	struct tasklet_struct tasklet;
	unsigned long pending_events;
	unsigned long completed_events;
	enum atmel_mci_state state;
	struct list_head queue;

	bool need_clock_update;
	bool need_reset;
	struct timer_list timer;
	u32 mode_reg;
	u32 cfg_reg;
	unsigned long bus_hz;
	unsigned long mapbase;
	struct clk *mck;
	struct platform_device *pdev;

	struct atmel_mci_slot *slot[ATMCI_MAX_NR_SLOTS];

	struct atmel_mci_caps caps;

	u32 (*prepare_data)(struct atmel_mci *host, struct mmc_data *data);
	void (*submit_data)(struct atmel_mci *host, struct mmc_data *data);
	void (*stop_transfer)(struct atmel_mci *host);
};
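
/*
 * struct atmel_mci_slot - MMC slot state
 * @mmc: The mmc_host representing this slot.
 * @host: The MMC controller this slot is connected to.
 * @sdc_reg: Value of SDCR to be written before using this slot.
 * @sdio_irq: SDIO interrupt mask for this slot (SDIOIRQA or SDIOIRQB).
 * @mrq: mmc_request currently being processed or waiting to be
 *	processed, or NULL when the slot is idle.
 * @queue_node: List node for placing this slot in the @queue list of
 *	struct atmel_mci.
 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
 * @flags: Random state bits associated with the slot.
 * @detect_pin: GPIO pin used for card detection, or negative if not
 *	available.
 * @wp_pin: GPIO pin used for card write protect sensing, or negative
 *	if not available.
 * @detect_is_active_high: The state of the detect pin when it is active.
 * @detect_timer: Timer used to debounce card-detect interrupts.
 */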
struct atmel_mci_slot {
	struct mmc_host *mmc;
	struct atmel_mci *host;

	u32 sdc_reg;
	u32 sdio_irq;

	struct mmc_request *mrq;
	struct list_head queue_node;

	unsigned int clock;
	unsigned long flags;
#define ATMCI_CARD_PRESENT	0
#define ATMCI_CARD_NEED_INIT	1
#define ATMCI_SHUTDOWN		2

	int detect_pin;
	int wp_pin;
	bool detect_is_active_high;

	struct timer_list detect_timer;
};

#define atmci_test_and_clear_pending(host, event) \
	test_and_clear_bit(event, &host->pending_events)
#define atmci_set_completed(host, event) \
	set_bit(event, &host->completed_events)
#define atmci_set_pending(host, event) \
	set_bit(event, &host->pending_events)
278
279
280
281
282
283static int atmci_req_show(struct seq_file *s, void *v)
284{
285 struct atmel_mci_slot *slot = s->private;
286 struct mmc_request *mrq;
287 struct mmc_command *cmd;
288 struct mmc_command *stop;
289 struct mmc_data *data;
290
291
292 spin_lock_bh(&slot->host->lock);
293 mrq = slot->mrq;
294
295 if (mrq) {
296 cmd = mrq->cmd;
297 data = mrq->data;
298 stop = mrq->stop;
299
300 if (cmd)
301 seq_printf(s,
302 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
303 cmd->opcode, cmd->arg, cmd->flags,
304 cmd->resp[0], cmd->resp[1], cmd->resp[2],
305 cmd->resp[3], cmd->error);
306 if (data)
307 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
308 data->bytes_xfered, data->blocks,
309 data->blksz, data->flags, data->error);
310 if (stop)
311 seq_printf(s,
312 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
313 stop->opcode, stop->arg, stop->flags,
314 stop->resp[0], stop->resp[1], stop->resp[2],
315 stop->resp[3], stop->error);
316 }
317
318 spin_unlock_bh(&slot->host->lock);
319
320 return 0;
321}
322
323static int atmci_req_open(struct inode *inode, struct file *file)
324{
325 return single_open(file, atmci_req_show, inode->i_private);
326}
327
328static const struct file_operations atmci_req_fops = {
329 .owner = THIS_MODULE,
330 .open = atmci_req_open,
331 .read = seq_read,
332 .llseek = seq_lseek,
333 .release = single_release,
334};
335
336static void atmci_show_status_reg(struct seq_file *s,
337 const char *regname, u32 value)
338{
339 static const char *sr_bit[] = {
340 [0] = "CMDRDY",
341 [1] = "RXRDY",
342 [2] = "TXRDY",
343 [3] = "BLKE",
344 [4] = "DTIP",
345 [5] = "NOTBUSY",
346 [6] = "ENDRX",
347 [7] = "ENDTX",
348 [8] = "SDIOIRQA",
349 [9] = "SDIOIRQB",
350 [12] = "SDIOWAIT",
351 [14] = "RXBUFF",
352 [15] = "TXBUFE",
353 [16] = "RINDE",
354 [17] = "RDIRE",
355 [18] = "RCRCE",
356 [19] = "RENDE",
357 [20] = "RTOE",
358 [21] = "DCRCE",
359 [22] = "DTOE",
360 [23] = "CSTOE",
361 [24] = "BLKOVRE",
362 [25] = "DMADONE",
363 [26] = "FIFOEMPTY",
364 [27] = "XFRDONE",
365 [30] = "OVRE",
366 [31] = "UNRE",
367 };
368 unsigned int i;
369
370 seq_printf(s, "%s:\t0x%08x", regname, value);
371 for (i = 0; i < ARRAY_SIZE(sr_bit); i++) {
372 if (value & (1 << i)) {
373 if (sr_bit[i])
374 seq_printf(s, " %s", sr_bit[i]);
375 else
376 seq_puts(s, " UNKNOWN");
377 }
378 }
379 seq_putc(s, '\n');
380}
381
382static int atmci_regs_show(struct seq_file *s, void *v)
383{
384 struct atmel_mci *host = s->private;
385 u32 *buf;
386 int ret = 0;
387
388
389 buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL);
390 if (!buf)
391 return -ENOMEM;
392
393 pm_runtime_get_sync(&host->pdev->dev);
394
395
396
397
398
399
400 spin_lock_bh(&host->lock);
401 memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
402 spin_unlock_bh(&host->lock);
403
404 pm_runtime_mark_last_busy(&host->pdev->dev);
405 pm_runtime_put_autosuspend(&host->pdev->dev);
406
407 seq_printf(s, "MR:\t0x%08x%s%s ",
408 buf[ATMCI_MR / 4],
409 buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "",
410 buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : "");
411 if (host->caps.has_odd_clk_div)
412 seq_printf(s, "{CLKDIV,CLKODD}=%u\n",
413 ((buf[ATMCI_MR / 4] & 0xff) << 1)
414 | ((buf[ATMCI_MR / 4] >> 16) & 1));
415 else
416 seq_printf(s, "CLKDIV=%u\n",
417 (buf[ATMCI_MR / 4] & 0xff));
418 seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]);
419 seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]);
420 seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]);
421 seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n",
422 buf[ATMCI_BLKR / 4],
423 buf[ATMCI_BLKR / 4] & 0xffff,
424 (buf[ATMCI_BLKR / 4] >> 16) & 0xffff);
425 if (host->caps.has_cstor_reg)
426 seq_printf(s, "CSTOR:\t0x%08x\n", buf[ATMCI_CSTOR / 4]);
427
428
429
430 atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]);
431 atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]);
432
433 if (host->caps.has_dma_conf_reg) {
434 u32 val;
435
436 val = buf[ATMCI_DMA / 4];
437 seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n",
438 val, val & 3,
439 ((val >> 4) & 3) ?
440 1 << (((val >> 4) & 3) + 1) : 1,
441 val & ATMCI_DMAEN ? " DMAEN" : "");
442 }
443 if (host->caps.has_cfg_reg) {
444 u32 val;
445
446 val = buf[ATMCI_CFG / 4];
447 seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",
448 val,
449 val & ATMCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "",
450 val & ATMCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "",
451 val & ATMCI_CFG_HSMODE ? " HSMODE" : "",
452 val & ATMCI_CFG_LSYNC ? " LSYNC" : "");
453 }
454
455 kfree(buf);
456
457 return ret;
458}
459
460static int atmci_regs_open(struct inode *inode, struct file *file)
461{
462 return single_open(file, atmci_regs_show, inode->i_private);
463}
464
465static const struct file_operations atmci_regs_fops = {
466 .owner = THIS_MODULE,
467 .open = atmci_regs_open,
468 .read = seq_read,
469 .llseek = seq_lseek,
470 .release = single_release,
471};
472
473static void atmci_init_debugfs(struct atmel_mci_slot *slot)
474{
475 struct mmc_host *mmc = slot->mmc;
476 struct atmel_mci *host = slot->host;
477 struct dentry *root;
478 struct dentry *node;
479
480 root = mmc->debugfs_root;
481 if (!root)
482 return;
483
484 node = debugfs_create_file("regs", S_IRUSR, root, host,
485 &atmci_regs_fops);
486 if (IS_ERR(node))
487 return;
488 if (!node)
489 goto err;
490
491 node = debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
492 if (!node)
493 goto err;
494
495 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
496 if (!node)
497 goto err;
498
499 node = debugfs_create_x32("pending_events", S_IRUSR, root,
500 (u32 *)&host->pending_events);
501 if (!node)
502 goto err;
503
504 node = debugfs_create_x32("completed_events", S_IRUSR, root,
505 (u32 *)&host->completed_events);
506 if (!node)
507 goto err;
508
509 return;
510
511err:
512 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
513}
514
#if defined(CONFIG_OF)
static const struct of_device_id atmci_dt_ids[] = {
	{ .compatible = "atmel,hsmci" },
	{ }
};

MODULE_DEVICE_TABLE(of, atmci_dt_ids);
522
523static struct mci_platform_data*
524atmci_of_init(struct platform_device *pdev)
525{
526 struct device_node *np = pdev->dev.of_node;
527 struct device_node *cnp;
528 struct mci_platform_data *pdata;
529 u32 slot_id;
530
531 if (!np) {
532 dev_err(&pdev->dev, "device node not found\n");
533 return ERR_PTR(-EINVAL);
534 }
535
536 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
537 if (!pdata) {
538 dev_err(&pdev->dev, "could not allocate memory for pdata\n");
539 return ERR_PTR(-ENOMEM);
540 }
541
542 for_each_child_of_node(np, cnp) {
543 if (of_property_read_u32(cnp, "reg", &slot_id)) {
544 dev_warn(&pdev->dev, "reg property is missing for %s\n",
545 cnp->full_name);
546 continue;
547 }
548
549 if (slot_id >= ATMCI_MAX_NR_SLOTS) {
550 dev_warn(&pdev->dev, "can't have more than %d slots\n",
551 ATMCI_MAX_NR_SLOTS);
552 break;
553 }
554
555 if (of_property_read_u32(cnp, "bus-width",
556 &pdata->slot[slot_id].bus_width))
557 pdata->slot[slot_id].bus_width = 1;
558
559 pdata->slot[slot_id].detect_pin =
560 of_get_named_gpio(cnp, "cd-gpios", 0);
561
562 pdata->slot[slot_id].detect_is_active_high =
563 of_property_read_bool(cnp, "cd-inverted");
564
565 pdata->slot[slot_id].non_removable =
566 of_property_read_bool(cnp, "non-removable");
567
568 pdata->slot[slot_id].wp_pin =
569 of_get_named_gpio(cnp, "wp-gpios", 0);
570 }
571
572 return pdata;
573}
574#else
575static inline struct mci_platform_data*
576atmci_of_init(struct platform_device *dev)
577{
578 return ERR_PTR(-EINVAL);
579}
580#endif
581
582static inline unsigned int atmci_get_version(struct atmel_mci *host)
583{
584 return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
585}
586
587static void atmci_timeout_timer(unsigned long data)
588{
589 struct atmel_mci *host;
590
591 host = (struct atmel_mci *)data;
592
593 dev_dbg(&host->pdev->dev, "software timeout\n");
594
595 if (host->mrq->cmd->data) {
596 host->mrq->cmd->data->error = -ETIMEDOUT;
597 host->data = NULL;
598
599
600
601
602
603 if (host->state == STATE_DATA_XFER)
604 host->stop_transfer(host);
605 } else {
606 host->mrq->cmd->error = -ETIMEDOUT;
607 host->cmd = NULL;
608 }
609 host->need_reset = 1;
610 host->state = STATE_END_REQUEST;
611 smp_wmb();
612 tasklet_schedule(&host->tasklet);
613}
614
615static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
616 unsigned int ns)
617{
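	/*
	 * The data timeout is programmed in MCI clock cycles, and the MCI
	 * clock runs at bus_hz / 2 at most.  Converting via microseconds
	 * (rounding up at each step) keeps the intermediate values small
	 * enough to avoid 32-bit overflow for large timeouts.
	 */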
618
619
620
621
622 unsigned int us = DIV_ROUND_UP(ns, 1000);
623
624
625 return us * (DIV_ROUND_UP(host->bus_hz, 2000000));
626}
627
628static void atmci_set_timeout(struct atmel_mci *host,
629 struct atmel_mci_slot *slot, struct mmc_data *data)
630{
631 static unsigned dtomul_to_shift[] = {
632 0, 4, 7, 8, 10, 12, 16, 20
633 };
634 unsigned timeout;
635 unsigned dtocyc;
636 unsigned dtomul;
637
638 timeout = atmci_ns_to_clocks(host, data->timeout_ns)
639 + data->timeout_clks;
640
641 for (dtomul = 0; dtomul < 8; dtomul++) {
642 unsigned shift = dtomul_to_shift[dtomul];
643 dtocyc = (timeout + (1 << shift) - 1) >> shift;
644 if (dtocyc < 15)
645 break;
646 }
647
648 if (dtomul >= 8) {
649 dtomul = 7;
650 dtocyc = 15;
651 }
652
653 dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n",
654 dtocyc << dtomul_to_shift[dtomul]);
655 atmci_writel(host, ATMCI_DTOR, (ATMCI_DTOMUL(dtomul) | ATMCI_DTOCYC(dtocyc)));
656}
657
658
659
660
661static u32 atmci_prepare_command(struct mmc_host *mmc,
662 struct mmc_command *cmd)
663{
664 struct mmc_data *data;
665 u32 cmdr;
666
667 cmd->error = -EINPROGRESS;
668
669 cmdr = ATMCI_CMDR_CMDNB(cmd->opcode);
670
671 if (cmd->flags & MMC_RSP_PRESENT) {
672 if (cmd->flags & MMC_RSP_136)
673 cmdr |= ATMCI_CMDR_RSPTYP_136BIT;
674 else
675 cmdr |= ATMCI_CMDR_RSPTYP_48BIT;
676 }
677
678
679
680
681
682
683 cmdr |= ATMCI_CMDR_MAXLAT_64CYC;
684
685 if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN)
686 cmdr |= ATMCI_CMDR_OPDCMD;
687
688 data = cmd->data;
689 if (data) {
690 cmdr |= ATMCI_CMDR_START_XFER;
691
692 if (cmd->opcode == SD_IO_RW_EXTENDED) {
693 cmdr |= ATMCI_CMDR_SDIO_BLOCK;
694 } else {
695 if (data->flags & MMC_DATA_STREAM)
696 cmdr |= ATMCI_CMDR_STREAM;
697 else if (data->blocks > 1)
698 cmdr |= ATMCI_CMDR_MULTI_BLOCK;
699 else
700 cmdr |= ATMCI_CMDR_BLOCK;
701 }
702
703 if (data->flags & MMC_DATA_READ)
704 cmdr |= ATMCI_CMDR_TRDIR_READ;
705 }
706
707 return cmdr;
708}
709
710static void atmci_send_command(struct atmel_mci *host,
711 struct mmc_command *cmd, u32 cmd_flags)
712{
713 WARN_ON(host->cmd);
714 host->cmd = cmd;
715
716 dev_vdbg(&host->pdev->dev,
717 "start command: ARGR=0x%08x CMDR=0x%08x\n",
718 cmd->arg, cmd_flags);
719
720 atmci_writel(host, ATMCI_ARGR, cmd->arg);
721 atmci_writel(host, ATMCI_CMDR, cmd_flags);
722}
723
724static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
725{
726 dev_dbg(&host->pdev->dev, "send stop command\n");
727 atmci_send_command(host, data->stop, host->stop_cmdr);
728 atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
729}
730
731
732
733
734
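/*
 * Program one PDC buffer (pointer/counter register pair) for the given
 * transfer direction, consuming host->data_size and advancing host->sg.
 * When the controller has no read/write proof, the transfer goes through
 * the pre-allocated bounce buffer instead of the scatterlist.
 */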
735static void atmci_pdc_set_single_buf(struct atmel_mci *host,
736 enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
737{
738 u32 pointer_reg, counter_reg;
739 unsigned int buf_size;
740
741 if (dir == XFER_RECEIVE) {
742 pointer_reg = ATMEL_PDC_RPR;
743 counter_reg = ATMEL_PDC_RCR;
744 } else {
745 pointer_reg = ATMEL_PDC_TPR;
746 counter_reg = ATMEL_PDC_TCR;
747 }
748
749 if (buf_nb == PDC_SECOND_BUF) {
750 pointer_reg += ATMEL_PDC_SCND_BUF_OFF;
751 counter_reg += ATMEL_PDC_SCND_BUF_OFF;
752 }
753
754 if (!host->caps.has_rwproof) {
755 buf_size = host->buf_size;
756 atmci_writel(host, pointer_reg, host->buf_phys_addr);
757 } else {
758 buf_size = sg_dma_len(host->sg);
759 atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
760 }
761
762 if (host->data_size <= buf_size) {
763 if (host->data_size & 0x3) {
764
765 atmci_writel(host, counter_reg, host->data_size);
766 atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE);
767 } else {
768
769 atmci_writel(host, counter_reg, host->data_size / 4);
770 }
771 host->data_size = 0;
772 } else {
773
774 atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4);
775 host->data_size -= sg_dma_len(host->sg);
776 if (host->data_size)
777 host->sg = sg_next(host->sg);
778 }
779}
780
781
782
783
784
785
786static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir)
787{
788 atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF);
789 if (host->data_size)
790 atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF);
791}
792
793
794
795
796static void atmci_pdc_cleanup(struct atmel_mci *host)
797{
798 struct mmc_data *data = host->data;
799
800 if (data)
801 dma_unmap_sg(&host->pdev->dev,
802 data->sg, data->sg_len,
803 ((data->flags & MMC_DATA_WRITE)
804 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
805}
806
807
808
809
810
811
812static void atmci_pdc_complete(struct atmel_mci *host)
813{
814 int transfer_size = host->data->blocks * host->data->blksz;
815 int i;
816
817 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
818
819 if ((!host->caps.has_rwproof)
820 && (host->data->flags & MMC_DATA_READ)) {
821 if (host->caps.has_bad_data_ordering)
822 for (i = 0; i < transfer_size; i++)
823 host->buffer[i] = swab32(host->buffer[i]);
824 sg_copy_from_buffer(host->data->sg, host->data->sg_len,
825 host->buffer, transfer_size);
826 }
827
828 atmci_pdc_cleanup(host);
829
830 dev_dbg(&host->pdev->dev, "(%s) set pending xfer complete\n", __func__);
831 atmci_set_pending(host, EVENT_XFER_COMPLETE);
832 tasklet_schedule(&host->tasklet);
833}
834
835static void atmci_dma_cleanup(struct atmel_mci *host)
836{
837 struct mmc_data *data = host->data;
838
839 if (data)
840 dma_unmap_sg(host->dma.chan->device->dev,
841 data->sg, data->sg_len,
842 ((data->flags & MMC_DATA_WRITE)
843 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
844}
845
846
847
848
849static void atmci_dma_complete(void *arg)
850{
851 struct atmel_mci *host = arg;
852 struct mmc_data *data = host->data;
853
854 dev_vdbg(&host->pdev->dev, "DMA complete\n");
855
856 if (host->caps.has_dma_conf_reg)
857
858 atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN);
859
860 atmci_dma_cleanup(host);
861
862
863
864
865
866 if (data) {
867 dev_dbg(&host->pdev->dev,
868 "(%s) set pending xfer complete\n", __func__);
869 atmci_set_pending(host, EVENT_XFER_COMPLETE);
870 tasklet_schedule(&host->tasklet);
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
893 }
894}
895
896
897
898
899
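/*
 * Prepare a PIO transfer: record the scatterlist position and return the
 * mask of interrupt flags (RXRDY or TXRDY plus the data error flags) to be
 * enabled once the command has been sent.
 */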
900static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
901{
902 u32 iflags;
903
904 data->error = -EINPROGRESS;
905
906 host->sg = data->sg;
907 host->sg_len = data->sg_len;
908 host->data = data;
909 host->data_chan = NULL;
910
911 iflags = ATMCI_DATA_ERROR_FLAGS;
912
913
914
915
916
917
918
919
920 if (data->blocks * data->blksz < 12
921 || (data->blocks * data->blksz) & 3)
922 host->need_reset = true;
923
924 host->pio_offset = 0;
925 if (data->flags & MMC_DATA_READ)
926 iflags |= ATMCI_RXRDY;
927 else
928 iflags |= ATMCI_TXRDY;
929
930 return iflags;
931}
932
933
934
935
936
937
938
939static u32
940atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
941{
942 u32 iflags, tmp;
943 unsigned int sg_len;
944 enum dma_data_direction dir;
945 int i;
946
947 data->error = -EINPROGRESS;
948
949 host->data = data;
950 host->sg = data->sg;
951 iflags = ATMCI_DATA_ERROR_FLAGS;
952
953
954 atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE);
955
956 if (data->flags & MMC_DATA_READ) {
957 dir = DMA_FROM_DEVICE;
958 iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
959 } else {
960 dir = DMA_TO_DEVICE;
961 iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
962 }
963
964
965 tmp = atmci_readl(host, ATMCI_MR);
966 tmp &= 0x0000ffff;
967 tmp |= ATMCI_BLKLEN(data->blksz);
968 atmci_writel(host, ATMCI_MR, tmp);
969
970
971 host->data_size = data->blocks * data->blksz;
972 sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir);
973
974 if ((!host->caps.has_rwproof)
975 && (host->data->flags & MMC_DATA_WRITE)) {
976 sg_copy_to_buffer(host->data->sg, host->data->sg_len,
977 host->buffer, host->data_size);
978 if (host->caps.has_bad_data_ordering)
979 for (i = 0; i < host->data_size; i++)
980 host->buffer[i] = swab32(host->buffer[i]);
981 }
982
983 if (host->data_size)
984 atmci_pdc_set_both_buf(host,
985 ((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT));
986
987 return iflags;
988}
989
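/*
 * atmci_convert_chksize() is used by atmci_prepare_data_dma() below but is
 * not defined anywhere in this listing.  The sketch below is a plausible
 * reconstruction, assuming the ATMCI_CHKSIZE_* values are provided by
 * "atmel-mci-regs.h" next to ATMCI_DMA_CHKSIZE(); it maps a dmaengine
 * maxburst value to the nearest DMA chunk size supported by the controller.
 */
static inline unsigned int atmci_convert_chksize(unsigned int maxburst)
{
	if (maxburst > 8)
		return ATMCI_CHKSIZE_16;
	else if (maxburst > 4)
		return ATMCI_CHKSIZE_8;
	else if (maxburst > 1)
		return ATMCI_CHKSIZE_4;
	else
		return ATMCI_CHKSIZE_1;
}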
990static u32
991atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
992{
993 struct dma_chan *chan;
994 struct dma_async_tx_descriptor *desc;
995 struct scatterlist *sg;
996 unsigned int i;
997 enum dma_data_direction direction;
998 enum dma_transfer_direction slave_dirn;
999 unsigned int sglen;
1000 u32 maxburst;
1001 u32 iflags;
1002
1003 data->error = -EINPROGRESS;
1004
1005 WARN_ON(host->data);
1006 host->sg = NULL;
1007 host->data = data;
1008
1009 iflags = ATMCI_DATA_ERROR_FLAGS;
1010
1011
1012
1013
1014
1015
1016 if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
1017 return atmci_prepare_data(host, data);
1018 if (data->blksz & 3)
1019 return atmci_prepare_data(host, data);
1020
1021 for_each_sg(data->sg, sg, data->sg_len, i) {
1022 if (sg->offset & 3 || sg->length & 3)
1023 return atmci_prepare_data(host, data);
1024 }
1025
1026
1027 chan = host->dma.chan;
1028 if (chan)
1029 host->data_chan = chan;
1030
1031 if (!chan)
1032 return -ENODEV;
1033
1034 if (data->flags & MMC_DATA_READ) {
1035 direction = DMA_FROM_DEVICE;
1036 host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
1037 maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst);
1038 } else {
1039 direction = DMA_TO_DEVICE;
1040 host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
1041 maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst);
1042 }
1043
1044 if (host->caps.has_dma_conf_reg)
1045 atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) |
1046 ATMCI_DMAEN);
1047
1048 sglen = dma_map_sg(chan->device->dev, data->sg,
1049 data->sg_len, direction);
1050
1051 dmaengine_slave_config(chan, &host->dma_conf);
1052 desc = dmaengine_prep_slave_sg(chan,
1053 data->sg, sglen, slave_dirn,
1054 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1055 if (!desc)
1056 goto unmap_exit;
1057
1058 host->dma.data_desc = desc;
1059 desc->callback = atmci_dma_complete;
1060 desc->callback_param = host;
1061
1062 return iflags;
1063unmap_exit:
1064 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);
1065 return -ENOMEM;
1066}
1067
1068static void
1069atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
1070{
1071 return;
1072}
1073
1074
1075
1076
1077static void
1078atmci_submit_data_pdc(struct atmel_mci *host, struct mmc_data *data)
1079{
1080 if (data->flags & MMC_DATA_READ)
1081 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
1082 else
1083 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
1084}
1085
1086static void
1087atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
1088{
1089 struct dma_chan *chan = host->data_chan;
1090 struct dma_async_tx_descriptor *desc = host->dma.data_desc;
1091
1092 if (chan) {
1093 dmaengine_submit(desc);
1094 dma_async_issue_pending(chan);
1095 }
1096}
1097
1098static void atmci_stop_transfer(struct atmel_mci *host)
1099{
1100 dev_dbg(&host->pdev->dev,
1101 "(%s) set pending xfer complete\n", __func__);
1102 atmci_set_pending(host, EVENT_XFER_COMPLETE);
1103 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1104}
1105
1106
1107
1108
1109static void atmci_stop_transfer_pdc(struct atmel_mci *host)
1110{
1111 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
1112}
1113
1114static void atmci_stop_transfer_dma(struct atmel_mci *host)
1115{
1116 struct dma_chan *chan = host->data_chan;
1117
1118 if (chan) {
1119 dmaengine_terminate_all(chan);
1120 atmci_dma_cleanup(host);
1121 } else {
1122
1123 dev_dbg(&host->pdev->dev,
1124 "(%s) set pending xfer complete\n", __func__);
1125 atmci_set_pending(host, EVENT_XFER_COMPLETE);
1126 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1127 }
1128}
1129
1130
1131
1132
1133
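/*
 * Start a request on the given slot: reset the controller if needed, set up
 * the data transfer and send the command.  Must be called with host->lock
 * held.
 */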
1134static void atmci_start_request(struct atmel_mci *host,
1135 struct atmel_mci_slot *slot)
1136{
1137 struct mmc_request *mrq;
1138 struct mmc_command *cmd;
1139 struct mmc_data *data;
1140 u32 iflags;
1141 u32 cmdflags;
1142
1143 mrq = slot->mrq;
1144 host->cur_slot = slot;
1145 host->mrq = mrq;
1146
1147 host->pending_events = 0;
1148 host->completed_events = 0;
1149 host->cmd_status = 0;
1150 host->data_status = 0;
1151
1152 dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode);
1153
1154 if (host->need_reset || host->caps.need_reset_after_xfer) {
1155 iflags = atmci_readl(host, ATMCI_IMR);
1156 iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
1157 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1158 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1159 atmci_writel(host, ATMCI_MR, host->mode_reg);
1160 if (host->caps.has_cfg_reg)
1161 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1162 atmci_writel(host, ATMCI_IER, iflags);
1163 host->need_reset = false;
1164 }
1165 atmci_writel(host, ATMCI_SDCR, slot->sdc_reg);
1166
1167 iflags = atmci_readl(host, ATMCI_IMR);
1168 if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
1169 dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
1170 iflags);
1171
1172 if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
1173
1174 atmci_writel(host, ATMCI_CMDR, ATMCI_CMDR_SPCMD_INIT);
1175 while (!(atmci_readl(host, ATMCI_SR) & ATMCI_CMDRDY))
1176 cpu_relax();
1177 }
1178 iflags = 0;
1179 data = mrq->data;
1180 if (data) {
1181 atmci_set_timeout(host, slot, data);
1182
1183
1184 atmci_writel(host, ATMCI_BLKR, ATMCI_BCNT(data->blocks)
1185 | ATMCI_BLKLEN(data->blksz));
1186 dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
1187 ATMCI_BCNT(data->blocks) | ATMCI_BLKLEN(data->blksz));
1188
1189 iflags |= host->prepare_data(host, data);
1190 }
1191
1192 iflags |= ATMCI_CMDRDY;
1193 cmd = mrq->cmd;
1194 cmdflags = atmci_prepare_command(slot->mmc, cmd);
1195
1196
1197
1198
1199
1200
1201
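	/*
	 * With the dmaengine backend the data transfer is submitted before
	 * the command is sent, so the CMDR write is deferred until after
	 * submit_data(); for the PIO and PDC backends the command goes out
	 * first.
	 */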
1202 if (host->submit_data != &atmci_submit_data_dma)
1203 atmci_send_command(host, cmd, cmdflags);
1204
1205 if (data)
1206 host->submit_data(host, data);
1207
1208 if (host->submit_data == &atmci_submit_data_dma)
1209 atmci_send_command(host, cmd, cmdflags);
1210
1211 if (mrq->stop) {
1212 host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
1213 host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
1214 if (!(data->flags & MMC_DATA_WRITE))
1215 host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ;
1216 if (data->flags & MMC_DATA_STREAM)
1217 host->stop_cmdr |= ATMCI_CMDR_STREAM;
1218 else
1219 host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
1220 }
1221
1222
1223
1224
1225
1226
1227
1228 atmci_writel(host, ATMCI_IER, iflags);
1229
1230 mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000));
1231}
1232
1233static void atmci_queue_request(struct atmel_mci *host,
1234 struct atmel_mci_slot *slot, struct mmc_request *mrq)
1235{
1236 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1237 host->state);
1238
1239 spin_lock_bh(&host->lock);
1240 slot->mrq = mrq;
1241 if (host->state == STATE_IDLE) {
1242 host->state = STATE_SENDING_CMD;
1243 atmci_start_request(host, slot);
1244 } else {
1245 dev_dbg(&host->pdev->dev, "queue request\n");
1246 list_add_tail(&slot->queue_node, &host->queue);
1247 }
1248 spin_unlock_bh(&host->lock);
1249}
1250
static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct atmel_mci_slot *slot = mmc_priv(mmc);
	struct atmel_mci *host = slot->host;
	struct mmc_data *data;

	WARN_ON(slot->mrq);
	dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);

	pm_runtime_get_sync(&host->pdev->dev);

	if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	data = mrq->data;
	if (data && data->blocks > 1 && data->blksz & 3) {
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	atmci_queue_request(host, slot, mrq);
}
1285
1286static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1287{
1288 struct atmel_mci_slot *slot = mmc_priv(mmc);
1289 struct atmel_mci *host = slot->host;
1290 unsigned int i;
1291
1292 pm_runtime_get_sync(&host->pdev->dev);
1293
1294 slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
1295 switch (ios->bus_width) {
1296 case MMC_BUS_WIDTH_1:
1297 slot->sdc_reg |= ATMCI_SDCBUS_1BIT;
1298 break;
1299 case MMC_BUS_WIDTH_4:
1300 slot->sdc_reg |= ATMCI_SDCBUS_4BIT;
1301 break;
1302 }
1303
1304 if (ios->clock) {
1305 unsigned int clock_min = ~0U;
1306 int clkdiv;
1307
1308 spin_lock_bh(&host->lock);
1309 if (!host->mode_reg) {
1310 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1311 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1312 if (host->caps.has_cfg_reg)
1313 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1314 }
1315
1316
1317
1318
1319
1320 slot->clock = ios->clock;
1321 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1322 if (host->slot[i] && host->slot[i]->clock
1323 && host->slot[i]->clock < clock_min)
1324 clock_min = host->slot[i]->clock;
1325 }
1326
1327
1328 if (host->caps.has_odd_clk_div) {
1329 clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
1330 if (clkdiv < 0) {
1331 dev_warn(&mmc->class_dev,
1332 "clock %u too fast; using %lu\n",
1333 clock_min, host->bus_hz / 2);
1334 clkdiv = 0;
1335 } else if (clkdiv > 511) {
1336 dev_warn(&mmc->class_dev,
1337 "clock %u too slow; using %lu\n",
1338 clock_min, host->bus_hz / (511 + 2));
1339 clkdiv = 511;
1340 }
1341 host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1)
1342 | ATMCI_MR_CLKODD(clkdiv & 1);
1343 } else {
1344 clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1;
1345 if (clkdiv > 255) {
1346 dev_warn(&mmc->class_dev,
1347 "clock %u too slow; using %lu\n",
1348 clock_min, host->bus_hz / (2 * 256));
1349 clkdiv = 255;
1350 }
1351 host->mode_reg = ATMCI_MR_CLKDIV(clkdiv);
1352 }
1353
1354
1355
1356
1357
1358
1359 if (host->caps.has_rwproof)
1360 host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF);
1361
1362 if (host->caps.has_cfg_reg) {
1363
1364 if (ios->timing == MMC_TIMING_SD_HS)
1365 host->cfg_reg |= ATMCI_CFG_HSMODE;
1366 else
1367 host->cfg_reg &= ~ATMCI_CFG_HSMODE;
1368 }
1369
1370 if (list_empty(&host->queue)) {
1371 atmci_writel(host, ATMCI_MR, host->mode_reg);
1372 if (host->caps.has_cfg_reg)
1373 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1374 } else {
1375 host->need_clock_update = true;
1376 }
1377
1378 spin_unlock_bh(&host->lock);
1379 } else {
1380 bool any_slot_active = false;
1381
1382 spin_lock_bh(&host->lock);
1383 slot->clock = 0;
1384 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1385 if (host->slot[i] && host->slot[i]->clock) {
1386 any_slot_active = true;
1387 break;
1388 }
1389 }
1390 if (!any_slot_active) {
1391 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
1392 if (host->mode_reg) {
1393 atmci_readl(host, ATMCI_MR);
1394 }
1395 host->mode_reg = 0;
1396 }
1397 spin_unlock_bh(&host->lock);
1398 }
1399
1400 switch (ios->power_mode) {
1401 case MMC_POWER_OFF:
1402 if (!IS_ERR(mmc->supply.vmmc))
1403 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1404 break;
1405 case MMC_POWER_UP:
1406 set_bit(ATMCI_CARD_NEED_INIT, &slot->flags);
1407 if (!IS_ERR(mmc->supply.vmmc))
1408 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
1409 break;
1410 default:
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423 break;
1424 }
1425
1426 pm_runtime_mark_last_busy(&host->pdev->dev);
1427 pm_runtime_put_autosuspend(&host->pdev->dev);
1428}
1429
1430static int atmci_get_ro(struct mmc_host *mmc)
1431{
1432 int read_only = -ENOSYS;
1433 struct atmel_mci_slot *slot = mmc_priv(mmc);
1434
1435 if (gpio_is_valid(slot->wp_pin)) {
1436 read_only = gpio_get_value(slot->wp_pin);
1437 dev_dbg(&mmc->class_dev, "card is %s\n",
1438 read_only ? "read-only" : "read-write");
1439 }
1440
1441 return read_only;
1442}
1443
1444static int atmci_get_cd(struct mmc_host *mmc)
1445{
1446 int present = -ENOSYS;
1447 struct atmel_mci_slot *slot = mmc_priv(mmc);
1448
1449 if (gpio_is_valid(slot->detect_pin)) {
1450 present = !(gpio_get_value(slot->detect_pin) ^
1451 slot->detect_is_active_high);
1452 dev_dbg(&mmc->class_dev, "card is %spresent\n",
1453 present ? "" : "not ");
1454 }
1455
1456 return present;
1457}
1458
1459static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1460{
1461 struct atmel_mci_slot *slot = mmc_priv(mmc);
1462 struct atmel_mci *host = slot->host;
1463
1464 if (enable)
1465 atmci_writel(host, ATMCI_IER, slot->sdio_irq);
1466 else
1467 atmci_writel(host, ATMCI_IDR, slot->sdio_irq);
1468}
1469
static const struct mmc_host_ops atmci_ops = {
	.request	= atmci_request,
	.set_ios	= atmci_set_ios,
	.get_ro		= atmci_get_ro,
	.get_cd		= atmci_get_cd,
	.enable_sdio_irq = atmci_enable_sdio_irq,
};
1477
1478
1479static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
1480 __releases(&host->lock)
1481 __acquires(&host->lock)
1482{
1483 struct atmel_mci_slot *slot = NULL;
1484 struct mmc_host *prev_mmc = host->cur_slot->mmc;
1485
1486 WARN_ON(host->cmd || host->data);
1487
1488
1489
1490
1491
1492
1493 if (host->need_clock_update) {
1494 atmci_writel(host, ATMCI_MR, host->mode_reg);
1495 if (host->caps.has_cfg_reg)
1496 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1497 }
1498
1499 host->cur_slot->mrq = NULL;
1500 host->mrq = NULL;
1501 if (!list_empty(&host->queue)) {
1502 slot = list_entry(host->queue.next,
1503 struct atmel_mci_slot, queue_node);
1504 list_del(&slot->queue_node);
1505 dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
1506 mmc_hostname(slot->mmc));
1507 host->state = STATE_SENDING_CMD;
1508 atmci_start_request(host, slot);
1509 } else {
1510 dev_vdbg(&host->pdev->dev, "list empty\n");
1511 host->state = STATE_IDLE;
1512 }
1513
1514 del_timer(&host->timer);
1515
1516 spin_unlock(&host->lock);
1517 mmc_request_done(prev_mmc, mrq);
1518 spin_lock(&host->lock);
1519
1520 pm_runtime_mark_last_busy(&host->pdev->dev);
1521 pm_runtime_put_autosuspend(&host->pdev->dev);
1522}
1523
1524static void atmci_command_complete(struct atmel_mci *host,
1525 struct mmc_command *cmd)
1526{
1527 u32 status = host->cmd_status;
1528
1529
1530 cmd->resp[0] = atmci_readl(host, ATMCI_RSPR);
1531 cmd->resp[1] = atmci_readl(host, ATMCI_RSPR);
1532 cmd->resp[2] = atmci_readl(host, ATMCI_RSPR);
1533 cmd->resp[3] = atmci_readl(host, ATMCI_RSPR);
1534
1535 if (status & ATMCI_RTOE)
1536 cmd->error = -ETIMEDOUT;
1537 else if ((cmd->flags & MMC_RSP_CRC) && (status & ATMCI_RCRCE))
1538 cmd->error = -EILSEQ;
1539 else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
1540 cmd->error = -EIO;
1541 else if (host->mrq->data && (host->mrq->data->blksz & 3)) {
1542 if (host->caps.need_blksz_mul_4) {
1543 cmd->error = -EINVAL;
1544 host->need_reset = 1;
1545 }
1546 } else
1547 cmd->error = 0;
1548}
1549
1550static void atmci_detect_change(unsigned long data)
1551{
1552 struct atmel_mci_slot *slot = (struct atmel_mci_slot *)data;
1553 bool present;
1554 bool present_old;
1555
1556
1557
1558
1559
1560
1561
1562 smp_rmb();
1563 if (test_bit(ATMCI_SHUTDOWN, &slot->flags))
1564 return;
1565
1566 enable_irq(gpio_to_irq(slot->detect_pin));
1567 present = !(gpio_get_value(slot->detect_pin) ^
1568 slot->detect_is_active_high);
1569 present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);
1570
1571 dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
1572 present, present_old);
1573
1574 if (present != present_old) {
1575 struct atmel_mci *host = slot->host;
1576 struct mmc_request *mrq;
1577
1578 dev_dbg(&slot->mmc->class_dev, "card %s\n",
1579 present ? "inserted" : "removed");
1580
1581 spin_lock(&host->lock);
1582
1583 if (!present)
1584 clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
1585 else
1586 set_bit(ATMCI_CARD_PRESENT, &slot->flags);
1587
1588
1589 mrq = slot->mrq;
1590 if (mrq) {
1591 if (mrq == host->mrq) {
1592
1593
1594
1595
1596 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
1597 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
1598 atmci_writel(host, ATMCI_MR, host->mode_reg);
1599 if (host->caps.has_cfg_reg)
1600 atmci_writel(host, ATMCI_CFG, host->cfg_reg);
1601
1602 host->data = NULL;
1603 host->cmd = NULL;
1604
1605 switch (host->state) {
1606 case STATE_IDLE:
1607 break;
1608 case STATE_SENDING_CMD:
1609 mrq->cmd->error = -ENOMEDIUM;
1610 if (mrq->data)
1611 host->stop_transfer(host);
1612 break;
1613 case STATE_DATA_XFER:
1614 mrq->data->error = -ENOMEDIUM;
1615 host->stop_transfer(host);
1616 break;
1617 case STATE_WAITING_NOTBUSY:
1618 mrq->data->error = -ENOMEDIUM;
1619 break;
1620 case STATE_SENDING_STOP:
1621 mrq->stop->error = -ENOMEDIUM;
1622 break;
1623 case STATE_END_REQUEST:
1624 break;
1625 }
1626
1627 atmci_request_end(host, mrq);
1628 } else {
1629 list_del(&slot->queue_node);
1630 mrq->cmd->error = -ENOMEDIUM;
1631 if (mrq->data)
1632 mrq->data->error = -ENOMEDIUM;
1633 if (mrq->stop)
1634 mrq->stop->error = -ENOMEDIUM;
1635
1636 spin_unlock(&host->lock);
1637 mmc_request_done(slot->mmc, mrq);
1638 spin_lock(&host->lock);
1639 }
1640 }
1641 spin_unlock(&host->lock);
1642
1643 mmc_detect_change(slot->mmc, 0);
1644 }
1645}
1646
1647static void atmci_tasklet_func(unsigned long priv)
1648{
1649 struct atmel_mci *host = (struct atmel_mci *)priv;
1650 struct mmc_request *mrq = host->mrq;
1651 struct mmc_data *data = host->data;
1652 enum atmel_mci_state state = host->state;
1653 enum atmel_mci_state prev_state;
1654 u32 status;
1655
1656 spin_lock(&host->lock);
1657
1658 state = host->state;
1659
1660 dev_vdbg(&host->pdev->dev,
1661 "tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
1662 state, host->pending_events, host->completed_events,
1663 atmci_readl(host, ATMCI_IMR));
1664
1665 do {
1666 prev_state = state;
1667 dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state);
1668
1669 switch (state) {
1670 case STATE_IDLE:
1671 break;
1672
1673 case STATE_SENDING_CMD:
1674
1675
1676
1677
1678
1679
1680 dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
1681 if (!atmci_test_and_clear_pending(host,
1682 EVENT_CMD_RDY))
1683 break;
1684
1685 dev_dbg(&host->pdev->dev, "set completed cmd ready\n");
1686 host->cmd = NULL;
1687 atmci_set_completed(host, EVENT_CMD_RDY);
1688 atmci_command_complete(host, mrq->cmd);
1689 if (mrq->data) {
1690 dev_dbg(&host->pdev->dev,
1691 "command with data transfer");
1692
1693
1694
1695
1696 if (mrq->cmd->error) {
1697 host->stop_transfer(host);
1698 host->data = NULL;
1699 atmci_writel(host, ATMCI_IDR,
1700 ATMCI_TXRDY | ATMCI_RXRDY
1701 | ATMCI_DATA_ERROR_FLAGS);
1702 state = STATE_END_REQUEST;
1703 } else
1704 state = STATE_DATA_XFER;
1705 } else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
1706 dev_dbg(&host->pdev->dev,
1707 "command response need waiting notbusy");
1708 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1709 state = STATE_WAITING_NOTBUSY;
1710 } else
1711 state = STATE_END_REQUEST;
1712
1713 break;
1714
1715 case STATE_DATA_XFER:
1716 if (atmci_test_and_clear_pending(host,
1717 EVENT_DATA_ERROR)) {
1718 dev_dbg(&host->pdev->dev, "set completed data error\n");
1719 atmci_set_completed(host, EVENT_DATA_ERROR);
1720 state = STATE_END_REQUEST;
1721 break;
1722 }
1723
1724
1725
1726
1727
1728
1729
1730
1731 dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");
1732 if (!atmci_test_and_clear_pending(host,
1733 EVENT_XFER_COMPLETE))
1734 break;
1735
1736 dev_dbg(&host->pdev->dev,
1737 "(%s) set completed xfer complete\n",
1738 __func__);
1739 atmci_set_completed(host, EVENT_XFER_COMPLETE);
1740
1741 if (host->caps.need_notbusy_for_read_ops ||
1742 (host->data->flags & MMC_DATA_WRITE)) {
1743 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1744 state = STATE_WAITING_NOTBUSY;
1745 } else if (host->mrq->stop) {
1746 atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
1747 atmci_send_stop_cmd(host, data);
1748 state = STATE_SENDING_STOP;
1749 } else {
1750 host->data = NULL;
1751 data->bytes_xfered = data->blocks * data->blksz;
1752 data->error = 0;
1753 state = STATE_END_REQUEST;
1754 }
1755 break;
1756
1757 case STATE_WAITING_NOTBUSY:
1758
1759
1760
1761
1762
1763
1764 dev_dbg(&host->pdev->dev, "FSM: not busy?\n");
1765 if (!atmci_test_and_clear_pending(host,
1766 EVENT_NOTBUSY))
1767 break;
1768
1769 dev_dbg(&host->pdev->dev, "set completed not busy\n");
1770 atmci_set_completed(host, EVENT_NOTBUSY);
1771
1772 if (host->data) {
1773
1774
1775
1776
1777
1778 if (host->mrq->stop) {
1779 atmci_writel(host, ATMCI_IER,
1780 ATMCI_CMDRDY);
1781 atmci_send_stop_cmd(host, data);
1782 state = STATE_SENDING_STOP;
1783 } else {
1784 host->data = NULL;
1785 data->bytes_xfered = data->blocks
1786 * data->blksz;
1787 data->error = 0;
1788 state = STATE_END_REQUEST;
1789 }
1790 } else
1791 state = STATE_END_REQUEST;
1792 break;
1793
1794 case STATE_SENDING_STOP:
1795
1796
1797
1798
1799
1800
1801 dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
1802 if (!atmci_test_and_clear_pending(host,
1803 EVENT_CMD_RDY))
1804 break;
1805
1806 dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
1807 host->cmd = NULL;
1808 data->bytes_xfered = data->blocks * data->blksz;
1809 data->error = 0;
1810 atmci_command_complete(host, mrq->stop);
1811 if (mrq->stop->error) {
1812 host->stop_transfer(host);
1813 atmci_writel(host, ATMCI_IDR,
1814 ATMCI_TXRDY | ATMCI_RXRDY
1815 | ATMCI_DATA_ERROR_FLAGS);
1816 state = STATE_END_REQUEST;
1817 } else {
1818 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1819 state = STATE_WAITING_NOTBUSY;
1820 }
1821 host->data = NULL;
1822 break;
1823
1824 case STATE_END_REQUEST:
1825 atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY
1826 | ATMCI_DATA_ERROR_FLAGS);
1827 status = host->data_status;
1828 if (unlikely(status)) {
1829 host->stop_transfer(host);
1830 host->data = NULL;
1831 if (data) {
1832 if (status & ATMCI_DTOE) {
1833 data->error = -ETIMEDOUT;
1834 } else if (status & ATMCI_DCRCE) {
1835 data->error = -EILSEQ;
1836 } else {
1837 data->error = -EIO;
1838 }
1839 }
1840 }
1841
1842 atmci_request_end(host, host->mrq);
1843 state = STATE_IDLE;
1844 break;
1845 }
1846 } while (state != prev_state);
1847
1848 host->state = state;
1849
1850 spin_unlock(&host->lock);
1851}
1852
1853static void atmci_read_data_pio(struct atmel_mci *host)
1854{
1855 struct scatterlist *sg = host->sg;
1856 void *buf = sg_virt(sg);
1857 unsigned int offset = host->pio_offset;
1858 struct mmc_data *data = host->data;
1859 u32 value;
1860 u32 status;
1861 unsigned int nbytes = 0;
1862
1863 do {
1864 value = atmci_readl(host, ATMCI_RDR);
1865 if (likely(offset + 4 <= sg->length)) {
1866 put_unaligned(value, (u32 *)(buf + offset));
1867
1868 offset += 4;
1869 nbytes += 4;
1870
1871 if (offset == sg->length) {
1872 flush_dcache_page(sg_page(sg));
1873 host->sg = sg = sg_next(sg);
1874 host->sg_len--;
1875 if (!sg || !host->sg_len)
1876 goto done;
1877
1878 offset = 0;
1879 buf = sg_virt(sg);
1880 }
1881 } else {
1882 unsigned int remaining = sg->length - offset;
1883 memcpy(buf + offset, &value, remaining);
1884 nbytes += remaining;
1885
1886 flush_dcache_page(sg_page(sg));
1887 host->sg = sg = sg_next(sg);
1888 host->sg_len--;
1889 if (!sg || !host->sg_len)
1890 goto done;
1891
1892 offset = 4 - remaining;
1893 buf = sg_virt(sg);
1894 memcpy(buf, (u8 *)&value + remaining, offset);
1895 nbytes += offset;
1896 }
1897
1898 status = atmci_readl(host, ATMCI_SR);
1899 if (status & ATMCI_DATA_ERROR_FLAGS) {
1900 atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_RXRDY
1901 | ATMCI_DATA_ERROR_FLAGS));
1902 host->data_status = status;
1903 data->bytes_xfered += nbytes;
1904 return;
1905 }
1906 } while (status & ATMCI_RXRDY);
1907
1908 host->pio_offset = offset;
1909 data->bytes_xfered += nbytes;
1910
1911 return;
1912
1913done:
1914 atmci_writel(host, ATMCI_IDR, ATMCI_RXRDY);
1915 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1916 data->bytes_xfered += nbytes;
1917 smp_wmb();
1918 atmci_set_pending(host, EVENT_XFER_COMPLETE);
1919}
1920
1921static void atmci_write_data_pio(struct atmel_mci *host)
1922{
1923 struct scatterlist *sg = host->sg;
1924 void *buf = sg_virt(sg);
1925 unsigned int offset = host->pio_offset;
1926 struct mmc_data *data = host->data;
1927 u32 value;
1928 u32 status;
1929 unsigned int nbytes = 0;
1930
1931 do {
1932 if (likely(offset + 4 <= sg->length)) {
1933 value = get_unaligned((u32 *)(buf + offset));
1934 atmci_writel(host, ATMCI_TDR, value);
1935
1936 offset += 4;
1937 nbytes += 4;
1938 if (offset == sg->length) {
1939 host->sg = sg = sg_next(sg);
1940 host->sg_len--;
1941 if (!sg || !host->sg_len)
1942 goto done;
1943
1944 offset = 0;
1945 buf = sg_virt(sg);
1946 }
1947 } else {
1948 unsigned int remaining = sg->length - offset;
1949
1950 value = 0;
1951 memcpy(&value, buf + offset, remaining);
1952 nbytes += remaining;
1953
1954 host->sg = sg = sg_next(sg);
1955 host->sg_len--;
1956 if (!sg || !host->sg_len) {
1957 atmci_writel(host, ATMCI_TDR, value);
1958 goto done;
1959 }
1960
1961 offset = 4 - remaining;
1962 buf = sg_virt(sg);
1963 memcpy((u8 *)&value + remaining, buf, offset);
1964 atmci_writel(host, ATMCI_TDR, value);
1965 nbytes += offset;
1966 }
1967
1968 status = atmci_readl(host, ATMCI_SR);
1969 if (status & ATMCI_DATA_ERROR_FLAGS) {
1970 atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_TXRDY
1971 | ATMCI_DATA_ERROR_FLAGS));
1972 host->data_status = status;
1973 data->bytes_xfered += nbytes;
1974 return;
1975 }
1976 } while (status & ATMCI_TXRDY);
1977
1978 host->pio_offset = offset;
1979 data->bytes_xfered += nbytes;
1980
1981 return;
1982
1983done:
1984 atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY);
1985 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1986 data->bytes_xfered += nbytes;
1987 smp_wmb();
1988 atmci_set_pending(host, EVENT_XFER_COMPLETE);
1989}
1990
1991static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
1992{
1993 int i;
1994
1995 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
1996 struct atmel_mci_slot *slot = host->slot[i];
1997 if (slot && (status & slot->sdio_irq)) {
1998 mmc_signal_sdio_irq(slot->mmc);
1999 }
2000 }
2001}
2002
2003
2004static irqreturn_t atmci_interrupt(int irq, void *dev_id)
2005{
2006 struct atmel_mci *host = dev_id;
2007 u32 status, mask, pending;
2008 unsigned int pass_count = 0;
2009
2010 do {
2011 status = atmci_readl(host, ATMCI_SR);
2012 mask = atmci_readl(host, ATMCI_IMR);
2013 pending = status & mask;
2014 if (!pending)
2015 break;
2016
2017 if (pending & ATMCI_DATA_ERROR_FLAGS) {
2018 dev_dbg(&host->pdev->dev, "IRQ: data error\n");
2019 atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
2020 | ATMCI_RXRDY | ATMCI_TXRDY
2021 | ATMCI_ENDRX | ATMCI_ENDTX
2022 | ATMCI_RXBUFF | ATMCI_TXBUFE);
2023
2024 host->data_status = status;
2025 dev_dbg(&host->pdev->dev, "set pending data error\n");
2026 smp_wmb();
2027 atmci_set_pending(host, EVENT_DATA_ERROR);
2028 tasklet_schedule(&host->tasklet);
2029 }
2030
2031 if (pending & ATMCI_TXBUFE) {
2032 dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n");
2033 atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
2034 atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
2035
2036
2037
2038
2039
2040 if (host->data_size) {
2041 atmci_pdc_set_both_buf(host, XFER_TRANSMIT);
2042 atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
2043 atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE);
2044 } else {
2045 atmci_pdc_complete(host);
2046 }
2047 } else if (pending & ATMCI_ENDTX) {
2048 dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n");
2049 atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
2050
2051 if (host->data_size) {
2052 atmci_pdc_set_single_buf(host,
2053 XFER_TRANSMIT, PDC_SECOND_BUF);
2054 atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
2055 }
2056 }
2057
2058 if (pending & ATMCI_RXBUFF) {
2059 dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n");
2060 atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
2061 atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
2062
2063
2064
2065
2066
2067 if (host->data_size) {
2068 atmci_pdc_set_both_buf(host, XFER_RECEIVE);
2069 atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
2070 atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF);
2071 } else {
2072 atmci_pdc_complete(host);
2073 }
2074 } else if (pending & ATMCI_ENDRX) {
2075 dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n");
2076 atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
2077
2078 if (host->data_size) {
2079 atmci_pdc_set_single_buf(host,
2080 XFER_RECEIVE, PDC_SECOND_BUF);
2081 atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
2082 }
2083 }
2084
2085
2086
2087
2088
2089
2090
2091 if (pending & ATMCI_BLKE) {
2092 dev_dbg(&host->pdev->dev, "IRQ: blke\n");
2093 atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
2094 smp_wmb();
2095 dev_dbg(&host->pdev->dev, "set pending notbusy\n");
2096 atmci_set_pending(host, EVENT_NOTBUSY);
2097 tasklet_schedule(&host->tasklet);
2098 }
2099
2100 if (pending & ATMCI_NOTBUSY) {
2101 dev_dbg(&host->pdev->dev, "IRQ: not_busy\n");
2102 atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
2103 smp_wmb();
2104 dev_dbg(&host->pdev->dev, "set pending notbusy\n");
2105 atmci_set_pending(host, EVENT_NOTBUSY);
2106 tasklet_schedule(&host->tasklet);
2107 }
2108
2109 if (pending & ATMCI_RXRDY)
2110 atmci_read_data_pio(host);
2111 if (pending & ATMCI_TXRDY)
2112 atmci_write_data_pio(host);
2113
2114 if (pending & ATMCI_CMDRDY) {
2115 dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n");
2116 atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
2117 host->cmd_status = status;
2118 smp_wmb();
2119 dev_dbg(&host->pdev->dev, "set pending cmd rdy\n");
2120 atmci_set_pending(host, EVENT_CMD_RDY);
2121 tasklet_schedule(&host->tasklet);
2122 }
2123
2124 if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
2125 atmci_sdio_interrupt(host, status);
2126
2127 } while (pass_count++ < 5);
2128
2129 return pass_count ? IRQ_HANDLED : IRQ_NONE;
2130}
2131
2132static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
2133{
2134 struct atmel_mci_slot *slot = dev_id;
2135
2136
2137
2138
2139
2140
2141 disable_irq_nosync(irq);
2142 mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20));
2143
2144 return IRQ_HANDLED;
2145}
2146
2147static int atmci_init_slot(struct atmel_mci *host,
2148 struct mci_slot_pdata *slot_data, unsigned int id,
2149 u32 sdc_reg, u32 sdio_irq)
2150{
2151 struct mmc_host *mmc;
2152 struct atmel_mci_slot *slot;
2153
2154 mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev);
2155 if (!mmc)
2156 return -ENOMEM;
2157
2158 slot = mmc_priv(mmc);
2159 slot->mmc = mmc;
2160 slot->host = host;
2161 slot->detect_pin = slot_data->detect_pin;
2162 slot->wp_pin = slot_data->wp_pin;
2163 slot->detect_is_active_high = slot_data->detect_is_active_high;
2164 slot->sdc_reg = sdc_reg;
2165 slot->sdio_irq = sdio_irq;
2166
2167 dev_dbg(&mmc->class_dev,
2168 "slot[%u]: bus_width=%u, detect_pin=%d, "
2169 "detect_is_active_high=%s, wp_pin=%d\n",
2170 id, slot_data->bus_width, slot_data->detect_pin,
2171 slot_data->detect_is_active_high ? "true" : "false",
2172 slot_data->wp_pin);
2173
2174 mmc->ops = &atmci_ops;
2175 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
2176 mmc->f_max = host->bus_hz / 2;
2177 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2178 if (sdio_irq)
2179 mmc->caps |= MMC_CAP_SDIO_IRQ;
2180 if (host->caps.has_highspeed)
2181 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
2182
2183
2184
2185
2186
2187 if ((slot_data->bus_width >= 4) && host->caps.has_rwproof)
2188 mmc->caps |= MMC_CAP_4_BIT_DATA;
2189
2190 if (atmci_get_version(host) < 0x200) {
2191 mmc->max_segs = 256;
2192 mmc->max_blk_size = 4095;
2193 mmc->max_blk_count = 256;
2194 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2195 mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs;
2196 } else {
2197 mmc->max_segs = 64;
2198 mmc->max_req_size = 32768 * 512;
2199 mmc->max_blk_size = 32768;
2200 mmc->max_blk_count = 512;
2201 }
2202
2203
2204 set_bit(ATMCI_CARD_PRESENT, &slot->flags);
2205 if (gpio_is_valid(slot->detect_pin)) {
2206 if (devm_gpio_request(&host->pdev->dev, slot->detect_pin,
2207 "mmc_detect")) {
2208 dev_dbg(&mmc->class_dev, "no detect pin available\n");
2209 slot->detect_pin = -EBUSY;
2210 } else if (gpio_get_value(slot->detect_pin) ^
2211 slot->detect_is_active_high) {
2212 clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
2213 }
2214 }
2215
2216 if (!gpio_is_valid(slot->detect_pin)) {
2217 if (slot_data->non_removable)
2218 mmc->caps |= MMC_CAP_NONREMOVABLE;
2219 else
2220 mmc->caps |= MMC_CAP_NEEDS_POLL;
2221 }
2222
2223 if (gpio_is_valid(slot->wp_pin)) {
2224 if (devm_gpio_request(&host->pdev->dev, slot->wp_pin,
2225 "mmc_wp")) {
2226 dev_dbg(&mmc->class_dev, "no WP pin available\n");
2227 slot->wp_pin = -EBUSY;
2228 }
2229 }
2230
2231 host->slot[id] = slot;
2232 mmc_regulator_get_supply(mmc);
2233 mmc_add_host(mmc);
2234
2235 if (gpio_is_valid(slot->detect_pin)) {
2236 int ret;
2237
2238 setup_timer(&slot->detect_timer, atmci_detect_change,
2239 (unsigned long)slot);
2240
2241 ret = request_irq(gpio_to_irq(slot->detect_pin),
2242 atmci_detect_interrupt,
2243 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
2244 "mmc-detect", slot);
2245 if (ret) {
2246 dev_dbg(&mmc->class_dev,
2247 "could not request IRQ %d for detect pin\n",
2248 gpio_to_irq(slot->detect_pin));
2249 slot->detect_pin = -EBUSY;
2250 }
2251 }
2252
2253 atmci_init_debugfs(slot);
2254
2255 return 0;
2256}
2257
2258static void atmci_cleanup_slot(struct atmel_mci_slot *slot,
2259 unsigned int id)
2260{
2261
2262
2263 set_bit(ATMCI_SHUTDOWN, &slot->flags);
2264 smp_wmb();
2265
2266 mmc_remove_host(slot->mmc);
2267
2268 if (gpio_is_valid(slot->detect_pin)) {
2269 int pin = slot->detect_pin;
2270
2271 free_irq(gpio_to_irq(pin), slot);
2272 del_timer_sync(&slot->detect_timer);
2273 }
2274
2275 slot->host->slot[id] = NULL;
2276 mmc_free_host(slot->mmc);
2277}
2278
2279static int atmci_configure_dma(struct atmel_mci *host)
2280{
2281 host->dma.chan = dma_request_slave_channel_reason(&host->pdev->dev,
2282 "rxtx");
2283 if (IS_ERR(host->dma.chan))
2284 return PTR_ERR(host->dma.chan);
2285
2286 dev_info(&host->pdev->dev, "using %s for DMA transfers\n",
2287 dma_chan_name(host->dma.chan));
2288
2289 host->dma_conf.src_addr = host->mapbase + ATMCI_RDR;
2290 host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2291 host->dma_conf.src_maxburst = 1;
2292 host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR;
2293 host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2294 host->dma_conf.dst_maxburst = 1;
2295 host->dma_conf.device_fc = false;
2296
2297 return 0;
2298}
2299
2300
2301
2302
2303
2304
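/*
 * Determine the controller capabilities from its version register.  The
 * switch below deliberately falls through: each newer IP revision keeps
 * the features of the older ones and adds its own.
 */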
2305static void atmci_get_cap(struct atmel_mci *host)
2306{
2307 unsigned int version;
2308
2309 version = atmci_get_version(host);
2310 dev_info(&host->pdev->dev,
2311 "version: 0x%x\n", version);
2312
2313 host->caps.has_dma_conf_reg = 0;
2314 host->caps.has_pdc = ATMCI_PDC_CONNECTED;
2315 host->caps.has_cfg_reg = 0;
2316 host->caps.has_cstor_reg = 0;
2317 host->caps.has_highspeed = 0;
2318 host->caps.has_rwproof = 0;
2319 host->caps.has_odd_clk_div = 0;
2320 host->caps.has_bad_data_ordering = 1;
2321 host->caps.need_reset_after_xfer = 1;
2322 host->caps.need_blksz_mul_4 = 1;
2323 host->caps.need_notbusy_for_read_ops = 0;
2324
2325
2326 switch (version & 0xf00) {
2327 case 0x600:
2328 case 0x500:
2329 host->caps.has_odd_clk_div = 1;
2330 case 0x400:
2331 case 0x300:
2332 host->caps.has_dma_conf_reg = 1;
2333 host->caps.has_pdc = 0;
2334 host->caps.has_cfg_reg = 1;
2335 host->caps.has_cstor_reg = 1;
2336 host->caps.has_highspeed = 1;
2337 case 0x200:
2338 host->caps.has_rwproof = 1;
2339 host->caps.need_blksz_mul_4 = 0;
2340 host->caps.need_notbusy_for_read_ops = 1;
2341 case 0x100:
2342 host->caps.has_bad_data_ordering = 0;
2343 host->caps.need_reset_after_xfer = 0;
2344 case 0x0:
2345 break;
2346 default:
2347 host->caps.has_pdc = 0;
2348 dev_warn(&host->pdev->dev,
2349 "Unmanaged mci version, set minimum capabilities\n");
2350 break;
2351 }
2352}
2353
2354static int atmci_probe(struct platform_device *pdev)
2355{
2356 struct mci_platform_data *pdata;
2357 struct atmel_mci *host;
2358 struct resource *regs;
2359 unsigned int nr_slots;
2360 int irq;
2361 int ret, i;
2362
2363 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2364 if (!regs)
2365 return -ENXIO;
2366 pdata = pdev->dev.platform_data;
2367 if (!pdata) {
2368 pdata = atmci_of_init(pdev);
2369 if (IS_ERR(pdata)) {
2370 dev_err(&pdev->dev, "platform data not available\n");
2371 return PTR_ERR(pdata);
2372 }
2373 }
2374
2375 irq = platform_get_irq(pdev, 0);
2376 if (irq < 0)
2377 return irq;
2378
2379 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
2380 if (!host)
2381 return -ENOMEM;
2382
2383 host->pdev = pdev;
2384 spin_lock_init(&host->lock);
2385 INIT_LIST_HEAD(&host->queue);
2386
2387 host->mck = devm_clk_get(&pdev->dev, "mci_clk");
2388 if (IS_ERR(host->mck))
2389 return PTR_ERR(host->mck);
2390
2391 host->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
2392 if (!host->regs)
2393 return -ENOMEM;
2394
2395 ret = clk_prepare_enable(host->mck);
2396 if (ret)
2397 return ret;
2398
2399 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
2400 host->bus_hz = clk_get_rate(host->mck);
2401
2402 host->mapbase = regs->start;
2403
2404 tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host);
2405
2406 ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host);
2407 if (ret) {
2408 clk_disable_unprepare(host->mck);
2409 return ret;
2410 }
2411
2412
2413 atmci_get_cap(host);
2414 ret = atmci_configure_dma(host);
2415 if (ret == -EPROBE_DEFER)
2416 goto err_dma_probe_defer;
2417 if (ret == 0) {
2418 host->prepare_data = &atmci_prepare_data_dma;
2419 host->submit_data = &atmci_submit_data_dma;
2420 host->stop_transfer = &atmci_stop_transfer_dma;
2421 } else if (host->caps.has_pdc) {
2422 dev_info(&pdev->dev, "using PDC\n");
2423 host->prepare_data = &atmci_prepare_data_pdc;
2424 host->submit_data = &atmci_submit_data_pdc;
2425 host->stop_transfer = &atmci_stop_transfer_pdc;
2426 } else {
2427 dev_info(&pdev->dev, "using PIO\n");
2428 host->prepare_data = &atmci_prepare_data;
2429 host->submit_data = &atmci_submit_data;
2430 host->stop_transfer = &atmci_stop_transfer;
2431 }
2432
2433 platform_set_drvdata(pdev, host);
2434
2435 setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
2436
2437 pm_runtime_get_noresume(&pdev->dev);
2438 pm_runtime_set_active(&pdev->dev);
2439 pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_DELAY);
2440 pm_runtime_use_autosuspend(&pdev->dev);
2441 pm_runtime_enable(&pdev->dev);
2442
2443
2444 nr_slots = 0;
2445 ret = -ENODEV;
2446 if (pdata->slot[0].bus_width) {
2447 ret = atmci_init_slot(host, &pdata->slot[0],
2448 0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
2449 if (!ret) {
2450 nr_slots++;
2451 host->buf_size = host->slot[0]->mmc->max_req_size;
2452 }
2453 }
2454 if (pdata->slot[1].bus_width) {
2455 ret = atmci_init_slot(host, &pdata->slot[1],
2456 1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
2457 if (!ret) {
2458 nr_slots++;
2459 if (host->slot[1]->mmc->max_req_size > host->buf_size)
2460 host->buf_size =
2461 host->slot[1]->mmc->max_req_size;
2462 }
2463 }
2464
2465 if (!nr_slots) {
2466 dev_err(&pdev->dev, "init failed: no slot defined\n");
2467 goto err_init_slot;
2468 }
2469
2470 if (!host->caps.has_rwproof) {
2471 host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
2472 &host->buf_phys_addr,
2473 GFP_KERNEL);
2474 if (!host->buffer) {
2475 ret = -ENOMEM;
2476 dev_err(&pdev->dev, "buffer allocation failed\n");
2477 goto err_dma_alloc;
2478 }
2479 }
2480
2481 dev_info(&pdev->dev,
2482 "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
2483 host->mapbase, irq, nr_slots);
2484
2485 pm_runtime_mark_last_busy(&host->pdev->dev);
2486 pm_runtime_put_autosuspend(&pdev->dev);
2487
2488 return 0;
2489
2490err_dma_alloc:
2491 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2492 if (host->slot[i])
2493 atmci_cleanup_slot(host->slot[i], i);
2494 }
2495err_init_slot:
2496 clk_disable_unprepare(host->mck);
2497
2498 pm_runtime_disable(&pdev->dev);
2499 pm_runtime_put_noidle(&pdev->dev);
2500
2501 del_timer_sync(&host->timer);
2502 if (!IS_ERR(host->dma.chan))
2503 dma_release_channel(host->dma.chan);
2504err_dma_probe_defer:
2505 free_irq(irq, host);
2506 return ret;
2507}
2508
2509static int atmci_remove(struct platform_device *pdev)
2510{
2511 struct atmel_mci *host = platform_get_drvdata(pdev);
2512 unsigned int i;
2513
2514 pm_runtime_get_sync(&pdev->dev);
2515
2516 if (host->buffer)
2517 dma_free_coherent(&pdev->dev, host->buf_size,
2518 host->buffer, host->buf_phys_addr);
2519
2520 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2521 if (host->slot[i])
2522 atmci_cleanup_slot(host->slot[i], i);
2523 }
2524
2525 atmci_writel(host, ATMCI_IDR, ~0UL);
2526 atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);
2527 atmci_readl(host, ATMCI_SR);
2528
2529 del_timer_sync(&host->timer);
2530 if (!IS_ERR(host->dma.chan))
2531 dma_release_channel(host->dma.chan);
2532
2533 free_irq(platform_get_irq(pdev, 0), host);
2534
2535 clk_disable_unprepare(host->mck);
2536
2537 pm_runtime_disable(&pdev->dev);
2538 pm_runtime_put_noidle(&pdev->dev);
2539
2540 return 0;
2541}
2542
2543#ifdef CONFIG_PM
2544static int atmci_runtime_suspend(struct device *dev)
2545{
2546 struct atmel_mci *host = dev_get_drvdata(dev);
2547
2548 clk_disable_unprepare(host->mck);
2549
2550 pinctrl_pm_select_sleep_state(dev);
2551
2552 return 0;
2553}
2554
2555static int atmci_runtime_resume(struct device *dev)
2556{
2557 struct atmel_mci *host = dev_get_drvdata(dev);
2558
2559 pinctrl_pm_select_default_state(dev);
2560
2561 return clk_prepare_enable(host->mck);
2562}
2563#endif
2564
static const struct dev_pm_ops atmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(atmci_runtime_suspend, atmci_runtime_resume, NULL)
};

static struct platform_driver atmci_driver = {
	.probe		= atmci_probe,
	.remove		= atmci_remove,
	.driver		= {
		.name		= "atmel_mci",
		.of_match_table	= of_match_ptr(atmci_dt_ids),
		.pm		= &atmci_dev_pm_ops,
	},
};
module_platform_driver(atmci_driver);

MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");