/*
 * Freescale MXS SSP/MMC host controller driver ("mxs-mmc").
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/highmem.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/spi/mxs-spi.h>

#define DRIVER_NAME	"mxs-mmc"

#define MXS_MMC_IRQ_BITS	(BM_SSP_CTRL1_SDIO_IRQ | \
				 BM_SSP_CTRL1_RESP_ERR_IRQ | \
				 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ | \
				 BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | \
				 BM_SSP_CTRL1_DATA_CRC_IRQ | \
				 BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | \
				 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ | \
				 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)

#define MXS_MMC_DETECT_TIMEOUT	(HZ/2)

struct mxs_mmc_host {
	struct mxs_ssp		ssp;

	struct mmc_host		*mmc;
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;

	unsigned char		bus_width;
	spinlock_t		lock;
	int			sdio_irq_en;
	int			wp_gpio;
	bool			wp_inverted;
	bool			cd_inverted;
	bool			broken_cd;
	bool			non_removable;
};

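/* Report the write-protect state from the optional "wp-gpios" GPIO. */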
static int mxs_mmc_get_ro(struct mmc_host *mmc)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);
	int ret;

	if (!gpio_is_valid(host->wp_gpio))
		return -EINVAL;

	ret = gpio_get_value(host->wp_gpio);

	if (host->wp_inverted)
		ret = !ret;

	return ret;
}

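/*
 * Card detect: "non-removable" and "broken-cd" force a present card;
 * otherwise read the SSP CARD_DETECT status bit, optionally inverted.
 */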
static int mxs_mmc_get_cd(struct mmc_host *mmc)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;

	return host->non_removable || host->broken_cd ||
		(!(readl(ssp->base + HW_SSP_STATUS(ssp)) &
		   BM_SSP_STATUS_CARD_DETECT) ^ host->cd_inverted);
}

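/*
 * Soft-reset the SSP block and program the default CTRL0/CTRL1/TIMING
 * values for SD/MMC mode with DMA and the error interrupts enabled.
 */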
static int mxs_mmc_reset(struct mxs_mmc_host *host)
{
	struct mxs_ssp *ssp = &host->ssp;
	u32 ctrl0, ctrl1;
	int ret;

	ret = stmp_reset_block(ssp->base);
	if (ret)
		return ret;

	ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
	ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
		BF_SSP(0x7, CTRL1_WORD_LENGTH) |
		BM_SSP_CTRL1_DMA_ENABLE |
		BM_SSP_CTRL1_POLARITY |
		BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN |
		BM_SSP_CTRL1_DATA_CRC_IRQ_EN |
		BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN |
		BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN |
		BM_SSP_CTRL1_RESP_ERR_IRQ_EN;

	writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
	       BF_SSP(2, TIMING_CLOCK_DIVIDE) |
	       BF_SSP(0, TIMING_CLOCK_RATE),
	       ssp->base + HW_SSP_TIMING(ssp));

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN;
	}

	writel(ctrl0, ssp->base + HW_SSP_CTRL0);
	writel(ctrl1, ssp->base + HW_SSP_CTRL1(ssp));

	return 0;
}

static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
			      struct mmc_command *cmd);

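/*
 * Completion path: copy the command response out of the SDRESP registers,
 * unmap any data scatterlist, then either issue the stop command or hand
 * the finished request back to the MMC core.
 */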
static void mxs_mmc_request_done(struct mxs_mmc_host *host)
{
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = host->data;
	struct mmc_request *mrq = host->mrq;
	struct mxs_ssp *ssp = &host->ssp;

	if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
		if (mmc_resp_type(cmd) & MMC_RSP_136) {
			cmd->resp[3] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
			cmd->resp[2] = readl(ssp->base + HW_SSP_SDRESP1(ssp));
			cmd->resp[1] = readl(ssp->base + HW_SSP_SDRESP2(ssp));
			cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP3(ssp));
		} else {
			cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP0(ssp));
		}
	}

	if (data) {
		dma_unmap_sg(mmc_dev(host->mmc), data->sg,
			     data->sg_len, ssp->dma_dir);
		/*
		 * On any data error report zero bytes transferred;
		 * otherwise the whole transfer completed.
		 */
		if (!data->error)
			data->bytes_xfered = data->blocks * data->blksz;
		else
			data->bytes_xfered = 0;

		host->data = NULL;
		if (mrq->stop) {
			mxs_mmc_start_cmd(host, mrq->stop);
			return;
		}
	}

	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}

static void mxs_mmc_dma_irq_callback(void *param)
{
	struct mxs_mmc_host *host = param;

	mxs_mmc_request_done(host);
}

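/*
 * SSP error interrupt handler: acknowledge the status bits, signal a
 * pending SDIO interrupt, and translate the error bits into command and
 * data error codes for the request in flight.
 */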
static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
{
	struct mxs_mmc_host *host = dev_id;
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = host->data;
	struct mxs_ssp *ssp = &host->ssp;
	u32 stat;

	spin_lock(&host->lock);

	stat = readl(ssp->base + HW_SSP_CTRL1(ssp));
	writel(stat & MXS_MMC_IRQ_BITS,
	       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);

	spin_unlock(&host->lock);

	if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
		mmc_signal_sdio_irq(host->mmc);

	if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
		cmd->error = -ETIMEDOUT;
	else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
		cmd->error = -EIO;

	if (data) {
		if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ |
			    BM_SSP_CTRL1_RECV_TIMEOUT_IRQ))
			data->error = -ETIMEDOUT;
		else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ)
			data->error = -EILSEQ;
		else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ |
				 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ))
			data->error = -EIO;
	}

	return IRQ_HANDLED;
}

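/*
 * Build a DMA descriptor for the current step.  With host->data set, the
 * request's scatterlist is mapped and used; otherwise the SSP PIO words
 * (CTRL0/CMD0/CMD1) are handed to the mxs-dma channel as a PIO-only
 * descriptor.
 */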
static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
	struct mxs_mmc_host *host, unsigned long flags)
{
	struct mxs_ssp *ssp = &host->ssp;
	struct dma_async_tx_descriptor *desc;
	struct mmc_data *data = host->data;
	struct scatterlist *sgl;
	unsigned int sg_len;

	if (data) {
		/* data transfer: map and use the request's scatterlist */
		dma_map_sg(mmc_dev(host->mmc), data->sg,
			   data->sg_len, ssp->dma_dir);
		sgl = data->sg;
		sg_len = data->sg_len;
	} else {
		/* PIO-only descriptor carrying the SSP command words */
		sgl = (struct scatterlist *) ssp->ssp_pio_words;
		sg_len = SSP_PIO_NUM;
	}

	desc = dmaengine_prep_slave_sg(ssp->dmach,
				       sgl, sg_len, ssp->slave_dirn, flags);
	if (desc) {
		desc->callback = mxs_mmc_dma_irq_callback;
		desc->callback_param = host;
	} else {
		if (data)
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				     data->sg_len, ssp->dma_dir);
	}

	return desc;
}

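/*
 * Broadcast command (no response, no data), e.g. GO_IDLE_STATE: only the
 * PIO words are sent, with eight extra clock cycles appended.
 */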
static void mxs_mmc_bc(struct mxs_mmc_host *host)
{
	struct mxs_ssp *ssp = &host->ssp;
	struct mmc_command *cmd = host->cmd;
	struct dma_async_tx_descriptor *desc;
	u32 ctrl0, cmd0, cmd1;

	ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC;
	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC;
	cmd1 = cmd->arg;

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
	}

	ssp->ssp_pio_words[0] = ctrl0;
	ssp->ssp_pio_words[1] = cmd0;
	ssp->ssp_pio_words[2] = cmd1;
	ssp->dma_dir = DMA_NONE;
	ssp->slave_dirn = DMA_TRANS_NONE;
	desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
	if (!desc)
		goto out;

	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);
	return;

out:
	dev_warn(mmc_dev(host->mmc),
		 "%s: failed to prep dma\n", __func__);
}

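/*
 * Addressed command (response but no data transfer); also used for
 * broadcast-with-response commands.
 */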
static void mxs_mmc_ac(struct mxs_mmc_host *host)
{
	struct mxs_ssp *ssp = &host->ssp;
	struct mmc_command *cmd = host->cmd;
	struct dma_async_tx_descriptor *desc;
	u32 ignore_crc, get_resp, long_resp;
	u32 ctrl0, cmd0, cmd1;

	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
			0 : BM_SSP_CTRL0_IGNORE_CRC;
	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
			BM_SSP_CTRL0_GET_RESP : 0;
	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
			BM_SSP_CTRL0_LONG_RESP : 0;

	ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp;
	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
	cmd1 = cmd->arg;

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
	}

	ssp->ssp_pio_words[0] = ctrl0;
	ssp->ssp_pio_words[1] = cmd0;
	ssp->ssp_pio_words[2] = cmd1;
	ssp->dma_dir = DMA_NONE;
	ssp->slave_dirn = DMA_TRANS_NONE;
	desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
	if (!desc)
		goto out;

	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);
	return;

out:
	dev_warn(mmc_dev(host->mmc),
		 "%s: failed to prep dma\n", __func__);
}

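/*
 * Convert a data timeout in nanoseconds into SSP TIMING_TIMEOUT ticks
 * (the hardware counts in units of 4096 SSP clocks).
 */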
static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns)
{
	const unsigned int ssp_timeout_mul = 4096;
	/*
	 * Scale both the clock rate and the timeout down by 1000 so the
	 * intermediate product stays within 32 bits.
	 */
	const unsigned int clock_per_ms = clock_rate / 1000;
	const unsigned int ms = ns / 1000;
	const unsigned int ticks = ms * clock_per_ms;
	const unsigned int ssp_ticks = ticks / ssp_timeout_mul;

	WARN_ON(ssp_ticks == 0);

	return ssp_ticks;
}

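/*
 * Addressed data-transfer command: program transfer size, block size/count
 * and the data timeout, then queue two DMA descriptors: the PIO command
 * words followed by the data scatterlist.
 */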
static void mxs_mmc_adtc(struct mxs_mmc_host *host)
{
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = cmd->data;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl = data->sg, *sg;
	unsigned int sg_len = data->sg_len;
	unsigned int i;

	unsigned short dma_data_dir, timeout;
	enum dma_transfer_direction slave_dirn;
	unsigned int data_size = 0, log2_blksz;
	unsigned int blocks = data->blocks;

	struct mxs_ssp *ssp = &host->ssp;

	u32 ignore_crc, get_resp, long_resp, read;
	u32 ctrl0, cmd0, cmd1, val;

	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ?
			0 : BM_SSP_CTRL0_IGNORE_CRC;
	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ?
			BM_SSP_CTRL0_GET_RESP : 0;
	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ?
			BM_SSP_CTRL0_LONG_RESP : 0;

	if (data->flags & MMC_DATA_WRITE) {
		dma_data_dir = DMA_TO_DEVICE;
		slave_dirn = DMA_MEM_TO_DEV;
		read = 0;
	} else {
		dma_data_dir = DMA_FROM_DEVICE;
		slave_dirn = DMA_DEV_TO_MEM;
		read = BM_SSP_CTRL0_READ;
	}

	ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) |
		ignore_crc | get_resp | long_resp |
		BM_SSP_CTRL0_DATA_XFER | read |
		BM_SSP_CTRL0_WAIT_FOR_IRQ |
		BM_SSP_CTRL0_ENABLE;

	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);

	/* the block size is programmed as log2 of the byte count */
	log2_blksz = ilog2(data->blksz);

	/*
	 * Handle the case where the scatterlist covers a different number
	 * of bytes than blocks * blksz: program a single block then.
	 */
	for_each_sg(sgl, sg, sg_len, i)
		data_size += sg->length;

	if (data_size != data->blocks * data->blksz)
		blocks = 1;

	/* xfer count, block size and count are set differently per SSP version */
	if (ssp_is_old(ssp)) {
		ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
		cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
			BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
	} else {
		writel(data_size, ssp->base + HW_SSP_XFER_SIZE);
		writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) |
		       BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT),
		       ssp->base + HW_SSP_BLOCK_SIZE);
	}

	if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
	    (cmd->opcode == SD_IO_RW_EXTENDED))
		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;

	cmd1 = cmd->arg;

	if (host->sdio_irq_en) {
		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
	}

	/* set the data timeout */
	timeout = mxs_ns_to_ssp_ticks(ssp->clk_rate, data->timeout_ns);
	val = readl(ssp->base + HW_SSP_TIMING(ssp));
	val &= ~(BM_SSP_TIMING_TIMEOUT);
	val |= BF_SSP(timeout, TIMING_TIMEOUT);
	writel(val, ssp->base + HW_SSP_TIMING(ssp));

	/* first descriptor: the PIO command words */
	ssp->ssp_pio_words[0] = ctrl0;
	ssp->ssp_pio_words[1] = cmd0;
	ssp->ssp_pio_words[2] = cmd1;
	ssp->dma_dir = DMA_NONE;
	ssp->slave_dirn = DMA_TRANS_NONE;
	desc = mxs_mmc_prep_dma(host, 0);
	if (!desc)
		goto out;

	/* second descriptor: the data scatterlist */
	WARN_ON(host->data != NULL);
	host->data = data;
	ssp->dma_dir = dma_data_dir;
	ssp->slave_dirn = slave_dirn;
	desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		goto out;

	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);
	return;

out:
	dev_warn(mmc_dev(host->mmc),
		 "%s: failed to prep dma\n", __func__);
}

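/* Dispatch a command to the helper matching its MMC command class. */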
static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
			      struct mmc_command *cmd)
{
	host->cmd = cmd;

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_BC:
		mxs_mmc_bc(host);
		break;
	case MMC_CMD_BCR:
		mxs_mmc_ac(host);
		break;
	case MMC_CMD_AC:
		mxs_mmc_ac(host);
		break;
	case MMC_CMD_ADTC:
		mxs_mmc_adtc(host);
		break;
	default:
		dev_warn(mmc_dev(host->mmc),
			 "%s: unknown MMC command\n", __func__);
		break;
	}
}

static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);

	WARN_ON(host->mrq != NULL);
	host->mrq = mrq;
	mxs_mmc_start_cmd(host, mrq->cmd);
}

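/*
 * set_ios: cache the CTRL0_BUS_WIDTH encoding (0 = 1-bit, 1 = 4-bit,
 * 2 = 8-bit) and update the SSP clock rate.
 */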
static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);

	if (ios->bus_width == MMC_BUS_WIDTH_8)
		host->bus_width = 2;
	else if (ios->bus_width == MMC_BUS_WIDTH_4)
		host->bus_width = 1;
	else
		host->bus_width = 0;

	if (ios->clock)
		mxs_ssp_set_clk_rate(&host->ssp, ios->clock);
}

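/*
 * Enable or disable the SDIO card interrupt, and re-check the status bit
 * when enabling in case an interrupt is already pending.
 */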
static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	host->sdio_irq_en = enable;

	if (enable) {
		writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
		writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
		       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_SET);
	} else {
		writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
		writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
		       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR);
	}

	spin_unlock_irqrestore(&host->lock, flags);

	if (enable && readl(ssp->base + HW_SSP_STATUS(ssp)) &
			BM_SSP_STATUS_SDIO_IRQ)
		mmc_signal_sdio_irq(host->mmc);
}

static const struct mmc_host_ops mxs_mmc_ops = {
	.request = mxs_mmc_request,
	.get_ro = mxs_mmc_get_ro,
	.get_cd = mxs_mmc_get_cd,
	.set_ios = mxs_mmc_set_ios,
	.enable_sdio_irq = mxs_mmc_enable_sdio_irq,
};

static const struct platform_device_id mxs_ssp_ids[] = {
	{
		.name = "imx23-mmc",
		.driver_data = IMX23_SSP,
	}, {
		.name = "imx28-mmc",
		.driver_data = IMX28_SSP,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, mxs_ssp_ids);

static const struct of_device_id mxs_mmc_dt_ids[] = {
	{ .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_SSP, },
	{ .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_SSP, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);

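/*
 * Probe: map the SSP registers, enable the optional vmmc regulator and the
 * SSP clock, reset the block, request the "rx-tx" DMA channel, parse the DT
 * properties (bus-width, broken-cd, non-removable, wp-gpios, cd-inverted)
 * and register the mmc_host.
 */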
static int mxs_mmc_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(mxs_mmc_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct mxs_mmc_host *host;
	struct mmc_host *mmc;
	struct resource *iores;
	int ret = 0, irq_err;
	struct regulator *reg_vmmc;
	enum of_gpio_flags flags;
	struct mxs_ssp *ssp;
	u32 bus_width = 0;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq_err = platform_get_irq(pdev, 0);
	if (!iores || irq_err < 0)
		return -EINVAL;

	mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	ssp = &host->ssp;
	ssp->dev = &pdev->dev;
	ssp->base = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(ssp->base)) {
		ret = PTR_ERR(ssp->base);
		goto out_mmc_free;
	}

	ssp->devid = (enum mxs_ssp_id) of_id->data;

	host->mmc = mmc;
	host->sdio_irq_en = 0;

	reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc");
	if (!IS_ERR(reg_vmmc)) {
		ret = regulator_enable(reg_vmmc);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable vmmc regulator: %d\n", ret);
			goto out_mmc_free;
		}
	}

	ssp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(ssp->clk)) {
		ret = PTR_ERR(ssp->clk);
		goto out_mmc_free;
	}
	clk_prepare_enable(ssp->clk);

	ret = mxs_mmc_reset(host);
	if (ret) {
		dev_err(&pdev->dev, "Failed to reset mmc: %d\n", ret);
		goto out_clk_disable;
	}

	ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
	if (!ssp->dmach) {
		dev_err(mmc_dev(host->mmc),
			"%s: failed to request dma\n", __func__);
		ret = -ENODEV;
		goto out_clk_disable;
	}

	/* set mmc core parameters */
	mmc->ops = &mxs_mmc_ops;
	mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
		    MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;

	of_property_read_u32(np, "bus-width", &bus_width);
	if (bus_width == 4)
		mmc->caps |= MMC_CAP_4_BIT_DATA;
	else if (bus_width == 8)
		mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
	host->broken_cd = of_property_read_bool(np, "broken-cd");
	host->non_removable = of_property_read_bool(np, "non-removable");
	if (host->non_removable)
		mmc->caps |= MMC_CAP_NONREMOVABLE;
	host->wp_gpio = of_get_named_gpio_flags(np, "wp-gpios", 0, &flags);
	if (flags & OF_GPIO_ACTIVE_LOW)
		host->wp_inverted = 1;

	host->cd_inverted = of_property_read_bool(np, "cd-inverted");

	mmc->f_min = 400000;
	mmc->f_max = 288000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	mmc->max_segs = 52;
	mmc->max_blk_size = 1 << 0xf;
	mmc->max_blk_count = (ssp_is_old(ssp)) ? 0xff : 0xffffff;
	mmc->max_req_size = (ssp_is_old(ssp)) ? 0xffff : 0xffffffff;
	mmc->max_seg_size = dma_get_max_seg_size(ssp->dmach->device->dev);

	platform_set_drvdata(pdev, mmc);

	/* the IRQ handler takes host->lock, so initialize it before the IRQ */
	spin_lock_init(&host->lock);

	ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
			       DRIVER_NAME, host);
	if (ret)
		goto out_free_dma;

	ret = mmc_add_host(mmc);
	if (ret)
		goto out_free_dma;

	dev_info(mmc_dev(host->mmc), "initialized\n");

	return 0;

out_free_dma:
	if (ssp->dmach)
		dma_release_channel(ssp->dmach);
out_clk_disable:
	clk_disable_unprepare(ssp->clk);
out_mmc_free:
	mmc_free_host(mmc);
	return ret;
}

static int mxs_mmc_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;

	mmc_remove_host(mmc);

	if (ssp->dmach)
		dma_release_channel(ssp->dmach);

	clk_disable_unprepare(ssp->clk);

	mmc_free_host(mmc);

	return 0;
}

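/* System PM: quiesce the MMC core and gate the SSP clock across suspend. */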
#ifdef CONFIG_PM
static int mxs_mmc_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;
	int ret = 0;

	ret = mmc_suspend_host(mmc);

	clk_disable_unprepare(ssp->clk);

	return ret;
}

static int mxs_mmc_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;
	int ret = 0;

	clk_prepare_enable(ssp->clk);

	ret = mmc_resume_host(mmc);

	return ret;
}

static const struct dev_pm_ops mxs_mmc_pm_ops = {
	.suspend	= mxs_mmc_suspend,
	.resume		= mxs_mmc_resume,
};
#endif

static struct platform_driver mxs_mmc_driver = {
	.probe		= mxs_mmc_probe,
	.remove		= mxs_mmc_remove,
	.id_table	= mxs_ssp_ids,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
#ifdef CONFIG_PM
		.pm	= &mxs_mmc_pm_ops,
#endif
		.of_match_table = mxs_mmc_dt_ids,
	},
};

module_platform_driver(mxs_mmc_driver);

MODULE_DESCRIPTION("Freescale MXS MMC peripheral");
MODULE_AUTHOR("Freescale Semiconductor");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);