/*
 * MXS SPI master driver (Freescale i.MX23/i.MX28 SSP block)
 *
 * Author: Marek Vasut <marex@denx.de>
 *
 * Licensed under the terms of the GNU General Public License (GPL).
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/highmem.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/stmp_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/mxs-spi.h>

#define DRIVER_NAME		"mxs-spi"

/* Timeout, in milliseconds, for register polling and for DMA completion. */
#define SSP_TIMEOUT		10000

/* Maximum number of bytes covered by a single DMA scatter-gather segment. */
#define SG_MAXLEN		0xff00

struct mxs_spi {
	struct mxs_ssp		ssp;
	struct completion	c;	/* completed by the DMA callback */
};

/*
 * Program word length (only 8 bit is supported), SPI mode and bit clock
 * for one transfer. Called with t == NULL to apply the device defaults.
 */
static int mxs_spi_setup_transfer(struct spi_device *dev,
				  struct spi_transfer *t)
{
	struct mxs_spi *spi = spi_master_get_devdata(dev->master);
	struct mxs_ssp *ssp = &spi->ssp;
	uint8_t bits_per_word;
	uint32_t hz = 0;

	bits_per_word = dev->bits_per_word;
	if (t && t->bits_per_word)
		bits_per_word = t->bits_per_word;

	if (bits_per_word != 8) {
		dev_err(&dev->dev, "%s, unsupported bits_per_word=%d\n",
			__func__, bits_per_word);
		return -EINVAL;
	}

	hz = dev->max_speed_hz;
	if (t && t->speed_hz)
		hz = min(hz, t->speed_hz);
	if (hz == 0) {
		dev_err(&dev->dev, "Cannot continue with zero clock\n");
		return -EINVAL;
	}

	mxs_ssp_set_clk_rate(ssp, hz);

	writel(BF_SSP_CTRL1_SSP_MODE(BV_SSP_CTRL1_SSP_MODE__SPI) |
		BF_SSP_CTRL1_WORD_LENGTH
		(BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS) |
		((dev->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
		((dev->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0),
		ssp->base + HW_SSP_CTRL1(ssp));

	writel(0x0, ssp->base + HW_SSP_CMD0);
	writel(0x0, ssp->base + HW_SSP_CMD1);

	return 0;
}

static int mxs_spi_setup(struct spi_device *dev)
{
	int err = 0;

	if (!dev->bits_per_word)
		dev->bits_per_word = 8;

	if (dev->mode & ~(SPI_CPOL | SPI_CPHA))
		return -EINVAL;

	err = mxs_spi_setup_transfer(dev, NULL);
	if (err) {
		dev_err(&dev->dev,
			"Failed to setup transfer, error = %d\n", err);
	}

	return err;
}

static uint32_t mxs_spi_cs_to_reg(unsigned cs)
{
	uint32_t select = 0;

	/*
	 * The SSP block has no dedicated chip-select field in SPI mode.
	 * Instead, the WAIT_FOR_CMD and WAIT_FOR_IRQ bits of HW_SSP_CTRL0
	 * double as the selector for the chip-select line to toggle; see
	 * the i.MX23/i.MX28 reference manuals for the details.
	 */
	if (cs & 1)
		select |= BM_SSP_CTRL0_WAIT_FOR_CMD;
	if (cs & 2)
		select |= BM_SSP_CTRL0_WAIT_FOR_IRQ;

	return select;
}
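/*
 * mxs_spi_cs_to_reg() therefore maps:
 *   cs 0 -> neither bit set
 *   cs 1 -> BM_SSP_CTRL0_WAIT_FOR_CMD
 *   cs 2 -> BM_SSP_CTRL0_WAIT_FOR_IRQ
 * which presumably corresponds to the SSP block's three native chip-select
 * lines (probe() below registers the master with num_chipselect = 3).
 */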

static void mxs_spi_set_cs(struct mxs_spi *spi, unsigned cs)
{
	const uint32_t mask =
		BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ;
	uint32_t select;
	struct mxs_ssp *ssp = &spi->ssp;

	writel(mask, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
	select = mxs_spi_cs_to_reg(cs);
	writel(select, ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
}

/*
 * As used by this driver, LOCK_CS keeps the chip select asserted between
 * words/segments, while IGNORE_CRC requests that the chip select be released
 * once the current transfer finishes.
 */
static inline void mxs_spi_enable(struct mxs_spi *spi)
{
	struct mxs_ssp *ssp = &spi->ssp;

	writel(BM_SSP_CTRL0_LOCK_CS,
		ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
	writel(BM_SSP_CTRL0_IGNORE_CRC,
		ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
}

static inline void mxs_spi_disable(struct mxs_spi *spi)
{
	struct mxs_ssp *ssp = &spi->ssp;

	writel(BM_SSP_CTRL0_LOCK_CS,
		ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
	writel(BM_SSP_CTRL0_IGNORE_CRC,
		ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
}

/* Poll @offset until the bits in @mask are all set (or, if !set, all clear). */
static int mxs_ssp_wait(struct mxs_spi *spi, int offset, int mask, bool set)
{
	const unsigned long timeout = jiffies + msecs_to_jiffies(SSP_TIMEOUT);
	struct mxs_ssp *ssp = &spi->ssp;
	uint32_t reg;

	do {
		reg = readl_relaxed(ssp->base + offset);

		if (!set)
			reg = ~reg;

		reg &= mask;

		if (reg == mask)
			return 0;
	} while (time_before(jiffies, timeout));

	return -ETIMEDOUT;
}

static void mxs_ssp_dma_irq_callback(void *param)
{
	struct mxs_spi *spi = param;

	complete(&spi->c);
}

static irqreturn_t mxs_ssp_irq_handler(int irq, void *dev_id)
{
	struct mxs_ssp *ssp = dev_id;

	dev_err(ssp->dev, "%s[%i] CTRL1=%08x STATUS=%08x\n",
		__func__, __LINE__,
		readl(ssp->base + HW_SSP_CTRL1(ssp)),
		readl(ssp->base + HW_SSP_STATUS(ssp)));

	return IRQ_HANDLED;
}

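/*
 * DMA strategy used by mxs_spi_txrx_dma() below: the buffer is chopped into
 * scatter-gather segments of at most SG_MAXLEN bytes (one page at a time for
 * vmalloc'ed buffers). For each segment two descriptors are queued on the
 * SSP DMA channel: a DMA_TRANS_NONE descriptor carrying the PIO words
 * (CTRL0, plus the transfer size on i.MX28) and a data descriptor for the
 * segment itself. Only the very last data descriptor gets the completion
 * callback, which signals spi->c and ends the whole chained transaction.
 */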
static int mxs_spi_txrx_dma(struct mxs_spi *spi, int cs,
			    unsigned char *buf, int len,
			    int *first, int *last, int write)
{
	struct mxs_ssp *ssp = &spi->ssp;
	struct dma_async_tx_descriptor *desc = NULL;
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	const int desc_len = vmalloced_buf ? PAGE_SIZE : SG_MAXLEN;
	const int sgs = DIV_ROUND_UP(len, desc_len);
	int sg_count;
	int min, ret;
	uint32_t ctrl0;
	struct page *vm_page;
	void *sg_buf;
	struct {
		uint32_t		pio[4];
		struct scatterlist	sg;
	} *dma_xfer;

	if (!len)
		return -EINVAL;

	dma_xfer = kzalloc(sizeof(*dma_xfer) * sgs, GFP_KERNEL);
	if (!dma_xfer)
		return -ENOMEM;

	INIT_COMPLETION(spi->c);

	ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
	ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT;
	ctrl0 |= BM_SSP_CTRL0_DATA_XFER | mxs_spi_cs_to_reg(cs);

	if (*first)
		ctrl0 |= BM_SSP_CTRL0_LOCK_CS;
	if (!write)
		ctrl0 |= BM_SSP_CTRL0_READ;

	/* Queue a PIO register write plus a data descriptor per segment. */
	for (sg_count = 0; sg_count < sgs; sg_count++) {
		min = min(len, desc_len);

		/* After the very last segment, release the chip select. */
		if ((sg_count + 1 == sgs) && *last)
			ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC;

		if (ssp->devid == IMX23_SSP) {
			ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT;
			ctrl0 |= min;
		}

		dma_xfer[sg_count].pio[0] = ctrl0;
		dma_xfer[sg_count].pio[3] = min;

		if (vmalloced_buf) {
			vm_page = vmalloc_to_page(buf);
			if (!vm_page) {
				ret = -ENOMEM;
				goto err_vmalloc;
			}
			sg_buf = page_address(vm_page) +
				((size_t)buf & ~PAGE_MASK);
		} else {
			sg_buf = buf;
		}

		sg_init_one(&dma_xfer[sg_count].sg, sg_buf, min);
		ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
			write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

		len -= min;
		buf += min;

		/* Queue the PIO register write transfer. */
		desc = dmaengine_prep_slave_sg(ssp->dmach,
				(struct scatterlist *)dma_xfer[sg_count].pio,
				(ssp->devid == IMX23_SSP) ? 1 : 4,
				DMA_TRANS_NONE,
				sg_count ? DMA_PREP_INTERRUPT : 0);
		if (!desc) {
			dev_err(ssp->dev,
				"Failed to get PIO reg. write descriptor.\n");
			ret = -EINVAL;
			goto err_mapped;
		}

		/* Queue the DMA data transfer itself. */
		desc = dmaengine_prep_slave_sg(ssp->dmach,
				&dma_xfer[sg_count].sg, 1,
				write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dev_err(ssp->dev,
				"Failed to get DMA data write descriptor.\n");
			ret = -EINVAL;
			goto err_mapped;
		}
	}

	/*
	 * The last descriptor must carry the callback; completing spi->c is
	 * what finishes the chained DMA transaction.
	 */
	desc->callback = mxs_ssp_dma_irq_callback;
	desc->callback_param = spi;

	/* Start the transfer. */
	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);

	ret = wait_for_completion_timeout(&spi->c,
		msecs_to_jiffies(SSP_TIMEOUT));
	if (!ret) {
		dev_err(ssp->dev, "DMA transfer timeout\n");
		ret = -ETIMEDOUT;
		dmaengine_terminate_all(ssp->dmach);
		goto err_vmalloc;
	}

	ret = 0;

err_vmalloc:
	while (--sg_count >= 0) {
err_mapped:
		dma_unmap_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
			write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	}

	kfree(dma_xfer);

	return ret;
}

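/*
 * The PIO path implemented by mxs_spi_txrx_pio() below moves a single byte
 * per iteration: it programs a transfer count of one, sets RUN, writes or
 * reads HW_SSP_DATA and then polls the RUN/FIFO status bits via
 * mxs_ssp_wait(). This is only worth doing for short transfers; see the
 * length check in mxs_spi_transfer_one().
 */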
static int mxs_spi_txrx_pio(struct mxs_spi *spi, int cs,
			    unsigned char *buf, int len,
			    int *first, int *last, int write)
{
	struct mxs_ssp *ssp = &spi->ssp;

	if (*first)
		mxs_spi_enable(spi);

	mxs_spi_set_cs(spi, cs);

	while (len--) {
		/* Release the chip select after the final byte. */
		if (*last && len == 0)
			mxs_spi_disable(spi);

		/* Transfer exactly one byte. */
		if (ssp->devid == IMX23_SSP) {
			writel(BM_SSP_CTRL0_XFER_COUNT,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
			writel(1,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
		} else {
			writel(1, ssp->base + HW_SSP_XFER_SIZE);
		}

		if (write)
			writel(BM_SSP_CTRL0_READ,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
		else
			writel(BM_SSP_CTRL0_READ,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

		writel(BM_SSP_CTRL0_RUN,
			ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

		if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 1))
			return -ETIMEDOUT;

		if (write)
			writel(*buf, ssp->base + HW_SSP_DATA(ssp));

		writel(BM_SSP_CTRL0_DATA_XFER,
			ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

		if (!write) {
			if (mxs_ssp_wait(spi, HW_SSP_STATUS(ssp),
					BM_SSP_STATUS_FIFO_EMPTY, 0))
				return -ETIMEDOUT;

			*buf = (readl(ssp->base + HW_SSP_DATA(ssp)) & 0xff);
		}

		if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 0))
			return -ETIMEDOUT;

		buf++;
	}

	if (len <= 0)
		return 0;

	return -ETIMEDOUT;
}

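/*
 * mxs_spi_transfer_one() walks the transfer list of a message, reprograms
 * the SSP for every transfer, then uses PIO for short transfers (fewer than
 * 32 bytes) and DMA for everything else. The first/last flags tell the
 * low-level routines when to assert and when to release the chip select
 * across the whole message.
 */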
static int mxs_spi_transfer_one(struct spi_master *master,
				struct spi_message *m)
{
	struct mxs_spi *spi = spi_master_get_devdata(master);
	struct mxs_ssp *ssp = &spi->ssp;
	int first, last;
	struct spi_transfer *t, *tmp_t;
	int status = 0;
	int cs;

	first = last = 0;

	cs = m->spi->chip_select;

	list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) {

		status = mxs_spi_setup_transfer(m->spi, t);
		if (status)
			break;

		if (&t->transfer_list == m->transfers.next)
			first = 1;
		if (&t->transfer_list == m->transfers.prev)
			last = 1;
		if ((t->rx_buf && t->tx_buf) || (t->rx_dma && t->tx_dma)) {
			dev_err(ssp->dev,
				"Cannot send and receive simultaneously\n");
			status = -EINVAL;
			break;
		}

		/*
		 * Small transfers are cheaper to run in PIO mode, so route
		 * anything shorter than 32 bytes there; everything else
		 * goes through the DMA path.
		 */
		if (t->len < 32) {
			writel(BM_SSP_CTRL1_DMA_ENABLE,
				ssp->base + HW_SSP_CTRL1(ssp) +
				STMP_OFFSET_REG_CLR);

			if (t->tx_buf)
				status = mxs_spi_txrx_pio(spi, cs,
						(void *)t->tx_buf,
						t->len, &first, &last, 1);
			if (t->rx_buf)
				status = mxs_spi_txrx_pio(spi, cs,
						t->rx_buf, t->len,
						&first, &last, 0);
		} else {
			writel(BM_SSP_CTRL1_DMA_ENABLE,
				ssp->base + HW_SSP_CTRL1(ssp) +
				STMP_OFFSET_REG_SET);

			if (t->tx_buf)
				status = mxs_spi_txrx_dma(spi, cs,
						(void *)t->tx_buf, t->len,
						&first, &last, 1);
			if (t->rx_buf)
				status = mxs_spi_txrx_dma(spi, cs,
						t->rx_buf, t->len,
						&first, &last, 0);
		}

		if (status) {
			stmp_reset_block(ssp->base);
			break;
		}

		m->actual_length += t->len;
		first = last = 0;
	}

	m->status = status;
	spi_finalize_current_message(master);

	return status;
}

static const struct of_device_id mxs_spi_dt_ids[] = {
	{ .compatible = "fsl,imx23-spi", .data = (void *) IMX23_SSP, },
	{ .compatible = "fsl,imx28-spi", .data = (void *) IMX28_SSP, },
	{ }
};
MODULE_DEVICE_TABLE(of, mxs_spi_dt_ids);
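
/*
 * Illustrative device tree node (placeholder values, not copied from any
 * particular .dts; the reg, interrupt and DMA specifiers depend on the SoC
 * dtsi) showing the properties this driver consumes. The optional
 * clock-frequency property overrides the 160 MHz default used in probe().
 *
 *	ssp: spi@80014000 {
 *		compatible = "fsl,imx28-spi";
 *		reg = <0x80014000 0x2000>;
 *		interrupts = <...>;
 *		dmas = <...>;
 *		dma-names = "rx-tx";
 *		clock-frequency = <160000000>;
 *	};
 */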

static int mxs_spi_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(mxs_spi_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct spi_master *master;
	struct mxs_spi *spi;
	struct mxs_ssp *ssp;
	struct resource *iores;
	struct pinctrl *pinctrl;
	struct clk *clk;
	void __iomem *base;
	int devid, clk_freq;
	int ret = 0, irq_err;

	/*
	 * Default SSP clock rate, used when the optional "clock-frequency"
	 * device tree property is absent.
	 */
	const int clk_freq_default = 160000000;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq_err = platform_get_irq(pdev, 0);
	if (!iores || irq_err < 0)
		return -EINVAL;

	base = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(base))
		return PTR_ERR(base);

	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
	if (IS_ERR(pinctrl))
		return PTR_ERR(pinctrl);

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	devid = (enum mxs_ssp_id) of_id->data;
	ret = of_property_read_u32(np, "clock-frequency",
				   &clk_freq);
	if (ret)
		clk_freq = clk_freq_default;

	master = spi_alloc_master(&pdev->dev, sizeof(*spi));
	if (!master)
		return -ENOMEM;

	master->transfer_one_message = mxs_spi_transfer_one;
	master->setup = mxs_spi_setup;
	master->mode_bits = SPI_CPOL | SPI_CPHA;
	master->num_chipselect = 3;
	master->dev.of_node = np;
	master->flags = SPI_MASTER_HALF_DUPLEX;

	spi = spi_master_get_devdata(master);
	ssp = &spi->ssp;
	ssp->dev = &pdev->dev;
	ssp->clk = clk;
	ssp->base = base;
	ssp->devid = devid;

	init_completion(&spi->c);

	ret = devm_request_irq(&pdev->dev, irq_err, mxs_ssp_irq_handler, 0,
			       DRIVER_NAME, ssp);
	if (ret)
		goto out_master_free;

	ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
	if (!ssp->dmach) {
		dev_err(ssp->dev, "Failed to request DMA\n");
		ret = -ENODEV;
		goto out_master_free;
	}

	clk_prepare_enable(ssp->clk);
	clk_set_rate(ssp->clk, clk_freq);
	ssp->clk_rate = clk_get_rate(ssp->clk) / 1000;

	stmp_reset_block(ssp->base);

	platform_set_drvdata(pdev, master);

	ret = spi_register_master(master);
	if (ret) {
		dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret);
		goto out_free_dma;
	}

	return 0;

out_free_dma:
	dma_release_channel(ssp->dmach);
	clk_disable_unprepare(ssp->clk);
out_master_free:
	spi_master_put(master);
	return ret;
}

static int mxs_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master;
	struct mxs_spi *spi;
	struct mxs_ssp *ssp;

	master = spi_master_get(platform_get_drvdata(pdev));
	spi = spi_master_get_devdata(master);
	ssp = &spi->ssp;

	spi_unregister_master(master);

	dma_release_channel(ssp->dmach);

	clk_disable_unprepare(ssp->clk);

	spi_master_put(master);

	return 0;
}

static struct platform_driver mxs_spi_driver = {
	.probe	= mxs_spi_probe,
	.remove	= mxs_spi_remove,
	.driver	= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
		.of_match_table = mxs_spi_dt_ids,
	},
};

module_platform_driver(mxs_spi_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("MXS SPI master driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-spi");