// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2013 Freescale Semiconductor, Inc.
//
// Freescale DSPI driver
// This file contains a driver for the Freescale DSPI
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-fsl-dspi.h>

#define DRIVER_NAME "fsl-dspi"

#define SPI_MCR 0x00
#define SPI_MCR_MASTER BIT(31)
#define SPI_MCR_PCSIS(x) ((x) << 16)
#define SPI_MCR_CLR_TXF BIT(11)
#define SPI_MCR_CLR_RXF BIT(10)
#define SPI_MCR_XSPI BIT(3)

#define SPI_TCR 0x08
#define SPI_TCR_GET_TCNT(x) (((x) & GENMASK(31, 16)) >> 16)

#define SPI_CTAR(x) (0x0c + (((x) & GENMASK(1, 0)) * 4))
#define SPI_CTAR_FMSZ(x) (((x) << 27) & GENMASK(30, 27))
#define SPI_CTAR_CPOL BIT(26)
#define SPI_CTAR_CPHA BIT(25)
#define SPI_CTAR_LSBFE BIT(24)
#define SPI_CTAR_PCSSCK(x) (((x) << 22) & GENMASK(23, 22))
#define SPI_CTAR_PASC(x) (((x) << 20) & GENMASK(21, 20))
#define SPI_CTAR_PDT(x) (((x) << 18) & GENMASK(19, 18))
#define SPI_CTAR_PBR(x) (((x) << 16) & GENMASK(17, 16))
#define SPI_CTAR_CSSCK(x) (((x) << 12) & GENMASK(15, 12))
#define SPI_CTAR_ASC(x) (((x) << 8) & GENMASK(11, 8))
#define SPI_CTAR_DT(x) (((x) << 4) & GENMASK(7, 4))
#define SPI_CTAR_BR(x) ((x) & GENMASK(3, 0))
#define SPI_CTAR_SCALE_BITS 0xf

#define SPI_CTAR0_SLAVE 0x0c

#define SPI_SR 0x2c
#define SPI_SR_TCFQF BIT(31)
#define SPI_SR_EOQF BIT(28)
#define SPI_SR_TFUF BIT(27)
#define SPI_SR_TFFF BIT(25)
#define SPI_SR_CMDTCF BIT(23)
#define SPI_SR_SPEF BIT(21)
#define SPI_SR_RFOF BIT(19)
#define SPI_SR_TFIWF BIT(18)
#define SPI_SR_RFDF BIT(17)
#define SPI_SR_CMDFFF BIT(16)
#define SPI_SR_CLEAR (SPI_SR_TCFQF | SPI_SR_EOQF | \
			SPI_SR_TFUF | SPI_SR_TFFF | \
			SPI_SR_CMDTCF | SPI_SR_SPEF | \
			SPI_SR_RFOF | SPI_SR_TFIWF | \
			SPI_SR_RFDF | SPI_SR_CMDFFF)

#define SPI_RSER_TFFFE BIT(25)
#define SPI_RSER_TFFFD BIT(24)
#define SPI_RSER_RFDFE BIT(17)
#define SPI_RSER_RFDFD BIT(16)

#define SPI_RSER 0x30
#define SPI_RSER_TCFQE BIT(31)
#define SPI_RSER_EOQFE BIT(28)
#define SPI_RSER_CMDTCFE BIT(23)

#define SPI_PUSHR 0x34
#define SPI_PUSHR_CMD_CONT BIT(15)
#define SPI_PUSHR_CMD_CTAS(x) (((x) << 12) & GENMASK(14, 12))
#define SPI_PUSHR_CMD_EOQ BIT(11)
#define SPI_PUSHR_CMD_CTCNT BIT(10)
#define SPI_PUSHR_CMD_PCS(x) (BIT(x) & GENMASK(5, 0))

#define SPI_PUSHR_SLAVE 0x34

#define SPI_POPR 0x38

#define SPI_TXFR0 0x3c
#define SPI_TXFR1 0x40
#define SPI_TXFR2 0x44
#define SPI_TXFR3 0x48
#define SPI_RXFR0 0x7c
#define SPI_RXFR1 0x80
#define SPI_RXFR2 0x84
#define SPI_RXFR3 0x88

#define SPI_CTARE(x) (0x11c + (((x) & GENMASK(1, 0)) * 4))
#define SPI_CTARE_FMSZE(x) (((x) & 0x1) << 16)
#define SPI_CTARE_DTCP(x) ((x) & 0x7ff)

#define SPI_SREX 0x13c

#define SPI_FRAME_BITS(bits) SPI_CTAR_FMSZ((bits) - 1)
#define SPI_FRAME_EBITS(bits) SPI_CTARE_FMSZE(((bits) - 1) >> 4)

#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)

struct chip_data {
	u32 ctar_val;
};

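/*
 * Transfer mode selected per SoC variant:
 * - DSPI_EOQ_MODE: every TX FIFO entry carries its own 16-bit command word,
 *   and the last entry of a burst sets the End-Of-Queue flag
 * - DSPI_XSPI_MODE: extended SPI mode, where command and data are written
 *   separately so frames larger than 16 bits are possible
 * - DSPI_DMA_MODE: the TX and RX FIFOs are serviced by DMA channels
 */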
enum dspi_trans_mode {
	DSPI_EOQ_MODE = 0,
	DSPI_XSPI_MODE,
	DSPI_DMA_MODE,
};

struct fsl_dspi_devtype_data {
	enum dspi_trans_mode trans_mode;
	u8 max_clock_factor;
	int fifo_size;
};

enum {
	LS1021A,
	LS1012A,
	LS1028A,
	LS1043A,
	LS1046A,
	LS2080A,
	LS2085A,
	LX2160A,
	MCF5441X,
	VF610,
};

static const struct fsl_dspi_devtype_data devtype_data[] = {
	[VF610] = {
		.trans_mode = DSPI_DMA_MODE,
		.max_clock_factor = 2,
		.fifo_size = 4,
	},
	[LS1021A] = {
		.trans_mode = DSPI_XSPI_MODE,
		.max_clock_factor = 8,
		.fifo_size = 4,
	},
	[LS1012A] = {
		.trans_mode = DSPI_XSPI_MODE,
		.max_clock_factor = 8,
		.fifo_size = 16,
	},
	[LS1028A] = {
		.trans_mode = DSPI_XSPI_MODE,
		.max_clock_factor = 8,
		.fifo_size = 4,
	},
	[LS1043A] = {
		.trans_mode = DSPI_XSPI_MODE,
		.max_clock_factor = 8,
		.fifo_size = 16,
	},
	[LS1046A] = {
		.trans_mode = DSPI_XSPI_MODE,
		.max_clock_factor = 8,
		.fifo_size = 16,
	},
	[LS2080A] = {
		.trans_mode = DSPI_DMA_MODE,
		.max_clock_factor = 8,
		.fifo_size = 4,
	},
	[LS2085A] = {
		.trans_mode = DSPI_DMA_MODE,
		.max_clock_factor = 8,
		.fifo_size = 4,
	},
	[LX2160A] = {
		.trans_mode = DSPI_DMA_MODE,
		.max_clock_factor = 8,
		.fifo_size = 4,
	},
	[MCF5441X] = {
		.trans_mode = DSPI_EOQ_MODE,
		.max_clock_factor = 8,
		.fifo_size = 16,
	},
};

193
194struct fsl_dspi_dma {
195 u32 *tx_dma_buf;
196 struct dma_chan *chan_tx;
197 dma_addr_t tx_dma_phys;
198 struct completion cmd_tx_complete;
199 struct dma_async_tx_descriptor *tx_desc;
200
201 u32 *rx_dma_buf;
202 struct dma_chan *chan_rx;
203 dma_addr_t rx_dma_phys;
204 struct completion cmd_rx_complete;
205 struct dma_async_tx_descriptor *rx_desc;
206};
207
208struct fsl_dspi {
209 struct spi_controller *ctlr;
210 struct platform_device *pdev;
211
212 struct regmap *regmap;
213 struct regmap *regmap_pushr;
214 int irq;
215 struct clk *clk;
216
217 struct spi_transfer *cur_transfer;
218 struct spi_message *cur_msg;
219 struct chip_data *cur_chip;
220 size_t progress;
221 size_t len;
222 const void *tx;
223 void *rx;
224 u16 tx_cmd;
225 const struct fsl_dspi_devtype_data *devtype_data;
226
227 struct completion xfer_done;
228
229 struct fsl_dspi_dma *dma;
230
231 int oper_word_size;
232 int oper_bits_per_word;
233
234 int words_in_flight;
235
236
237
238
239
240 int pushr_cmd;
241 int pushr_tx;
242
243 void (*host_to_dev)(struct fsl_dspi *dspi, u32 *txdata);
244 void (*dev_to_host)(struct fsl_dspi *dspi, u32 rxdata);
245};
246
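/*
 * TX/RX buffer accessors. The "native" helpers copy oper_word_size bytes
 * verbatim, while the 8on16/8on32/16on32 helpers pack several smaller
 * software frames into one larger hardware word (and unpack on receive).
 */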
static void dspi_native_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
	memcpy(txdata, dspi->tx, dspi->oper_word_size);
	dspi->tx += dspi->oper_word_size;
}

static void dspi_native_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
	memcpy(dspi->rx, &rxdata, dspi->oper_word_size);
	dspi->rx += dspi->oper_word_size;
}

static void dspi_8on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
	*txdata = cpu_to_be32(*(u32 *)dspi->tx);
	dspi->tx += sizeof(u32);
}

static void dspi_8on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
	*(u32 *)dspi->rx = be32_to_cpu(rxdata);
	dspi->rx += sizeof(u32);
}

static void dspi_8on16_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
	*txdata = cpu_to_be16(*(u16 *)dspi->tx);
	dspi->tx += sizeof(u16);
}

static void dspi_8on16_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
	*(u16 *)dspi->rx = be16_to_cpu(rxdata);
	dspi->rx += sizeof(u16);
}

static void dspi_16on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
{
	u16 hi = *(u16 *)dspi->tx;
	u16 lo = *(u16 *)(dspi->tx + 2);

	*txdata = (u32)hi << 16 | lo;
	dspi->tx += sizeof(u32);
}

static void dspi_16on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
{
	u16 hi = rxdata & 0xffff;
	u16 lo = rxdata >> 16;

	*(u16 *)dspi->rx = lo;
	*(u16 *)(dspi->rx + 2) = hi;
	dspi->rx += sizeof(u32);
}

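/*
 * Pop one word from the TX buffer for pushing into the
 * PUSHR register (TX FIFO)
 */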
static u32 dspi_pop_tx(struct fsl_dspi *dspi)
{
	u32 txdata = 0;

	if (dspi->tx)
		dspi->host_to_dev(dspi, &txdata);
	dspi->len -= dspi->oper_word_size;
	return txdata;
}

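/* Prepare one TX FIFO entry (txdata plus cmd) */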
static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
{
	u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);

	if (spi_controller_is_slave(dspi->ctlr))
		return data;

	if (dspi->len > 0)
		cmd |= SPI_PUSHR_CMD_CONT;
	return cmd << 16 | data;
}

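/* Push one word to the RX buffer from the POPR register (RX FIFO) */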
static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
{
	if (!dspi->rx)
		return;
	dspi->dev_to_host(dspi, rxdata);
}

static void dspi_tx_dma_callback(void *arg)
{
	struct fsl_dspi *dspi = arg;
	struct fsl_dspi_dma *dma = dspi->dma;

	complete(&dma->cmd_tx_complete);
}

static void dspi_rx_dma_callback(void *arg)
{
	struct fsl_dspi *dspi = arg;
	struct fsl_dspi_dma *dma = dspi->dma;
	int i;

	if (dspi->rx) {
		for (i = 0; i < dspi->words_in_flight; i++)
			dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]);
	}

	complete(&dma->cmd_rx_complete);
}

static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
{
	struct device *dev = &dspi->pdev->dev;
	struct fsl_dspi_dma *dma = dspi->dma;
	int time_left;
	int i;

	for (i = 0; i < dspi->words_in_flight; i++)
		dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi);

	dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
					dma->tx_dma_phys,
					dspi->words_in_flight *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->tx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->tx_desc->callback = dspi_tx_dma_callback;
	dma->tx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
					dma->rx_dma_phys,
					dspi->words_in_flight *
					DMA_SLAVE_BUSWIDTH_4_BYTES,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma->rx_desc) {
		dev_err(dev, "Not able to get desc for DMA xfer\n");
		return -EIO;
	}

	dma->rx_desc->callback = dspi_rx_dma_callback;
	dma->rx_desc->callback_param = dspi;
	if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
		dev_err(dev, "DMA submit failed\n");
		return -EINVAL;
	}

	reinit_completion(&dspi->dma->cmd_rx_complete);
	reinit_completion(&dspi->dma->cmd_tx_complete);

	dma_async_issue_pending(dma->chan_rx);
	dma_async_issue_pending(dma->chan_tx);

	if (spi_controller_is_slave(dspi->ctlr)) {
		wait_for_completion_interruptible(&dspi->dma->cmd_rx_complete);
		return 0;
	}

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
						DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA tx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
						DMA_COMPLETION_TIMEOUT);
	if (time_left == 0) {
		dev_err(dev, "DMA rx timeout\n");
		dmaengine_terminate_all(dma->chan_tx);
		dmaengine_terminate_all(dma->chan_rx);
		return -ETIMEDOUT;
	}

	return 0;
}

static void dspi_setup_accel(struct fsl_dspi *dspi);

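/*
 * Transfer the current spi_transfer in FIFO-sized chunks over DMA.
 * dspi->len is decremented by dspi_pop_tx() via dspi_pop_tx_pushr(),
 * called from dspi_next_xfer_dma_submit().
 */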
static int dspi_dma_xfer(struct fsl_dspi *dspi)
{
	struct spi_message *message = dspi->cur_msg;
	struct device *dev = &dspi->pdev->dev;
	int ret = 0;

	while (dspi->len) {
		/* Figure out the operational bits-per-word for this chunk */
		dspi_setup_accel(dspi);

		dspi->words_in_flight = dspi->len / dspi->oper_word_size;
		if (dspi->words_in_flight > dspi->devtype_data->fifo_size)
			dspi->words_in_flight = dspi->devtype_data->fifo_size;

		message->actual_length += dspi->words_in_flight *
					  dspi->oper_word_size;

		ret = dspi_next_xfer_dma_submit(dspi);
		if (ret) {
			dev_err(dev, "DMA transfer failed\n");
			break;
		}
	}

	return ret;
}

static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
{
	int dma_bufsize = dspi->devtype_data->fifo_size * 2;
	struct device *dev = &dspi->pdev->dev;
	struct dma_slave_config cfg;
	struct fsl_dspi_dma *dma;
	int ret;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->chan_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(dma->chan_rx)) {
		dev_err(dev, "rx dma channel not available\n");
		ret = PTR_ERR(dma->chan_rx);
		return ret;
	}

	dma->chan_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(dma->chan_tx)) {
		dev_err(dev, "tx dma channel not available\n");
		ret = PTR_ERR(dma->chan_tx);
		goto err_tx_channel;
	}

	dma->tx_dma_buf = dma_alloc_coherent(dma->chan_tx->device->dev,
					     dma_bufsize, &dma->tx_dma_phys,
					     GFP_KERNEL);
	if (!dma->tx_dma_buf) {
		ret = -ENOMEM;
		goto err_tx_dma_buf;
	}

	dma->rx_dma_buf = dma_alloc_coherent(dma->chan_rx->device->dev,
					     dma_bufsize, &dma->rx_dma_phys,
					     GFP_KERNEL);
	if (!dma->rx_dma_buf) {
		ret = -ENOMEM;
		goto err_rx_dma_buf;
	}

	cfg.src_addr = phy_addr + SPI_POPR;
	cfg.dst_addr = phy_addr + SPI_PUSHR;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = 1;
	cfg.dst_maxburst = 1;

	cfg.direction = DMA_DEV_TO_MEM;
	ret = dmaengine_slave_config(dma->chan_rx, &cfg);
	if (ret) {
		dev_err(dev, "can't configure rx dma channel\n");
		ret = -EINVAL;
		goto err_slave_config;
	}

	cfg.direction = DMA_MEM_TO_DEV;
	ret = dmaengine_slave_config(dma->chan_tx, &cfg);
	if (ret) {
		dev_err(dev, "can't configure tx dma channel\n");
		ret = -EINVAL;
		goto err_slave_config;
	}

	dspi->dma = dma;
	init_completion(&dma->cmd_tx_complete);
	init_completion(&dma->cmd_rx_complete);

	return 0;

err_slave_config:
	dma_free_coherent(dma->chan_rx->device->dev,
			  dma_bufsize, dma->rx_dma_buf, dma->rx_dma_phys);
err_rx_dma_buf:
	dma_free_coherent(dma->chan_tx->device->dev,
			  dma_bufsize, dma->tx_dma_buf, dma->tx_dma_phys);
err_tx_dma_buf:
	dma_release_channel(dma->chan_tx);
err_tx_channel:
	dma_release_channel(dma->chan_rx);

	devm_kfree(dev, dma);
	dspi->dma = NULL;

	return ret;
}

static void dspi_release_dma(struct fsl_dspi *dspi)
{
	int dma_bufsize = dspi->devtype_data->fifo_size * 2;
	struct fsl_dspi_dma *dma = dspi->dma;

	if (!dma)
		return;

	if (dma->chan_tx) {
		dma_unmap_single(dma->chan_tx->device->dev, dma->tx_dma_phys,
				 dma_bufsize, DMA_TO_DEVICE);
		dma_release_channel(dma->chan_tx);
	}

	if (dma->chan_rx) {
		dma_unmap_single(dma->chan_rx->device->dev, dma->rx_dma_phys,
				 dma_bufsize, DMA_FROM_DEVICE);
		dma_release_channel(dma->chan_rx);
	}
}

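/*
 * Convert the requested SCK frequency into the closest achievable
 * combination of baud rate prescaler (PBR) and scaler (BR) values.
 */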
static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
			   unsigned long clkrate)
{
	/* Valid baud rate pre-scaler and scaler values */
	int pbr_tbl[4] = {2, 3, 5, 7};
	int brs[16] = { 2, 4, 6, 8,
			16, 32, 64, 128,
			256, 512, 1024, 2048,
			4096, 8192, 16384, 32768 };
	int scale_needed, scale, minscale = INT_MAX;
	int i, j;

	scale_needed = clkrate / speed_hz;
	if (clkrate % speed_hz)
		scale_needed++;

	for (i = 0; i < ARRAY_SIZE(brs); i++)
		for (j = 0; j < ARRAY_SIZE(pbr_tbl); j++) {
			scale = brs[i] * pbr_tbl[j];
			if (scale >= scale_needed) {
				if (scale < minscale) {
					minscale = scale;
					*br = i;
					*pbr = j;
				}
				break;
			}
		}

	if (minscale == INT_MAX) {
		pr_warn("Can not find valid baud rate,speed_hz is %d,clkrate is %ld, we use the max prescaler value.\n",
			speed_hz, clkrate);
		*pbr = ARRAY_SIZE(pbr_tbl) - 1;
		*br = ARRAY_SIZE(brs) - 1;
	}
}

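/*
 * Convert a delay in nanoseconds into the DSPI delay prescaler and scaler
 * field values, given the protocol clock rate.
 */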
static void ns_delay_scale(char *psc, char *sc, int delay_ns,
			   unsigned long clkrate)
{
	int scale_needed, scale, minscale = INT_MAX;
	int pscale_tbl[4] = {1, 3, 5, 7};
	u32 remainder;
	int i, j;

	scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC,
				   &remainder);
	if (remainder)
		scale_needed++;

	for (i = 0; i < ARRAY_SIZE(pscale_tbl); i++)
		for (j = 0; j <= SPI_CTAR_SCALE_BITS; j++) {
			scale = pscale_tbl[i] * (2 << j);
			if (scale >= scale_needed) {
				if (scale < minscale) {
					minscale = scale;
					*psc = i;
					*sc = j;
				}
				break;
			}
		}

	if (minscale == INT_MAX) {
		pr_warn("Cannot find correct scale values for %dns delay at clkrate %ld, using max prescaler value",
			delay_ns, clkrate);
		*psc = ARRAY_SIZE(pscale_tbl) - 1;
		*sc = SPI_CTAR_SCALE_BITS;
	}
}

static void dspi_pushr_write(struct fsl_dspi *dspi)
{
	regmap_write(dspi->regmap, SPI_PUSHR, dspi_pop_tx_pushr(dspi));
}

static void dspi_pushr_cmd_write(struct fsl_dspi *dspi, u16 cmd)
{
	/*
	 * The only time the PCS does not need continuation after this word
	 * is when it is the last one of the buffer. We must look ahead here,
	 * because dspi->len is only decremented by dspi_pop_tx(), which runs
	 * after this command has been written.
	 */
	if (dspi->len > dspi->oper_word_size)
		cmd |= SPI_PUSHR_CMD_CONT;
	regmap_write(dspi->regmap_pushr, dspi->pushr_cmd, cmd);
}

static void dspi_pushr_txdata_write(struct fsl_dspi *dspi, u16 txdata)
{
	regmap_write(dspi->regmap_pushr, dspi->pushr_tx, txdata);
}

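/*
 * Write one burst to the TX FIFO in XSPI mode: a single 16-bit command
 * entry followed by num_words data entries (two FIFO writes per word when
 * the operational word size exceeds 16 bits).
 */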
static void dspi_xspi_fifo_write(struct fsl_dspi *dspi, int num_words)
{
	int num_bytes = num_words * dspi->oper_word_size;
	u16 tx_cmd = dspi->tx_cmd;

	/*
	 * Set the End-Of-Queue flag when this burst ends the buffer and the
	 * PCS is not supposed to stay asserted afterwards.
	 */
	if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT) && num_bytes == dspi->len)
		tx_cmd |= SPI_PUSHR_CMD_EOQ;

	/* Update CTARE with the extended frame size and transfer count */
	regmap_write(dspi->regmap, SPI_CTARE(0),
		     SPI_FRAME_EBITS(dspi->oper_bits_per_word) |
		     SPI_CTARE_DTCP(num_words));

	/*
	 * Write the CMD FIFO entry first, then the corresponding TX FIFO
	 * entries (two per word when oper_bits_per_word exceeds 16).
	 */
	dspi_pushr_cmd_write(dspi, tx_cmd);

	/* Fill the TX FIFO with num_words words */
	while (num_words--) {
		u32 data = dspi_pop_tx(dspi);

		dspi_pushr_txdata_write(dspi, data & 0xFFFF);
		if (dspi->oper_bits_per_word > 16)
			dspi_pushr_txdata_write(dspi, data >> 16);
	}
}

static void dspi_eoq_fifo_write(struct fsl_dspi *dspi, int num_words)
{
	u16 xfer_cmd = dspi->tx_cmd;

	/* Fill TX FIFO with as many transfers as possible */
	while (num_words--) {
		dspi->tx_cmd = xfer_cmd;
		/* Request EOQF for last transfer in FIFO */
		if (num_words == 0)
			dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ;
		/* Write combined TX FIFO and CMD FIFO entry */
		dspi_pushr_write(dspi);
	}
}

static u32 dspi_popr_read(struct fsl_dspi *dspi)
{
	u32 rxdata = 0;

	regmap_read(dspi->regmap, SPI_POPR, &rxdata);
	return rxdata;
}

static void dspi_fifo_read(struct fsl_dspi *dspi)
{
	int num_fifo_entries = dspi->words_in_flight;

	/* Read one FIFO entry and push to rx buffer */
	while (num_fifo_entries--)
		dspi_push_rx(dspi, dspi_popr_read(dspi));
}

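/*
 * Pick the largest hardware word size (oper_bits_per_word) that the
 * transfer can be chopped into, so that the FIFO is used as efficiently
 * as possible, and select the matching buffer conversion helpers.
 */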
static void dspi_setup_accel(struct fsl_dspi *dspi)
{
	struct spi_transfer *xfer = dspi->cur_transfer;
	bool odd = !!(dspi->len & 1);

	/* No accel for frames not multiple of 8 bits at the moment */
	if (xfer->bits_per_word % 8)
		goto no_accel;

	if (!odd && dspi->len <= dspi->devtype_data->fifo_size * 2) {
		dspi->oper_bits_per_word = 16;
	} else if (odd && dspi->len <= dspi->devtype_data->fifo_size) {
		dspi->oper_bits_per_word = 8;
	} else {
		/* Start off with the maximum supported by hardware */
		if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
			dspi->oper_bits_per_word = 32;
		else
			dspi->oper_bits_per_word = 16;

		/*
		 * Go down to a smaller word size only if the remaining
		 * buffer is shorter than one such word.
		 */
		do {
			if (dspi->len >= DIV_ROUND_UP(dspi->oper_bits_per_word, 8))
				break;

			dspi->oper_bits_per_word /= 2;
		} while (dspi->oper_bits_per_word > 8);
	}

	if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 32) {
		dspi->dev_to_host = dspi_8on32_dev_to_host;
		dspi->host_to_dev = dspi_8on32_host_to_dev;
	} else if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 16) {
		dspi->dev_to_host = dspi_8on16_dev_to_host;
		dspi->host_to_dev = dspi_8on16_host_to_dev;
	} else if (xfer->bits_per_word == 16 && dspi->oper_bits_per_word == 32) {
		dspi->dev_to_host = dspi_16on32_dev_to_host;
		dspi->host_to_dev = dspi_16on32_host_to_dev;
	} else {
no_accel:
		dspi->dev_to_host = dspi_native_dev_to_host;
		dspi->host_to_dev = dspi_native_host_to_dev;
		dspi->oper_bits_per_word = xfer->bits_per_word;
	}

	dspi->oper_word_size = DIV_ROUND_UP(dspi->oper_bits_per_word, 8);

	/*
	 * Update CTAR here (code is common for EOQ, XSPI and DMA modes).
	 * CTARE is updated later, in the code that is specific to XSPI mode.
	 */
	regmap_write(dspi->regmap, SPI_CTAR(0),
		     dspi->cur_chip->ctar_val |
		     SPI_FRAME_BITS(dspi->oper_bits_per_word));
}

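/*
 * Fill the TX FIFO with as much of the remaining buffer as fits, update
 * the transfer progress, and kick off the burst in either EOQ or XSPI
 * mode. The FIFO is drained later by dspi_fifo_read().
 */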
static void dspi_fifo_write(struct fsl_dspi *dspi)
{
	int num_fifo_entries = dspi->devtype_data->fifo_size;
	struct spi_transfer *xfer = dspi->cur_transfer;
	struct spi_message *msg = dspi->cur_msg;
	int num_words, num_bytes;

	dspi_setup_accel(dspi);

	/* In XSPI mode each 32-bit word occupies 2 TX FIFO entries */
	if (dspi->oper_word_size == 4)
		num_fifo_entries /= 2;

	/*
	 * Integer division intentionally trims off odd (or non-multiple of 4)
	 * numbers of bytes at the end of the buffer, which will be sent next
	 * time using a smaller oper_word_size.
	 */
	num_words = dspi->len / dspi->oper_word_size;
	if (num_words > num_fifo_entries)
		num_words = num_fifo_entries;

	/* Update total number of bytes that were transferred */
	num_bytes = num_words * dspi->oper_word_size;
	msg->actual_length += num_bytes;
	dspi->progress += num_bytes / DIV_ROUND_UP(xfer->bits_per_word, 8);

	/*
	 * Update the shared variable before writing the FIFO; the interrupt
	 * handler uses words_in_flight to know how many entries to read back.
	 */
	dspi->words_in_flight = num_words;

	spi_take_timestamp_pre(dspi->ctlr, xfer, dspi->progress, !dspi->irq);

	if (dspi->devtype_data->trans_mode == DSPI_EOQ_MODE)
		dspi_eoq_fifo_write(dspi, num_words);
	else
		dspi_xspi_fifo_write(dspi, num_words);
	/*
	 * Everything after this point is in a potential race with the next
	 * interrupt, so we must never use dspi->words_in_flight again, since
	 * it might already be modified by the next dspi_fifo_write.
	 */

	spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
				dspi->progress, !dspi->irq);
}

static int dspi_rxtx(struct fsl_dspi *dspi)
{
	dspi_fifo_read(dspi);

	if (!dspi->len)
		/* Success! */
		return 0;

	dspi_fifo_write(dspi);

	return -EINPROGRESS;
}

static int dspi_poll(struct fsl_dspi *dspi)
{
	int tries = 1000;
	u32 spi_sr;

	do {
		regmap_read(dspi->regmap, SPI_SR, &spi_sr);
		regmap_write(dspi->regmap, SPI_SR, spi_sr);

		if (spi_sr & (SPI_SR_EOQF | SPI_SR_CMDTCF))
			break;
	} while (--tries);

	if (!tries)
		return -ETIMEDOUT;

	return dspi_rxtx(dspi);
}

static irqreturn_t dspi_interrupt(int irq, void *dev_id)
{
	struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
	u32 spi_sr;

	regmap_read(dspi->regmap, SPI_SR, &spi_sr);
	regmap_write(dspi->regmap, SPI_SR, spi_sr);

	if (!(spi_sr & (SPI_SR_EOQF | SPI_SR_CMDTCF)))
		return IRQ_NONE;

	if (dspi_rxtx(dspi) == 0)
		complete(&dspi->xfer_done);

	return IRQ_HANDLED;
}

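/*
 * Process one spi_message: build the PUSHR command word for each transfer,
 * then run the transfer either over DMA or through the FIFO in interrupt
 * or polling mode.
 */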
static int dspi_transfer_one_message(struct spi_controller *ctlr,
				     struct spi_message *message)
{
	struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
	struct spi_device *spi = message->spi;
	struct spi_transfer *transfer;
	int status = 0;

	message->actual_length = 0;

	list_for_each_entry(transfer, &message->transfers, transfer_list) {
		dspi->cur_transfer = transfer;
		dspi->cur_msg = message;
		dspi->cur_chip = spi_get_ctldata(spi);

		dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0) |
			       SPI_PUSHR_CMD_PCS(spi->chip_select);
		if (list_is_last(&dspi->cur_transfer->transfer_list,
				 &dspi->cur_msg->transfers)) {
			/*
			 * Leave PCS activated after the last transfer when
			 * cs_change is set.
			 */
			if (transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		} else {
			/*
			 * Keep PCS active between transfers in the same
			 * message when cs_change is not set, and de-activate
			 * it between transfers when it is set.
			 */
			if (!transfer->cs_change)
				dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
		}

		dspi->tx = transfer->tx_buf;
		dspi->rx = transfer->rx_buf;
		dspi->len = transfer->len;
		dspi->progress = 0;

		regmap_update_bits(dspi->regmap, SPI_MCR,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);

		spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
				       dspi->progress, !dspi->irq);

		if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
			status = dspi_dma_xfer(dspi);
		} else {
			dspi_fifo_write(dspi);

			if (dspi->irq) {
				wait_for_completion(&dspi->xfer_done);
				reinit_completion(&dspi->xfer_done);
			} else {
				do {
					status = dspi_poll(dspi);
				} while (status == -EINPROGRESS);
			}
		}
		if (status)
			break;

		spi_transfer_delay_exec(transfer);
	}

	message->status = status;
	spi_finalize_current_message(ctlr);

	return status;
}

static int dspi_setup(struct spi_device *spi)
{
	struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);
	unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
	u32 cs_sck_delay = 0, sck_cs_delay = 0;
	struct fsl_dspi_platform_data *pdata;
	unsigned char pasc = 0, asc = 0;
	struct chip_data *chip;
	unsigned long clkrate;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (chip == NULL) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
	}

	pdata = dev_get_platdata(&dspi->pdev->dev);

	if (!pdata) {
		of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
				     &cs_sck_delay);

		of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
				     &sck_cs_delay);
	} else {
		cs_sck_delay = pdata->cs_sck_delay;
		sck_cs_delay = pdata->sck_cs_delay;
	}

	clkrate = clk_get_rate(dspi->clk);
	hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);

	/* Set PCS to SCK delay scale values */
	ns_delay_scale(&pcssck, &cssck, cs_sck_delay, clkrate);

	/* Set After SCK delay scale values */
	ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);

	chip->ctar_val = 0;
	if (spi->mode & SPI_CPOL)
		chip->ctar_val |= SPI_CTAR_CPOL;
	if (spi->mode & SPI_CPHA)
		chip->ctar_val |= SPI_CTAR_CPHA;

	if (!spi_controller_is_slave(dspi->ctlr)) {
		chip->ctar_val |= SPI_CTAR_PCSSCK(pcssck) |
				  SPI_CTAR_CSSCK(cssck) |
				  SPI_CTAR_PASC(pasc) |
				  SPI_CTAR_ASC(asc) |
				  SPI_CTAR_PBR(pbr) |
				  SPI_CTAR_BR(br);

		if (spi->mode & SPI_LSB_FIRST)
			chip->ctar_val |= SPI_CTAR_LSBFE;
	}

	spi_set_ctldata(spi, chip);

	return 0;
}

static void dspi_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);

	dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
		spi->controller->bus_num, spi->chip_select);

	kfree(chip);
}

static const struct of_device_id fsl_dspi_dt_ids[] = {
	{
		.compatible = "fsl,vf610-dspi",
		.data = &devtype_data[VF610],
	}, {
		.compatible = "fsl,ls1021a-v1.0-dspi",
		.data = &devtype_data[LS1021A],
	}, {
		.compatible = "fsl,ls1012a-dspi",
		.data = &devtype_data[LS1012A],
	}, {
		.compatible = "fsl,ls1028a-dspi",
		.data = &devtype_data[LS1028A],
	}, {
		.compatible = "fsl,ls1043a-dspi",
		.data = &devtype_data[LS1043A],
	}, {
		.compatible = "fsl,ls1046a-dspi",
		.data = &devtype_data[LS1046A],
	}, {
		.compatible = "fsl,ls2080a-dspi",
		.data = &devtype_data[LS2080A],
	}, {
		.compatible = "fsl,ls2085a-dspi",
		.data = &devtype_data[LS2085A],
	}, {
		.compatible = "fsl,lx2160a-dspi",
		.data = &devtype_data[LX2160A],
	},
	{ }
};
MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);

#ifdef CONFIG_PM_SLEEP
static int dspi_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);

	spi_controller_suspend(ctlr);
	clk_disable_unprepare(dspi->clk);

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int dspi_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
	int ret;

	pinctrl_pm_select_default_state(dev);

	ret = clk_prepare_enable(dspi->clk);
	if (ret)
		return ret;
	spi_controller_resume(ctlr);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);

static const struct regmap_range dspi_volatile_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_TCR),
	regmap_reg_range(SPI_SR, SPI_SR),
	regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
};

static const struct regmap_access_table dspi_volatile_table = {
	.yes_ranges = dspi_volatile_ranges,
	.n_yes_ranges = ARRAY_SIZE(dspi_volatile_ranges),
};

static const struct regmap_config dspi_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x88,
	.volatile_table = &dspi_volatile_table,
};

static const struct regmap_range dspi_xspi_volatile_ranges[] = {
	regmap_reg_range(SPI_MCR, SPI_TCR),
	regmap_reg_range(SPI_SR, SPI_SR),
	regmap_reg_range(SPI_PUSHR, SPI_RXFR3),
	regmap_reg_range(SPI_SREX, SPI_SREX),
};

static const struct regmap_access_table dspi_xspi_volatile_table = {
	.yes_ranges = dspi_xspi_volatile_ranges,
	.n_yes_ranges = ARRAY_SIZE(dspi_xspi_volatile_ranges),
};

static const struct regmap_config dspi_xspi_regmap_config[] = {
	{
		.reg_bits = 32,
		.val_bits = 32,
		.reg_stride = 4,
		.max_register = 0x13c,
		.volatile_table = &dspi_xspi_volatile_table,
	},
	{
		.name = "pushr",
		.reg_bits = 16,
		.val_bits = 16,
		.reg_stride = 2,
		.max_register = 0x2,
	},
};

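/*
 * One-time controller setup: program MCR (master/slave role, chip select
 * idle states, XSPI enable), clear all status flags, and select the
 * interrupt/DMA request sources matching the chosen transfer mode.
 */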
static int dspi_init(struct fsl_dspi *dspi)
{
	unsigned int mcr;

	/* Set idle states for all chip select signals to high */
	mcr = SPI_MCR_PCSIS(GENMASK(dspi->ctlr->num_chipselect - 1, 0));

	if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
		mcr |= SPI_MCR_XSPI;
	if (!spi_controller_is_slave(dspi->ctlr))
		mcr |= SPI_MCR_MASTER;

	regmap_write(dspi->regmap, SPI_MCR, mcr);
	regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);

	switch (dspi->devtype_data->trans_mode) {
	case DSPI_EOQ_MODE:
		regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
		break;
	case DSPI_XSPI_MODE:
		regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_CMDTCFE);
		break;
	case DSPI_DMA_MODE:
		regmap_write(dspi->regmap, SPI_RSER,
			     SPI_RSER_TFFFE | SPI_RSER_TFFFD |
			     SPI_RSER_RFDFE | SPI_RSER_RFDFD);
		break;
	default:
		dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
			dspi->devtype_data->trans_mode);
		return -EINVAL;
	}

	return 0;
}

static int dspi_slave_abort(struct spi_master *master)
{
	struct fsl_dspi *dspi = spi_master_get_devdata(master);

	/*
	 * Terminate all pending DMA transactions for the SPI working
	 * in SLAVE mode.
	 */
	if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
		dmaengine_terminate_sync(dspi->dma->chan_rx);
		dmaengine_terminate_sync(dspi->dma->chan_tx);
	}

	/* Clear the internal DSPI RX and TX FIFO buffers */
	regmap_update_bits(dspi->regmap, SPI_MCR,
			   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
			   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);

	return 0;
}

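/*
 * Report the maximum supported message size: one FIFO's worth of words in
 * EOQ mode, unlimited otherwise.
 */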
static size_t dspi_max_message_size(struct spi_device *spi)
{
	struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);

	if (dspi->devtype_data->trans_mode == DSPI_EOQ_MODE)
		return dspi->devtype_data->fifo_size;

	return SIZE_MAX;
}

static int dspi_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct regmap_config *regmap_config;
	struct fsl_dspi_platform_data *pdata;
	struct spi_controller *ctlr;
	int ret, cs_num, bus_num = -1;
	struct fsl_dspi *dspi;
	struct resource *res;
	void __iomem *base;
	bool big_endian;

	ctlr = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
	if (!ctlr)
		return -ENOMEM;

	dspi = spi_controller_get_devdata(ctlr);
	dspi->pdev = pdev;
	dspi->ctlr = ctlr;

	ctlr->setup = dspi_setup;
	ctlr->transfer_one_message = dspi_transfer_one_message;
	ctlr->max_message_size = dspi_max_message_size;
	ctlr->dev.of_node = pdev->dev.of_node;

	ctlr->cleanup = dspi_cleanup;
	ctlr->slave_abort = dspi_slave_abort;
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata) {
		ctlr->num_chipselect = pdata->cs_num;
		ctlr->bus_num = pdata->bus_num;

		/* Only Coldfire uses platform data */
		dspi->devtype_data = &devtype_data[MCF5441X];
		big_endian = true;
	} else {
		ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
			goto out_ctlr_put;
		}
		ctlr->num_chipselect = cs_num;

		of_property_read_u32(np, "bus-num", &bus_num);
		ctlr->bus_num = bus_num;

		if (of_property_read_bool(np, "spi-slave"))
			ctlr->slave = true;

		dspi->devtype_data = of_device_get_match_data(&pdev->dev);
		if (!dspi->devtype_data) {
			dev_err(&pdev->dev, "can't get devtype_data\n");
			ret = -EFAULT;
			goto out_ctlr_put;
		}

		big_endian = of_device_is_big_endian(np);
	}
	if (big_endian) {
		dspi->pushr_cmd = 0;
		dspi->pushr_tx = 2;
	} else {
		dspi->pushr_cmd = 2;
		dspi->pushr_tx = 0;
	}

	if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
		ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	else
		ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto out_ctlr_put;
	}

	if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
		regmap_config = &dspi_xspi_regmap_config[0];
	else
		regmap_config = &dspi_regmap_config;
	dspi->regmap = devm_regmap_init_mmio(&pdev->dev, base, regmap_config);
	if (IS_ERR(dspi->regmap)) {
		dev_err(&pdev->dev, "failed to init regmap: %ld\n",
			PTR_ERR(dspi->regmap));
		ret = PTR_ERR(dspi->regmap);
		goto out_ctlr_put;
	}

	if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) {
		dspi->regmap_pushr = devm_regmap_init_mmio(
			&pdev->dev, base + SPI_PUSHR,
			&dspi_xspi_regmap_config[1]);
		if (IS_ERR(dspi->regmap_pushr)) {
			dev_err(&pdev->dev,
				"failed to init pushr regmap: %ld\n",
				PTR_ERR(dspi->regmap_pushr));
			ret = PTR_ERR(dspi->regmap_pushr);
			goto out_ctlr_put;
		}
	}

	dspi->clk = devm_clk_get(&pdev->dev, "dspi");
	if (IS_ERR(dspi->clk)) {
		ret = PTR_ERR(dspi->clk);
		dev_err(&pdev->dev, "unable to get clock\n");
		goto out_ctlr_put;
	}
	ret = clk_prepare_enable(dspi->clk);
	if (ret)
		goto out_ctlr_put;

	ret = dspi_init(dspi);
	if (ret)
		goto out_clk_put;

	dspi->irq = platform_get_irq(pdev, 0);
	if (dspi->irq <= 0) {
		dev_info(&pdev->dev,
			 "can't get platform irq, using poll mode\n");
		dspi->irq = 0;
		goto poll_mode;
	}

	ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt,
			       IRQF_SHARED, pdev->name, dspi);
	if (ret < 0) {
		dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
		goto out_clk_put;
	}

	init_completion(&dspi->xfer_done);

poll_mode:

	if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
		ret = dspi_request_dma(dspi, res->start);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't get dma channels\n");
			goto out_clk_put;
		}
	}

	ctlr->max_speed_hz =
		clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;

	if (dspi->devtype_data->trans_mode != DSPI_DMA_MODE)
		ctlr->ptp_sts_supported = true;

	platform_set_drvdata(pdev, ctlr);

	ret = spi_register_controller(ctlr);
	if (ret != 0) {
		dev_err(&pdev->dev, "Problem registering DSPI ctlr\n");
		goto out_clk_put;
	}

	return ret;

out_clk_put:
	clk_disable_unprepare(dspi->clk);
out_ctlr_put:
	spi_controller_put(ctlr);

	return ret;
}

static int dspi_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = platform_get_drvdata(pdev);
	struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);

	/* Disconnect from the SPI framework */
	dspi_release_dma(dspi);
	clk_disable_unprepare(dspi->clk);
	spi_unregister_controller(dspi->ctlr);

	return 0;
}

static struct platform_driver fsl_dspi_driver = {
	.driver.name = DRIVER_NAME,
	.driver.of_match_table = fsl_dspi_dt_ids,
	.driver.owner = THIS_MODULE,
	.driver.pm = &dspi_pm,
	.probe = dspi_probe,
	.remove = dspi_remove,
};
module_platform_driver(fsl_dspi_driver);

MODULE_DESCRIPTION("Freescale DSPI Controller Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);