#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/of.h>

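/* SPI slave controller register offsets */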
#define SPIS_IRQ_EN_REG		0x0
#define SPIS_IRQ_CLR_REG	0x4
#define SPIS_IRQ_ST_REG		0x8
#define SPIS_IRQ_MASK_REG	0xc
#define SPIS_CFG_REG		0x10
#define SPIS_RX_DATA_REG	0x14
#define SPIS_TX_DATA_REG	0x18
#define SPIS_RX_DST_REG		0x1c
#define SPIS_TX_SRC_REG		0x20
#define SPIS_DMA_CFG_REG	0x30
#define SPIS_SOFT_RST_REG	0x40

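/* SPIS_IRQ_EN_REG: interrupt enable bits */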
#define DMA_DONE_EN		BIT(7)
#define DATA_DONE_EN		BIT(2)
#define RSTA_DONE_EN		BIT(1)
#define CMD_INVALID_EN		BIT(0)

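/* SPIS_IRQ_ST_REG: interrupt status bits */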
#define DMA_DONE_ST		BIT(7)
#define DATA_DONE_ST		BIT(2)
#define RSTA_DONE_ST		BIT(1)
#define CMD_INVALID_ST		BIT(0)

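/* SPIS_IRQ_MASK_REG: interrupt mask bits */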
#define DMA_DONE_MASK		BIT(7)
#define DATA_DONE_MASK		BIT(2)
#define RSTA_DONE_MASK		BIT(1)
#define CMD_INVALID_MASK	BIT(0)

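/* SPIS_CFG_REG: transfer configuration bits */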
#define SPIS_TX_ENDIAN		BIT(7)
#define SPIS_RX_ENDIAN		BIT(6)
#define SPIS_TXMSBF		BIT(5)
#define SPIS_RXMSBF		BIT(4)
#define SPIS_CPHA		BIT(3)
#define SPIS_CPOL		BIT(2)
#define SPIS_TX_EN		BIT(1)
#define SPIS_RX_EN		BIT(0)

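/* SPIS_DMA_CFG_REG: DMA enable bits and transfer length mask */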
#define TX_DMA_TRIG_EN		BIT(31)
#define TX_DMA_EN		BIT(30)
#define RX_DMA_EN		BIT(29)
#define TX_DMA_LEN		0xfffff

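/* SPIS_SOFT_RST_REG bits */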
#define SPIS_DMA_ADDR_EN	BIT(1)
#define SPIS_SOFT_RST		BIT(0)

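/*
 * struct mtk_spi_slave - runtime state of one SPI slave controller
 * @dev:           device used for DMA mapping and log messages
 * @base:          MMIO base address of the controller registers
 * @spi_clk:       functional clock of the controller
 * @xfer_done:     completed by the IRQ handler (or slave abort) when the
 *                 current transfer finishes
 * @cur_transfer:  transfer currently in flight
 * @slave_aborted: set when the SPI core aborts the pending transfer
 * @dev_comp:      per-SoC data (FIFO size, must-rx quirk)
 */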
struct mtk_spi_slave {
	struct device *dev;
	void __iomem *base;
	struct clk *spi_clk;
	struct completion xfer_done;
	struct spi_transfer *cur_transfer;
	bool slave_aborted;
	const struct mtk_spi_compatible *dev_comp;
};

struct mtk_spi_compatible {
	const u32 max_fifo_size;
	bool must_rx;
};

static const struct mtk_spi_compatible mt2712_compat = {
	.max_fifo_size = 512,
};

static const struct mtk_spi_compatible mt8195_compat = {
	.max_fifo_size = 128,
	.must_rx = true,
};

static const struct of_device_id mtk_spi_slave_of_match[] = {
	{ .compatible = "mediatek,mt2712-spi-slave",
	  .data = (void *)&mt2712_compat,},
	{ .compatible = "mediatek,mt8195-spi-slave",
	  .data = (void *)&mt8195_compat,},
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_slave_of_match);

static void mtk_spi_slave_disable_dma(struct mtk_spi_slave *mdata)
{
	u32 reg_val;

	reg_val = readl(mdata->base + SPIS_DMA_CFG_REG);
	reg_val &= ~RX_DMA_EN;
	reg_val &= ~TX_DMA_EN;
	writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
}

static void mtk_spi_slave_disable_xfer(struct mtk_spi_slave *mdata)
{
	u32 reg_val;

	reg_val = readl(mdata->base + SPIS_CFG_REG);
	reg_val &= ~SPIS_TX_EN;
	reg_val &= ~SPIS_RX_EN;
	writel(reg_val, mdata->base + SPIS_CFG_REG);
}

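/*
 * Wait until the IRQ handler (or a slave abort) signals completion of the
 * current transfer; returns -EINTR if interrupted or aborted.
 */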
static int mtk_spi_slave_wait_for_completion(struct mtk_spi_slave *mdata)
{
	if (wait_for_completion_interruptible(&mdata->xfer_done) ||
	    mdata->slave_aborted) {
		dev_err(mdata->dev, "interrupted\n");
		return -EINTR;
	}

	return 0;
}

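/*
 * Apply the per-message configuration: clock phase/polarity and bit order
 * from the SPI device's mode bits; TX/RX endian swapping stays disabled.
 */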
static int mtk_spi_slave_prepare_message(struct spi_controller *ctlr,
					 struct spi_message *msg)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	struct spi_device *spi = msg->spi;
	bool cpha, cpol;
	u32 reg_val;

	cpha = spi->mode & SPI_CPHA ? 1 : 0;
	cpol = spi->mode & SPI_CPOL ? 1 : 0;

	reg_val = readl(mdata->base + SPIS_CFG_REG);
	if (cpha)
		reg_val |= SPIS_CPHA;
	else
		reg_val &= ~SPIS_CPHA;
	if (cpol)
		reg_val |= SPIS_CPOL;
	else
		reg_val &= ~SPIS_CPOL;

	if (spi->mode & SPI_LSB_FIRST)
		reg_val &= ~(SPIS_TXMSBF | SPIS_RXMSBF);
	else
		reg_val |= SPIS_TXMSBF | SPIS_RXMSBF;

	reg_val &= ~SPIS_TX_ENDIAN;
	reg_val &= ~SPIS_RX_ENDIAN;
	writel(reg_val, mdata->base + SPIS_CFG_REG);

	return 0;
}

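/*
 * FIFO (PIO) transfer path: reset the controller, enable the requested
 * directions, preload the TX FIFO and wait for the SPI master to clock the
 * data. RX data is drained by the interrupt handler.
 */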
static int mtk_spi_slave_fifo_transfer(struct spi_controller *ctlr,
				       struct spi_device *spi,
				       struct spi_transfer *xfer)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	int reg_val, cnt, remainder, ret;

	writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);

	reg_val = readl(mdata->base + SPIS_CFG_REG);
	if (xfer->rx_buf)
		reg_val |= SPIS_RX_EN;
	if (xfer->tx_buf)
		reg_val |= SPIS_TX_EN;
	writel(reg_val, mdata->base + SPIS_CFG_REG);

	cnt = xfer->len / 4;
	if (xfer->tx_buf)
		iowrite32_rep(mdata->base + SPIS_TX_DATA_REG,
			      xfer->tx_buf, cnt);

	remainder = xfer->len % 4;
	if (xfer->tx_buf && remainder > 0) {
		reg_val = 0;
		memcpy(&reg_val, xfer->tx_buf + cnt * 4, remainder);
		writel(reg_val, mdata->base + SPIS_TX_DATA_REG);
	}

	ret = mtk_spi_slave_wait_for_completion(mdata);
	if (ret) {
		mtk_spi_slave_disable_xfer(mdata);
		writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
	}

	return ret;
}

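/*
 * DMA transfer path, used when the transfer does not fit in the FIFO:
 * map the buffers, program source/destination addresses and length, arm
 * the DMA engine and wait for the DMA-done interrupt.
 */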
static int mtk_spi_slave_dma_transfer(struct spi_controller *ctlr,
				      struct spi_device *spi,
				      struct spi_transfer *xfer)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	struct device *dev = mdata->dev;
	int reg_val, ret;

	writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);

	if (xfer->tx_buf) {
		/*
		 * tx_buf is a const void *, but dma_map_single() takes a
		 * plain void *, so cast the constness away here.
		 */
		void *nonconst_tx = (void *)xfer->tx_buf;

		xfer->tx_dma = dma_map_single(dev, nonconst_tx,
					      xfer->len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, xfer->tx_dma)) {
			ret = -ENOMEM;
			goto disable_transfer;
		}
	}

	if (xfer->rx_buf) {
		xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
					      xfer->len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, xfer->rx_dma)) {
			ret = -ENOMEM;
			goto unmap_txdma;
		}
	}

	writel(xfer->tx_dma, mdata->base + SPIS_TX_SRC_REG);
	writel(xfer->rx_dma, mdata->base + SPIS_RX_DST_REG);

	writel(SPIS_DMA_ADDR_EN, mdata->base + SPIS_SOFT_RST_REG);

	/* enable the requested directions in the config register */
	reg_val = readl(mdata->base + SPIS_CFG_REG);
	if (xfer->tx_buf)
		reg_val |= SPIS_TX_EN;
	if (xfer->rx_buf)
		reg_val |= SPIS_RX_EN;
	writel(reg_val, mdata->base + SPIS_CFG_REG);

	/* configure the DMA transfer length */
	reg_val = 0;
	reg_val |= (xfer->len - 1) & TX_DMA_LEN;
	writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);

	reg_val = readl(mdata->base + SPIS_DMA_CFG_REG);
	if (xfer->tx_buf)
		reg_val |= TX_DMA_EN;
	if (xfer->rx_buf)
		reg_val |= RX_DMA_EN;
	reg_val |= TX_DMA_TRIG_EN;
	writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);

	ret = mtk_spi_slave_wait_for_completion(mdata);
	if (ret)
		goto unmap_rxdma;

	return 0;

unmap_rxdma:
	if (xfer->rx_buf)
		dma_unmap_single(dev, xfer->rx_dma,
				 xfer->len, DMA_FROM_DEVICE);

unmap_txdma:
	if (xfer->tx_buf)
		dma_unmap_single(dev, xfer->tx_dma,
				 xfer->len, DMA_TO_DEVICE);

disable_transfer:
	mtk_spi_slave_disable_dma(mdata);
	mtk_spi_slave_disable_xfer(mdata);
	writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);

	return ret;
}

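/* Use the FIFO path for short transfers, DMA for anything larger than the FIFO. */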
static int mtk_spi_slave_transfer_one(struct spi_controller *ctlr,
				      struct spi_device *spi,
				      struct spi_transfer *xfer)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);

	reinit_completion(&mdata->xfer_done);
	mdata->slave_aborted = false;
	mdata->cur_transfer = xfer;

	if (xfer->len > mdata->dev_comp->max_fifo_size)
		return mtk_spi_slave_dma_transfer(ctlr, spi, xfer);
	else
		return mtk_spi_slave_fifo_transfer(ctlr, spi, xfer);
}

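/* Enable and unmask all controller interrupts; start with DMA and transfers off. */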
static int mtk_spi_slave_setup(struct spi_device *spi)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(spi->master);
	u32 reg_val;

	reg_val = DMA_DONE_EN | DATA_DONE_EN |
		  RSTA_DONE_EN | CMD_INVALID_EN;
	writel(reg_val, mdata->base + SPIS_IRQ_EN_REG);

	reg_val = DMA_DONE_MASK | DATA_DONE_MASK |
		  RSTA_DONE_MASK | CMD_INVALID_MASK;
	writel(reg_val, mdata->base + SPIS_IRQ_MASK_REG);

	mtk_spi_slave_disable_dma(mdata);
	mtk_spi_slave_disable_xfer(mdata);

	return 0;
}

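/* slave_abort callback: mark the transfer aborted and wake up the waiter. */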
static int mtk_slave_abort(struct spi_controller *ctlr)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);

	mdata->slave_aborted = true;
	complete(&mdata->xfer_done);

	return 0;
}

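/*
 * Interrupt handler: acknowledge the controller, then complete the current
 * transfer. For DMA transfers it unmaps the buffers; for FIFO transfers it
 * drains the RX FIFO into the rx buffer.
 */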
static irqreturn_t mtk_spi_slave_interrupt(int irq, void *dev_id)
{
	struct spi_controller *ctlr = dev_id;
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	struct spi_transfer *trans = mdata->cur_transfer;
	u32 int_status, reg_val, cnt, remainder;

	int_status = readl(mdata->base + SPIS_IRQ_ST_REG);
	writel(int_status, mdata->base + SPIS_IRQ_CLR_REG);

	if (!trans)
		return IRQ_NONE;

	if ((int_status & DMA_DONE_ST) &&
	    ((int_status & DATA_DONE_ST) ||
	     (int_status & RSTA_DONE_ST))) {
		writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);

		if (trans->tx_buf)
			dma_unmap_single(mdata->dev, trans->tx_dma,
					 trans->len, DMA_TO_DEVICE);
		if (trans->rx_buf)
			dma_unmap_single(mdata->dev, trans->rx_dma,
					 trans->len, DMA_FROM_DEVICE);

		mtk_spi_slave_disable_dma(mdata);
		mtk_spi_slave_disable_xfer(mdata);
	}

	if ((!(int_status & DMA_DONE_ST)) &&
	    ((int_status & DATA_DONE_ST) ||
	     (int_status & RSTA_DONE_ST))) {
		cnt = trans->len / 4;
		if (trans->rx_buf)
			ioread32_rep(mdata->base + SPIS_RX_DATA_REG,
				     trans->rx_buf, cnt);
		remainder = trans->len % 4;
		if (trans->rx_buf && remainder > 0) {
			reg_val = readl(mdata->base + SPIS_RX_DATA_REG);
			memcpy(trans->rx_buf + (cnt * 4),
			       &reg_val, remainder);
		}

		mtk_spi_slave_disable_xfer(mdata);
	}

	if (int_status & CMD_INVALID_ST) {
		dev_warn(&ctlr->dev, "cmd invalid\n");
		return IRQ_NONE;
	}

	mdata->cur_transfer = NULL;
	complete(&mdata->xfer_done);

	return IRQ_HANDLED;
}

static int mtk_spi_slave_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct mtk_spi_slave *mdata;
	int irq, ret;
	const struct of_device_id *of_id;

	ctlr = spi_alloc_slave(&pdev->dev, sizeof(*mdata));
	if (!ctlr) {
		dev_err(&pdev->dev, "failed to alloc spi slave\n");
		return -ENOMEM;
	}

	ctlr->auto_runtime_pm = true;
	ctlr->dev.of_node = pdev->dev.of_node;
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
	ctlr->mode_bits |= SPI_LSB_FIRST;

	ctlr->prepare_message = mtk_spi_slave_prepare_message;
	ctlr->transfer_one = mtk_spi_slave_transfer_one;
	ctlr->setup = mtk_spi_slave_setup;
	ctlr->slave_abort = mtk_slave_abort;

	of_id = of_match_node(mtk_spi_slave_of_match, pdev->dev.of_node);
	if (!of_id) {
		dev_err(&pdev->dev, "failed to probe of_node\n");
		ret = -EINVAL;
		goto err_put_ctlr;
	}
	mdata = spi_controller_get_devdata(ctlr);
	mdata->dev_comp = of_id->data;

	if (mdata->dev_comp->must_rx)
		ctlr->flags = SPI_MASTER_MUST_RX;

	platform_set_drvdata(pdev, ctlr);

	init_completion(&mdata->xfer_done);
	mdata->dev = &pdev->dev;
	mdata->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mdata->base)) {
		ret = PTR_ERR(mdata->base);
		goto err_put_ctlr;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_put_ctlr;
	}

	ret = devm_request_irq(&pdev->dev, irq, mtk_spi_slave_interrupt,
			       IRQF_TRIGGER_NONE, dev_name(&pdev->dev), ctlr);
	if (ret) {
		dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
		goto err_put_ctlr;
	}

	mdata->spi_clk = devm_clk_get(&pdev->dev, "spi");
	if (IS_ERR(mdata->spi_clk)) {
		ret = PTR_ERR(mdata->spi_clk);
		dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
		goto err_put_ctlr;
	}

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
		goto err_put_ctlr;
	}

	pm_runtime_enable(&pdev->dev);

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to register slave controller(%d)\n", ret);
		clk_disable_unprepare(mdata->spi_clk);
		goto err_disable_runtime_pm;
	}

	clk_disable_unprepare(mdata->spi_clk);

	return 0;

err_disable_runtime_pm:
	pm_runtime_disable(&pdev->dev);
err_put_ctlr:
	spi_controller_put(ctlr);

	return ret;
}

static int mtk_spi_slave_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_spi_slave_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	int ret;

	ret = spi_controller_suspend(ctlr);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(mdata->spi_clk);

	return ret;
}

static int mtk_spi_slave_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	int ret;

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}
	}

	ret = spi_controller_resume(ctlr);
	if (ret < 0)
		clk_disable_unprepare(mdata->spi_clk);

	return ret;
}
#endif

#ifdef CONFIG_PM
static int mtk_spi_slave_runtime_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);

	clk_disable_unprepare(mdata->spi_clk);

	return 0;
}

static int mtk_spi_slave_runtime_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	int ret;

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
		return ret;
	}

	return 0;
}
#endif

static const struct dev_pm_ops mtk_spi_slave_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_slave_suspend, mtk_spi_slave_resume)
	SET_RUNTIME_PM_OPS(mtk_spi_slave_runtime_suspend,
			   mtk_spi_slave_runtime_resume, NULL)
};

static struct platform_driver mtk_spi_slave_driver = {
	.driver = {
		.name = "mtk-spi-slave",
		.pm = &mtk_spi_slave_pm,
		.of_match_table = mtk_spi_slave_of_match,
	},
	.probe = mtk_spi_slave_probe,
	.remove = mtk_spi_slave_remove,
};

module_platform_driver(mtk_spi_slave_driver);

MODULE_DESCRIPTION("MTK SPI Slave Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi-slave");