#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

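/* SPI slave controller register offsets */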
#define SPIS_IRQ_EN_REG 0x0
#define SPIS_IRQ_CLR_REG 0x4
#define SPIS_IRQ_ST_REG 0x8
#define SPIS_IRQ_MASK_REG 0xc
#define SPIS_CFG_REG 0x10
#define SPIS_RX_DATA_REG 0x14
#define SPIS_TX_DATA_REG 0x18
#define SPIS_RX_DST_REG 0x1c
#define SPIS_TX_SRC_REG 0x20
#define SPIS_DMA_CFG_REG 0x30
#define SPIS_SOFT_RST_REG 0x40

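/* SPIS_IRQ_EN_REG bits */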
#define DMA_DONE_EN BIT(7)
#define DATA_DONE_EN BIT(2)
#define RSTA_DONE_EN BIT(1)
#define CMD_INVALID_EN BIT(0)

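/* SPIS_IRQ_ST_REG bits */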
#define DMA_DONE_ST BIT(7)
#define DATA_DONE_ST BIT(2)
#define RSTA_DONE_ST BIT(1)
#define CMD_INVALID_ST BIT(0)

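/* SPIS_IRQ_MASK_REG bits */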
#define DMA_DONE_MASK BIT(7)
#define DATA_DONE_MASK BIT(2)
#define RSTA_DONE_MASK BIT(1)
#define CMD_INVALID_MASK BIT(0)

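/* SPIS_CFG_REG bits */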
#define SPIS_TX_ENDIAN BIT(7)
#define SPIS_RX_ENDIAN BIT(6)
#define SPIS_TXMSBF BIT(5)
#define SPIS_RXMSBF BIT(4)
#define SPIS_CPHA BIT(3)
#define SPIS_CPOL BIT(2)
#define SPIS_TX_EN BIT(1)
#define SPIS_RX_EN BIT(0)

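/* SPIS_DMA_CFG_REG bits */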
#define TX_DMA_TRIG_EN BIT(31)
#define TX_DMA_EN BIT(30)
#define RX_DMA_EN BIT(29)
#define TX_DMA_LEN 0xfffff

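/* SPIS_SOFT_RST_REG bits */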
#define SPIS_DMA_ADDR_EN BIT(1)
#define SPIS_SOFT_RST BIT(0)

#define MTK_SPI_SLAVE_MAX_FIFO_SIZE 512U

struct mtk_spi_slave {
	struct device *dev;
	void __iomem *base;
	struct clk *spi_clk;
	struct completion xfer_done;
	struct spi_transfer *cur_transfer;
	bool slave_aborted;
};

static const struct of_device_id mtk_spi_slave_of_match[] = {
	{ .compatible = "mediatek,mt2712-spi-slave", },
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_slave_of_match);

static void mtk_spi_slave_disable_dma(struct mtk_spi_slave *mdata)
{
	u32 reg_val;

	reg_val = readl(mdata->base + SPIS_DMA_CFG_REG);
	reg_val &= ~RX_DMA_EN;
	reg_val &= ~TX_DMA_EN;
	writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
}

static void mtk_spi_slave_disable_xfer(struct mtk_spi_slave *mdata)
{
	u32 reg_val;

	reg_val = readl(mdata->base + SPIS_CFG_REG);
	reg_val &= ~SPIS_TX_EN;
	reg_val &= ~SPIS_RX_EN;
	writel(reg_val, mdata->base + SPIS_CFG_REG);
}

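/*
 * Block until the interrupt handler signals completion of the current
 * transfer, or until the transfer is aborted through the slave_abort hook.
 */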
static int mtk_spi_slave_wait_for_completion(struct mtk_spi_slave *mdata)
{
	if (wait_for_completion_interruptible(&mdata->xfer_done) ||
	    mdata->slave_aborted) {
		dev_err(mdata->dev, "interrupted\n");
		return -EINTR;
	}

	return 0;
}

static int mtk_spi_slave_prepare_message(struct spi_controller *ctlr,
					 struct spi_message *msg)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	struct spi_device *spi = msg->spi;
	bool cpha, cpol;
	u32 reg_val;

	cpha = spi->mode & SPI_CPHA ? 1 : 0;
	cpol = spi->mode & SPI_CPOL ? 1 : 0;

	reg_val = readl(mdata->base + SPIS_CFG_REG);
	if (cpha)
		reg_val |= SPIS_CPHA;
	else
		reg_val &= ~SPIS_CPHA;
	if (cpol)
		reg_val |= SPIS_CPOL;
	else
		reg_val &= ~SPIS_CPOL;

	if (spi->mode & SPI_LSB_FIRST)
		reg_val &= ~(SPIS_TXMSBF | SPIS_RXMSBF);
	else
		reg_val |= SPIS_TXMSBF | SPIS_RXMSBF;

	reg_val &= ~SPIS_TX_ENDIAN;
	reg_val &= ~SPIS_RX_ENDIAN;
	writel(reg_val, mdata->base + SPIS_CFG_REG);

	return 0;
}

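/*
 * FIFO mode, for transfers of up to MTK_SPI_SLAVE_MAX_FIFO_SIZE bytes:
 * TX data is pushed into the FIFO here; RX data is drained from the FIFO
 * in the interrupt handler once the master has clocked the transfer.
 */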
static int mtk_spi_slave_fifo_transfer(struct spi_controller *ctlr,
				       struct spi_device *spi,
				       struct spi_transfer *xfer)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	int reg_val, cnt, remainder, ret;

	writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);

	reg_val = readl(mdata->base + SPIS_CFG_REG);
	if (xfer->rx_buf)
		reg_val |= SPIS_RX_EN;
	if (xfer->tx_buf)
		reg_val |= SPIS_TX_EN;
	writel(reg_val, mdata->base + SPIS_CFG_REG);

	cnt = xfer->len / 4;
	if (xfer->tx_buf)
		iowrite32_rep(mdata->base + SPIS_TX_DATA_REG,
			      xfer->tx_buf, cnt);

	remainder = xfer->len % 4;
	if (xfer->tx_buf && remainder > 0) {
		reg_val = 0;
		memcpy(&reg_val, xfer->tx_buf + cnt * 4, remainder);
		writel(reg_val, mdata->base + SPIS_TX_DATA_REG);
	}

	ret = mtk_spi_slave_wait_for_completion(mdata);
	if (ret) {
		mtk_spi_slave_disable_xfer(mdata);
		writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
	}

	return ret;
}

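/*
 * DMA mode, for transfers larger than the FIFO: map the buffers, program
 * the DMA addresses and length, and let the controller move the data.
 * On success the buffers are unmapped in the interrupt handler.
 */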
static int mtk_spi_slave_dma_transfer(struct spi_controller *ctlr,
				      struct spi_device *spi,
				      struct spi_transfer *xfer)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	struct device *dev = mdata->dev;
	int reg_val, ret;

	writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);

	if (xfer->tx_buf) {
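		/* tx_buf is const; dma_map_single() wants a non-const pointer */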
		void *nonconst_tx = (void *)xfer->tx_buf;

		xfer->tx_dma = dma_map_single(dev, nonconst_tx,
					      xfer->len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, xfer->tx_dma)) {
			ret = -ENOMEM;
			goto disable_transfer;
		}
	}

	if (xfer->rx_buf) {
		xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
					      xfer->len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, xfer->rx_dma)) {
			ret = -ENOMEM;
			goto unmap_txdma;
		}
	}

	writel(xfer->tx_dma, mdata->base + SPIS_TX_SRC_REG);
	writel(xfer->rx_dma, mdata->base + SPIS_RX_DST_REG);

	writel(SPIS_DMA_ADDR_EN, mdata->base + SPIS_SOFT_RST_REG);

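	/* enable TX/RX in the config register for this transfer */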
	reg_val = readl(mdata->base + SPIS_CFG_REG);
	if (xfer->tx_buf)
		reg_val |= SPIS_TX_EN;
	if (xfer->rx_buf)
		reg_val |= SPIS_RX_EN;
	writel(reg_val, mdata->base + SPIS_CFG_REG);

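	/* program the DMA length, then enable DMA and the trigger */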
	reg_val = 0;
	reg_val |= (xfer->len - 1) & TX_DMA_LEN;
	writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);

	reg_val = readl(mdata->base + SPIS_DMA_CFG_REG);
	if (xfer->tx_buf)
		reg_val |= TX_DMA_EN;
	if (xfer->rx_buf)
		reg_val |= RX_DMA_EN;
	reg_val |= TX_DMA_TRIG_EN;
	writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);

	ret = mtk_spi_slave_wait_for_completion(mdata);
	if (ret)
		goto unmap_rxdma;

	return 0;

unmap_rxdma:
	if (xfer->rx_buf)
		dma_unmap_single(dev, xfer->rx_dma,
				 xfer->len, DMA_FROM_DEVICE);

unmap_txdma:
	if (xfer->tx_buf)
		dma_unmap_single(dev, xfer->tx_dma,
				 xfer->len, DMA_TO_DEVICE);

disable_transfer:
	mtk_spi_slave_disable_dma(mdata);
	mtk_spi_slave_disable_xfer(mdata);
	writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);

	return ret;
}

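/* Transfers that do not fit in the FIFO are handed off to DMA mode. */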
static int mtk_spi_slave_transfer_one(struct spi_controller *ctlr,
				      struct spi_device *spi,
				      struct spi_transfer *xfer)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);

	reinit_completion(&mdata->xfer_done);
	mdata->slave_aborted = false;
	mdata->cur_transfer = xfer;

	if (xfer->len > MTK_SPI_SLAVE_MAX_FIFO_SIZE)
		return mtk_spi_slave_dma_transfer(ctlr, spi, xfer);
	else
		return mtk_spi_slave_fifo_transfer(ctlr, spi, xfer);
}

static int mtk_spi_slave_setup(struct spi_device *spi)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(spi->master);
	u32 reg_val;

	reg_val = DMA_DONE_EN | DATA_DONE_EN |
		  RSTA_DONE_EN | CMD_INVALID_EN;
	writel(reg_val, mdata->base + SPIS_IRQ_EN_REG);

	reg_val = DMA_DONE_MASK | DATA_DONE_MASK |
		  RSTA_DONE_MASK | CMD_INVALID_MASK;
	writel(reg_val, mdata->base + SPIS_IRQ_MASK_REG);

	mtk_spi_slave_disable_dma(mdata);
	mtk_spi_slave_disable_xfer(mdata);

	return 0;
}

static int mtk_slave_abort(struct spi_controller *ctlr)
{
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);

	mdata->slave_aborted = true;
	complete(&mdata->xfer_done);

	return 0;
}

static irqreturn_t mtk_spi_slave_interrupt(int irq, void *dev_id)
{
	struct spi_controller *ctlr = dev_id;
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	struct spi_transfer *trans = mdata->cur_transfer;
	u32 int_status, reg_val, cnt, remainder;

	int_status = readl(mdata->base + SPIS_IRQ_ST_REG);
	writel(int_status, mdata->base + SPIS_IRQ_CLR_REG);

	if (!trans)
		return IRQ_NONE;

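	/* DMA transfer done: unmap the buffers and disable DMA */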
	if ((int_status & DMA_DONE_ST) &&
	    ((int_status & DATA_DONE_ST) ||
	     (int_status & RSTA_DONE_ST))) {
		writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);

		if (trans->tx_buf)
			dma_unmap_single(mdata->dev, trans->tx_dma,
					 trans->len, DMA_TO_DEVICE);
		if (trans->rx_buf)
			dma_unmap_single(mdata->dev, trans->rx_dma,
					 trans->len, DMA_FROM_DEVICE);

		mtk_spi_slave_disable_dma(mdata);
		mtk_spi_slave_disable_xfer(mdata);
	}

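	/* FIFO transfer done: drain the RX FIFO into rx_buf */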
	if ((!(int_status & DMA_DONE_ST)) &&
	    ((int_status & DATA_DONE_ST) ||
	     (int_status & RSTA_DONE_ST))) {
		cnt = trans->len / 4;
		if (trans->rx_buf)
			ioread32_rep(mdata->base + SPIS_RX_DATA_REG,
				     trans->rx_buf, cnt);
		remainder = trans->len % 4;
		if (trans->rx_buf && remainder > 0) {
			reg_val = readl(mdata->base + SPIS_RX_DATA_REG);
			memcpy(trans->rx_buf + (cnt * 4),
			       &reg_val, remainder);
		}

		mtk_spi_slave_disable_xfer(mdata);
	}

	if (int_status & CMD_INVALID_ST) {
		dev_warn(&ctlr->dev, "cmd invalid\n");
		return IRQ_NONE;
	}

	mdata->cur_transfer = NULL;
	complete(&mdata->xfer_done);

	return IRQ_HANDLED;
}

static int mtk_spi_slave_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct mtk_spi_slave *mdata;
	int irq, ret;

	ctlr = spi_alloc_slave(&pdev->dev, sizeof(*mdata));
	if (!ctlr) {
		dev_err(&pdev->dev, "failed to alloc spi slave\n");
		return -ENOMEM;
	}

	ctlr->auto_runtime_pm = true;
	ctlr->dev.of_node = pdev->dev.of_node;
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
	ctlr->mode_bits |= SPI_LSB_FIRST;

	ctlr->prepare_message = mtk_spi_slave_prepare_message;
	ctlr->transfer_one = mtk_spi_slave_transfer_one;
	ctlr->setup = mtk_spi_slave_setup;
	ctlr->slave_abort = mtk_slave_abort;

	mdata = spi_controller_get_devdata(ctlr);

	platform_set_drvdata(pdev, ctlr);

	init_completion(&mdata->xfer_done);
	mdata->dev = &pdev->dev;
	mdata->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mdata->base)) {
		ret = PTR_ERR(mdata->base);
		goto err_put_ctlr;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_put_ctlr;
	}

	ret = devm_request_irq(&pdev->dev, irq, mtk_spi_slave_interrupt,
			       IRQF_TRIGGER_NONE, dev_name(&pdev->dev), ctlr);
	if (ret) {
		dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
		goto err_put_ctlr;
	}

	mdata->spi_clk = devm_clk_get(&pdev->dev, "spi");
	if (IS_ERR(mdata->spi_clk)) {
		ret = PTR_ERR(mdata->spi_clk);
		dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
		goto err_put_ctlr;
	}

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
		goto err_put_ctlr;
	}

	pm_runtime_enable(&pdev->dev);

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to register slave controller(%d)\n", ret);
		clk_disable_unprepare(mdata->spi_clk);
		goto err_disable_runtime_pm;
	}

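	/*
	 * The clock can be released here; with auto_runtime_pm set, runtime
	 * PM re-enables it around each message.
	 */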
	clk_disable_unprepare(mdata->spi_clk);

	return 0;

err_disable_runtime_pm:
	pm_runtime_disable(&pdev->dev);
err_put_ctlr:
	spi_controller_put(ctlr);

	return ret;
}

static int mtk_spi_slave_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_spi_slave_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	int ret;

	ret = spi_controller_suspend(ctlr);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(mdata->spi_clk);

	return ret;
}

static int mtk_spi_slave_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	int ret;

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}
	}

	ret = spi_controller_resume(ctlr);
	if (ret < 0)
		clk_disable_unprepare(mdata->spi_clk);

	return ret;
}
#endif

#ifdef CONFIG_PM
static int mtk_spi_slave_runtime_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);

	clk_disable_unprepare(mdata->spi_clk);

	return 0;
}

static int mtk_spi_slave_runtime_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
	int ret;

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
		return ret;
	}

	return 0;
}
#endif

static const struct dev_pm_ops mtk_spi_slave_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_slave_suspend, mtk_spi_slave_resume)
	SET_RUNTIME_PM_OPS(mtk_spi_slave_runtime_suspend,
			   mtk_spi_slave_runtime_resume, NULL)
};

static struct platform_driver mtk_spi_slave_driver = {
	.driver = {
		.name = "mtk-spi-slave",
		.pm = &mtk_spi_slave_pm,
		.of_match_table = mtk_spi_slave_of_match,
	},
	.probe = mtk_spi_slave_probe,
	.remove = mtk_spi_slave_remove,
};

module_platform_driver(mtk_spi_slave_driver);

MODULE_DESCRIPTION("MTK SPI Slave Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi-slave");