1
2
3
4
5
6
7
8#include <linux/clk.h>
9#include <linux/delay.h>
10#include <linux/interrupt.h>
11#include <linux/io.h>
12#include <linux/module.h>
13#include <linux/of_irq.h>
14#include <linux/of_address.h>
15#include <linux/platform_device.h>
16#include <linux/spi/spi.h>
17#include <linux/workqueue.h>
18#include <linux/spi/spi-mem.h>
19
20
/*
 * Register offsets of the QSPI controller, relative to the memory-mapped
 * base address.
 */
#define ZYNQ_QSPI_CONFIG_OFFSET 0x00 /* Configuration Register, RW */
#define ZYNQ_QSPI_STATUS_OFFSET 0x04 /* Interrupt Status Register, RO */
#define ZYNQ_QSPI_IEN_OFFSET 0x08 /* Interrupt Enable Register, WO */
#define ZYNQ_QSPI_IDIS_OFFSET 0x0C /* Interrupt Disable Register, WO */
#define ZYNQ_QSPI_IMASK_OFFSET 0x10 /* Interrupt Enabled Mask Register, RO */
#define ZYNQ_QSPI_ENABLE_OFFSET 0x14 /* Enable/Disable Register, RW */
#define ZYNQ_QSPI_DELAY_OFFSET 0x18 /* Delay Register, RW */
#define ZYNQ_QSPI_TXD_00_00_OFFSET 0x1C /* Transmit 4-byte word, WO */
#define ZYNQ_QSPI_TXD_00_01_OFFSET 0x80 /* Transmit 1-byte word, WO */
#define ZYNQ_QSPI_TXD_00_10_OFFSET 0x84 /* Transmit 2-byte word, WO */
#define ZYNQ_QSPI_TXD_00_11_OFFSET 0x88 /* Transmit 3-byte word, WO */
#define ZYNQ_QSPI_RXD_OFFSET 0x20 /* Data Receive Register, RO */
#define ZYNQ_QSPI_SIC_OFFSET 0x24 /* Slave Idle Count Register, RW */
#define ZYNQ_QSPI_TX_THRESH_OFFSET 0x28 /* TX FIFO Watermark Register, RW */
#define ZYNQ_QSPI_RX_THRESH_OFFSET 0x2C /* RX FIFO Watermark Register, RW */
#define ZYNQ_QSPI_GPIO_OFFSET 0x30 /* GPIO Register, RW */
#define ZYNQ_QSPI_LINEAR_CFG_OFFSET 0xA0 /* Linear Adapter Config Register, RW */
#define ZYNQ_QSPI_MOD_ID_OFFSET 0xFC /* Module ID Register, RO */

/*
 * QSPI Configuration Register bit masks.
 *
 * This register contains various control bits that affect the operation of
 * the QSPI controller.
 */
#define ZYNQ_QSPI_CONFIG_IFMODE_MASK BIT(31) /* Flash memory interface mode */
#define ZYNQ_QSPI_CONFIG_MANSRT_MASK BIT(16) /* Manual TX start */
#define ZYNQ_QSPI_CONFIG_MANSRTEN_MASK BIT(15) /* Enable manual TX mode */
#define ZYNQ_QSPI_CONFIG_SSFORCE_MASK BIT(14) /* Manual chip select */
#define ZYNQ_QSPI_CONFIG_BDRATE_MASK GENMASK(5, 3) /* Baud rate divisor */
#define ZYNQ_QSPI_CONFIG_CPHA_MASK BIT(2) /* Clock phase control */
#define ZYNQ_QSPI_CONFIG_CPOL_MASK BIT(1) /* Clock polarity control */
#define ZYNQ_QSPI_CONFIG_FWIDTH_MASK GENMASK(7, 6) /* Transfer word size */
#define ZYNQ_QSPI_CONFIG_MSTREN_MASK BIT(0) /* Master mode enable */

/*
 * QSPI Configuration Register - baud rate and chip select.
 *
 * Values used in the calculation of the baud rate divisor and for driving
 * the chip select line.
 */
#define ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX GENMASK(2, 0) /* Divisor field maximum */
#define ZYNQ_QSPI_CONFIG_BAUD_DIV_SHIFT 3 /* Divisor field shift */
#define ZYNQ_QSPI_CONFIG_PCS BIT(10) /* Peripheral chip select, active low */

/*
 * QSPI Interrupt Register bit masks.
 *
 * All four interrupt registers (Status/Mask/Enable/Disable) share these
 * bit definitions.
 */
#define ZYNQ_QSPI_IXR_RX_OVERFLOW_MASK BIT(0) /* RX FIFO overflow */
#define ZYNQ_QSPI_IXR_TXNFULL_MASK BIT(2) /* TX FIFO below threshold */
#define ZYNQ_QSPI_IXR_TXFULL_MASK BIT(3) /* TX FIFO full */
#define ZYNQ_QSPI_IXR_RXNEMTY_MASK BIT(4) /* RX FIFO not empty */
#define ZYNQ_QSPI_IXR_RXF_FULL_MASK BIT(5) /* RX FIFO full */
#define ZYNQ_QSPI_IXR_TXF_UNDRFLOW_MASK BIT(6) /* TX FIFO underflow */
#define ZYNQ_QSPI_IXR_ALL_MASK (ZYNQ_QSPI_IXR_RX_OVERFLOW_MASK | \
 ZYNQ_QSPI_IXR_TXNFULL_MASK | \
 ZYNQ_QSPI_IXR_TXFULL_MASK | \
 ZYNQ_QSPI_IXR_RXNEMTY_MASK | \
 ZYNQ_QSPI_IXR_RXF_FULL_MASK | \
 ZYNQ_QSPI_IXR_TXF_UNDRFLOW_MASK)
#define ZYNQ_QSPI_IXR_RXTX_MASK (ZYNQ_QSPI_IXR_TXNFULL_MASK | \
 ZYNQ_QSPI_IXR_RXNEMTY_MASK)

/*
 * QSPI Enable Register bit mask.
 *
 * This register is used to enable or disable the QSPI controller.
 */
#define ZYNQ_QSPI_ENABLE_ENABLE_MASK BIT(0) /* QSPI enable bit */

/*
 * QSPI Linear Configuration Register bits.
 *
 * Although named "Linear Configuration", this register also controls dual
 * memory selection when not in linear mode (see zynq_qspi_chipselect()).
 */
#define ZYNQ_QSPI_LCFG_TWO_MEM BIT(30) /* Two flash memories attached */
#define ZYNQ_QSPI_LCFG_SEP_BUS BIT(29) /* Separate bus per memory */
#define ZYNQ_QSPI_LCFG_U_PAGE BIT(28) /* Select the upper memory */

#define ZYNQ_QSPI_LCFG_DUMMY_SHIFT 8 /* Dummy-cycles field shift */

#define ZYNQ_QSPI_FAST_READ_QOUT_CODE 0x6B /* Quad-output fast read opcode */
#define ZYNQ_QSPI_FIFO_DEPTH 63 /* FIFO depth in 32-bit words */
#define ZYNQ_QSPI_RX_THRESHOLD 32 /* RX FIFO watermark level */
#define ZYNQ_QSPI_TX_THRESHOLD 1 /* TX FIFO watermark level */

/*
 * Mode bits configurable by the driver to support the different data
 * formats.
 */
#define ZYNQ_QSPI_MODEBITS (SPI_CPOL | SPI_CPHA)

/* Maximum number of chip selects supported by the controller */
#define ZYNQ_QSPI_MAX_NUM_CS 2
119
120
121
122
123
124
125
126
127
128
129
130
131
/**
 * struct zynq_qspi - Driver instance data for one QSPI controller
 * @dev: Pointer to the device backing this controller
 * @regs: Virtual (ioremapped) base address of the controller registers
 * @refclk: Reference (device) clock feeding the QSPI core
 * @pclk: APB bus clock for register access
 * @irq: IRQ number of the controller interrupt
 * @txbuf: Current position in the TX buffer (NULL for RX-only phases)
 * @rxbuf: Current position in the RX buffer (NULL for TX-only phases)
 * @tx_bytes: Number of bytes still to be queued into the TX FIFO
 * @rx_bytes: Number of bytes still to be drained from the RX FIFO
 * @data_completion: Completed by the IRQ handler when a phase finishes
 */
struct zynq_qspi {
	struct device *dev;
	void __iomem *regs;
	struct clk *refclk;
	struct clk *pclk;
	int irq;
	u8 *txbuf;
	u8 *rxbuf;
	int tx_bytes;
	int rx_bytes;
	struct completion data_completion;
};
144
145
146
147
/**
 * zynq_qspi_read - Read a controller register
 * @xqspi: Pointer to the zynq_qspi structure
 * @offset: Register offset from the controller base
 *
 * Return: the 32-bit value read from the register at @offset.
 */
static inline u32 zynq_qspi_read(struct zynq_qspi *xqspi, u32 offset)
{
	return readl_relaxed(xqspi->regs + offset);
}
152
/**
 * zynq_qspi_write - Write a controller register
 * @xqspi: Pointer to the zynq_qspi structure
 * @offset: Register offset from the controller base
 * @val: 32-bit value to write
 */
static inline void zynq_qspi_write(struct zynq_qspi *xqspi, u32 offset,
				   u32 val)
{
	writel_relaxed(val, xqspi->regs + offset);
}
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
/**
 * zynq_qspi_init_hw - Initialize the QSPI controller hardware
 * @xqspi: Pointer to the zynq_qspi structure
 * @num_cs: Number of connected chip selects (enables dual memory if > 1)
 *
 * Performs the following, in order:
 *	- Disable the controller and all interrupts
 *	- Configure the linear/dual-memory register (TWO_MEM when @num_cs > 1)
 *	- Drain any stale data from the RX FIFO and clear pending status
 *	- Enable master mode, manual (forced) chip select, 32-bit FIFO word
 *	  width and flash memory interface mode
 *	- Program the RX/TX FIFO watermarks
 *	- Enable the controller
 */
static void zynq_qspi_init_hw(struct zynq_qspi *xqspi, unsigned int num_cs)
{
	u32 config_reg;

	/* Quiesce: controller off, all interrupt sources disabled. */
	zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET, 0);
	zynq_qspi_write(xqspi, ZYNQ_QSPI_IDIS_OFFSET, ZYNQ_QSPI_IXR_ALL_MASK);

	/* Advertise both memories to the controller when two CS are wired. */
	config_reg = 0;

	if (num_cs > 1)
		config_reg |= ZYNQ_QSPI_LCFG_TWO_MEM;

	zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET, config_reg);

	/* Drain any stale words left in the RX FIFO from a previous run. */
	while (zynq_qspi_read(xqspi, ZYNQ_QSPI_STATUS_OFFSET) &
			      ZYNQ_QSPI_IXR_RXNEMTY_MASK)
		zynq_qspi_read(xqspi, ZYNQ_QSPI_RXD_OFFSET);

	/* Status bits are write-one-to-clear. */
	zynq_qspi_write(xqspi, ZYNQ_QSPI_STATUS_OFFSET, ZYNQ_QSPI_IXR_ALL_MASK);
	config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
	config_reg &= ~(ZYNQ_QSPI_CONFIG_MSTREN_MASK |
			ZYNQ_QSPI_CONFIG_CPOL_MASK |
			ZYNQ_QSPI_CONFIG_CPHA_MASK |
			ZYNQ_QSPI_CONFIG_BDRATE_MASK |
			ZYNQ_QSPI_CONFIG_SSFORCE_MASK |
			ZYNQ_QSPI_CONFIG_MANSRTEN_MASK |
			ZYNQ_QSPI_CONFIG_MANSRT_MASK);
	/* Master mode, manual CS, 32-bit word width, flash interface mode. */
	config_reg |= (ZYNQ_QSPI_CONFIG_MSTREN_MASK |
		       ZYNQ_QSPI_CONFIG_SSFORCE_MASK |
		       ZYNQ_QSPI_CONFIG_FWIDTH_MASK |
		       ZYNQ_QSPI_CONFIG_IFMODE_MASK);
	zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);

	zynq_qspi_write(xqspi, ZYNQ_QSPI_RX_THRESH_OFFSET,
			ZYNQ_QSPI_RX_THRESHOLD);
	zynq_qspi_write(xqspi, ZYNQ_QSPI_TX_THRESH_OFFSET,
			ZYNQ_QSPI_TX_THRESHOLD);

	zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET,
			ZYNQ_QSPI_ENABLE_ENABLE_MASK);
}
223
224static bool zynq_qspi_supports_op(struct spi_mem *mem,
225 const struct spi_mem_op *op)
226{
227 if (!spi_mem_default_supports_op(mem, op))
228 return false;
229
230
231
232
233 if (op->addr.nbytes > 3)
234 return false;
235
236 return true;
237}
238
239
240
241
242
243
/**
 * zynq_qspi_rxfifo_op - Pop one word from the RX FIFO into the RX buffer
 * @xqspi: Pointer to the zynq_qspi structure
 * @size: Number of valid bytes in the word (1..4)
 *
 * A full 32-bit word is always read from the FIFO; for sub-word transfers
 * the valid bytes occupy the most-significant end of the word, hence the
 * "+ 4 - size" source offset below. When there is no RX buffer the data is
 * simply discarded (TX-only phase still clocks bytes in).
 */
static void zynq_qspi_rxfifo_op(struct zynq_qspi *xqspi, unsigned int size)
{
	u32 data;

	data = zynq_qspi_read(xqspi, ZYNQ_QSPI_RXD_OFFSET);

	if (xqspi->rxbuf) {
		/* Valid bytes sit at the top of the word for short reads. */
		memcpy(xqspi->rxbuf, ((u8 *)&data) + 4 - size, size);
		xqspi->rxbuf += size;
	}

	/* Clamp at zero: the final partial word may overshoot the count. */
	xqspi->rx_bytes -= size;
	if (xqspi->rx_bytes < 0)
		xqspi->rx_bytes = 0;
}
259
260
261
262
263
264
/**
 * zynq_qspi_txfifo_op - Push one word from the TX buffer into the TX FIFO
 * @xqspi: Pointer to the zynq_qspi structure
 * @size: Number of bytes to send (1..4)
 *
 * The controller provides a dedicated TXD register for each transfer size
 * (1, 2, 3 or 4 bytes), selected via the offset[] lookup. Unused high bytes
 * are padded with 0xff; when no TX buffer exists (RX-only phase) zeroes are
 * clocked out instead.
 */
static void zynq_qspi_txfifo_op(struct zynq_qspi *xqspi, unsigned int size)
{
	/* offset[size - 1] is the TXD register shifting exactly size bytes */
	static const unsigned int offset[4] = {
		ZYNQ_QSPI_TXD_00_01_OFFSET, ZYNQ_QSPI_TXD_00_10_OFFSET,
		ZYNQ_QSPI_TXD_00_11_OFFSET, ZYNQ_QSPI_TXD_00_00_OFFSET };
	u32 data;

	if (xqspi->txbuf) {
		/* Preset to all-ones so unused bytes pad with 0xff. */
		data = 0xffffffff;
		memcpy(&data, xqspi->txbuf, size);
		xqspi->txbuf += size;
	} else {
		/* RX-only phase: generate clocks by sending zeroes. */
		data = 0;
	}

	xqspi->tx_bytes -= size;
	zynq_qspi_write(xqspi, offset[size - 1], data);
}
283
284
285
286
287
288
289static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
290{
291 struct spi_controller *ctlr = spi->master;
292 struct zynq_qspi *xqspi = spi_controller_get_devdata(ctlr);
293 u32 config_reg;
294
295
296 if (ctlr->num_chipselect > 1) {
297 config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET);
298 if (!spi->chip_select)
299 config_reg &= ~ZYNQ_QSPI_LCFG_U_PAGE;
300 else
301 config_reg |= ZYNQ_QSPI_LCFG_U_PAGE;
302
303 zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET, config_reg);
304 }
305
306
307 config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
308 if (assert)
309 config_reg &= ~ZYNQ_QSPI_CONFIG_PCS;
310 else
311 config_reg |= ZYNQ_QSPI_CONFIG_PCS;
312
313 zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
314}
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
334{
335 u32 config_reg, baud_rate_val = 0;
336
337
338
339
340
341
342
343
344
345
346 while ((baud_rate_val < ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX) &&
347 (clk_get_rate(xqspi->refclk) / (2 << baud_rate_val)) >
348 spi->max_speed_hz)
349 baud_rate_val++;
350
351 config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
352
353
354 config_reg &= (~ZYNQ_QSPI_CONFIG_CPHA_MASK) &
355 (~ZYNQ_QSPI_CONFIG_CPOL_MASK);
356 if (spi->mode & SPI_CPHA)
357 config_reg |= ZYNQ_QSPI_CONFIG_CPHA_MASK;
358 if (spi->mode & SPI_CPOL)
359 config_reg |= ZYNQ_QSPI_CONFIG_CPOL_MASK;
360
361 config_reg &= ~ZYNQ_QSPI_CONFIG_BDRATE_MASK;
362 config_reg |= (baud_rate_val << ZYNQ_QSPI_CONFIG_BAUD_DIV_SHIFT);
363 zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
364
365 return 0;
366}
367
368
369
370
371
372
373
374
375
376
377static int zynq_qspi_setup_op(struct spi_device *spi)
378{
379 struct spi_controller *ctlr = spi->master;
380 struct zynq_qspi *qspi = spi_controller_get_devdata(ctlr);
381
382 if (ctlr->busy)
383 return -EBUSY;
384
385 clk_enable(qspi->refclk);
386 clk_enable(qspi->pclk);
387 zynq_qspi_write(qspi, ZYNQ_QSPI_ENABLE_OFFSET,
388 ZYNQ_QSPI_ENABLE_ENABLE_MASK);
389
390 return 0;
391}
392
393
394
395
396
397
398
/**
 * zynq_qspi_write_op - Fill the TX FIFO with as much pending data as allowed
 * @xqspi: Pointer to the zynq_qspi structure
 * @txcount: Maximum number of 32-bit words to queue
 * @txempty: True when the TX FIFO is known to be empty
 */
static void zynq_qspi_write_op(struct zynq_qspi *xqspi, int txcount,
			       bool txempty)
{
	int count, len, k;

	len = xqspi->tx_bytes;
	if (len && len < 4) {
		/*
		 * We must empty the TX FIFO between accesses to TXD0,
		 * TXD1, TXD2, TXD3, so the trailing partial word is only
		 * queued once the FIFO has drained.
		 */
		if (txempty)
			zynq_qspi_txfifo_op(xqspi, len);

		return;
	}

	/* Whole 32-bit words only from here on, capped at txcount. */
	count = len / 4;
	if (count > txcount)
		count = txcount;

	if (xqspi->txbuf) {
		iowrite32_rep(xqspi->regs + ZYNQ_QSPI_TXD_00_00_OFFSET,
			      xqspi->txbuf, count);
		xqspi->txbuf += count * 4;
	} else {
		/* RX-only phase: push zero words to generate clocks. */
		for (k = 0; k < count; k++)
			writel_relaxed(0, xqspi->regs +
					  ZYNQ_QSPI_TXD_00_00_OFFSET);
	}

	xqspi->tx_bytes -= count * 4;
}
432
433
434
435
436
437
/**
 * zynq_qspi_read_op - Drain the RX FIFO by as many bytes as are in flight
 * @xqspi: Pointer to the zynq_qspi structure
 * @rxcount: Maximum number of 32-bit words to read
 */
static void zynq_qspi_read_op(struct zynq_qspi *xqspi, int rxcount)
{
	int count, len, k;

	/*
	 * Only bytes already queued on the TX side can be waiting in the RX
	 * FIFO: rx_bytes - tx_bytes bounds the amount of in-flight data.
	 */
	len = xqspi->rx_bytes - xqspi->tx_bytes;
	count = len / 4;
	if (count > rxcount)
		count = rxcount;
	if (xqspi->rxbuf) {
		ioread32_rep(xqspi->regs + ZYNQ_QSPI_RXD_OFFSET,
			     xqspi->rxbuf, count);
		xqspi->rxbuf += count * 4;
	} else {
		/* No destination buffer: pop and discard. */
		for (k = 0; k < count; k++)
			readl_relaxed(xqspi->regs + ZYNQ_QSPI_RXD_OFFSET);
	}
	xqspi->rx_bytes -= count * 4;
	len -= count * 4;

	/* Trailing partial word, if it is already available in the FIFO. */
	if (len && len < 4 && count < rxcount)
		zynq_qspi_rxfifo_op(xqspi, len);
}
460
461
462
463
464
465
466
467
468
469
470
471
/**
 * zynq_qspi_irq - Interrupt service routine of the QSPI controller
 * @irq: IRQ number
 * @dev_id: Pointer to the xqspi structure
 *
 * On a TX-below-threshold or RX-not-empty interrupt this handler drains the
 * RX FIFO and refills the TX FIFO if data remains; once both byte counters
 * reach zero it disables the RX/TX interrupts and completes the transfer.
 *
 * Return: IRQ_HANDLED when the interrupt was ours; IRQ_NONE otherwise.
 */
static irqreturn_t zynq_qspi_irq(int irq, void *dev_id)
{
	u32 intr_status;
	bool txempty;
	struct zynq_qspi *xqspi = (struct zynq_qspi *)dev_id;

	/* Read and acknowledge (write-one-to-clear) all pending causes. */
	intr_status = zynq_qspi_read(xqspi, ZYNQ_QSPI_STATUS_OFFSET);
	zynq_qspi_write(xqspi, ZYNQ_QSPI_STATUS_OFFSET, intr_status);

	if ((intr_status & ZYNQ_QSPI_IXR_TXNFULL_MASK) ||
	    (intr_status & ZYNQ_QSPI_IXR_RXNEMTY_MASK)) {
		/*
		 * TXNFULL is set when the TX FIFO has fewer than THRESHOLD
		 * entries; with the threshold set to 1 it means the TX FIFO
		 * is completely empty.
		 */
		txempty = !!(intr_status & ZYNQ_QSPI_IXR_TXNFULL_MASK);
		/* Read out whatever has arrived in the RX FIFO. */
		zynq_qspi_read_op(xqspi, ZYNQ_QSPI_RX_THRESHOLD);
		if (xqspi->tx_bytes) {
			/* There is more data to send. */
			zynq_qspi_write_op(xqspi, ZYNQ_QSPI_RX_THRESHOLD,
					   txempty);
		} else {
			/*
			 * Only signal completion once both the transmit and
			 * the receive sides have fully finished.
			 */
			if (!xqspi->rx_bytes) {
				zynq_qspi_write(xqspi,
						ZYNQ_QSPI_IDIS_OFFSET,
						ZYNQ_QSPI_IXR_RXTX_MASK);
				complete(&xqspi->data_completion);
			}
		}
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
512
513
514
515
516
517
518
519
520
521
522
523
524static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
525 const struct spi_mem_op *op)
526{
527 struct zynq_qspi *xqspi = spi_controller_get_devdata(mem->spi->master);
528 int err = 0, i;
529 u8 *tmpbuf;
530
531 dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
532 op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
533 op->dummy.buswidth, op->data.buswidth);
534
535 zynq_qspi_chipselect(mem->spi, true);
536 zynq_qspi_config_op(xqspi, mem->spi);
537
538 if (op->cmd.opcode) {
539 reinit_completion(&xqspi->data_completion);
540 xqspi->txbuf = (u8 *)&op->cmd.opcode;
541 xqspi->rxbuf = NULL;
542 xqspi->tx_bytes = sizeof(op->cmd.opcode);
543 xqspi->rx_bytes = sizeof(op->cmd.opcode);
544 zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
545 zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
546 ZYNQ_QSPI_IXR_RXTX_MASK);
547 if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
548 msecs_to_jiffies(1000)))
549 err = -ETIMEDOUT;
550 }
551
552 if (op->addr.nbytes) {
553 for (i = 0; i < op->addr.nbytes; i++) {
554 xqspi->txbuf[i] = op->addr.val >>
555 (8 * (op->addr.nbytes - i - 1));
556 }
557
558 reinit_completion(&xqspi->data_completion);
559 xqspi->rxbuf = NULL;
560 xqspi->tx_bytes = op->addr.nbytes;
561 xqspi->rx_bytes = op->addr.nbytes;
562 zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
563 zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
564 ZYNQ_QSPI_IXR_RXTX_MASK);
565 if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
566 msecs_to_jiffies(1000)))
567 err = -ETIMEDOUT;
568 }
569
570 if (op->dummy.nbytes) {
571 tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL);
572 memset(tmpbuf, 0xff, op->dummy.nbytes);
573 reinit_completion(&xqspi->data_completion);
574 xqspi->txbuf = tmpbuf;
575 xqspi->rxbuf = NULL;
576 xqspi->tx_bytes = op->dummy.nbytes;
577 xqspi->rx_bytes = op->dummy.nbytes;
578 zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
579 zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
580 ZYNQ_QSPI_IXR_RXTX_MASK);
581 if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
582 msecs_to_jiffies(1000)))
583 err = -ETIMEDOUT;
584
585 kfree(tmpbuf);
586 }
587
588 if (op->data.nbytes) {
589 reinit_completion(&xqspi->data_completion);
590 if (op->data.dir == SPI_MEM_DATA_OUT) {
591 xqspi->txbuf = (u8 *)op->data.buf.out;
592 xqspi->tx_bytes = op->data.nbytes;
593 xqspi->rxbuf = NULL;
594 xqspi->rx_bytes = op->data.nbytes;
595 } else {
596 xqspi->txbuf = NULL;
597 xqspi->rxbuf = (u8 *)op->data.buf.in;
598 xqspi->rx_bytes = op->data.nbytes;
599 xqspi->tx_bytes = op->data.nbytes;
600 }
601
602 zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
603 zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
604 ZYNQ_QSPI_IXR_RXTX_MASK);
605 if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
606 msecs_to_jiffies(1000)))
607 err = -ETIMEDOUT;
608 }
609 zynq_qspi_chipselect(mem->spi, false);
610
611 return err;
612}
613
/* spi-mem framework hooks implemented by this controller */
static const struct spi_controller_mem_ops zynq_qspi_mem_ops = {
	.supports_op = zynq_qspi_supports_op,
	.exec_op = zynq_qspi_exec_mem_op,
};
618
619
620
621
622
623
624
625
626
627static int zynq_qspi_probe(struct platform_device *pdev)
628{
629 int ret = 0;
630 struct spi_controller *ctlr;
631 struct device *dev = &pdev->dev;
632 struct device_node *np = dev->of_node;
633 struct zynq_qspi *xqspi;
634 u32 num_cs;
635
636 ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
637 if (!ctlr)
638 return -ENOMEM;
639
640 xqspi = spi_controller_get_devdata(ctlr);
641 xqspi->dev = dev;
642 platform_set_drvdata(pdev, xqspi);
643 xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
644 if (IS_ERR(xqspi->regs)) {
645 ret = PTR_ERR(xqspi->regs);
646 goto remove_master;
647 }
648
649 xqspi->pclk = devm_clk_get(&pdev->dev, "pclk");
650 if (IS_ERR(xqspi->pclk)) {
651 dev_err(&pdev->dev, "pclk clock not found.\n");
652 ret = PTR_ERR(xqspi->pclk);
653 goto remove_master;
654 }
655
656 init_completion(&xqspi->data_completion);
657
658 xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
659 if (IS_ERR(xqspi->refclk)) {
660 dev_err(&pdev->dev, "ref_clk clock not found.\n");
661 ret = PTR_ERR(xqspi->refclk);
662 goto remove_master;
663 }
664
665 ret = clk_prepare_enable(xqspi->pclk);
666 if (ret) {
667 dev_err(&pdev->dev, "Unable to enable APB clock.\n");
668 goto remove_master;
669 }
670
671 ret = clk_prepare_enable(xqspi->refclk);
672 if (ret) {
673 dev_err(&pdev->dev, "Unable to enable device clock.\n");
674 goto clk_dis_pclk;
675 }
676
677 xqspi->irq = platform_get_irq(pdev, 0);
678 if (xqspi->irq <= 0) {
679 ret = -ENXIO;
680 goto remove_master;
681 }
682 ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,
683 0, pdev->name, xqspi);
684 if (ret != 0) {
685 ret = -ENXIO;
686 dev_err(&pdev->dev, "request_irq failed\n");
687 goto remove_master;
688 }
689
690 ret = of_property_read_u32(np, "num-cs",
691 &num_cs);
692 if (ret < 0) {
693 ctlr->num_chipselect = 1;
694 } else if (num_cs > ZYNQ_QSPI_MAX_NUM_CS) {
695 dev_err(&pdev->dev, "only 2 chip selects are available\n");
696 goto remove_master;
697 } else {
698 ctlr->num_chipselect = num_cs;
699 }
700
701 ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
702 SPI_TX_DUAL | SPI_TX_QUAD;
703 ctlr->mem_ops = &zynq_qspi_mem_ops;
704 ctlr->setup = zynq_qspi_setup_op;
705 ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
706 ctlr->dev.of_node = np;
707
708
709 zynq_qspi_init_hw(xqspi, ctlr->num_chipselect);
710
711 ret = devm_spi_register_controller(&pdev->dev, ctlr);
712 if (ret) {
713 dev_err(&pdev->dev, "spi_register_master failed\n");
714 goto clk_dis_all;
715 }
716
717 return ret;
718
719clk_dis_all:
720 clk_disable_unprepare(xqspi->refclk);
721clk_dis_pclk:
722 clk_disable_unprepare(xqspi->pclk);
723remove_master:
724 spi_controller_put(ctlr);
725
726 return ret;
727}
728
729
730
731
732
733
734
735
736
737
738
/**
 * zynq_qspi_remove - Remove method for the QSPI driver
 * @pdev: Pointer to the platform_device structure
 *
 * Called when the device is removed or the driver module is unloaded.
 * Disables the controller and releases its clocks.
 *
 * Return: Always 0
 */
static int zynq_qspi_remove(struct platform_device *pdev)
{
	struct zynq_qspi *xqspi = platform_get_drvdata(pdev);

	/* Quiesce the controller before the clocks go away. */
	zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET, 0);

	clk_disable_unprepare(xqspi->refclk);
	clk_disable_unprepare(xqspi->pclk);

	return 0;
}
750
/* Device-tree compatibles this driver binds to */
static const struct of_device_id zynq_qspi_of_match[] = {
	{ .compatible = "xlnx,zynq-qspi-1.0", },
	{ /* end of table */ }
};

MODULE_DEVICE_TABLE(of, zynq_qspi_of_match);
757
758
759
760
/*
 * zynq_qspi_driver - This structure defines the QSPI platform driver
 */
static struct platform_driver zynq_qspi_driver = {
	.probe = zynq_qspi_probe,
	.remove = zynq_qspi_remove,
	.driver = {
		.name = "zynq-qspi",
		.of_match_table = zynq_qspi_of_match,
	},
};

module_platform_driver(zynq_qspi_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx Zynq QSPI driver");
MODULE_LICENSE("GPL");
775