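/*
 * A driver for the ARM PL022 PrimeCell SSP/SPI bus master.
 *
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 */
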
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl022.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>

/*
 * Update a bitfield: the bits covered by @mask in @reg are cleared and
 * then OR:ed with @val shifted @sb steps to the left (and masked).
 */
#define SSP_WRITE_BITS(reg, val, mask, sb) \
	((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))

/*
 * Generate the value of a bitfield: @val shifted @sb steps to the
 * left and masked with @mask.
 */
#define GEN_MASK_BITS(val, mask, sb) \
	(((val)<<(sb)) & (mask))

#define DRIVE_TX		0
#define DO_NOT_DRIVE_TX		1

#define DO_NOT_QUEUE_DMA	0
#define QUEUE_DMA		1

#define RX_TRANSFER		1
#define TX_TRANSFER		2

/*
 * Macros to access SSP Registers with their offsets
 */
#define SSP_CR0(r)	(r + 0x000)
#define SSP_CR1(r)	(r + 0x004)
#define SSP_DR(r)	(r + 0x008)
#define SSP_SR(r)	(r + 0x00C)
#define SSP_CPSR(r)	(r + 0x010)
#define SSP_IMSC(r)	(r + 0x014)
#define SSP_RIS(r)	(r + 0x018)
#define SSP_MIS(r)	(r + 0x01C)
#define SSP_ICR(r)	(r + 0x020)
#define SSP_DMACR(r)	(r + 0x024)
#define SSP_CSR(r)	(r + 0x030)
#define SSP_ITCR(r)	(r + 0x080)
#define SSP_ITIP(r)	(r + 0x084)
#define SSP_ITOP(r)	(r + 0x088)
#define SSP_TDR(r)	(r + 0x08C)

#define SSP_PID0(r)	(r + 0xFE0)
#define SSP_PID1(r)	(r + 0xFE4)
#define SSP_PID2(r)	(r + 0xFE8)
#define SSP_PID3(r)	(r + 0xFEC)

#define SSP_CID0(r)	(r + 0xFF0)
#define SSP_CID1(r)	(r + 0xFF4)
#define SSP_CID2(r)	(r + 0xFF8)
#define SSP_CID3(r)	(r + 0xFFC)

/*
 * SSP Control Register 0  - SSP_CR0
 */
#define SSP_CR0_MASK_DSS	(0x0FUL << 0)
#define SSP_CR0_MASK_FRF	(0x3UL << 4)
#define SSP_CR0_MASK_SPO	(0x1UL << 6)
#define SSP_CR0_MASK_SPH	(0x1UL << 7)
#define SSP_CR0_MASK_SCR	(0xFFUL << 8)

/*
 * The ST version of this block moves some bits
 * in SSP_CR0 and adds some new ones.
 */
#define SSP_CR0_MASK_DSS_ST	(0x1FUL << 0)
#define SSP_CR0_MASK_HALFDUP_ST	(0x1UL << 5)
#define SSP_CR0_MASK_CSS_ST	(0x1FUL << 16)
#define SSP_CR0_MASK_FRF_ST	(0x3UL << 21)

/*
 * SSP Control Register 1  - SSP_CR1
 */
#define SSP_CR1_MASK_LBM	(0x1UL << 0)
#define SSP_CR1_MASK_SSE	(0x1UL << 1)
#define SSP_CR1_MASK_MS		(0x1UL << 2)
#define SSP_CR1_MASK_SOD	(0x1UL << 3)

/*
 * The ST version of this block adds some bits
 * in SSP_CR1
 */
#define SSP_CR1_MASK_RENDN_ST	(0x1UL << 4)
#define SSP_CR1_MASK_TENDN_ST	(0x1UL << 5)
#define SSP_CR1_MASK_MWAIT_ST	(0x1UL << 6)
#define SSP_CR1_MASK_RXIFLSEL_ST (0x7UL << 7)
#define SSP_CR1_MASK_TXIFLSEL_ST (0x7UL << 10)
/* This one is only in the PL023 variant */
#define SSP_CR1_MASK_FBCLKDEL_ST (0x7UL << 13)

/*
 * SSP Status Register - SSP_SR
 */
#define SSP_SR_MASK_TFE		(0x1UL << 0) /* Transmit FIFO empty */
#define SSP_SR_MASK_TNF		(0x1UL << 1) /* Transmit FIFO not full */
#define SSP_SR_MASK_RNE		(0x1UL << 2) /* Receive FIFO not empty */
#define SSP_SR_MASK_RFF		(0x1UL << 3) /* Receive FIFO full */
#define SSP_SR_MASK_BSY		(0x1UL << 4) /* Busy Flag */

/*
 * SSP Clock Prescale Register  - SSP_CPSR
 */
#define SSP_CPSR_MASK_CPSDVSR	(0xFFUL << 0)

/*
 * SSP Interrupt Mask Set/Clear Register - SSP_IMSC
 */
#define SSP_IMSC_MASK_RORIM	(0x1UL << 0) /* Receive Overrun Interrupt mask */
#define SSP_IMSC_MASK_RTIM	(0x1UL << 1) /* Receive timeout Interrupt mask */
#define SSP_IMSC_MASK_RXIM	(0x1UL << 2) /* Receive FIFO Interrupt mask */
#define SSP_IMSC_MASK_TXIM	(0x1UL << 3) /* Transmit FIFO Interrupt mask */

/*
 * SSP Raw Interrupt Status Register - SSP_RIS
 */
/* Receive Overrun Raw Interrupt status */
#define SSP_RIS_MASK_RORRIS	(0x1UL << 0)
/* Receive Timeout Raw Interrupt status */
#define SSP_RIS_MASK_RTRIS	(0x1UL << 1)
/* Receive FIFO Raw Interrupt status */
#define SSP_RIS_MASK_RXRIS	(0x1UL << 2)
/* Transmit FIFO Raw Interrupt status */
#define SSP_RIS_MASK_TXRIS	(0x1UL << 3)

/*
 * SSP Masked Interrupt Status Register - SSP_MIS
 */
/* Receive Overrun Masked Interrupt status */
#define SSP_MIS_MASK_RORMIS	(0x1UL << 0)
/* Receive Timeout Masked Interrupt status */
#define SSP_MIS_MASK_RTMIS	(0x1UL << 1)
/* Receive FIFO Masked Interrupt status */
#define SSP_MIS_MASK_RXMIS	(0x1UL << 2)
/* Transmit FIFO Masked Interrupt status */
#define SSP_MIS_MASK_TXMIS	(0x1UL << 3)

/*
 * SSP Interrupt Clear Register - SSP_ICR
 */
/* Receive Overrun Raw Clear Interrupt bit */
#define SSP_ICR_MASK_RORIC	(0x1UL << 0)
/* Receive Timeout Clear Interrupt bit */
#define SSP_ICR_MASK_RTIC	(0x1UL << 1)

/*
 * SSP DMA Control Register - SSP_DMACR
 */
/* Receive DMA Enable bit */
#define SSP_DMACR_MASK_RXDMAE	(0x1UL << 0)
/* Transmit DMA Enable bit */
#define SSP_DMACR_MASK_TXDMAE	(0x1UL << 1)

/*
 * SSP Chip Select Control Register - SSP_CSR
 * (vendor extension)
 */
#define SSP_CSR_CSVALUE_MASK	(0x1FUL << 0)

/*
 * SSP Integration Test control Register - SSP_ITCR
 */
#define SSP_ITCR_MASK_ITEN	(0x1UL << 0)
#define SSP_ITCR_MASK_TESTFIFO	(0x1UL << 1)

/*
 * SSP Integration Test Input Register - SSP_ITIP
 */
#define ITIP_MASK_SSPRXD	(0x1UL << 0)
#define ITIP_MASK_SSPFSSIN	(0x1UL << 1)
#define ITIP_MASK_SSPCLKIN	(0x1UL << 2)
#define ITIP_MASK_RXDMAC	(0x1UL << 3)
#define ITIP_MASK_TXDMAC	(0x1UL << 4)
#define ITIP_MASK_SSPTXDIN	(0x1UL << 5)

/*
 * SSP Integration Test output Register - SSP_ITOP
 */
#define ITOP_MASK_SSPTXD	(0x1UL << 0)
#define ITOP_MASK_SSPFSSOUT	(0x1UL << 1)
#define ITOP_MASK_SSPCLKOUT	(0x1UL << 2)
#define ITOP_MASK_SSPOEn	(0x1UL << 3)
#define ITOP_MASK_SSPCTLOEn	(0x1UL << 4)
#define ITOP_MASK_RORINTR	(0x1UL << 5)
#define ITOP_MASK_RTINTR	(0x1UL << 6)
#define ITOP_MASK_RXINTR	(0x1UL << 7)
#define ITOP_MASK_TXINTR	(0x1UL << 8)
#define ITOP_MASK_INTR		(0x1UL << 9)
#define ITOP_MASK_RXDMABREQ	(0x1UL << 10)
#define ITOP_MASK_RXDMASREQ	(0x1UL << 11)
#define ITOP_MASK_TXDMABREQ	(0x1UL << 12)
#define ITOP_MASK_TXDMASREQ	(0x1UL << 13)

/*
 * SSP Test Data Register - SSP_TDR
 */
#define TDR_MASK_TESTDATA	(0xFFFFFFFF)

/*
 * Message State
 * we use the spi_message.state (void *) pointer to
 * hold a single state value, that's why all this
 * (void *) casting is done here.
 */
#define STATE_START	((void *) 0)
#define STATE_RUNNING	((void *) 1)
#define STATE_DONE	((void *) 2)
#define STATE_ERROR	((void *) -1)
#define STATE_TIMEOUT	((void *) -2)

/*
 * SSP State - Whether Enabled or Disabled
 */
#define SSP_DISABLED	(0)
#define SSP_ENABLED	(1)

/*
 * SSP DMA State - Whether DMA Enabled or Disabled
 */
#define SSP_DMA_DISABLED	(0)
#define SSP_DMA_ENABLED		(1)

/*
 * SSP Clock Defaults
 */
#define SSP_DEFAULT_CLKRATE	0x2
#define SSP_DEFAULT_PRESCALE	0x40

/*
 * SSP Clock Parameter ranges
 */
#define CPSDVR_MIN 0x02
#define CPSDVR_MAX 0xFE
#define SCR_MIN 0x00
#define SCR_MAX 0xFF

/*
 * SSP Interrupt related Macros
 */
#define DEFAULT_SSP_REG_IMSC	0x0UL
#define DISABLE_ALL_INTERRUPTS	DEFAULT_SSP_REG_IMSC
#define ENABLE_ALL_INTERRUPTS ( \
	SSP_IMSC_MASK_RORIM | \
	SSP_IMSC_MASK_RTIM | \
	SSP_IMSC_MASK_RXIM | \
	SSP_IMSC_MASK_TXIM \
)

#define CLEAR_ALL_INTERRUPTS	0x3

#define SPI_POLLING_TIMEOUT	1000

/*
 * The type of reading going on for this chip
 */
enum ssp_reading {
	READING_NULL,
	READING_U8,
	READING_U16,
	READING_U32
};

/*
 * The type of writing going on for this chip
 */
enum ssp_writing {
	WRITING_NULL,
	WRITING_U8,
	WRITING_U16,
	WRITING_U32
};
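
/**
 * struct vendor_data - vendor-specific config parameters
 * for PL022 derivatives
 * @fifodepth: depth of FIFOs (both)
 * @max_bpw: maximum number of bits per word
 * @unidir: supports unidirectional transfer mode
 * @extended_cr: 32 bit wide control register 0 with extra
 *	features and extra features in CR1 as found in the ST variants
 * @pl023: supports a subset of the ST extensions called "PL023"
 * @loopback: supports loopback mode
 * @internal_cs_ctrl: supports chip select control register
 */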
struct vendor_data {
	int fifodepth;
	int max_bpw;
	bool unidir;
	bool extended_cr;
	bool pl023;
	bool loopback;
	bool internal_cs_ctrl;
};
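
/**
 * struct pl022 - This is the private SSP driver data structure
 * @adev: AMBA device model hookup
 * @vendor: vendor data for the IP block
 * @phybase: the physical memory where the SSP device resides
 * @virtbase: the virtual memory where the SSP is mapped
 * @clk: outgoing clock "SPICLK" for the SPI bus
 * @master: SPI framework hookup
 * @master_info: controller-specific data from machine setup
 * @pump_transfers: tasklet used in interrupt transfer mode
 * @cur_msg: message currently being processed
 * @cur_transfer: transfer currently being processed
 * @cur_chip: entry in the runtime state for the current chip
 * @next_msg_cs_active: the next message in the queue has been examined
 *	and it was found that it uses the same chip select as the previous
 *	message, so we left it active after the previous transfer, and
 *	it's active already.
 * @tx: current position in TX buffer to be read
 * @tx_end: end position in TX buffer to be read
 * @rx: current position in RX buffer to be written
 * @rx_end: end position in RX buffer to be written
 * @read: the type of read currently going on
 * @write: the type of write currently going on
 * @exp_fifo_level: expected FIFO level
 * @rx_lev_trig: receive FIFO watermark level which triggers IRQ
 * @tx_lev_trig: transmit FIFO watermark level which triggers IRQ
 * @dma_rx_channel: optional channel for RX DMA
 * @dma_tx_channel: optional channel for TX DMA
 * @sgt_rx: scattertable for the RX transfer
 * @sgt_tx: scattertable for the TX transfer
 * @dummypage: a dummy page used for driving data on the bus with DMA
 * @dma_running: indicates whether DMA is currently operating
 * @cur_cs: current chip select index
 * @cur_gpiod: current chip select GPIO descriptor
 */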
struct pl022 {
	struct amba_device		*adev;
	struct vendor_data		*vendor;
	resource_size_t			phybase;
	void __iomem			*virtbase;
	struct clk			*clk;
	struct spi_master		*master;
	struct pl022_ssp_controller	*master_info;
	struct tasklet_struct		pump_transfers;
	struct spi_message		*cur_msg;
	struct spi_transfer		*cur_transfer;
	struct chip_data		*cur_chip;
	bool				next_msg_cs_active;
	void				*tx;
	void				*tx_end;
	void				*rx;
	void				*rx_end;
	enum ssp_reading		read;
	enum ssp_writing		write;
	u32				exp_fifo_level;
	enum ssp_rx_level_trig		rx_lev_trig;
	enum ssp_tx_level_trig		tx_lev_trig;
#ifdef CONFIG_DMA_ENGINE
	struct dma_chan			*dma_rx_channel;
	struct dma_chan			*dma_tx_channel;
	struct sg_table			sgt_rx;
	struct sg_table			sgt_tx;
	char				*dummypage;
	bool				dma_running;
#endif
	int				cur_cs;
	struct gpio_desc		*cur_gpiod;
};
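
/**
 * struct chip_data - To maintain runtime state of SSP for each client chip
 * @cr0: cached value of control register 0
 * @cr1: cached value of control register 1
 * @dmacr: cached value of DMA control register
 * @cpsr: cached value of clock prescale register
 * @n_bytes: how many bytes one word requires for this chip (1, 2 or 4)
 * @enable_dma: whether to do DMA for this chip or not
 * @read: the type of read to perform for this chip
 * @write: the type of write to perform for this chip
 * @xfer_type: polling, interrupt or DMA transfer mode
 */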
struct chip_data {
	u32 cr0;
	u16 cr1;
	u16 dmacr;
	u16 cpsr;
	u8 n_bytes;
	bool enable_dma;
	enum ssp_reading read;
	enum ssp_writing write;
	int xfer_type;
};
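
/**
 * internal_cs_control - Control chip select signals via the SSP_CSR
 * register (vendor extension)
 * @pl022: SSP driver private data structure
 * @command: whether to select (SSP_CHIP_SELECT) or deselect the chip
 *
 * Each of the low bits in the register controls one chip select line;
 * the lines are active low, so selecting a chip clears its bit.
 */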
static void internal_cs_control(struct pl022 *pl022, u32 command)
{
	u32 tmp;

	tmp = readw(SSP_CSR(pl022->virtbase));
	if (command == SSP_CHIP_SELECT)
		tmp &= ~BIT(pl022->cur_cs);
	else
		tmp |= BIT(pl022->cur_cs);
	writew(tmp, SSP_CSR(pl022->virtbase));
}

static void pl022_cs_control(struct pl022 *pl022, u32 command)
{
	if (pl022->vendor->internal_cs_ctrl)
		internal_cs_control(pl022, command);
	else if (pl022->cur_gpiod)
		/*
		 * This needs to be inverted since with GPIOLIB in
		 * control, chip selects are active high
		 */
		gpiod_set_value(pl022->cur_gpiod, !command);
}
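
/**
 * giveback - current spi_message is over, schedule next message and call
 * callback of this message. Assumes that caller already
 * set message->status; dma and pio irqs are blocked
 * @pl022: SSP driver private data structure
 */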
static void giveback(struct pl022 *pl022)
{
	struct spi_transfer *last_transfer;

	pl022->next_msg_cs_active = false;

	last_transfer = list_last_entry(&pl022->cur_msg->transfers,
					struct spi_transfer, transfer_list);

	/* Delay if requested before any change in chip select */
	spi_transfer_delay_exec(last_transfer);

	if (!last_transfer->cs_change) {
		struct spi_message *next_msg;

		/*
		 * cs_change was not set on the last transfer, so the chip
		 * select can stay asserted if the next queued message is
		 * for the same SPI device; peek at the queue to find out.
		 */
		next_msg = spi_get_next_queued_message(pl022->master);

		/* See if the next message is on the same chip */
		if (next_msg && next_msg->spi != pl022->cur_msg->spi)
			next_msg = NULL;
		if (!next_msg || pl022->cur_msg->state == STATE_ERROR)
			pl022_cs_control(pl022, SSP_CHIP_DESELECT);
		else
			pl022->next_msg_cs_active = true;

	}

	pl022->cur_msg = NULL;
	pl022->cur_transfer = NULL;
	pl022->cur_chip = NULL;

	/* disable the SPI/SSP operation */
	writew((readw(SSP_CR1(pl022->virtbase)) &
		(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));

	spi_finalize_current_message(pl022->master);
}
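
/**
 * flush - flush the FIFO to reach a clean state
 * @pl022: SSP driver private data structure
 *
 * Drain the RX FIFO and wait for the controller to go idle, bounded by
 * a loop limit so we cannot hang here forever.
 */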
static int flush(struct pl022 *pl022)
{
	unsigned long limit = loops_per_jiffy << 1;

	dev_dbg(&pl022->adev->dev, "flush\n");
	do {
		while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
			readw(SSP_DR(pl022->virtbase));
	} while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--);

	pl022->exp_fifo_level = 0;

	return limit;
}
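
/**
 * restore_state - Load configuration of current chip
 * @pl022: SSP driver private data structure
 */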
static void restore_state(struct pl022 *pl022)
{
	struct chip_data *chip = pl022->cur_chip;

	if (pl022->vendor->extended_cr)
		writel(chip->cr0, SSP_CR0(pl022->virtbase));
	else
		writew(chip->cr0, SSP_CR0(pl022->virtbase));
	writew(chip->cr1, SSP_CR1(pl022->virtbase));
	writew(chip->dmacr, SSP_DMACR(pl022->virtbase));
	writew(chip->cpsr, SSP_CPSR(pl022->virtbase));
	writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
	writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
}
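
/*
 * Default SSP Register Values
 */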
#define DEFAULT_SSP_REG_CR0 ( \
	GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \
	GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 4) | \
	GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
	GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
	GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
)

/* ST versions have a different layout with some extra fields */
#define DEFAULT_SSP_REG_CR0_ST ( \
	GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \
	GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP_ST, 5) | \
	GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
	GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
	GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
	GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS_ST, 16) | \
	GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF_ST, 21) \
)

/* The PL023 version is slightly different again */
#define DEFAULT_SSP_REG_CR0_ST_PL023 ( \
	GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \
	GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
	GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
	GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
)

#define DEFAULT_SSP_REG_CR1 ( \
	GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \
	GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
	GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
	GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) \
)

/* ST versions extend this register with extra fields */
#define DEFAULT_SSP_REG_CR1_ST ( \
	DEFAULT_SSP_REG_CR1 | \
	GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
	GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
	GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT_ST, 6) |\
	GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
	GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) \
)

/*
 * The PL023 variant has no loopback mode or microwire wait state bits,
 * but adds a feedback clock delay setting.
 */
#define DEFAULT_SSP_REG_CR1_ST_PL023 ( \
	GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
	GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
	GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \
	GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
	GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
	GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
	GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) | \
	GEN_MASK_BITS(SSP_FEEDBACK_CLK_DELAY_NONE, SSP_CR1_MASK_FBCLKDEL_ST, 13) \
)

#define DEFAULT_SSP_REG_CPSR ( \
	GEN_MASK_BITS(SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
)

#define DEFAULT_SSP_REG_DMACR (\
	GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \
	GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \
)
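
/**
 * load_ssp_default_config - Load default configuration for SSP
 * @pl022: SSP driver private data structure
 */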
static void load_ssp_default_config(struct pl022 *pl022)
{
	if (pl022->vendor->pl023) {
		writel(DEFAULT_SSP_REG_CR0_ST_PL023, SSP_CR0(pl022->virtbase));
		writew(DEFAULT_SSP_REG_CR1_ST_PL023, SSP_CR1(pl022->virtbase));
	} else if (pl022->vendor->extended_cr) {
		writel(DEFAULT_SSP_REG_CR0_ST, SSP_CR0(pl022->virtbase));
		writew(DEFAULT_SSP_REG_CR1_ST, SSP_CR1(pl022->virtbase));
	} else {
		writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
		writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
	}
	writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase));
	writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase));
	writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
	writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
}
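
/**
 * readwriter - Read and write to the FIFOs
 * @pl022: SSP driver private data structure
 *
 * This function does transfers in polling and interrupt mode: it drains
 * the RX FIFO and fills the TX FIFO as long as there is room, keeping
 * track of the expected FIFO fill level so the FIFOs are never
 * overflowed while RX data is pending.
 */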
static void readwriter(struct pl022 *pl022)
{
	/*
	 * The FIFO depth differs between primecell variants, and
	 * overfilling the TX FIFO while reads are pending can cause
	 * overruns. To prevent this, the TX FIFO is only filled up
	 * to the expected free space, tracked in exp_fifo_level,
	 * regardless of what the TX FIFO status flag indicates.
	 */
	dev_dbg(&pl022->adev->dev,
		"%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
		__func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end);

	/* Read as much as you can */
	while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
	       && (pl022->rx < pl022->rx_end)) {
		switch (pl022->read) {
		case READING_NULL:
			readw(SSP_DR(pl022->virtbase));
			break;
		case READING_U8:
			*(u8 *) (pl022->rx) =
				readw(SSP_DR(pl022->virtbase)) & 0xFFU;
			break;
		case READING_U16:
			*(u16 *) (pl022->rx) =
				(u16) readw(SSP_DR(pl022->virtbase));
			break;
		case READING_U32:
			*(u32 *) (pl022->rx) =
				readl(SSP_DR(pl022->virtbase));
			break;
		}
		pl022->rx += (pl022->cur_chip->n_bytes);
		pl022->exp_fifo_level--;
	}
	/*
	 * Write as much as possible up to the RX FIFO size
	 */
	while ((pl022->exp_fifo_level < pl022->vendor->fifodepth)
	       && (pl022->tx < pl022->tx_end)) {
		switch (pl022->write) {
		case WRITING_NULL:
			writew(0x0, SSP_DR(pl022->virtbase));
			break;
		case WRITING_U8:
			writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase));
			break;
		case WRITING_U16:
			writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase));
			break;
		case WRITING_U32:
			writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase));
			break;
		}
		pl022->tx += (pl022->cur_chip->n_bytes);
		pl022->exp_fifo_level++;
		/*
		 * This inner reader takes care of things appearing in the RX
		 * FIFO as we're transmitting. This will happen a lot since the
		 * clock starts running when you put things into the TX FIFO,
		 * and then things are continuously clocked into the RX FIFO.
		 */
		while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
		       && (pl022->rx < pl022->rx_end)) {
			switch (pl022->read) {
			case READING_NULL:
				readw(SSP_DR(pl022->virtbase));
				break;
			case READING_U8:
				*(u8 *) (pl022->rx) =
					readw(SSP_DR(pl022->virtbase)) & 0xFFU;
				break;
			case READING_U16:
				*(u16 *) (pl022->rx) =
					(u16) readw(SSP_DR(pl022->virtbase));
				break;
			case READING_U32:
				*(u32 *) (pl022->rx) =
					readl(SSP_DR(pl022->virtbase));
				break;
			}
			pl022->rx += (pl022->cur_chip->n_bytes);
			pl022->exp_fifo_level--;
		}
	}
	/*
	 * When we exit here the TX FIFO should be full and the RX FIFO
	 * should be empty
	 */
}
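
/**
 * next_transfer - Move to the Next transfer in the current spi message
 * @pl022: SSP driver private data structure
 *
 * This function moves though the linked list of spi transfers in the
 * current spi message and returns with the state of the current spi
 * message i.e whether its last transfer is done (STATE_DONE) or the
 * next transfer is ready (STATE_RUNNING)
 */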
static void *next_transfer(struct pl022 *pl022)
{
	struct spi_message *msg = pl022->cur_msg;
	struct spi_transfer *trans = pl022->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		pl022->cur_transfer =
			list_entry(trans->transfer_list.next,
				   struct spi_transfer, transfer_list);
		return STATE_RUNNING;
	}
	return STATE_DONE;
}
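
/*
 * This DMA functionality is only compiled in if we have
 * access to the generic DMA devices/DMA engine.
 */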
#ifdef CONFIG_DMA_ENGINE
static void unmap_free_dma_scatter(struct pl022 *pl022)
{
	/* Unmap and free the SG tables */
	dma_unmap_sg(pl022->dma_tx_channel->device->dev, pl022->sgt_tx.sgl,
		     pl022->sgt_tx.nents, DMA_TO_DEVICE);
	dma_unmap_sg(pl022->dma_rx_channel->device->dev, pl022->sgt_rx.sgl,
		     pl022->sgt_rx.nents, DMA_FROM_DEVICE);
	sg_free_table(&pl022->sgt_rx);
	sg_free_table(&pl022->sgt_tx);
}

static void dma_callback(void *data)
{
	struct pl022 *pl022 = data;
	struct spi_message *msg = pl022->cur_msg;

	BUG_ON(!pl022->sgt_rx.sgl);

#ifdef VERBOSE_DEBUG
	/*
	 * Optionally dump out buffers to inspect contents, this is
	 * good if you want to convince yourself that the loopback
	 * read/write contents are the same, when adopting to a new
	 * DMA engine.
	 */
	{
		struct scatterlist *sg;
		unsigned int i;

		dma_sync_sg_for_cpu(&pl022->adev->dev,
				    pl022->sgt_rx.sgl,
				    pl022->sgt_rx.nents,
				    DMA_FROM_DEVICE);

		for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) {
			dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i);
			print_hex_dump(KERN_ERR, "SPI RX: ",
				       DUMP_PREFIX_OFFSET,
				       16,
				       1,
				       sg_virt(sg),
				       sg_dma_len(sg),
				       1);
		}
		for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) {
			dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i);
			print_hex_dump(KERN_ERR, "SPI TX: ",
				       DUMP_PREFIX_OFFSET,
				       16,
				       1,
				       sg_virt(sg),
				       sg_dma_len(sg),
				       1);
		}
	}
#endif

	unmap_free_dma_scatter(pl022);

	/* Update total bytes transferred */
	msg->actual_length += pl022->cur_transfer->len;
	/* Move to next transfer */
	msg->state = next_transfer(pl022);
	if (msg->state != STATE_DONE && pl022->cur_transfer->cs_change)
		pl022_cs_control(pl022, SSP_CHIP_DESELECT);
	tasklet_schedule(&pl022->pump_transfers);
}
856
857static void setup_dma_scatter(struct pl022 *pl022,
858 void *buffer,
859 unsigned int length,
860 struct sg_table *sgtab)
861{
862 struct scatterlist *sg;
863 int bytesleft = length;
864 void *bufp = buffer;
865 int mapbytes;
866 int i;
867
868 if (buffer) {
869 for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
870
871
872
873
874
875
876 if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
877 mapbytes = bytesleft;
878 else
879 mapbytes = PAGE_SIZE - offset_in_page(bufp);
880 sg_set_page(sg, virt_to_page(bufp),
881 mapbytes, offset_in_page(bufp));
882 bufp += mapbytes;
883 bytesleft -= mapbytes;
884 dev_dbg(&pl022->adev->dev,
885 "set RX/TX target page @ %p, %d bytes, %d left\n",
886 bufp, mapbytes, bytesleft);
887 }
888 } else {
889
890 for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
891 if (bytesleft < PAGE_SIZE)
892 mapbytes = bytesleft;
893 else
894 mapbytes = PAGE_SIZE;
895 sg_set_page(sg, virt_to_page(pl022->dummypage),
896 mapbytes, 0);
897 bytesleft -= mapbytes;
898 dev_dbg(&pl022->adev->dev,
899 "set RX/TX to dummy page %d bytes, %d left\n",
900 mapbytes, bytesleft);
901
902 }
903 }
904 BUG_ON(bytesleft);
905}
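
/**
 * configure_dma - configures the channels for the next transfer
 * @pl022: SSP driver's private data structure
 */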
static int configure_dma(struct pl022 *pl022)
{
	struct dma_slave_config rx_conf = {
		.src_addr = SSP_DR(pl022->phybase),
		.direction = DMA_DEV_TO_MEM,
		.device_fc = false,
	};
	struct dma_slave_config tx_conf = {
		.dst_addr = SSP_DR(pl022->phybase),
		.direction = DMA_MEM_TO_DEV,
		.device_fc = false,
	};
	unsigned int pages;
	int ret;
	int rx_sglen, tx_sglen;
	struct dma_chan *rxchan = pl022->dma_rx_channel;
	struct dma_chan *txchan = pl022->dma_tx_channel;
	struct dma_async_tx_descriptor *rxdesc;
	struct dma_async_tx_descriptor *txdesc;

	/* Check that the channels are available */
	if (!rxchan || !txchan)
		return -ENODEV;

	/*
	 * If supplied, the DMA burstsize should equal the FIFO trigger level.
	 * Since we cannot trigger on "2 or more elements" this needs an
	 * explicit mapping rather than a calculation.
	 */
	switch (pl022->rx_lev_trig) {
	case SSP_RX_1_OR_MORE_ELEM:
		rx_conf.src_maxburst = 1;
		break;
	case SSP_RX_4_OR_MORE_ELEM:
		rx_conf.src_maxburst = 4;
		break;
	case SSP_RX_8_OR_MORE_ELEM:
		rx_conf.src_maxburst = 8;
		break;
	case SSP_RX_16_OR_MORE_ELEM:
		rx_conf.src_maxburst = 16;
		break;
	case SSP_RX_32_OR_MORE_ELEM:
		rx_conf.src_maxburst = 32;
		break;
	default:
		rx_conf.src_maxburst = pl022->vendor->fifodepth >> 1;
		break;
	}

	switch (pl022->tx_lev_trig) {
	case SSP_TX_1_OR_MORE_EMPTY_LOC:
		tx_conf.dst_maxburst = 1;
		break;
	case SSP_TX_4_OR_MORE_EMPTY_LOC:
		tx_conf.dst_maxburst = 4;
		break;
	case SSP_TX_8_OR_MORE_EMPTY_LOC:
		tx_conf.dst_maxburst = 8;
		break;
	case SSP_TX_16_OR_MORE_EMPTY_LOC:
		tx_conf.dst_maxburst = 16;
		break;
	case SSP_TX_32_OR_MORE_EMPTY_LOC:
		tx_conf.dst_maxburst = 32;
		break;
	default:
		tx_conf.dst_maxburst = pl022->vendor->fifodepth >> 1;
		break;
	}

	switch (pl022->read) {
	case READING_NULL:
		/* Use the same as for writing */
		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
		break;
	case READING_U8:
		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	case READING_U16:
		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	case READING_U32:
		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	switch (pl022->write) {
	case WRITING_NULL:
		/* Use the same as for reading */
		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
		break;
	case WRITING_U8:
		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	case WRITING_U16:
		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	case WRITING_U32:
		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	/* SPI peculiarity: we need to read and write the same width */
	if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
		rx_conf.src_addr_width = tx_conf.dst_addr_width;
	if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
		tx_conf.dst_addr_width = rx_conf.src_addr_width;
	BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width);

	dmaengine_slave_config(rxchan, &rx_conf);
	dmaengine_slave_config(txchan, &tx_conf);

	/* Create sglists for the transfers */
	pages = DIV_ROUND_UP(pl022->cur_transfer->len, PAGE_SIZE);
	dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages);

	ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_ATOMIC);
	if (ret)
		goto err_alloc_rx_sg;

	ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_ATOMIC);
	if (ret)
		goto err_alloc_tx_sg;

	/* Fill in the scatterlists for the RX+TX buffers */
	setup_dma_scatter(pl022, pl022->rx,
			  pl022->cur_transfer->len, &pl022->sgt_rx);
	setup_dma_scatter(pl022, pl022->tx,
			  pl022->cur_transfer->len, &pl022->sgt_tx);

	/* Map DMA buffers */
	rx_sglen = dma_map_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
			      pl022->sgt_rx.nents, DMA_FROM_DEVICE);
	if (!rx_sglen)
		goto err_rx_sgmap;

	tx_sglen = dma_map_sg(txchan->device->dev, pl022->sgt_tx.sgl,
			      pl022->sgt_tx.nents, DMA_TO_DEVICE);
	if (!tx_sglen)
		goto err_tx_sgmap;

	/* Send both scatterlists */
	rxdesc = dmaengine_prep_slave_sg(rxchan,
					 pl022->sgt_rx.sgl,
					 rx_sglen,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		goto err_rxdesc;

	txdesc = dmaengine_prep_slave_sg(txchan,
					 pl022->sgt_tx.sgl,
					 tx_sglen,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		goto err_txdesc;

	/* Put the callback on the RX transfer only, that should finish last */
	rxdesc->callback = dma_callback;
	rxdesc->callback_param = pl022;

	/* Submit and fire RX and TX with TX last so we're ready to read! */
	dmaengine_submit(rxdesc);
	dmaengine_submit(txdesc);
	dma_async_issue_pending(rxchan);
	dma_async_issue_pending(txchan);
	pl022->dma_running = true;

	return 0;

err_txdesc:
	dmaengine_terminate_all(txchan);
err_rxdesc:
	dmaengine_terminate_all(rxchan);
	dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl,
		     pl022->sgt_tx.nents, DMA_TO_DEVICE);
err_tx_sgmap:
	dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
		     pl022->sgt_rx.nents, DMA_FROM_DEVICE);
err_rx_sgmap:
	sg_free_table(&pl022->sgt_tx);
err_alloc_tx_sg:
	sg_free_table(&pl022->sgt_rx);
err_alloc_rx_sg:
	return -ENOMEM;
}

static int pl022_dma_probe(struct pl022 *pl022)
{
	dma_cap_mask_t mask;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	/*
	 * We need both RX and TX channels to do DMA, else do none
	 * of them.
	 */
	pl022->dma_rx_channel = dma_request_channel(mask,
						    pl022->master_info->dma_filter,
						    pl022->master_info->dma_rx_param);
	if (!pl022->dma_rx_channel) {
		dev_dbg(&pl022->adev->dev, "no RX DMA channel!\n");
		goto err_no_rxchan;
	}

	pl022->dma_tx_channel = dma_request_channel(mask,
						    pl022->master_info->dma_filter,
						    pl022->master_info->dma_tx_param);
	if (!pl022->dma_tx_channel) {
		dev_dbg(&pl022->adev->dev, "no TX DMA channel!\n");
		goto err_no_txchan;
	}

	pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!pl022->dummypage)
		goto err_no_dummypage;

	dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n",
		 dma_chan_name(pl022->dma_rx_channel),
		 dma_chan_name(pl022->dma_tx_channel));

	return 0;

err_no_dummypage:
	dma_release_channel(pl022->dma_tx_channel);
err_no_txchan:
	dma_release_channel(pl022->dma_rx_channel);
	pl022->dma_rx_channel = NULL;
err_no_rxchan:
	dev_err(&pl022->adev->dev,
		"Failed to set up DMA, falling back to PIO mode\n");
	return -ENODEV;
}

static int pl022_dma_autoprobe(struct pl022 *pl022)
{
	struct device *dev = &pl022->adev->dev;
	struct dma_chan *chan;
	int err;

	/* automatically configure DMA channels from platform, normally using DT */
	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan)) {
		err = PTR_ERR(chan);
		goto err_no_rxchan;
	}

	pl022->dma_rx_channel = chan;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan)) {
		err = PTR_ERR(chan);
		goto err_no_txchan;
	}

	pl022->dma_tx_channel = chan;

	pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!pl022->dummypage) {
		err = -ENOMEM;
		goto err_no_dummypage;
	}

	return 0;

err_no_dummypage:
	dma_release_channel(pl022->dma_tx_channel);
	pl022->dma_tx_channel = NULL;
err_no_txchan:
	dma_release_channel(pl022->dma_rx_channel);
	pl022->dma_rx_channel = NULL;
err_no_rxchan:
	return err;
}

static void terminate_dma(struct pl022 *pl022)
{
	struct dma_chan *rxchan = pl022->dma_rx_channel;
	struct dma_chan *txchan = pl022->dma_tx_channel;

	dmaengine_terminate_all(rxchan);
	dmaengine_terminate_all(txchan);
	unmap_free_dma_scatter(pl022);
	pl022->dma_running = false;
}

static void pl022_dma_remove(struct pl022 *pl022)
{
	if (pl022->dma_running)
		terminate_dma(pl022);
	if (pl022->dma_tx_channel)
		dma_release_channel(pl022->dma_tx_channel);
	if (pl022->dma_rx_channel)
		dma_release_channel(pl022->dma_rx_channel);
	kfree(pl022->dummypage);
}

#else
static inline int configure_dma(struct pl022 *pl022)
{
	return -ENODEV;
}

static inline int pl022_dma_autoprobe(struct pl022 *pl022)
{
	return 0;
}

static inline int pl022_dma_probe(struct pl022 *pl022)
{
	return 0;
}

static inline void pl022_dma_remove(struct pl022 *pl022)
{
}
#endif
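
/**
 * pl022_interrupt_handler - Interrupt handler for SSP controller
 * @irq: IRQ number
 * @dev_id: Local device data
 *
 * This function handles interrupts generated for an interrupt based
 * transfer. If a receive overrun (ROR) interrupt is there then we
 * disable SSP, flag the current message's state as STATE_ERROR and
 * schedule the tasklet pump_transfers which will do the postprocessing
 * of the current message by calling giveback(). Otherwise it reads data
 * from the RX FIFO till there is no more data, and writes data in the
 * TX FIFO till it is not full. If we complete the transfer we move to
 * the next transfer and schedule the tasklet.
 */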
static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
{
	struct pl022 *pl022 = dev_id;
	struct spi_message *msg = pl022->cur_msg;
	u16 irq_status = 0;

	if (unlikely(!msg)) {
		dev_err(&pl022->adev->dev,
			"bad message state in interrupt handler");
		/* Never fail */
		return IRQ_HANDLED;
	}

	/* Read the Interrupt Status Register */
	irq_status = readw(SSP_MIS(pl022->virtbase));

	if (unlikely(!irq_status))
		return IRQ_NONE;

	/*
	 * This handles the FIFO interrupts, the timeout
	 * interrupts are flatly handled, no difference
	 * made.
	 */
	if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) {
		/*
		 * Overrun interrupt - bail out since our Data has been
		 * corrupted
		 */
		dev_err(&pl022->adev->dev, "FIFO overrun\n");
		if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF)
			dev_err(&pl022->adev->dev,
				"RXFIFO is full\n");

		/*
		 * Disable and clear interrupts, disable SSP,
		 * mark message with bad status so it can be
		 * retried.
		 */
		writew(DISABLE_ALL_INTERRUPTS,
		       SSP_IMSC(pl022->virtbase));
		writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
		writew((readw(SSP_CR1(pl022->virtbase)) &
			(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
		msg->state = STATE_ERROR;

		/* Schedule message queue handler */
		tasklet_schedule(&pl022->pump_transfers);
		return IRQ_HANDLED;
	}

	readwriter(pl022);

	if (pl022->tx == pl022->tx_end) {
		/* Disable Transmit interrupt, enable receive interrupt */
		writew((readw(SSP_IMSC(pl022->virtbase)) &
		       ~SSP_IMSC_MASK_TXIM) | SSP_IMSC_MASK_RXIM,
		       SSP_IMSC(pl022->virtbase));
	}

	/*
	 * Since all transactions must write as much as shall be read,
	 * we can conclude the entire transaction once RX is complete.
	 * At this point, all TX will always be finished.
	 */
	if (pl022->rx >= pl022->rx_end) {
		writew(DISABLE_ALL_INTERRUPTS,
		       SSP_IMSC(pl022->virtbase));
		writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
		if (unlikely(pl022->rx > pl022->rx_end)) {
			dev_warn(&pl022->adev->dev, "read %u surplus "
				 "bytes (did you request an odd "
				 "number of bytes on a 16bit bus?)\n",
				 (u32) (pl022->rx - pl022->rx_end));
		}
		/* Update total bytes transferred */
		msg->actual_length += pl022->cur_transfer->len;
		/* Move to next transfer */
		msg->state = next_transfer(pl022);
		if (msg->state != STATE_DONE && pl022->cur_transfer->cs_change)
			pl022_cs_control(pl022, SSP_CHIP_DESELECT);
		tasklet_schedule(&pl022->pump_transfers);
		return IRQ_HANDLED;
	}

	return IRQ_HANDLED;
}
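
/*
 * This sets up the pointers to memory for the next message to
 * send out on the SPI bus.
 */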
static int set_up_next_transfer(struct pl022 *pl022,
				struct spi_transfer *transfer)
{
	int residue;

	/* Sanity check the message for this bus width */
	residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes;
	if (unlikely(residue != 0)) {
		dev_err(&pl022->adev->dev,
			"message of %u bytes to transmit but the current "
			"chip bus has a data width of %u bytes!\n",
			pl022->cur_transfer->len,
			pl022->cur_chip->n_bytes);
		dev_err(&pl022->adev->dev, "skipping this message\n");
		return -EIO;
	}
	pl022->tx = (void *)transfer->tx_buf;
	pl022->tx_end = pl022->tx + pl022->cur_transfer->len;
	pl022->rx = (void *)transfer->rx_buf;
	pl022->rx_end = pl022->rx + pl022->cur_transfer->len;
	pl022->write =
	    pl022->tx ? pl022->cur_chip->write : WRITING_NULL;
	pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL;
	return 0;
}
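
/**
 * pump_transfers - Tasklet function which schedules next transfer
 * when running in interrupt or DMA transfer mode.
 * @data: SSP driver private data structure
 */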
static void pump_transfers(unsigned long data)
{
	struct pl022 *pl022 = (struct pl022 *) data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;

	/* Get current state information */
	message = pl022->cur_msg;
	transfer = pl022->cur_transfer;

	/* Handle for abort */
	if (message->state == STATE_ERROR) {
		message->status = -EIO;
		giveback(pl022);
		return;
	}

	/* Handle end of message */
	if (message->state == STATE_DONE) {
		message->status = 0;
		giveback(pl022);
		return;
	}

	/* Delay if requested at end of transfer before CS change */
	if (message->state == STATE_RUNNING) {
		previous = list_entry(transfer->transfer_list.prev,
				      struct spi_transfer,
				      transfer_list);
		/*
		 * FIXME: This runs in interrupt context.
		 * Is this really smart?
		 */
		spi_transfer_delay_exec(previous);

		/* Reselect chip select only if cs_change was requested */
		if (previous->cs_change)
			pl022_cs_control(pl022, SSP_CHIP_SELECT);
	} else {
		/* STATE_START */
		message->state = STATE_RUNNING;
	}

	if (set_up_next_transfer(pl022, transfer)) {
		message->state = STATE_ERROR;
		message->status = -EIO;
		giveback(pl022);
		return;
	}
	/* Flush the FIFOs and let's go! */
	flush(pl022);

	if (pl022->cur_chip->enable_dma) {
		if (configure_dma(pl022)) {
			dev_dbg(&pl022->adev->dev,
				"configuration of DMA failed, fall back to interrupt mode\n");
			goto err_config_dma;
		}
		return;
	}

err_config_dma:
	/* enable all interrupts except RX */
	writew(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM, SSP_IMSC(pl022->virtbase));
}

static void do_interrupt_dma_transfer(struct pl022 *pl022)
{
	/*
	 * Default is to enable all interrupts except RX -
	 * this will be enabled once TX is complete
	 */
	u32 irqflags = (u32)(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM);

	/* Enable target chip, if not already active */
	if (!pl022->next_msg_cs_active)
		pl022_cs_control(pl022, SSP_CHIP_SELECT);

	if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
		/* Error path */
		pl022->cur_msg->state = STATE_ERROR;
		pl022->cur_msg->status = -EIO;
		giveback(pl022);
		return;
	}
	/* If we're using DMA, set up DMA here */
	if (pl022->cur_chip->enable_dma) {
		/* Configure DMA transfer */
		if (configure_dma(pl022)) {
			dev_dbg(&pl022->adev->dev,
				"configuration of DMA failed, fall back to interrupt mode\n");
			goto err_config_dma;
		}
		/* Disable interrupts in DMA mode, IRQ from DMA controller */
		irqflags = DISABLE_ALL_INTERRUPTS;
	}
err_config_dma:
	/* Enable SSP, turn on interrupts */
	writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
	       SSP_CR1(pl022->virtbase));
	writew(irqflags, SSP_IMSC(pl022->virtbase));
}

static void print_current_status(struct pl022 *pl022)
{
	u32 read_cr0;
	u16 read_cr1, read_dmacr, read_sr;

	if (pl022->vendor->extended_cr)
		read_cr0 = readl(SSP_CR0(pl022->virtbase));
	else
		read_cr0 = readw(SSP_CR0(pl022->virtbase));
	read_cr1 = readw(SSP_CR1(pl022->virtbase));
	read_dmacr = readw(SSP_DMACR(pl022->virtbase));
	read_sr = readw(SSP_SR(pl022->virtbase));

	dev_warn(&pl022->adev->dev, "spi-pl022 CR0: %x\n", read_cr0);
	dev_warn(&pl022->adev->dev, "spi-pl022 CR1: %x\n", read_cr1);
	dev_warn(&pl022->adev->dev, "spi-pl022 DMACR: %x\n", read_dmacr);
	dev_warn(&pl022->adev->dev, "spi-pl022 SR: %x\n", read_sr);
	dev_warn(&pl022->adev->dev,
		 "spi-pl022 exp_fifo_level/fifodepth: %u/%d\n",
		 pl022->exp_fifo_level,
		 pl022->vendor->fifodepth);
}

static void do_polling_transfer(struct pl022 *pl022)
{
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	unsigned long time, timeout;

	message = pl022->cur_msg;

	while (message->state != STATE_DONE) {
		/* Handle for abort */
		if (message->state == STATE_ERROR)
			break;
		transfer = pl022->cur_transfer;

		/* Delay if requested at end of transfer */
		if (message->state == STATE_RUNNING) {
			previous =
			    list_entry(transfer->transfer_list.prev,
				       struct spi_transfer, transfer_list);
			spi_transfer_delay_exec(previous);
			if (previous->cs_change)
				pl022_cs_control(pl022, SSP_CHIP_SELECT);
		} else {
			/* STATE_START */
			message->state = STATE_RUNNING;
			if (!pl022->next_msg_cs_active)
				pl022_cs_control(pl022, SSP_CHIP_SELECT);
		}

		/* Configuration Changing Per Transfer */
		if (set_up_next_transfer(pl022, transfer)) {
			/* Error path */
			message->state = STATE_ERROR;
			break;
		}
		/* Flush FIFOs and enable SSP */
		flush(pl022);
		writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
		       SSP_CR1(pl022->virtbase));

		dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n");

		timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT);
		while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) {
			time = jiffies;
			readwriter(pl022);
			if (time_after(time, timeout)) {
				dev_warn(&pl022->adev->dev,
					 "%s: timeout!\n", __func__);
				message->state = STATE_TIMEOUT;
				print_current_status(pl022);
				goto out;
			}
			cpu_relax();
		}

		/* Update total byte transferred */
		message->actual_length += pl022->cur_transfer->len;
		/* Move to next transfer */
		message->state = next_transfer(pl022);
		if (message->state != STATE_DONE
		    && pl022->cur_transfer->cs_change)
			pl022_cs_control(pl022, SSP_CHIP_DESELECT);
	}
out:
	/* Handle end of message */
	if (message->state == STATE_DONE)
		message->status = 0;
	else if (message->state == STATE_TIMEOUT)
		message->status = -EAGAIN;
	else
		message->status = -EIO;

	giveback(pl022);
	return;
}

static int pl022_transfer_one_message(struct spi_master *master,
				      struct spi_message *msg)
{
	struct pl022 *pl022 = spi_master_get_devdata(master);

	/* Initial message state */
	pl022->cur_msg = msg;
	msg->state = STATE_START;

	pl022->cur_transfer = list_entry(msg->transfers.next,
					 struct spi_transfer, transfer_list);

	/* Setup the SPI using the per chip configuration */
	pl022->cur_chip = spi_get_ctldata(msg->spi);
	pl022->cur_cs = msg->spi->chip_select;
	pl022->cur_gpiod = msg->spi->cs_gpiod;

	restore_state(pl022);
	flush(pl022);

	if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
		do_polling_transfer(pl022);
	else
		do_interrupt_dma_transfer(pl022);

	return 0;
}

static int pl022_unprepare_transfer_hardware(struct spi_master *master)
{
	struct pl022 *pl022 = spi_master_get_devdata(master);

	/* nothing more to do - disable spi/ssp and power off */
	writew((readw(SSP_CR1(pl022->virtbase)) &
		(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));

	return 0;
}

static int verify_controller_parameters(struct pl022 *pl022,
					struct pl022_config_chip const *chip_info)
{
	if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI)
	    || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) {
		dev_err(&pl022->adev->dev,
			"interface is configured incorrectly\n");
		return -EINVAL;
	}
	if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) &&
	    (!pl022->vendor->unidir)) {
		dev_err(&pl022->adev->dev,
			"unidirectional mode not supported in this "
			"hardware version\n");
		return -EINVAL;
	}
	if ((chip_info->hierarchy != SSP_MASTER)
	    && (chip_info->hierarchy != SSP_SLAVE)) {
		dev_err(&pl022->adev->dev,
			"hierarchy is configured incorrectly\n");
		return -EINVAL;
	}
	if ((chip_info->com_mode != INTERRUPT_TRANSFER)
	    && (chip_info->com_mode != DMA_TRANSFER)
	    && (chip_info->com_mode != POLLING_TRANSFER)) {
		dev_err(&pl022->adev->dev,
			"Communication mode is configured incorrectly\n");
		return -EINVAL;
	}
	switch (chip_info->rx_lev_trig) {
	case SSP_RX_1_OR_MORE_ELEM:
	case SSP_RX_4_OR_MORE_ELEM:
	case SSP_RX_8_OR_MORE_ELEM:
		/* These are always OK, all variants can handle this */
		break;
	case SSP_RX_16_OR_MORE_ELEM:
		if (pl022->vendor->fifodepth < 16) {
			dev_err(&pl022->adev->dev,
				"RX FIFO Trigger Level is configured incorrectly\n");
			return -EINVAL;
		}
		break;
	case SSP_RX_32_OR_MORE_ELEM:
		if (pl022->vendor->fifodepth < 32) {
			dev_err(&pl022->adev->dev,
				"RX FIFO Trigger Level is configured incorrectly\n");
			return -EINVAL;
		}
		break;
	default:
		dev_err(&pl022->adev->dev,
			"RX FIFO Trigger Level is configured incorrectly\n");
		return -EINVAL;
	}
	switch (chip_info->tx_lev_trig) {
	case SSP_TX_1_OR_MORE_EMPTY_LOC:
	case SSP_TX_4_OR_MORE_EMPTY_LOC:
	case SSP_TX_8_OR_MORE_EMPTY_LOC:
		/* These are always OK, all variants can handle this */
		break;
	case SSP_TX_16_OR_MORE_EMPTY_LOC:
		if (pl022->vendor->fifodepth < 16) {
			dev_err(&pl022->adev->dev,
				"TX FIFO Trigger Level is configured incorrectly\n");
			return -EINVAL;
		}
		break;
	case SSP_TX_32_OR_MORE_EMPTY_LOC:
		if (pl022->vendor->fifodepth < 32) {
			dev_err(&pl022->adev->dev,
				"TX FIFO Trigger Level is configured incorrectly\n");
			return -EINVAL;
		}
		break;
	default:
		dev_err(&pl022->adev->dev,
			"TX FIFO Trigger Level is configured incorrectly\n");
		return -EINVAL;
	}
	if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
		if ((chip_info->ctrl_len < SSP_BITS_4)
		    || (chip_info->ctrl_len > SSP_BITS_32)) {
			dev_err(&pl022->adev->dev,
				"CTRL LEN is configured incorrectly\n");
			return -EINVAL;
		}
		if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO)
		    && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) {
			dev_err(&pl022->adev->dev,
				"Wait State is configured incorrectly\n");
			return -EINVAL;
		}
		/* Half duplex is only available in the ST Micro version */
		if (pl022->vendor->extended_cr) {
			if ((chip_info->duplex !=
			     SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
			    && (chip_info->duplex !=
				SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
				dev_err(&pl022->adev->dev,
					"Microwire duplex mode is configured incorrectly\n");
				return -EINVAL;
			}
		} else {
			if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) {
				dev_err(&pl022->adev->dev,
					"Microwire half duplex mode requested,"
					" but this is only available in the"
					" ST version of PL022\n");
				return -EINVAL;
			}
		}
	}
	return 0;
}

static inline u32 spi_rate(u32 rate, u16 cpsdvsr, u16 scr)
{
	return rate / (cpsdvsr * (1 + scr));
}

static int calculate_effective_freq(struct pl022 *pl022, int freq,
				    struct ssp_clock_params *clk_freq)
{
	/* Lets calculate the frequency parameters */
	u16 cpsdvsr = CPSDVR_MIN, scr = SCR_MIN;
	u32 rate, max_tclk, min_tclk, best_freq = 0, best_cpsdvsr = 0,
		best_scr = 0, tmp, found = 0;

	rate = clk_get_rate(pl022->clk);
	/* cpsdvsr = 2 & scr = 0 */
	max_tclk = spi_rate(rate, CPSDVR_MIN, SCR_MIN);
	/* cpsdvsr = 254 & scr = 255 */
	min_tclk = spi_rate(rate, CPSDVR_MAX, SCR_MAX);

	if (freq > max_tclk)
		dev_warn(&pl022->adev->dev,
			"Max speed that can be programmed is %d Hz, you requested %d\n",
			max_tclk, freq);

	if (freq < min_tclk) {
		dev_err(&pl022->adev->dev,
			"Requested frequency: %d Hz is less than minimum possible %d Hz\n",
			freq, min_tclk);
		return -EINVAL;
	}

	/*
	 * best_freq will give the closest possible available rate (<=
	 * requested freq) for all values of scr & cpsdvsr.
	 */
	while ((cpsdvsr <= CPSDVR_MAX) && !found) {
		while (scr <= SCR_MAX) {
			tmp = spi_rate(rate, cpsdvsr, scr);

			if (tmp > freq) {
				/* we need lower freq */
				scr++;
				continue;
			}

			/*
			 * If found exact value, mark found and break.
			 * If found more closer value, update and break.
			 */
			if (tmp > best_freq) {
				best_freq = tmp;
				best_cpsdvsr = cpsdvsr;
				best_scr = scr;

				if (tmp == freq)
					found = 1;
			}
			/*
			 * increased scr will give lower rates, which are not
			 * required
			 */
			break;
		}
		cpsdvsr += 2;
		scr = SCR_MIN;
	}

	WARN(!best_freq, "pl022: Matching cpsdvsr and scr not found for %d Hz rate\n",
	     freq);

	clk_freq->cpsdvsr = (u8) (best_cpsdvsr & 0xFF);
	clk_freq->scr = (u8) (best_scr & 0xFF);
	dev_dbg(&pl022->adev->dev,
		"SSP Target Frequency is: %u, Effective Frequency is %u\n",
		freq, best_freq);
	dev_dbg(&pl022->adev->dev, "SSP cpsdvsr = %d, scr = %d\n",
		clk_freq->cpsdvsr, clk_freq->scr);

	return 0;
}
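
/*
 * A piece of default chip info unless the platform
 * supplies it.
 */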
static const struct pl022_config_chip pl022_default_chip_info = {
	.com_mode = INTERRUPT_TRANSFER,
	.iface = SSP_INTERFACE_MOTOROLA_SPI,
	.hierarchy = SSP_MASTER,
	.slave_tx_disable = DO_NOT_DRIVE_TX,
	.rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
	.tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
	.ctrl_len = SSP_BITS_8,
	.wait_state = SSP_MWIRE_WAIT_ZERO,
	.duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
};
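
/**
 * pl022_setup - setup function registered to SPI master framework
 * @spi: spi device which is requesting setup
 *
 * This function is registered to the SPI framework for this SPI master
 * controller. If it is the first time when setup is called by this
 * device, this function will initialize the runtime state for this
 * chip and save the same in the device structure. Else it will update
 * the runtime info with the updated chip info. Nothing is really being
 * written to the controller hardware here, that is not done until the
 * actual transfer commence.
 */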
static int pl022_setup(struct spi_device *spi)
{
	struct pl022_config_chip const *chip_info;
	struct pl022_config_chip chip_info_dt;
	struct chip_data *chip;
	struct ssp_clock_params clk_freq = { .cpsdvsr = 0, .scr = 0};
	int status = 0;
	struct pl022 *pl022 = spi_master_get_devdata(spi->master);
	unsigned int bits = spi->bits_per_word;
	u32 tmp;
	struct device_node *np = spi->dev.of_node;

	if (!spi->max_speed_hz)
		return -EINVAL;

	/* Get controller_state if one is supplied */
	chip = spi_get_ctldata(spi);

	if (chip == NULL) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
		dev_dbg(&spi->dev,
			"allocated memory for controller's runtime state\n");
	}

	/* Get controller data if one is supplied */
	chip_info = spi->controller_data;

	if (chip_info == NULL) {
		if (np) {
			chip_info_dt = pl022_default_chip_info;
			/* in device tree mode the hierarchy is always master */
			chip_info_dt.hierarchy = SSP_MASTER;
			of_property_read_u32(np, "pl022,interface",
					     &chip_info_dt.iface);
			of_property_read_u32(np, "pl022,com-mode",
					     &chip_info_dt.com_mode);
			of_property_read_u32(np, "pl022,rx-level-trig",
					     &chip_info_dt.rx_lev_trig);
			of_property_read_u32(np, "pl022,tx-level-trig",
					     &chip_info_dt.tx_lev_trig);
			of_property_read_u32(np, "pl022,ctrl-len",
					     &chip_info_dt.ctrl_len);
			of_property_read_u32(np, "pl022,wait-state",
					     &chip_info_dt.wait_state);
			of_property_read_u32(np, "pl022,duplex",
					     &chip_info_dt.duplex);

			chip_info = &chip_info_dt;
		} else {
			chip_info = &pl022_default_chip_info;
			/* spi_board_info.controller_data not supplied */
			dev_dbg(&spi->dev,
				"using default controller_data settings\n");
		}
	} else
		dev_dbg(&spi->dev,
			"using user supplied controller_data settings\n");

	/*
	 * We can override with custom divisors, else we use the board
	 * frequency setting
	 */
	if ((0 == chip_info->clk_freq.cpsdvsr)
	    && (0 == chip_info->clk_freq.scr)) {
		status = calculate_effective_freq(pl022,
						  spi->max_speed_hz,
						  &clk_freq);
		if (status < 0)
			goto err_config_params;
	} else {
		memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq));
		if ((clk_freq.cpsdvsr % 2) != 0)
			clk_freq.cpsdvsr =
				clk_freq.cpsdvsr - 1;
	}
	if ((clk_freq.cpsdvsr < CPSDVR_MIN)
	    || (clk_freq.cpsdvsr > CPSDVR_MAX)) {
		status = -EINVAL;
		dev_err(&spi->dev,
			"cpsdvsr is configured incorrectly\n");
		goto err_config_params;
	}

	status = verify_controller_parameters(pl022, chip_info);
	if (status) {
		dev_err(&spi->dev, "controller data is incorrect");
		goto err_config_params;
	}

	pl022->rx_lev_trig = chip_info->rx_lev_trig;
	pl022->tx_lev_trig = chip_info->tx_lev_trig;

	/* Now set controller state based on controller data */
	chip->xfer_type = chip_info->com_mode;

	/* Check bits per word with vendor specific range */
	if ((bits <= 3) || (bits > pl022->vendor->max_bpw)) {
		status = -ENOTSUPP;
		dev_err(&spi->dev, "illegal data size for this controller!\n");
		dev_err(&spi->dev, "This controller can only handle 4 <= n <= %d bit words\n",
			pl022->vendor->max_bpw);
		goto err_config_params;
	} else if (bits <= 8) {
		dev_dbg(&spi->dev, "4 <= n <= 8 bits per word\n");
		chip->n_bytes = 1;
		chip->read = READING_U8;
		chip->write = WRITING_U8;
	} else if (bits <= 16) {
		dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
		chip->n_bytes = 2;
		chip->read = READING_U16;
		chip->write = WRITING_U16;
	} else {
		dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
		chip->n_bytes = 4;
		chip->read = READING_U32;
		chip->write = WRITING_U32;
	}

	/* Now Initialize all register settings required for this chip */
	chip->cr0 = 0;
	chip->cr1 = 0;
	chip->dmacr = 0;
	chip->cpsr = 0;
	if ((chip_info->com_mode == DMA_TRANSFER)
	    && ((pl022->master_info)->enable_dma)) {
		chip->enable_dma = true;
		dev_dbg(&spi->dev, "DMA mode set in controller state\n");
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
			       SSP_DMACR_MASK_RXDMAE, 0);
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
			       SSP_DMACR_MASK_TXDMAE, 1);
	} else {
		chip->enable_dma = false;
		dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
			       SSP_DMACR_MASK_RXDMAE, 0);
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
			       SSP_DMACR_MASK_TXDMAE, 1);
	}

	chip->cpsr = clk_freq.cpsdvsr;

	/* Special setup for the ST micro extended control registers */
	if (pl022->vendor->extended_cr) {
		u32 etx;

		if (pl022->vendor->pl023) {
			/* These bits are only in the PL023 */
			SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay,
				       SSP_CR1_MASK_FBCLKDEL_ST, 13);
		} else {
			/* These bits are in the PL022 but not PL023 */
			SSP_WRITE_BITS(chip->cr0, chip_info->duplex,
				       SSP_CR0_MASK_HALFDUP_ST, 5);
			SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len,
				       SSP_CR0_MASK_CSS_ST, 16);
			SSP_WRITE_BITS(chip->cr0, chip_info->iface,
				       SSP_CR0_MASK_FRF_ST, 21);
			SSP_WRITE_BITS(chip->cr1, chip_info->wait_state,
				       SSP_CR1_MASK_MWAIT_ST, 6);
		}
		SSP_WRITE_BITS(chip->cr0, bits - 1,
			       SSP_CR0_MASK_DSS_ST, 0);

		if (spi->mode & SPI_LSB_FIRST) {
			tmp = SSP_RX_LSB;
			etx = SSP_TX_LSB;
		} else {
			tmp = SSP_RX_MSB;
			etx = SSP_TX_MSB;
		}
		SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4);
		SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5);
		SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig,
			       SSP_CR1_MASK_RXIFLSEL_ST, 7);
		SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig,
			       SSP_CR1_MASK_TXIFLSEL_ST, 10);
	} else {
		SSP_WRITE_BITS(chip->cr0, bits - 1,
			       SSP_CR0_MASK_DSS, 0);
		SSP_WRITE_BITS(chip->cr0, chip_info->iface,
			       SSP_CR0_MASK_FRF, 4);
	}

	/* Stuff that is common for all versions */
	if (spi->mode & SPI_CPOL)
		tmp = SSP_CLK_POL_IDLE_HIGH;
	else
		tmp = SSP_CLK_POL_IDLE_LOW;
	SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6);

	if (spi->mode & SPI_CPHA)
		tmp = SSP_CLK_SECOND_EDGE;
	else
		tmp = SSP_CLK_FIRST_EDGE;
	SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7);

	SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8);
	/* Loopback is available on all versions except PL023 */
	if (pl022->vendor->loopback) {
		if (spi->mode & SPI_LOOP)
			tmp = LOOPBACK_ENABLED;
		else
			tmp = LOOPBACK_DISABLED;
		SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0);
	}
	SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
	SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
	SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD,
		       3);

	/* Save controller_state */
	spi_set_ctldata(spi, chip);
	return status;
 err_config_params:
	spi_set_ctldata(spi, NULL);
	kfree(chip);
	return status;
}
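
/**
 * pl022_cleanup - cleanup function registered to SPI master framework
 * @spi: spi device which is requesting cleanup
 *
 * This function is registered to the SPI framework for this SPI master
 * controller. It will free the runtime state of chip.
 */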
static void pl022_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	spi_set_ctldata(spi, NULL);
	kfree(chip);
}

static struct pl022_ssp_controller *
pl022_platform_data_dt_get(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct pl022_ssp_controller *pd;

	if (!np) {
		dev_err(dev, "no dt node defined\n");
		return NULL;
	}

	pd = devm_kzalloc(dev, sizeof(struct pl022_ssp_controller), GFP_KERNEL);
	if (!pd)
		return NULL;

	pd->bus_id = -1;
	pd->enable_dma = 1;
	of_property_read_u32(np, "pl022,autosuspend-delay",
			     &pd->autosuspend_delay);
	pd->rt = of_property_read_bool(np, "pl022,rt");

	return pd;
}

static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct device *dev = &adev->dev;
	struct pl022_ssp_controller *platform_info =
		dev_get_platdata(&adev->dev);
	struct spi_master *master;
	struct pl022 *pl022 = NULL;
	int status = 0;

	dev_info(&adev->dev,
		 "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid);
	if (!platform_info && IS_ENABLED(CONFIG_OF))
		platform_info = pl022_platform_data_dt_get(dev);

	if (!platform_info) {
		dev_err(dev, "probe: no platform data defined\n");
		return -ENODEV;
	}

	/* Allocate master with space for data */
	master = spi_alloc_master(dev, sizeof(struct pl022));
	if (master == NULL) {
		dev_err(&adev->dev, "probe - cannot alloc SPI master\n");
		return -ENOMEM;
	}

	pl022 = spi_master_get_devdata(master);
	pl022->master = master;
	pl022->master_info = platform_info;
	pl022->adev = adev;
	pl022->vendor = id->data;

	/*
	 * Bus Number Which has been Assigned to this SSP controller
	 * on this board
	 */
	master->bus_num = platform_info->bus_id;
	master->cleanup = pl022_cleanup;
	master->setup = pl022_setup;
	master->auto_runtime_pm = true;
	master->transfer_one_message = pl022_transfer_one_message;
	master->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware;
	master->rt = platform_info->rt;
	master->dev.of_node = dev->of_node;
	master->use_gpio_descriptors = true;

	/*
	 * Supports mode 0-3, loopback, and active low CS. LSB-first is
	 * only available on the ST versions with extended control
	 * registers.
	 */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	if (pl022->vendor->extended_cr)
		master->mode_bits |= SPI_LSB_FIRST;

	dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num);

	status = amba_request_regions(adev, NULL);
	if (status)
		goto err_no_ioregion;

	pl022->phybase = adev->res.start;
	pl022->virtbase = devm_ioremap(dev, adev->res.start,
				       resource_size(&adev->res));
	if (pl022->virtbase == NULL) {
		status = -ENOMEM;
		goto err_no_ioremap;
	}
	dev_info(&adev->dev, "mapped registers from %pa to %p\n",
		 &adev->res.start, pl022->virtbase);

	pl022->clk = devm_clk_get(&adev->dev, NULL);
	if (IS_ERR(pl022->clk)) {
		status = PTR_ERR(pl022->clk);
		dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n");
		goto err_no_clk;
	}

	status = clk_prepare_enable(pl022->clk);
	if (status) {
		dev_err(&adev->dev, "could not enable SSP/SPI bus clock\n");
		goto err_no_clk_en;
	}

	/* Initialize transfer pump */
	tasklet_init(&pl022->pump_transfers, pump_transfers,
		     (unsigned long)pl022);

	/* Disable SSP */
	writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
	       SSP_CR1(pl022->virtbase));
	load_ssp_default_config(pl022);

	status = devm_request_irq(dev, adev->irq[0], pl022_interrupt_handler,
				  0, "pl022", pl022);
	if (status < 0) {
		dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status);
		goto err_no_irq;
	}

	/* Get DMA channels, try autoconfiguration first */
	status = pl022_dma_autoprobe(pl022);
	if (status == -EPROBE_DEFER) {
		dev_dbg(dev, "deferring probe to get DMA channel\n");
		goto err_no_irq;
	}

	/* If that failed, use channels from platform_info */
	if (status == 0)
		platform_info->enable_dma = 1;
	else if (platform_info->enable_dma) {
		status = pl022_dma_probe(pl022);
		if (status != 0)
			platform_info->enable_dma = 0;
	}

	/* Register with the SPI framework */
	amba_set_drvdata(adev, pl022);
	status = devm_spi_register_master(&adev->dev, master);
	if (status != 0) {
		dev_err(&adev->dev,
			"probe - problem registering spi master\n");
		goto err_spi_register;
	}
	dev_dbg(dev, "probe succeeded\n");

	/* let runtime pm put suspend */
	if (platform_info->autosuspend_delay > 0) {
		dev_info(&adev->dev,
			 "will use autosuspend for runtime pm, delay %dms\n",
			 platform_info->autosuspend_delay);
		pm_runtime_set_autosuspend_delay(dev,
						 platform_info->autosuspend_delay);
		pm_runtime_use_autosuspend(dev);
	}
	pm_runtime_put(dev);

	return 0;

 err_spi_register:
	if (platform_info->enable_dma)
		pl022_dma_remove(pl022);
 err_no_irq:
	clk_disable_unprepare(pl022->clk);
 err_no_clk_en:
 err_no_clk:
 err_no_ioremap:
	amba_release_regions(adev);
 err_no_ioregion:
	spi_master_put(master);
	return status;
}

static void
pl022_remove(struct amba_device *adev)
{
	struct pl022 *pl022 = amba_get_drvdata(adev);

	if (!pl022)
		return;

	/*
	 * undo pm_runtime_put() in probe.  I assume that we're not
	 * accessing the primecell here.
	 */
	pm_runtime_get_noresume(&adev->dev);

	load_ssp_default_config(pl022);
	if (pl022->master_info->enable_dma)
		pl022_dma_remove(pl022);

	clk_disable_unprepare(pl022->clk);
	amba_release_regions(adev);
	tasklet_disable(&pl022->pump_transfers);
}

#ifdef CONFIG_PM_SLEEP
static int pl022_suspend(struct device *dev)
{
	struct pl022 *pl022 = dev_get_drvdata(dev);
	int ret;

	ret = spi_master_suspend(pl022->master);
	if (ret)
		return ret;

	ret = pm_runtime_force_suspend(dev);
	if (ret) {
		spi_master_resume(pl022->master);
		return ret;
	}

	pinctrl_pm_select_sleep_state(dev);

	dev_dbg(dev, "suspended\n");
	return 0;
}

static int pl022_resume(struct device *dev)
{
	struct pl022 *pl022 = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret)
		dev_err(dev, "problem resuming\n");

	/* Start the queue running */
	ret = spi_master_resume(pl022->master);
	if (!ret)
		dev_dbg(dev, "resumed\n");

	return ret;
}
#endif

#ifdef CONFIG_PM
static int pl022_runtime_suspend(struct device *dev)
{
	struct pl022 *pl022 = dev_get_drvdata(dev);

	clk_disable_unprepare(pl022->clk);
	pinctrl_pm_select_idle_state(dev);

	return 0;
}

static int pl022_runtime_resume(struct device *dev)
{
	struct pl022 *pl022 = dev_get_drvdata(dev);

	pinctrl_pm_select_default_state(dev);
	clk_prepare_enable(pl022->clk);

	return 0;
}
#endif

static const struct dev_pm_ops pl022_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pl022_suspend, pl022_resume)
	SET_RUNTIME_PM_OPS(pl022_runtime_suspend, pl022_runtime_resume, NULL)
};

static struct vendor_data vendor_arm = {
	.fifodepth = 8,
	.max_bpw = 16,
	.unidir = false,
	.extended_cr = false,
	.pl023 = false,
	.loopback = true,
	.internal_cs_ctrl = false,
};

static struct vendor_data vendor_st = {
	.fifodepth = 32,
	.max_bpw = 32,
	.unidir = false,
	.extended_cr = true,
	.pl023 = false,
	.loopback = true,
	.internal_cs_ctrl = false,
};

static struct vendor_data vendor_st_pl023 = {
	.fifodepth = 32,
	.max_bpw = 32,
	.unidir = false,
	.extended_cr = true,
	.pl023 = true,
	.loopback = false,
	.internal_cs_ctrl = false,
};

static struct vendor_data vendor_lsi = {
	.fifodepth = 8,
	.max_bpw = 16,
	.unidir = false,
	.extended_cr = false,
	.pl023 = false,
	.loopback = true,
	.internal_cs_ctrl = true,
};

static const struct amba_id pl022_ids[] = {
	{
		/*
		 * ARM PL022 variant, this has a 16bit wide
		 * and 8 locations deep TX/RX FIFO
		 */
		.id	= 0x00041022,
		.mask	= 0x000fffff,
		.data	= &vendor_arm,
	},
	{
		/*
		 * ST Micro derivative, this has 32bit wide
		 * and 32 locations deep TX/RX FIFO
		 */
		.id	= 0x01080022,
		.mask	= 0xffffffff,
		.data	= &vendor_st,
	},
	{
		/*
		 * ST-Ericsson derivative "PL023" (this is not
		 * yet fully documented), a stripped-down version
		 * of the PL022 with an SPI-only register layout
		 * and no loopback mode.
		 */
		.id	= 0x00080023,
		.mask	= 0xffffffff,
		.data	= &vendor_st_pl023,
	},
	{
		/*
		 * PL022 variant that has a chip select control register
		 * which allows control of 5 output signals nCS[0:4].
		 */
		.id	= 0x000b6022,
		.mask	= 0x000fffff,
		.data	= &vendor_lsi,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl022_ids);

static struct amba_driver pl022_driver = {
	.drv = {
		.name	= "ssp-pl022",
		.pm	= &pl022_dev_pm_ops,
	},
	.id_table	= pl022_ids,
	.probe		= pl022_probe,
	.remove		= pl022_remove,
};

static int __init pl022_init(void)
{
	return amba_driver_register(&pl022_driver);
}
subsys_initcall(pl022_init);

static void __exit pl022_exit(void)
{
	amba_driver_unregister(&pl022_driver);
}
module_exit(pl022_exit);

MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
MODULE_DESCRIPTION("PL022 SSP Controller Driver");
MODULE_LICENSE("GPL");