1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27#include <linux/init.h>
28#include <linux/module.h>
29#include <linux/device.h>
30#include <linux/ioport.h>
31#include <linux/errno.h>
32#include <linux/interrupt.h>
33#include <linux/spi/spi.h>
34#include <linux/workqueue.h>
35#include <linux/delay.h>
36#include <linux/clk.h>
37#include <linux/err.h>
38#include <linux/amba/bus.h>
39#include <linux/amba/pl022.h>
40#include <linux/io.h>
41#include <linux/slab.h>
42#include <linux/dmaengine.h>
43#include <linux/dma-mapping.h>
44#include <linux/scatterlist.h>
45
46
47
48
49
50
/*
 * Read-modify-write helper: clear the field selected by @mask in @reg and
 * insert @val shifted up to the field's start bit @sb.  Not atomic.
 */
#define SSP_WRITE_BITS(reg, val, mask, sb) \
 ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))

/*
 * Build just a field value (no read-modify-write): @val shifted to start
 * bit @sb and clipped to @mask.  Used to compose whole register images.
 */
#define GEN_MASK_BITS(val, mask, sb) \
 (((val)<<(sb)) & (mask))

/* Values for the SOD (slave output disable) bit in CR1 */
#define DRIVE_TX 0
#define DO_NOT_DRIVE_TX 1

#define DO_NOT_QUEUE_DMA 0
#define QUEUE_DMA 1

#define RX_TRANSFER 1
#define TX_TRANSFER 2

/*
 * Macros to access SSP registers with their offsets from the base address @r
 */
#define SSP_CR0(r)	(r + 0x000)	/* Control register 0 */
#define SSP_CR1(r)	(r + 0x004)	/* Control register 1 */
#define SSP_DR(r)	(r + 0x008)	/* Data (FIFO) register */
#define SSP_SR(r)	(r + 0x00C)	/* Status register */
#define SSP_CPSR(r)	(r + 0x010)	/* Clock prescale register */
#define SSP_IMSC(r)	(r + 0x014)	/* Interrupt mask set/clear */
#define SSP_RIS(r)	(r + 0x018)	/* Raw interrupt status */
#define SSP_MIS(r)	(r + 0x01C)	/* Masked interrupt status */
#define SSP_ICR(r)	(r + 0x020)	/* Interrupt clear register */
#define SSP_DMACR(r)	(r + 0x024)	/* DMA control register */
#define SSP_ITCR(r)	(r + 0x080)	/* Integration test control */
#define SSP_ITIP(r)	(r + 0x084)	/* Integration test input */
#define SSP_ITOP(r)	(r + 0x088)	/* Integration test output */
#define SSP_TDR(r)	(r + 0x08C)	/* Test data register */

/* PrimeCell peripheral identification registers */
#define SSP_PID0(r)	(r + 0xFE0)
#define SSP_PID1(r)	(r + 0xFE4)
#define SSP_PID2(r)	(r + 0xFE8)
#define SSP_PID3(r)	(r + 0xFEC)

/* PrimeCell cell identification registers */
#define SSP_CID0(r)	(r + 0xFF0)
#define SSP_CID1(r)	(r + 0xFF4)
#define SSP_CID2(r)	(r + 0xFF8)
#define SSP_CID3(r)	(r + 0xFFC)
98
99
100
101
/*
 * SSP Control Register 0  - SSP_CR0
 */
#define SSP_CR0_MASK_DSS	(0x0FUL << 0)	/* Data size select */
#define SSP_CR0_MASK_FRF	(0x3UL << 4)	/* Frame format */
#define SSP_CR0_MASK_SPO	(0x1UL << 6)	/* Clock polarity */
#define SSP_CR0_MASK_SPH	(0x1UL << 7)	/* Clock phase */
#define SSP_CR0_MASK_SCR	(0xFFUL << 8)	/* Serial clock rate */

/*
 * The ST variant of the block moves some bits in CR0 around and
 * adds extra fields (half-duplex, channel select, extended FRF).
 */
#define SSP_CR0_MASK_DSS_ST	(0x1FUL << 0)	/* Wider data size select */
#define SSP_CR0_MASK_HALFDUP_ST	(0x1UL << 5)
#define SSP_CR0_MASK_CSS_ST	(0x1FUL << 16)
#define SSP_CR0_MASK_FRF_ST	(0x3UL << 21)

/*
 * SSP Control Register 1  - SSP_CR1
 */
#define SSP_CR1_MASK_LBM	(0x1UL << 0)	/* Loopback mode */
#define SSP_CR1_MASK_SSE	(0x1UL << 1)	/* SSP enable */
#define SSP_CR1_MASK_MS		(0x1UL << 2)	/* Master/slave select */
#define SSP_CR1_MASK_SOD	(0x1UL << 3)	/* Slave output disable */

/*
 * ST-specific extensions to CR1: endianness, microwire wait state
 * and FIFO interrupt trigger levels.
 */
#define SSP_CR1_MASK_RENDN_ST	(0x1UL << 4)	/* RX endianness */
#define SSP_CR1_MASK_TENDN_ST	(0x1UL << 5)	/* TX endianness */
#define SSP_CR1_MASK_MWAIT_ST	(0x1UL << 6)
#define SSP_CR1_MASK_RXIFLSEL_ST (0x7UL << 7)	/* RX FIFO trigger level */
#define SSP_CR1_MASK_TXIFLSEL_ST (0x7UL << 10)	/* TX FIFO trigger level */
/* This one is only in the PL023 variant */
#define SSP_CR1_MASK_FBCLKDEL_ST (0x7UL << 13)	/* Feedback clock delay */

/*
 * SSP Status Register  - SSP_SR
 */
#define SSP_SR_MASK_TFE		(0x1UL << 0)	/* Transmit FIFO empty */
#define SSP_SR_MASK_TNF		(0x1UL << 1)	/* Transmit FIFO not full */
#define SSP_SR_MASK_RNE		(0x1UL << 2)	/* Receive FIFO not empty */
#define SSP_SR_MASK_RFF		(0x1UL << 3)	/* Receive FIFO full */
#define SSP_SR_MASK_BSY		(0x1UL << 4)	/* Busy flag */

/*
 * SSP Clock Prescale Register  - SSP_CPSR
 */
#define SSP_CPSR_MASK_CPSDVSR	(0xFFUL << 0)	/* Clock prescale divisor */

/*
 * SSP Interrupt Mask Set/Clear Register  - SSP_IMSC
 */
#define SSP_IMSC_MASK_RORIM	(0x1UL << 0)	/* Receive overrun */
#define SSP_IMSC_MASK_RTIM	(0x1UL << 1)	/* Receive timeout */
#define SSP_IMSC_MASK_RXIM	(0x1UL << 2)	/* Receive FIFO */
#define SSP_IMSC_MASK_TXIM	(0x1UL << 3)	/* Transmit FIFO */

/*
 * SSP Raw Interrupt Status Register  - SSP_RIS
 */
/* Receive Overrun Raw Interrupt status */
#define SSP_RIS_MASK_RORRIS	(0x1UL << 0)
/* Receive Timeout Raw Interrupt status */
#define SSP_RIS_MASK_RTRIS	(0x1UL << 1)
/* Receive FIFO Raw Interrupt status */
#define SSP_RIS_MASK_RXRIS	(0x1UL << 2)
/* Transmit FIFO Raw Interrupt status */
#define SSP_RIS_MASK_TXRIS	(0x1UL << 3)

/*
 * SSP Masked Interrupt Status Register  - SSP_MIS
 */
/* Receive Overrun Masked Interrupt status */
#define SSP_MIS_MASK_RORMIS	(0x1UL << 0)
/* Receive Timeout Masked Interrupt status */
#define SSP_MIS_MASK_RTMIS	(0x1UL << 1)
/* Receive FIFO Masked Interrupt status */
#define SSP_MIS_MASK_RXMIS	(0x1UL << 2)
/* Transmit FIFO Masked Interrupt status */
#define SSP_MIS_MASK_TXMIS	(0x1UL << 3)

/*
 * SSP Interrupt Clear Register  - SSP_ICR
 */
/* Clears the Receive Overrun interrupt */
#define SSP_ICR_MASK_RORIC	(0x1UL << 0)
/* Clears the Receive Timeout interrupt */
#define SSP_ICR_MASK_RTIC	(0x1UL << 1)

/*
 * SSP DMA Control Register  - SSP_DMACR
 */
/* Receive DMA Enable bit */
#define SSP_DMACR_MASK_RXDMAE	(0x1UL << 0)
/* Transmit DMA Enable bit */
#define SSP_DMACR_MASK_TXDMAE	(0x1UL << 1)

/*
 * SSP Integration Test control Register  - SSP_ITCR
 */
#define SSP_ITCR_MASK_ITEN	(0x1UL << 0)
#define SSP_ITCR_MASK_TESTFIFO	(0x1UL << 1)

/*
 * SSP Integration Test Input Register  - SSP_ITIP
 */
#define ITIP_MASK_SSPRXD	(0x1UL << 0)
#define ITIP_MASK_SSPFSSIN	(0x1UL << 1)
#define ITIP_MASK_SSPCLKIN	(0x1UL << 2)
#define ITIP_MASK_RXDMAC	(0x1UL << 3)
#define ITIP_MASK_TXDMAC	(0x1UL << 4)
#define ITIP_MASK_SSPTXDIN	(0x1UL << 5)

/*
 * SSP Integration Test Output Register  - SSP_ITOP
 */
#define ITOP_MASK_SSPTXD	(0x1UL << 0)
#define ITOP_MASK_SSPFSSOUT	(0x1UL << 1)
#define ITOP_MASK_SSPCLKOUT	(0x1UL << 2)
#define ITOP_MASK_SSPOEn	(0x1UL << 3)
#define ITOP_MASK_SSPCTLOEn	(0x1UL << 4)
#define ITOP_MASK_RORINTR	(0x1UL << 5)
#define ITOP_MASK_RTINTR	(0x1UL << 6)
#define ITOP_MASK_RXINTR	(0x1UL << 7)
#define ITOP_MASK_TXINTR	(0x1UL << 8)
#define ITOP_MASK_INTR		(0x1UL << 9)
#define ITOP_MASK_RXDMABREQ	(0x1UL << 10)
#define ITOP_MASK_RXDMASREQ	(0x1UL << 11)
#define ITOP_MASK_TXDMABREQ	(0x1UL << 12)
#define ITOP_MASK_TXDMASREQ	(0x1UL << 13)

/*
 * SSP Test Data Register  - SSP_TDR
 */
#define TDR_MASK_TESTDATA	(0xFFFFFFFF)

/*
 * Message State: we use the spi_message.state (void *) pointer to
 * hold a single state value; that's why all these "funny" constants
 * are cast to (void *).
 */
#define STATE_START		((void *) 0)
#define STATE_RUNNING		((void *) 1)
#define STATE_DONE		((void *) 2)
#define STATE_ERROR		((void *) -1)

/*
 * SSP State - whether Enabled or Disabled (SSE bit in CR1)
 */
#define SSP_DISABLED		(0)
#define SSP_ENABLED		(1)

/*
 * SSP DMA State - whether DMA Enabled or Disabled (SSP_DMACR bits)
 */
#define SSP_DMA_DISABLED	(0)
#define SSP_DMA_ENABLED		(1)

/*
 * SSP Clock Defaults
 */
#define SSP_DEFAULT_CLKRATE	0x2
#define SSP_DEFAULT_PRESCALE	0x40

/*
 * SSP Clock Parameter ranges (CPSDVSR must be even, hence min 2)
 */
#define CPSDVR_MIN 0x02
#define CPSDVR_MAX 0xFE
#define SCR_MIN 0x00
#define SCR_MAX 0xFF

/*
 * SSP Interrupt related Macros: the IMSC register semantics are
 * "1 = interrupt enabled", so all-zeroes disables everything.
 */
#define DEFAULT_SSP_REG_IMSC	0x0UL
#define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC
#define ENABLE_ALL_INTERRUPTS (~DEFAULT_SSP_REG_IMSC)

/* Only ROR and RT interrupts are clearable (ICR bits 0 and 1) */
#define CLEAR_ALL_INTERRUPTS 0x3

/* Timeout for one polled transfer, in milliseconds */
#define SPI_POLLING_TIMEOUT 1000
288
289
290
/*
 * enum ssp_reading - RX word width used when draining the RX FIFO
 * READING_NULL means the received data is discarded (no rx buffer).
 */
enum ssp_reading {
	READING_NULL,
	READING_U8,
	READING_U16,
	READING_U32
};
297
298
299
300
/*
 * enum ssp_writing - TX word width used when filling the TX FIFO
 * WRITING_NULL means zeroes are clocked out (no tx buffer).
 */
enum ssp_writing {
	WRITING_NULL,
	WRITING_U8,
	WRITING_U16,
	WRITING_U32
};
307
308
309
310
311
312
313
314
315
316
317
/**
 * struct vendor_data - vendor-specific config parameters
 * for PL022 derivates
 * @fifodepth: depth of FIFOs (both)
 * @max_bpw: maximum number of bits per word
 * @unidir: supports unidirection transfers
 * @extended_cr: 32 bit wide control register 0 with extra
 * features and extra features in CR1 as found in the ST variants
 * @pl023: supports a subset of the ST extensions called "PL023"
 * @loopback: supports loopback mode
 */
struct vendor_data {
	int fifodepth;
	int max_bpw;
	bool unidir;
	bool extended_cr;
	bool pl023;
	bool loopback;
};
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
/**
 * struct pl022 - This is the private SSP driver data structure
 * @adev: AMBA device model hookup
 * @vendor: vendor data for the IP block
 * @phybase: the physical memory where the SSP device resides
 * @virtbase: the virtual memory where the SSP is mapped
 * @clk: outgoing clock "SPICLK" for the SPI bus
 * @master: SPI framework hookup
 * @master_info: controller-specific data from machine setup
 * @workqueue: a workqueue on which any spi_message request is queued
 * @pump_messages: work struct for scheduling work to the workqueue
 * @queue_lock: spinlock protecting @queue, @cur_msg, @busy and @running
 * @queue: pending spi_message queue
 * @busy: message pump is busy
 * @running: message pump is running
 * @pump_transfers: tasklet used in interrupt transfer mode
 * @cur_msg: pointer to current spi_message being processed
 * @cur_transfer: pointer to current spi_transfer
 * @cur_chip: pointer to current client's chip_data (from spi ctldata)
 * @tx: current position in TX buffer to be read
 * @tx_end: end position in TX buffer to be read
 * @rx: current position in RX buffer to be written
 * @rx_end: end position in RX buffer to be written
 * @read: the RX word width currently in use
 * @write: the TX word width currently in use
 * @exp_fifo_level: driver's estimate of the number of words currently
 *	in flight (written but not yet read back); used by readwriter()
 *	to avoid over-filling the TX FIFO and causing an RX overrun
 * @dma_rx_channel: optional channel for RX DMA
 * @dma_tx_channel: optional channel for TX DMA
 * @sgt_rx: scatter table for the RX transfer
 * @sgt_tx: scatter table for the TX transfer
 * @dummypage: a page used to clock dummy data for rx- or tx-only transfers
 */
struct pl022 {
	struct amba_device		*adev;
	struct vendor_data		*vendor;
	resource_size_t			phybase;
	void __iomem			*virtbase;
	struct clk			*clk;
	struct spi_master		*master;
	struct pl022_ssp_controller	*master_info;
	/* Driver message queue */
	struct workqueue_struct		*workqueue;
	struct work_struct		pump_messages;
	spinlock_t			queue_lock;
	struct list_head		queue;
	bool				busy;
	bool				running;
	/* Message transfer pump */
	struct tasklet_struct		pump_transfers;
	struct spi_message		*cur_msg;
	struct spi_transfer		*cur_transfer;
	struct chip_data		*cur_chip;
	void				*tx;
	void				*tx_end;
	void				*rx;
	void				*rx_end;
	enum ssp_reading		read;
	enum ssp_writing		write;
	u32				exp_fifo_level;
	/* DMA settings */
#ifdef CONFIG_DMA_ENGINE
	struct dma_chan			*dma_rx_channel;
	struct dma_chan			*dma_tx_channel;
	struct sg_table			sgt_rx;
	struct sg_table			sgt_tx;
	char				*dummypage;
#endif
};
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
/**
 * struct chip_data - To maintain runtime state of SSP for each client chip
 * @cr0: Value of control register CR0 of SSP - on later ST variants this
 *	register is 32 bits wide rather than just 16
 * @cr1: Value of control register CR1 of SSP
 * @dmacr: Value of DMA control Register of SSP
 * @cpsr: Value of Clock prescale register
 * @n_bytes: how many bytes(power of 2) required to represent a word of
 *	this chip's bus width (1, 2 or 4)
 * @enable_dma: Whether to enable DMA or not
 * @read: RX word width to use when a chip-select to this device is active
 * @write: TX word width to use when a chip-select to this device is active
 * @cs_control: chip select callback provided by chip info from board setup
 * @xfer_type: polling/interrupt/DMA transfer mode
 *
 * Runtime state of the SSP controller, maintained per chip,
 * this would be set according to the current message that would be served
 */
struct chip_data {
	u32 cr0;
	u16 cr1;
	u16 dmacr;
	u16 cpsr;
	u8 n_bytes;
	bool enable_dma;
	enum ssp_reading read;
	enum ssp_writing write;
	void (*cs_control) (u32 command);
	int xfer_type;
};
425
426
427
428
429
430
431
432
/**
 * null_cs_control - Dummy chip select function
 * @command: select/delect the chip
 *
 * If no chip select function is provided by client this is used as dummy
 * chip select; it only logs the request.
 */
static void null_cs_control(u32 command)
{
	pr_debug("pl022: dummy chip select control, CS=0x%x\n", command);
}
437
438
439
440
441
442
443
/**
 * giveback - current spi_message is over, schedule next message and call
 * callback of this message. Assumes that caller already
 * set message->status; dma and pio irqs are blocked
 * @pl022: SSP driver private data structure
 */
static void giveback(struct pl022 *pl022)
{
	struct spi_transfer *last_transfer;
	unsigned long flags;
	struct spi_message *msg;
	void (*curr_cs_control) (u32 command);

	/*
	 * This local reference to the chip select function has to be
	 * saved before the cur_chip pointer is NULL:ed below under the
	 * queue lock - it is still needed to (possibly) deassert the
	 * chip select after the lock has been dropped.
	 */
	curr_cs_control = pl022->cur_chip->cs_control;
	spin_lock_irqsave(&pl022->queue_lock, flags);
	msg = pl022->cur_msg;
	pl022->cur_msg = NULL;
	pl022->cur_transfer = NULL;
	pl022->cur_chip = NULL;
	/* Kick the message pump to pick up any queued message */
	queue_work(pl022->workqueue, &pl022->pump_messages);
	spin_unlock_irqrestore(&pl022->queue_lock, flags);

	last_transfer = list_entry(msg->transfers.prev,
					struct spi_transfer,
					transfer_list);

	/* Delay if requested before any change in chip select */
	if (last_transfer->delay_usecs)
		/*
		 * FIXME: This runs in interrupt context.
		 * Is this really smart?
		 */
		udelay(last_transfer->delay_usecs);

	/*
	 * Drop chip select UNLESS cs_change is true or we are returning
	 * a message with an error, or next message is for another chip
	 */
	if (!last_transfer->cs_change)
		curr_cs_control(SSP_CHIP_DESELECT);
	else {
		struct spi_message *next_msg;

		/*
		 * Holding of cs was hinted, but we need to make sure
		 * the next message is for the same chip.  Don't waste
		 * time with the following tests unless this was hinted.
		 *
		 * We cannot postpone this until pump_messages, because
		 * after calling msg->complete (below) the driver that
		 * sent the current message could be unloaded, which
		 * could invalidate the cs_control() callback...
		 */

		/* get a pointer to the next message, if any */
		spin_lock_irqsave(&pl022->queue_lock, flags);
		if (list_empty(&pl022->queue))
			next_msg = NULL;
		else
			next_msg = list_entry(pl022->queue.next,
					struct spi_message, queue);
		spin_unlock_irqrestore(&pl022->queue_lock, flags);

		/*
		 * see if the next and current messages point
		 * to the same chip
		 */
		if (next_msg && next_msg->spi != msg->spi)
			next_msg = NULL;
		if (!next_msg || msg->state == STATE_ERROR)
			curr_cs_control(SSP_CHIP_DESELECT);
	}
	msg->state = NULL;
	if (msg->complete)
		msg->complete(msg->context);
	/* This message is completed, so let's turn off the clocks & power */
	clk_disable(pl022->clk);
	amba_pclk_disable(pl022->adev);
	amba_vcore_disable(pl022->adev);
}
521
522
523
524
525
/**
 * flush - flush the FIFO to reach a clean state
 * @pl022: SSP driver private data structure
 *
 * Drains the RX FIFO while the controller reports busy, bounded by a
 * loop budget of 2*loops_per_jiffy.  Returns the remaining budget, so
 * a return value of 0 means the flush timed out.  Also resets the
 * driver's expected-FIFO-level bookkeeping.
 */
static int flush(struct pl022 *pl022)
{
	unsigned long limit = loops_per_jiffy << 1;

	dev_dbg(&pl022->adev->dev, "flush\n");
	do {
		while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
			readw(SSP_DR(pl022->virtbase));
	} while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--);

	pl022->exp_fifo_level = 0;

	return limit;
}
540
541
542
543
544
/**
 * restore_state - Load configuration of current chip
 * @pl022: SSP driver private data structure
 *
 * Writes the per-chip register images (CR0/CR1/DMACR/CPSR) cached in
 * cur_chip into the hardware, then masks and clears all interrupts.
 * On ST "extended" variants CR0 is 32 bits wide, hence writel there.
 */
static void restore_state(struct pl022 *pl022)
{
	struct chip_data *chip = pl022->cur_chip;

	if (pl022->vendor->extended_cr)
		writel(chip->cr0, SSP_CR0(pl022->virtbase));
	else
		writew(chip->cr0, SSP_CR0(pl022->virtbase));
	writew(chip->cr1, SSP_CR1(pl022->virtbase));
	writew(chip->dmacr, SSP_DMACR(pl022->virtbase));
	writew(chip->cpsr, SSP_CPSR(pl022->virtbase));
	writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
	writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
}
559
560
561
562
/*
 * Default SSP Register Values - reset-time register images composed
 * with GEN_MASK_BITS for each supported block variant.
 */
#define DEFAULT_SSP_REG_CR0 ( \
	GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0)	| \
	GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 4) | \
	GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
	GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
	GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
)

/* ST variant of CR0: wider data-size field, extra fields at bit 5/16/21 */
#define DEFAULT_SSP_REG_CR0_ST ( \
	GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0)	| \
	GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP_ST, 5) | \
	GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
	GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
	GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
	GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS_ST, 16)	| \
	GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF_ST, 21) \
)

/* PL023 variant of CR0: no frame-format/microwire fields */
#define DEFAULT_SSP_REG_CR0_ST_PL023 ( \
	GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0)	| \
	GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
	GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
	GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
)

#define DEFAULT_SSP_REG_CR1 ( \
	GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \
	GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
	GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
	GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) \
)

/* ST variant of CR1: adds endianness, wait state and FIFO trigger levels */
#define DEFAULT_SSP_REG_CR1_ST ( \
	DEFAULT_SSP_REG_CR1 | \
	GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
	GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
	GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT_ST, 6) |\
	GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
	GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) \
)

/*
 * The PL023 variant has some unique bits that are not found in the
 * other variants, notably the feedback clock delay; it also lacks
 * loopback mode, so CR1 is built from scratch rather than from
 * DEFAULT_SSP_REG_CR1.
 */
#define DEFAULT_SSP_REG_CR1_ST_PL023 ( \
	GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
	GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
	GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \
	GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
	GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
	GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
	GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) | \
	GEN_MASK_BITS(SSP_FEEDBACK_CLK_DELAY_NONE, SSP_CR1_MASK_FBCLKDEL_ST, 13) \
)

#define DEFAULT_SSP_REG_CPSR ( \
	GEN_MASK_BITS(SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
)

/* DMA disabled on both directions by default */
#define DEFAULT_SSP_REG_DMACR (\
	GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \
	GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \
)
630
631
632
633
634
/**
 * load_ssp_default_config - Load default configuration for SSP
 * @pl022: SSP driver private data structure
 *
 * Programs the variant-appropriate default register images and leaves
 * the block with all interrupts masked and cleared.
 */
static void load_ssp_default_config(struct pl022 *pl022)
{
	if (pl022->vendor->pl023) {
		/* PL023 has a 32 bit wide CR0 */
		writel(DEFAULT_SSP_REG_CR0_ST_PL023, SSP_CR0(pl022->virtbase));
		writew(DEFAULT_SSP_REG_CR1_ST_PL023, SSP_CR1(pl022->virtbase));
	} else if (pl022->vendor->extended_cr) {
		/* ST variants also have a 32 bit wide CR0 */
		writel(DEFAULT_SSP_REG_CR0_ST, SSP_CR0(pl022->virtbase));
		writew(DEFAULT_SSP_REG_CR1_ST, SSP_CR1(pl022->virtbase));
	} else {
		writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
		writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
	}
	writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase));
	writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase));
	writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
	writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
}
652
653
654
655
656
/**
 * readwriter - PIO engine: drain the RX FIFO and fill the TX FIFO
 * @pl022: SSP driver private data structure
 *
 * This function does generic polled read/write. It polls the status
 * register and drains the RX FIFO while there is data and room in the
 * rx buffer, then writes into the TX FIFO as long as the driver's
 * in-flight estimate (exp_fifo_level) says the FIFO has room. Inside
 * the write loop it drains RX again so that a full-duplex stream can
 * never overrun the RX FIFO while we keep pushing TX words. Word width
 * per direction is selected by pl022->read / pl022->write; NULL modes
 * discard RX data / clock out zeroes.
 */
static void readwriter(struct pl022 *pl022)
{

	/*
	 * The FIFO depth is different between primecell variants.
	 * I believe filling in too much in the FIFO might cause
	 * errons in 8bit wide transfers on ARM variants (just 8 words
	 * FIFO, means only 8x8 = 64 bits in FIFO) at least.
	 *
	 * To prevent this issue, the TX FIFO is only filled up to
	 * exp_fifo_level < fifodepth: exp_fifo_level tracks how many
	 * words have been written but not yet read back.
	 */
	dev_dbg(&pl022->adev->dev,
		"%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
		__func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end);

	/* Read as much as you can */
	while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
	       && (pl022->rx < pl022->rx_end)) {
		switch (pl022->read) {
		case READING_NULL:
			readw(SSP_DR(pl022->virtbase));
			break;
		case READING_U8:
			*(u8 *) (pl022->rx) =
				readw(SSP_DR(pl022->virtbase)) & 0xFFU;
			break;
		case READING_U16:
			*(u16 *) (pl022->rx) =
				(u16) readw(SSP_DR(pl022->virtbase));
			break;
		case READING_U32:
			*(u32 *) (pl022->rx) =
				readl(SSP_DR(pl022->virtbase));
			break;
		}
		pl022->rx += (pl022->cur_chip->n_bytes);
		pl022->exp_fifo_level--;
	}
	/*
	 * Write as much as possible up to the RX FIFO size
	 */
	while ((pl022->exp_fifo_level < pl022->vendor->fifodepth)
	       && (pl022->tx < pl022->tx_end)) {
		switch (pl022->write) {
		case WRITING_NULL:
			writew(0x0, SSP_DR(pl022->virtbase));
			break;
		case WRITING_U8:
			writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase));
			break;
		case WRITING_U16:
			writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase));
			break;
		case WRITING_U32:
			writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase));
			break;
		}
		pl022->tx += (pl022->cur_chip->n_bytes);
		pl022->exp_fifo_level++;
		/*
		 * This inner reader takes care of things appearing in the RX
		 * FIFO as we're transmitting. This will happen a lot since the
		 * clock starts running when you put things into the TX FIFO,
		 * and then things are continuously clocked into the RX FIFO.
		 */
		while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
		       && (pl022->rx < pl022->rx_end)) {
			switch (pl022->read) {
			case READING_NULL:
				readw(SSP_DR(pl022->virtbase));
				break;
			case READING_U8:
				*(u8 *) (pl022->rx) =
					readw(SSP_DR(pl022->virtbase)) & 0xFFU;
				break;
			case READING_U16:
				*(u16 *) (pl022->rx) =
					(u16) readw(SSP_DR(pl022->virtbase));
				break;
			case READING_U32:
				*(u32 *) (pl022->rx) =
					readl(SSP_DR(pl022->virtbase));
				break;
			}
			pl022->rx += (pl022->cur_chip->n_bytes);
			pl022->exp_fifo_level--;
		}
	}
	/*
	 * When we exit here the TX FIFO should be full and the RX FIFO
	 * should be empty
	 */
}
752
753
754
755
756
757
758
759
760
761
762
763static void *next_transfer(struct pl022 *pl022)
764{
765 struct spi_message *msg = pl022->cur_msg;
766 struct spi_transfer *trans = pl022->cur_transfer;
767
768
769 if (trans->transfer_list.next != &msg->transfers) {
770 pl022->cur_transfer =
771 list_entry(trans->transfer_list.next,
772 struct spi_transfer, transfer_list);
773 return STATE_RUNNING;
774 }
775 return STATE_DONE;
776}
777
778
779
780
781
782#ifdef CONFIG_DMA_ENGINE
/*
 * This DMA functionality is only compiled in if we have
 * access to the generic DMA devices/DMA engine.
 */
static void unmap_free_dma_scatter(struct pl022 *pl022)
{
	/* Unmap and free the SG tables; nents here matches what was
	 * passed to dma_map_sg(), as the DMA API requires. */
	dma_unmap_sg(pl022->dma_tx_channel->device->dev, pl022->sgt_tx.sgl,
		     pl022->sgt_tx.nents, DMA_TO_DEVICE);
	dma_unmap_sg(pl022->dma_rx_channel->device->dev, pl022->sgt_rx.sgl,
		     pl022->sgt_rx.nents, DMA_FROM_DEVICE);
	sg_free_table(&pl022->sgt_rx);
	sg_free_table(&pl022->sgt_tx);
}
793
/**
 * dma_callback - DMA-engine completion callback for the RX descriptor
 * @data: the struct pl022 cookie set as callback_param in configure_dma()
 *
 * Runs when the RX DMA transfer finishes (which implies TX is done too,
 * since SPI always clocks both directions). Unmaps and frees the
 * scatterlists, accounts the transferred bytes, optionally toggles the
 * chip select, then advances the message state machine via the
 * pump_transfers tasklet.
 */
static void dma_callback(void *data)
{
	struct pl022 *pl022 = data;
	struct spi_message *msg = pl022->cur_msg;

	BUG_ON(!pl022->sgt_rx.sgl);

#ifdef VERBOSE_DEBUG
	/*
	 * Optionally dump out buffers to inspect contents, this is
	 * good if you want to convince yourself that the loopback
	 * read/write contents are the same, when adopting to a new
	 * DMA engine.
	 */
	{
		struct scatterlist *sg;
		unsigned int i;

		dma_sync_sg_for_cpu(&pl022->adev->dev,
				    pl022->sgt_rx.sgl,
				    pl022->sgt_rx.nents,
				    DMA_FROM_DEVICE);

		for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) {
			dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i);
			print_hex_dump(KERN_ERR, "SPI RX: ",
				       DUMP_PREFIX_OFFSET,
				       16,
				       1,
				       sg_virt(sg),
				       sg_dma_len(sg),
				       1);
		}
		for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) {
			dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i);
			print_hex_dump(KERN_ERR, "SPI TX: ",
				       DUMP_PREFIX_OFFSET,
				       16,
				       1,
				       sg_virt(sg),
				       sg_dma_len(sg),
				       1);
		}
	}
#endif

	unmap_free_dma_scatter(pl022);

	/* Update total bytes transferred */
	msg->actual_length += pl022->cur_transfer->len;
	if (pl022->cur_transfer->cs_change)
		pl022->cur_chip->
			cs_control(SSP_CHIP_DESELECT);

	/* Move to next transfer */
	msg->state = next_transfer(pl022);
	tasklet_schedule(&pl022->pump_transfers);
}
852
/**
 * setup_dma_scatter - populate a scatter table covering a transfer buffer
 * @pl022: SSP driver private data structure
 * @buffer: virtual buffer to map, or NULL for a dummy-page transfer
 * @length: number of bytes of the transfer
 * @sgtab: the (already allocated) sg_table to fill in
 *
 * Splits @buffer into page-bounded chunks, one sg entry per chunk.
 * When @buffer is NULL the same dummy page is mapped repeatedly so the
 * DMA engine still has somewhere to read from/write to for rx- or
 * tx-only transfers. BUG()s if @sgtab has too few entries for @length.
 */
static void setup_dma_scatter(struct pl022 *pl022,
			      void *buffer,
			      unsigned int length,
			      struct sg_table *sgtab)
{
	struct scatterlist *sg;
	int bytesleft = length;
	void *bufp = buffer;
	int mapbytes;
	int i;

	if (buffer) {
		for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
			/*
			 * If there are less bytes left than what fits
			 * in the current page (plus page alignment offset)
			 * we just feed in this, else we stuff in as much
			 * as we can.
			 */
			if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
				mapbytes = bytesleft;
			else
				mapbytes = PAGE_SIZE - offset_in_page(bufp);
			sg_set_page(sg, virt_to_page(bufp),
				    mapbytes, offset_in_page(bufp));
			bufp += mapbytes;
			bytesleft -= mapbytes;
			/* NOTE(review): bufp is advanced before this print,
			 * so the logged address is the start of the NEXT
			 * chunk, not the one just set - confirm if this
			 * output matters. */
			dev_dbg(&pl022->adev->dev,
				"set RX/TX target page @ %p, %d bytes, %d left\n",
				bufp, mapbytes, bytesleft);
		}
	} else {
		/* Map the dummy buffer on every page */
		for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
			if (bytesleft < PAGE_SIZE)
				mapbytes = bytesleft;
			else
				mapbytes = PAGE_SIZE;
			sg_set_page(sg, virt_to_page(pl022->dummypage),
				    mapbytes, 0);
			bytesleft -= mapbytes;
			dev_dbg(&pl022->adev->dev,
				"set RX/TX to dummy page %d bytes, %d left\n",
				mapbytes, bytesleft);

		}
	}
	BUG_ON(bytesleft);
}
902
903
904
905
906
907static int configure_dma(struct pl022 *pl022)
908{
909 struct dma_slave_config rx_conf = {
910 .src_addr = SSP_DR(pl022->phybase),
911 .direction = DMA_FROM_DEVICE,
912 .src_maxburst = pl022->vendor->fifodepth >> 1,
913 };
914 struct dma_slave_config tx_conf = {
915 .dst_addr = SSP_DR(pl022->phybase),
916 .direction = DMA_TO_DEVICE,
917 .dst_maxburst = pl022->vendor->fifodepth >> 1,
918 };
919 unsigned int pages;
920 int ret;
921 int rx_sglen, tx_sglen;
922 struct dma_chan *rxchan = pl022->dma_rx_channel;
923 struct dma_chan *txchan = pl022->dma_tx_channel;
924 struct dma_async_tx_descriptor *rxdesc;
925 struct dma_async_tx_descriptor *txdesc;
926
927
928 if (!rxchan || !txchan)
929 return -ENODEV;
930
931 switch (pl022->read) {
932 case READING_NULL:
933
934 rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
935 break;
936 case READING_U8:
937 rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
938 break;
939 case READING_U16:
940 rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
941 break;
942 case READING_U32:
943 rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
944 break;
945 }
946
947 switch (pl022->write) {
948 case WRITING_NULL:
949
950 tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
951 break;
952 case WRITING_U8:
953 tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
954 break;
955 case WRITING_U16:
956 tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
957 break;
958 case WRITING_U32:
959 tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
960 break;
961 }
962
963
964 if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
965 rx_conf.src_addr_width = tx_conf.dst_addr_width;
966 if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
967 tx_conf.dst_addr_width = rx_conf.src_addr_width;
968 BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width);
969
970 dmaengine_slave_config(rxchan, &rx_conf);
971 dmaengine_slave_config(txchan, &tx_conf);
972
973
974 pages = (pl022->cur_transfer->len >> PAGE_SHIFT) + 1;
975 dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages);
976
977 ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_KERNEL);
978 if (ret)
979 goto err_alloc_rx_sg;
980
981 ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_KERNEL);
982 if (ret)
983 goto err_alloc_tx_sg;
984
985
986 setup_dma_scatter(pl022, pl022->rx,
987 pl022->cur_transfer->len, &pl022->sgt_rx);
988 setup_dma_scatter(pl022, pl022->tx,
989 pl022->cur_transfer->len, &pl022->sgt_tx);
990
991
992 rx_sglen = dma_map_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
993 pl022->sgt_rx.nents, DMA_FROM_DEVICE);
994 if (!rx_sglen)
995 goto err_rx_sgmap;
996
997 tx_sglen = dma_map_sg(txchan->device->dev, pl022->sgt_tx.sgl,
998 pl022->sgt_tx.nents, DMA_TO_DEVICE);
999 if (!tx_sglen)
1000 goto err_tx_sgmap;
1001
1002
1003 rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
1004 pl022->sgt_rx.sgl,
1005 rx_sglen,
1006 DMA_FROM_DEVICE,
1007 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1008 if (!rxdesc)
1009 goto err_rxdesc;
1010
1011 txdesc = txchan->device->device_prep_slave_sg(txchan,
1012 pl022->sgt_tx.sgl,
1013 tx_sglen,
1014 DMA_TO_DEVICE,
1015 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1016 if (!txdesc)
1017 goto err_txdesc;
1018
1019
1020 rxdesc->callback = dma_callback;
1021 rxdesc->callback_param = pl022;
1022
1023
1024 dmaengine_submit(rxdesc);
1025 dmaengine_submit(txdesc);
1026 dma_async_issue_pending(rxchan);
1027 dma_async_issue_pending(txchan);
1028
1029 return 0;
1030
1031err_txdesc:
1032 dmaengine_terminate_all(txchan);
1033err_rxdesc:
1034 dmaengine_terminate_all(rxchan);
1035 dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl,
1036 pl022->sgt_tx.nents, DMA_TO_DEVICE);
1037err_tx_sgmap:
1038 dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
1039 pl022->sgt_tx.nents, DMA_FROM_DEVICE);
1040err_rx_sgmap:
1041 sg_free_table(&pl022->sgt_tx);
1042err_alloc_tx_sg:
1043 sg_free_table(&pl022->sgt_rx);
1044err_alloc_rx_sg:
1045 return -ENOMEM;
1046}
1047
/**
 * pl022_dma_probe - acquire DMA channels and the dummy page
 * @pl022: SSP driver private data structure
 *
 * Requests one slave channel per direction using the board-supplied
 * filter function and parameters, and allocates the dummy page used
 * for rx- or tx-only transfers. On any failure everything acquired so
 * far is released and -ENODEV is returned; the driver then runs
 * without DMA.
 *
 * NOTE(review): this is marked __init but is called from the device
 * probe path; if a device can be probed after init memory is released
 * (e.g. a hotplugged/late-bound amba device) this would be unsafe -
 * confirm against the probe annotations used elsewhere in the file.
 */
static int __init pl022_dma_probe(struct pl022 *pl022)
{
	dma_cap_mask_t mask;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	/*
	 * We need both RX and TX channels to do DMA, else do none
	 * of them.
	 */
	pl022->dma_rx_channel = dma_request_channel(mask,
					    pl022->master_info->dma_filter,
					    pl022->master_info->dma_rx_param);
	if (!pl022->dma_rx_channel) {
		dev_dbg(&pl022->adev->dev, "no RX DMA channel!\n");
		goto err_no_rxchan;
	}

	pl022->dma_tx_channel = dma_request_channel(mask,
					    pl022->master_info->dma_filter,
					    pl022->master_info->dma_tx_param);
	if (!pl022->dma_tx_channel) {
		dev_dbg(&pl022->adev->dev, "no TX DMA channel!\n");
		goto err_no_txchan;
	}

	pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!pl022->dummypage) {
		dev_dbg(&pl022->adev->dev, "no DMA dummypage!\n");
		goto err_no_dummypage;
	}

	dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n",
		 dma_chan_name(pl022->dma_rx_channel),
		 dma_chan_name(pl022->dma_tx_channel));

	return 0;

err_no_dummypage:
	dma_release_channel(pl022->dma_tx_channel);
err_no_txchan:
	dma_release_channel(pl022->dma_rx_channel);
	pl022->dma_rx_channel = NULL;
err_no_rxchan:
	dev_err(&pl022->adev->dev,
			"Failed to work in dma mode, work without dma!\n");
	return -ENODEV;
}
1097
/**
 * terminate_dma - abort any in-flight DMA and release the scatterlists
 * @pl022: SSP driver private data structure
 *
 * Used on teardown while a DMA transfer may still be running; stops
 * both channels before unmapping/freeing the sg tables.
 */
static void terminate_dma(struct pl022 *pl022)
{
	struct dma_chan *rxchan = pl022->dma_rx_channel;
	struct dma_chan *txchan = pl022->dma_tx_channel;

	dmaengine_terminate_all(rxchan);
	dmaengine_terminate_all(txchan);
	unmap_free_dma_scatter(pl022);
}
1107
/**
 * pl022_dma_remove - release all DMA resources acquired by pl022_dma_probe
 * @pl022: SSP driver private data structure
 *
 * If a transfer is still in flight (busy), abort it first so the
 * channels are idle before they are released.
 */
static void pl022_dma_remove(struct pl022 *pl022)
{
	if (pl022->busy)
		terminate_dma(pl022);
	if (pl022->dma_tx_channel)
		dma_release_channel(pl022->dma_tx_channel);
	if (pl022->dma_rx_channel)
		dma_release_channel(pl022->dma_rx_channel);
	kfree(pl022->dummypage);
}
1118
#else
/*
 * Stubs used when CONFIG_DMA_ENGINE is not enabled: configure_dma()
 * always fails so the driver falls back to interrupt or polling mode,
 * and probe/remove are no-ops.
 */
static inline int configure_dma(struct pl022 *pl022)
{
	return -ENODEV;
}

static inline int pl022_dma_probe(struct pl022 *pl022)
{
	return 0;
}

static inline void pl022_dma_remove(struct pl022 *pl022)
{
}
#endif
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
1147{
1148 struct pl022 *pl022 = dev_id;
1149 struct spi_message *msg = pl022->cur_msg;
1150 u16 irq_status = 0;
1151 u16 flag = 0;
1152
1153 if (unlikely(!msg)) {
1154 dev_err(&pl022->adev->dev,
1155 "bad message state in interrupt handler");
1156
1157 return IRQ_HANDLED;
1158 }
1159
1160
1161 irq_status = readw(SSP_MIS(pl022->virtbase));
1162
1163 if (unlikely(!irq_status))
1164 return IRQ_NONE;
1165
1166
1167
1168
1169
1170
1171 if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) {
1172
1173
1174
1175
1176 dev_err(&pl022->adev->dev, "FIFO overrun\n");
1177 if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF)
1178 dev_err(&pl022->adev->dev,
1179 "RXFIFO is full\n");
1180 if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF)
1181 dev_err(&pl022->adev->dev,
1182 "TXFIFO is full\n");
1183
1184
1185
1186
1187
1188
1189 writew(DISABLE_ALL_INTERRUPTS,
1190 SSP_IMSC(pl022->virtbase));
1191 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
1192 writew((readw(SSP_CR1(pl022->virtbase)) &
1193 (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
1194 msg->state = STATE_ERROR;
1195
1196
1197 tasklet_schedule(&pl022->pump_transfers);
1198 return IRQ_HANDLED;
1199 }
1200
1201 readwriter(pl022);
1202
1203 if ((pl022->tx == pl022->tx_end) && (flag == 0)) {
1204 flag = 1;
1205
1206 writew(readw(SSP_IMSC(pl022->virtbase)) &
1207 (~SSP_IMSC_MASK_TXIM),
1208 SSP_IMSC(pl022->virtbase));
1209 }
1210
1211
1212
1213
1214
1215
1216 if (pl022->rx >= pl022->rx_end) {
1217 writew(DISABLE_ALL_INTERRUPTS,
1218 SSP_IMSC(pl022->virtbase));
1219 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
1220 if (unlikely(pl022->rx > pl022->rx_end)) {
1221 dev_warn(&pl022->adev->dev, "read %u surplus "
1222 "bytes (did you request an odd "
1223 "number of bytes on a 16bit bus?)\n",
1224 (u32) (pl022->rx - pl022->rx_end));
1225 }
1226
1227 msg->actual_length += pl022->cur_transfer->len;
1228 if (pl022->cur_transfer->cs_change)
1229 pl022->cur_chip->
1230 cs_control(SSP_CHIP_DESELECT);
1231
1232 msg->state = next_transfer(pl022);
1233 tasklet_schedule(&pl022->pump_transfers);
1234 return IRQ_HANDLED;
1235 }
1236
1237 return IRQ_HANDLED;
1238}
1239
1240
1241
1242
1243
1244static int set_up_next_transfer(struct pl022 *pl022,
1245 struct spi_transfer *transfer)
1246{
1247 int residue;
1248
1249
1250 residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes;
1251 if (unlikely(residue != 0)) {
1252 dev_err(&pl022->adev->dev,
1253 "message of %u bytes to transmit but the current "
1254 "chip bus has a data width of %u bytes!\n",
1255 pl022->cur_transfer->len,
1256 pl022->cur_chip->n_bytes);
1257 dev_err(&pl022->adev->dev, "skipping this message\n");
1258 return -EIO;
1259 }
1260 pl022->tx = (void *)transfer->tx_buf;
1261 pl022->tx_end = pl022->tx + pl022->cur_transfer->len;
1262 pl022->rx = (void *)transfer->rx_buf;
1263 pl022->rx_end = pl022->rx + pl022->cur_transfer->len;
1264 pl022->write =
1265 pl022->tx ? pl022->cur_chip->write : WRITING_NULL;
1266 pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL;
1267 return 0;
1268}
1269
1270
1271
1272
1273
1274
1275
/**
 * pump_transfers - Tasklet function which schedules next transfer
 * when running in interrupt or DMA transfer mode.
 * @data: SSP driver private data structure (cast from unsigned long)
 *
 * Drives the per-message state machine between transfers: finishes the
 * message on STATE_DONE/STATE_ERROR via giveback(), applies inter-transfer
 * delay and chip-select toggling, sets up buffer pointers for the next
 * transfer, then either hands off to DMA or (on DMA failure or when DMA is
 * not enabled for the chip) unmasks all interrupts for PIO operation.
 */
static void pump_transfers(unsigned long data)
{
	struct pl022 *pl022 = (struct pl022 *) data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;

	/* Get current state information */
	message = pl022->cur_msg;
	transfer = pl022->cur_transfer;

	/* Handle for abort */
	if (message->state == STATE_ERROR) {
		message->status = -EIO;
		giveback(pl022);
		return;
	}

	/* Handle end of message */
	if (message->state == STATE_DONE) {
		message->status = 0;
		giveback(pl022);
		return;
	}

	/* Delay if requested at end of transfer before CS change */
	if (message->state == STATE_RUNNING) {
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
		if (previous->delay_usecs)
			/*
			 * FIXME: This runs in interrupt context.
			 * Is this really smart?
			 */
			udelay(previous->delay_usecs);

		/* Drop chip select only if cs_change is requested */
		if (previous->cs_change)
			pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
	} else {
		/* STATE_START */
		message->state = STATE_RUNNING;
	}

	if (set_up_next_transfer(pl022, transfer)) {
		message->state = STATE_ERROR;
		message->status = -EIO;
		giveback(pl022);
		return;
	}
	/* Flush the FIFOs and let's go! */
	flush(pl022);

	if (pl022->cur_chip->enable_dma) {
		if (configure_dma(pl022)) {
			dev_dbg(&pl022->adev->dev,
				"configuration of DMA failed, fall back to interrupt mode\n");
			goto err_config_dma;
		}
		return;
	}

err_config_dma:
	/* PIO fallback: enable all interrupts so readwriter() gets driven */
	writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
}
1342
/**
 * do_interrupt_dma_transfer - start the FIRST transfer of a message in
 * interrupt or DMA mode
 * @pl022: SSP driver private data structure
 *
 * Asserts chip select, primes the buffer pointers, optionally hands the
 * transfer to the DMA engine (in which case the PL022 interrupts stay
 * masked - completion arrives through dma_callback()), then enables the
 * SSP. On DMA setup failure it falls back to interrupt-driven PIO by
 * unmasking all interrupts.
 */
static void do_interrupt_dma_transfer(struct pl022 *pl022)
{
	u32 irqflags = ENABLE_ALL_INTERRUPTS;

	/* Enable target chip */
	pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
	if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
		/* Error path */
		pl022->cur_msg->state = STATE_ERROR;
		pl022->cur_msg->status = -EIO;
		giveback(pl022);
		return;
	}
	/* If we're using DMA, set up DMA here */
	if (pl022->cur_chip->enable_dma) {
		/* Configure DMA transfer */
		if (configure_dma(pl022)) {
			dev_dbg(&pl022->adev->dev,
				"configuration of DMA failed, fall back to interrupt mode\n");
			goto err_config_dma;
		}
		/* Disable interrupts in DMA mode, IRQ from DMA controller */
		irqflags = DISABLE_ALL_INTERRUPTS;
	}
err_config_dma:
	/* Enable SSP, turn on interrupts */
	writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
	       SSP_CR1(pl022->virtbase));
	writew(irqflags, SSP_IMSC(pl022->virtbase));
}
1373
/**
 * do_polling_transfer - run an entire spi_message by busy-polling
 * @pl022: SSP driver private data structure
 *
 * Iterates over all transfers of the current message, driving the FIFOs
 * with readwriter() until each transfer completes or SPI_POLLING_TIMEOUT
 * milliseconds elapse. Handles inter-transfer delays and chip-select
 * changes like the interrupt path. Finally sets message->status and
 * hands the message back via giveback().
 */
static void do_polling_transfer(struct pl022 *pl022)
{
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	struct chip_data *chip;
	unsigned long time, timeout;

	chip = pl022->cur_chip;
	message = pl022->cur_msg;

	while (message->state != STATE_DONE) {
		/* Handle for abort */
		if (message->state == STATE_ERROR)
			break;
		transfer = pl022->cur_transfer;

		/* Delay if requested at end of transfer */
		if (message->state == STATE_RUNNING) {
			previous =
			    list_entry(transfer->transfer_list.prev,
				       struct spi_transfer, transfer_list);
			if (previous->delay_usecs)
				udelay(previous->delay_usecs);
			if (previous->cs_change)
				pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
		} else {
			/* STATE_START */
			message->state = STATE_RUNNING;
			pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
		}

		/* Configuration Changing Per Transfer */
		if (set_up_next_transfer(pl022, transfer)) {
			/* Error path */
			message->state = STATE_ERROR;
			break;
		}
		/* Flush FIFOs and enable SSP */
		flush(pl022);
		writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
		       SSP_CR1(pl022->virtbase));

		dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n");

		timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT);
		while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) {
			time = jiffies;
			readwriter(pl022);
			if (time_after(time, timeout)) {
				dev_warn(&pl022->adev->dev,
				"%s: timeout!\n", __func__);
				message->state = STATE_ERROR;
				goto out;
			}
			cpu_relax();
		}

		/* Update total byte transferred */
		message->actual_length += pl022->cur_transfer->len;
		if (pl022->cur_transfer->cs_change)
			pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);
		/* Move to next transfer */
		message->state = next_transfer(pl022);
	}
out:
	/* Handle end of message */
	if (message->state == STATE_DONE)
		message->status = 0;
	else
		message->status = -EIO;

	giveback(pl022);
	return;
}
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
/**
 * pump_messages - workqueue function which processes the SPI message queue
 * @work: the work struct embedded in the pl022 private structure
 *
 * Pops the head of the message queue (if any), powers up the block
 * (vcore, pclk, functional clock), restores the per-chip register state
 * and dispatches to the polling or interrupt/DMA transfer path. The
 * clocks/core are disabled again by giveback() once the message is done.
 */
static void pump_messages(struct work_struct *work)
{
	struct pl022 *pl022 =
		container_of(work, struct pl022, pump_messages);
	unsigned long flags;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&pl022->queue_lock, flags);
	if (list_empty(&pl022->queue) || !pl022->running) {
		pl022->busy = false;
		spin_unlock_irqrestore(&pl022->queue_lock, flags);
		return;
	}
	/* Make sure we are not already running a message */
	if (pl022->cur_msg) {
		spin_unlock_irqrestore(&pl022->queue_lock, flags);
		return;
	}
	/* Extract head of queue */
	pl022->cur_msg =
		list_entry(pl022->queue.next, struct spi_message, queue);

	list_del_init(&pl022->cur_msg->queue);
	pl022->busy = true;
	spin_unlock_irqrestore(&pl022->queue_lock, flags);

	/* Initial message state */
	pl022->cur_msg->state = STATE_START;
	pl022->cur_transfer = list_entry(pl022->cur_msg->transfers.next,
					 struct spi_transfer,
					 transfer_list);

	/* Set up the SPI using the per-chip configuration */
	pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
	/*
	 * We enable the core voltage and clocks here, then the clocks
	 * and core will be disabled when giveback() is called in each
	 * method (poll/interrupt/DMA).
	 */
	amba_vcore_enable(pl022->adev);
	amba_pclk_enable(pl022->adev);
	clk_enable(pl022->clk);
	restore_state(pl022);
	flush(pl022);

	if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
		do_polling_transfer(pl022);
	else
		do_interrupt_dma_transfer(pl022);
}
1510
1511
1512static int __init init_queue(struct pl022 *pl022)
1513{
1514 INIT_LIST_HEAD(&pl022->queue);
1515 spin_lock_init(&pl022->queue_lock);
1516
1517 pl022->running = false;
1518 pl022->busy = false;
1519
1520 tasklet_init(&pl022->pump_transfers,
1521 pump_transfers, (unsigned long)pl022);
1522
1523 INIT_WORK(&pl022->pump_messages, pump_messages);
1524 pl022->workqueue = create_singlethread_workqueue(
1525 dev_name(pl022->master->dev.parent));
1526 if (pl022->workqueue == NULL)
1527 return -EBUSY;
1528
1529 return 0;
1530}
1531
1532
1533static int start_queue(struct pl022 *pl022)
1534{
1535 unsigned long flags;
1536
1537 spin_lock_irqsave(&pl022->queue_lock, flags);
1538
1539 if (pl022->running || pl022->busy) {
1540 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1541 return -EBUSY;
1542 }
1543
1544 pl022->running = true;
1545 pl022->cur_msg = NULL;
1546 pl022->cur_transfer = NULL;
1547 pl022->cur_chip = NULL;
1548 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1549
1550 queue_work(pl022->workqueue, &pl022->pump_messages);
1551
1552 return 0;
1553}
1554
1555
1556static int stop_queue(struct pl022 *pl022)
1557{
1558 unsigned long flags;
1559 unsigned limit = 500;
1560 int status = 0;
1561
1562 spin_lock_irqsave(&pl022->queue_lock, flags);
1563
1564
1565
1566
1567
1568 while ((!list_empty(&pl022->queue) || pl022->busy) && limit--) {
1569 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1570 msleep(10);
1571 spin_lock_irqsave(&pl022->queue_lock, flags);
1572 }
1573
1574 if (!list_empty(&pl022->queue) || pl022->busy)
1575 status = -EBUSY;
1576 else
1577 pl022->running = false;
1578
1579 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1580
1581 return status;
1582}
1583
1584static int destroy_queue(struct pl022 *pl022)
1585{
1586 int status;
1587
1588 status = stop_queue(pl022);
1589
1590
1591
1592
1593
1594
1595 if (status != 0)
1596 return status;
1597
1598 destroy_workqueue(pl022->workqueue);
1599
1600 return 0;
1601}
1602
1603static int verify_controller_parameters(struct pl022 *pl022,
1604 struct pl022_config_chip const *chip_info)
1605{
1606 if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI)
1607 || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) {
1608 dev_err(&pl022->adev->dev,
1609 "interface is configured incorrectly\n");
1610 return -EINVAL;
1611 }
1612 if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) &&
1613 (!pl022->vendor->unidir)) {
1614 dev_err(&pl022->adev->dev,
1615 "unidirectional mode not supported in this "
1616 "hardware version\n");
1617 return -EINVAL;
1618 }
1619 if ((chip_info->hierarchy != SSP_MASTER)
1620 && (chip_info->hierarchy != SSP_SLAVE)) {
1621 dev_err(&pl022->adev->dev,
1622 "hierarchy is configured incorrectly\n");
1623 return -EINVAL;
1624 }
1625 if ((chip_info->com_mode != INTERRUPT_TRANSFER)
1626 && (chip_info->com_mode != DMA_TRANSFER)
1627 && (chip_info->com_mode != POLLING_TRANSFER)) {
1628 dev_err(&pl022->adev->dev,
1629 "Communication mode is configured incorrectly\n");
1630 return -EINVAL;
1631 }
1632 if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM)
1633 || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) {
1634 dev_err(&pl022->adev->dev,
1635 "RX FIFO Trigger Level is configured incorrectly\n");
1636 return -EINVAL;
1637 }
1638 if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC)
1639 || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) {
1640 dev_err(&pl022->adev->dev,
1641 "TX FIFO Trigger Level is configured incorrectly\n");
1642 return -EINVAL;
1643 }
1644 if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
1645 if ((chip_info->ctrl_len < SSP_BITS_4)
1646 || (chip_info->ctrl_len > SSP_BITS_32)) {
1647 dev_err(&pl022->adev->dev,
1648 "CTRL LEN is configured incorrectly\n");
1649 return -EINVAL;
1650 }
1651 if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO)
1652 && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) {
1653 dev_err(&pl022->adev->dev,
1654 "Wait State is configured incorrectly\n");
1655 return -EINVAL;
1656 }
1657
1658 if (pl022->vendor->extended_cr) {
1659 if ((chip_info->duplex !=
1660 SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
1661 && (chip_info->duplex !=
1662 SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
1663 dev_err(&pl022->adev->dev,
1664 "Microwire duplex mode is configured incorrectly\n");
1665 return -EINVAL;
1666 }
1667 } else {
1668 if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
1669 dev_err(&pl022->adev->dev,
1670 "Microwire half duplex mode requested,"
1671 " but this is only available in the"
1672 " ST version of PL022\n");
1673 return -EINVAL;
1674 }
1675 }
1676 return 0;
1677}
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688static int pl022_transfer(struct spi_device *spi, struct spi_message *msg)
1689{
1690 struct pl022 *pl022 = spi_master_get_devdata(spi->master);
1691 unsigned long flags;
1692
1693 spin_lock_irqsave(&pl022->queue_lock, flags);
1694
1695 if (!pl022->running) {
1696 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1697 return -ESHUTDOWN;
1698 }
1699 msg->actual_length = 0;
1700 msg->status = -EINPROGRESS;
1701 msg->state = STATE_START;
1702
1703 list_add_tail(&msg->queue, &pl022->queue);
1704 if (pl022->running && !pl022->busy)
1705 queue_work(pl022->workqueue, &pl022->pump_messages);
1706
1707 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1708 return 0;
1709}
1710
/**
 * calculate_effective_freq - derive clock divisors for a target SPI rate
 * @pl022: driver state (provides the bus clock rate)
 * @freq: requested maximum frequency in Hz
 * @clk_freq: output; receives the chosen cpsdvsr and scr divisors
 *
 * Effective rate = rate / (cpsdvsr * (1 + scr)), with cpsdvsr even in
 * [CPSDVR_MIN, CPSDVR_MAX] and scr in [SCR_MIN, SCR_MAX]. Searches for
 * the largest effective rate that does not exceed @freq; when an exact
 * match is impossible the divisors are nudged back one step so the
 * result stays at or below the request.
 *
 * Returns 0 on success, -EINVAL if @freq is outside the achievable range.
 */
static int calculate_effective_freq(struct pl022 *pl022,
				    int freq,
				    struct ssp_clock_params *clk_freq)
{
	/* Lets calculate the frequency parameters */
	u16 cpsdvsr = 2;
	u16 scr = 0;
	bool freq_found = false;
	u32 rate;
	u32 max_tclk;
	u32 min_tclk;

	rate = clk_get_rate(pl022->clk);
	/* Fastest possible: cpsdvsr = CPSDVR_MIN and scr = SCR_MIN */
	max_tclk = (rate / (CPSDVR_MIN * (1 + SCR_MIN)));
	/* Slowest possible: cpsdvsr = CPSDVR_MAX and scr = SCR_MAX */
	min_tclk = (rate / (CPSDVR_MAX * (1 + SCR_MAX)));

	if ((freq <= max_tclk) && (freq >= min_tclk)) {
		while (cpsdvsr <= CPSDVR_MAX && !freq_found) {
			while (scr <= SCR_MAX && !freq_found) {
				if ((rate /
				     (cpsdvsr * (1 + scr))) > freq)
					scr += 1;
				else {
					/*
					 * This bool is made true when
					 * effective frequency >=
					 * target frequency is found
					 */
					freq_found = true;
					if ((rate /
					     (cpsdvsr * (1 + scr))) != freq) {
						/* Overshot: step back one */
						if (scr == SCR_MIN) {
							cpsdvsr -= 2;
							scr = SCR_MAX;
						} else
							scr -= 1;
					}
				}
			}
			if (!freq_found) {
				cpsdvsr += 2;
				scr = SCR_MIN;
			}
		}
		if (cpsdvsr != 0) {
			dev_dbg(&pl022->adev->dev,
				"SSP Effective Frequency is %u\n",
				(rate / (cpsdvsr * (1 + scr))));
			clk_freq->cpsdvsr = (u8) (cpsdvsr & 0xFF);
			clk_freq->scr = (u8) (scr & 0xFF);
			dev_dbg(&pl022->adev->dev,
				"SSP cpsdvsr = %d, scr = %d\n",
				clk_freq->cpsdvsr, clk_freq->scr);
		}
	} else {
		dev_err(&pl022->adev->dev,
			"controller data is incorrect: out of range frequency");
		return -EINVAL;
	}
	return 0;
}
1774
1775
1776
1777
1778
1779
/*
 * A piece of default chip info, used when the platform supplies no
 * controller_data for a device. Conservative settings: polling mode,
 * Motorola SPI framing, slave hierarchy, smallest FIFO trigger levels
 * and a no-op chip select handler.
 */
static const struct pl022_config_chip pl022_default_chip_info = {
	.com_mode = POLLING_TRANSFER,
	.iface = SSP_INTERFACE_MOTOROLA_SPI,
	.hierarchy = SSP_SLAVE,
	.slave_tx_disable = DO_NOT_DRIVE_TX,
	.rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
	.tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
	.ctrl_len = SSP_BITS_8,
	.wait_state = SSP_MWIRE_WAIT_ZERO,
	.duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
	.cs_control = null_cs_control,
};
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
/**
 * pl022_setup - setup function registered to the SPI master framework
 * @spi: SPI device which is requesting setup
 *
 * If this is the first time setup is called for the device, a runtime
 * state (struct chip_data) is allocated and saved in the device; on
 * subsequent calls the existing state is updated. Nothing is written
 * to the controller hardware here - the precomputed CR0/CR1/DMACR/CPSR
 * images are applied when a transfer actually starts.
 *
 * Returns 0 on success or a negative errno (frees the state on error).
 */
static int pl022_setup(struct spi_device *spi)
{
	struct pl022_config_chip const *chip_info;
	struct chip_data *chip;
	struct ssp_clock_params clk_freq = {0, };
	int status = 0;
	struct pl022 *pl022 = spi_master_get_devdata(spi->master);
	unsigned int bits = spi->bits_per_word;
	u32 tmp;

	if (!spi->max_speed_hz)
		return -EINVAL;

	/* Get controller_state if one is supplied */
	chip = spi_get_ctldata(spi);

	if (chip == NULL) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip) {
			dev_err(&spi->dev,
				"cannot allocate controller state\n");
			return -ENOMEM;
		}
		dev_dbg(&spi->dev,
			"allocated memory for controller's runtime state\n");
	}

	/* Get controller data if one is supplied */
	chip_info = spi->controller_data;

	if (chip_info == NULL) {
		chip_info = &pl022_default_chip_info;
		/* spi_board_info.controller_data not is supplied */
		dev_dbg(&spi->dev,
			"using default controller_data settings\n");
	} else
		dev_dbg(&spi->dev,
			"using user supplied controller_data settings\n");

	/*
	 * We can override with custom divisors, else we use the board
	 * frequency setting
	 */
	if ((0 == chip_info->clk_freq.cpsdvsr)
	    && (0 == chip_info->clk_freq.scr)) {
		status = calculate_effective_freq(pl022,
						  spi->max_speed_hz,
						  &clk_freq);
		if (status < 0)
			goto err_config_params;
	} else {
		memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq));
		/* cpsdvsr must be even; round an odd value down */
		if ((clk_freq.cpsdvsr % 2) != 0)
			clk_freq.cpsdvsr =
				clk_freq.cpsdvsr - 1;
	}
	if ((clk_freq.cpsdvsr < CPSDVR_MIN)
	    || (clk_freq.cpsdvsr > CPSDVR_MAX)) {
		status = -EINVAL;
		dev_err(&spi->dev,
			"cpsdvsr is configured incorrectly\n");
		goto err_config_params;
	}

	/* Check the configuration against this hardware's capabilities */
	status = verify_controller_parameters(pl022, chip_info);
	if (status) {
		dev_err(&spi->dev, "controller data is incorrect");
		goto err_config_params;
	}

	/* Now set controller state based on controller data */
	chip->xfer_type = chip_info->com_mode;
	if (!chip_info->cs_control) {
		chip->cs_control = null_cs_control;
		dev_warn(&spi->dev,
			 "chip select function is NULL for this chip\n");
	} else
		chip->cs_control = chip_info->cs_control;

	if (bits <= 3) {
		/* PL022 doesn't support less than 4-bits */
		status = -ENOTSUPP;
		goto err_config_params;
	} else if (bits <= 8) {
		dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n");
		chip->n_bytes = 1;
		chip->read = READING_U8;
		chip->write = WRITING_U8;
	} else if (bits <= 16) {
		dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
		chip->n_bytes = 2;
		chip->read = READING_U16;
		chip->write = WRITING_U16;
	} else {
		if (pl022->vendor->max_bpw >= 32) {
			dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
			chip->n_bytes = 4;
			chip->read = READING_U32;
			chip->write = WRITING_U32;
		} else {
			dev_err(&spi->dev,
				"illegal data size for this controller!\n");
			dev_err(&spi->dev,
				"a standard pl022 can only handle "
				"1 <= n <= 16 bit words\n");
			status = -ENOTSUPP;
			goto err_config_params;
		}
	}

	/* Now initialize all register settings required for this chip */
	chip->cr0 = 0;
	chip->cr1 = 0;
	chip->dmacr = 0;
	chip->cpsr = 0;
	if ((chip_info->com_mode == DMA_TRANSFER)
	    && ((pl022->master_info)->enable_dma)) {
		chip->enable_dma = true;
		dev_dbg(&spi->dev, "DMA mode set in controller state\n");
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
			       SSP_DMACR_MASK_RXDMAE, 0);
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
			       SSP_DMACR_MASK_TXDMAE, 1);
	} else {
		chip->enable_dma = false;
		dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
			       SSP_DMACR_MASK_RXDMAE, 0);
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
			       SSP_DMACR_MASK_TXDMAE, 1);
	}

	chip->cpsr = clk_freq.cpsdvsr;

	/* Special setup for the ST micro extended control registers */
	if (pl022->vendor->extended_cr) {
		u32 etx;

		if (pl022->vendor->pl023) {
			/* These bits are only in the PL023 */
			SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay,
				       SSP_CR1_MASK_FBCLKDEL_ST, 13);
		} else {
			/* These bits are in the PL022 but not PL023 */
			SSP_WRITE_BITS(chip->cr0, chip_info->duplex,
				       SSP_CR0_MASK_HALFDUP_ST, 5);
			SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len,
				       SSP_CR0_MASK_CSS_ST, 16);
			SSP_WRITE_BITS(chip->cr0, chip_info->iface,
				       SSP_CR0_MASK_FRF_ST, 21);
			SSP_WRITE_BITS(chip->cr1, chip_info->wait_state,
				       SSP_CR1_MASK_MWAIT_ST, 6);
		}
		SSP_WRITE_BITS(chip->cr0, bits - 1,
			       SSP_CR0_MASK_DSS_ST, 0);

		if (spi->mode & SPI_LSB_FIRST) {
			tmp = SSP_RX_LSB;
			etx = SSP_TX_LSB;
		} else {
			tmp = SSP_RX_MSB;
			etx = SSP_TX_MSB;
		}
		SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4);
		SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5);
		SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig,
			       SSP_CR1_MASK_RXIFLSEL_ST, 7);
		SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig,
			       SSP_CR1_MASK_TXIFLSEL_ST, 10);
	} else {
		SSP_WRITE_BITS(chip->cr0, bits - 1,
			       SSP_CR0_MASK_DSS, 0);
		SSP_WRITE_BITS(chip->cr0, chip_info->iface,
			       SSP_CR0_MASK_FRF, 4);
	}

	/* Stuff that is common for all versions */
	if (spi->mode & SPI_CPOL)
		tmp = SSP_CLK_POL_IDLE_HIGH;
	else
		tmp = SSP_CLK_POL_IDLE_LOW;
	SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6);

	if (spi->mode & SPI_CPHA)
		tmp = SSP_CLK_SECOND_EDGE;
	else
		tmp = SSP_CLK_FIRST_EDGE;
	SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7);

	SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8);
	/* Loopback is available on all versions except PL023 */
	if (pl022->vendor->loopback) {
		if (spi->mode & SPI_LOOP)
			tmp = LOOPBACK_ENABLED;
		else
			tmp = LOOPBACK_DISABLED;
		SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0);
	}
	SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
	SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
	SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3);

	/* Save config structure internally */
	spi_set_ctldata(spi, chip);
	return status;
 err_config_params:
	spi_set_ctldata(spi, NULL);
	kfree(chip);
	return status;
}
2017
2018
2019
2020
2021
2022
2023
2024
2025static void pl022_cleanup(struct spi_device *spi)
2026{
2027 struct chip_data *chip = spi_get_ctldata(spi);
2028
2029 spi_set_ctldata(spi, NULL);
2030 kfree(chip);
2031}
2032
2033
/**
 * pl022_probe - AMBA bus probe for a PL022/PL023 SSP block
 * @adev: the AMBA device being probed
 * @id: matched table entry; id->data points at the vendor_data variant
 *
 * Allocates the SPI master, maps registers, acquires clock and IRQ,
 * optionally probes DMA channels, then initializes and starts the
 * message queue and registers with the SPI core. On any failure the
 * goto chain unwinds exactly the resources acquired so far.
 */
static int __devinit
pl022_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct device *dev = &adev->dev;
	struct pl022_ssp_controller *platform_info = adev->dev.platform_data;
	struct spi_master *master;
	struct pl022 *pl022 = NULL;	/* Data for this driver */
	int status = 0;

	dev_info(&adev->dev,
		 "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid);
	if (platform_info == NULL) {
		dev_err(&adev->dev, "probe - no platform data supplied\n");
		status = -ENODEV;
		goto err_no_pdata;
	}

	/* Allocate master with space for data */
	master = spi_alloc_master(dev, sizeof(struct pl022));
	if (master == NULL) {
		dev_err(&adev->dev, "probe - cannot alloc SPI master\n");
		status = -ENOMEM;
		goto err_no_master;
	}

	pl022 = spi_master_get_devdata(master);
	pl022->master = master;
	pl022->master_info = platform_info;
	pl022->adev = adev;
	pl022->vendor = id->data;

	/*
	 * Bus number which has been assigned to this SSP controller
	 * on this board
	 */
	master->bus_num = platform_info->bus_id;
	master->num_chipselect = platform_info->num_chipselect;
	master->cleanup = pl022_cleanup;
	master->setup = pl022_setup;
	master->transfer = pl022_transfer;

	/*
	 * Supported modes; LSB-first transfers are only possible on the
	 * ST variants with extended control registers.
	 */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	if (pl022->vendor->extended_cr)
		master->mode_bits |= SPI_LSB_FIRST;

	dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num);

	status = amba_request_regions(adev, NULL);
	if (status)
		goto err_no_ioregion;

	pl022->phybase = adev->res.start;
	pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res));
	if (pl022->virtbase == NULL) {
		status = -ENOMEM;
		goto err_no_ioremap;
	}
	printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
	       adev->res.start, pl022->virtbase);

	pl022->clk = clk_get(&adev->dev, NULL);
	if (IS_ERR(pl022->clk)) {
		status = PTR_ERR(pl022->clk);
		dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n");
		goto err_no_clk;
	}

	/* Disable SSP (clear SSE) and load a known default configuration */
	writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
	       SSP_CR1(pl022->virtbase));
	load_ssp_default_config(pl022);

	status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022",
			     pl022);
	if (status < 0) {
		dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status);
		goto err_no_irq;
	}

	/* Get DMA channels; fall back to interrupt mode on failure */
	if (platform_info->enable_dma) {
		status = pl022_dma_probe(pl022);
		if (status != 0)
			platform_info->enable_dma = 0;
	}

	/* Initialize and start queue */
	status = init_queue(pl022);
	if (status != 0) {
		dev_err(&adev->dev, "probe - problem initializing queue\n");
		goto err_init_queue;
	}
	status = start_queue(pl022);
	if (status != 0) {
		dev_err(&adev->dev, "probe - problem starting queue\n");
		goto err_start_queue;
	}

	amba_set_drvdata(adev, pl022);
	status = spi_register_master(master);
	if (status != 0) {
		dev_err(&adev->dev,
			"probe - problem registering spi master\n");
		goto err_spi_register;
	}
	dev_dbg(dev, "probe succeeded\n");
	/*
	 * Disable the silicon block pclk and the voltage domain and just
	 * power it up and clock it when it's needed (in pump_messages).
	 */
	amba_pclk_disable(adev);
	amba_vcore_disable(adev);
	return 0;

 err_spi_register:
 err_start_queue:
 err_init_queue:
	destroy_queue(pl022);
	pl022_dma_remove(pl022);
	free_irq(adev->irq[0], pl022);
 err_no_irq:
	clk_put(pl022->clk);
 err_no_clk:
	iounmap(pl022->virtbase);
 err_no_ioremap:
	amba_release_regions(adev);
 err_no_ioregion:
	spi_master_put(master);
 err_no_master:
 err_no_pdata:
	return status;
}
2170
/**
 * pl022_remove - AMBA bus remove; undo everything pl022_probe() did
 * @adev: the AMBA device being removed
 *
 * Drains and destroys the message queue first (failing the remove if it
 * cannot), then releases IRQ, DMA, clock and MMIO resources and finally
 * unregisters the SPI master.
 *
 * NOTE(review): resources (IRQ, clock, iounmap) are released before
 * spi_unregister_master() — confirm no in-flight core callbacks can
 * touch the hardware at that point.
 */
static int __devexit
pl022_remove(struct amba_device *adev)
{
	struct pl022 *pl022 = amba_get_drvdata(adev);
	int status = 0;
	if (!pl022)
		return 0;

	/* Remove the queue */
	status = destroy_queue(pl022);
	if (status != 0) {
		dev_err(&adev->dev,
			"queue remove failed (%d)\n", status);
		return status;
	}
	load_ssp_default_config(pl022);
	pl022_dma_remove(pl022);
	free_irq(adev->irq[0], pl022);
	clk_disable(pl022->clk);
	clk_put(pl022->clk);
	iounmap(pl022->virtbase);
	amba_release_regions(adev);
	tasklet_disable(&pl022->pump_transfers);
	spi_unregister_master(pl022->master);
	spi_master_put(pl022->master);
	amba_set_drvdata(adev, NULL);
	dev_dbg(&adev->dev, "remove succeeded\n");
	return 0;
}
2200
2201#ifdef CONFIG_PM
/**
 * pl022_suspend - stop the queue and park the hardware in default state
 * @adev: the AMBA device
 * @state: PM transition (unused here)
 *
 * Refuses to suspend (returns stop_queue()'s error) if the message queue
 * cannot be drained. The block is powered/clocked only long enough to
 * load the default register configuration.
 */
static int pl022_suspend(struct amba_device *adev, pm_message_t state)
{
	struct pl022 *pl022 = amba_get_drvdata(adev);
	int status = 0;

	status = stop_queue(pl022);
	if (status) {
		dev_warn(&adev->dev, "suspend cannot stop queue\n");
		return status;
	}

	/* Power up briefly just to reset the block to defaults */
	amba_vcore_enable(adev);
	amba_pclk_enable(adev);
	load_ssp_default_config(pl022);
	amba_pclk_disable(adev);
	amba_vcore_disable(adev);
	dev_dbg(&adev->dev, "suspended\n");
	return 0;
}
2221
2222static int pl022_resume(struct amba_device *adev)
2223{
2224 struct pl022 *pl022 = amba_get_drvdata(adev);
2225 int status = 0;
2226
2227
2228 status = start_queue(pl022);
2229 if (status)
2230 dev_err(&adev->dev, "problem starting queue (%d)\n", status);
2231 else
2232 dev_dbg(&adev->dev, "resumed\n");
2233
2234 return status;
2235}
2236#else
2237#define pl022_suspend NULL
2238#define pl022_resume NULL
2239#endif
2240
/* Capabilities of the original ARM PL022: 8-deep FIFO, max 16 bpw */
static struct vendor_data vendor_arm = {
	.fifodepth = 8,
	.max_bpw = 16,
	.unidir = false,
	.extended_cr = false,
	.pl023 = false,
	.loopback = true,
};
2249
2250
/* ST Micro derivate: deeper FIFO, 32 bpw, extended control registers */
static struct vendor_data vendor_st = {
	.fifodepth = 32,
	.max_bpw = 32,
	.unidir = false,
	.extended_cr = true,
	.pl023 = false,
	.loopback = true,
};
2259
/* ST PL023 variant: like the ST PL022 but no loopback mode */
static struct vendor_data vendor_st_pl023 = {
	.fifodepth = 32,
	.max_bpw = 32,
	.unidir = false,
	.extended_cr = true,
	.pl023 = true,
	.loopback = false,
};
2268
/* DB5500 PL023: PL023 feature set but with loopback available */
static struct vendor_data vendor_db5500_pl023 = {
	.fifodepth = 32,
	.max_bpw = 32,
	.unidir = false,
	.extended_cr = true,
	.pl023 = true,
	.loopback = true,
};
2277
/* AMBA peripheral ID table mapping silicon variants to vendor_data */
static struct amba_id pl022_ids[] = {
	{
		/*
		 * ARM PL022 variant, this has a 16bit wide
		 * and 8 locations deep TX/RX FIFO
		 */
		.id = 0x00041022,
		.mask = 0x000fffff,
		.data = &vendor_arm,
	},
	{
		/*
		 * ST Micro derivate, this has 32bit wide
		 * and 32 locations deep TX/RX FIFO
		 */
		.id = 0x01080022,
		.mask = 0xffffffff,
		.data = &vendor_st,
	},
	{
		/*
		 * ST-Ericsson derivate "PL023" (this is not
		 * an official PL022 variant number), stripped-down
		 * SPI-only block with 32bit wide and 32 locations
		 * deep TX/RX FIFO but no extended Microwire/TI modes
		 */
		.id = 0x00080023,
		.mask = 0xffffffff,
		.data = &vendor_st_pl023,
	},
	{
		/* DB5500 flavour of the PL023 */
		.id = 0x10080023,
		.mask = 0xffffffff,
		.data = &vendor_db5500_pl023,
	},
	{ 0, 0 },
};
2316
/* AMBA driver glue binding the ID table to the probe/remove/PM hooks */
static struct amba_driver pl022_driver = {
	.drv = {
		.name	= "ssp-pl022",
	},
	.id_table	= pl022_ids,
	.probe		= pl022_probe,
	.remove		= __devexit_p(pl022_remove),
	.suspend	= pl022_suspend,
	.resume		= pl022_resume,
};
2327
2328
static int __init pl022_init(void)
{
	return amba_driver_register(&pl022_driver);
}
/*
 * subsys_initcall (not module_init): SPI masters should come up early
 * so boot-time devices sitting on the bus can be probed.
 */
subsys_initcall(pl022_init);
2335
/* Module unload: unregister from the AMBA bus */
static void __exit pl022_exit(void)
{
	amba_driver_unregister(&pl022_driver);
}

module_exit(pl022_exit);

MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
MODULE_DESCRIPTION("PL022 SSP Controller Driver");
MODULE_LICENSE("GPL");
2346