1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/init.h>
34#include <linux/module.h>
35#include <linux/device.h>
36#include <linux/ioport.h>
37#include <linux/errno.h>
38#include <linux/interrupt.h>
39#include <linux/spi/spi.h>
40#include <linux/workqueue.h>
41#include <linux/delay.h>
42#include <linux/clk.h>
43#include <linux/err.h>
44#include <linux/amba/bus.h>
45#include <linux/amba/pl022.h>
46#include <linux/io.h>
47
48
49
50
51
52
/*
 * SSP_WRITE_BITS - read-modify-write helper for building register images.
 * Clears the field selected by @mask inside @reg, then ORs in @val shifted
 * up by @sb (the field's start bit), masked so it cannot spill into
 * neighbouring fields. @reg is an lvalue holding a register image, not a
 * hardware access.
 */
#define SSP_WRITE_BITS(reg, val, mask, sb) \
 ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))

/*
 * GEN_MASK_BITS - generate a field value for a register image.
 * Shifts @val up to start bit @sb and truncates it to @mask; used to OR
 * several fields together when composing a full register default.
 */
#define GEN_MASK_BITS(val, mask, sb) \
 (((val)<<(sb)) & (mask))
63
64#define DRIVE_TX 0
65#define DO_NOT_DRIVE_TX 1
66
67#define DO_NOT_QUEUE_DMA 0
68#define QUEUE_DMA 1
69
70#define RX_TRANSFER 1
71#define TX_TRANSFER 2
72
73
74
75
76#define SSP_CR0(r) (r + 0x000)
77#define SSP_CR1(r) (r + 0x004)
78#define SSP_DR(r) (r + 0x008)
79#define SSP_SR(r) (r + 0x00C)
80#define SSP_CPSR(r) (r + 0x010)
81#define SSP_IMSC(r) (r + 0x014)
82#define SSP_RIS(r) (r + 0x018)
83#define SSP_MIS(r) (r + 0x01C)
84#define SSP_ICR(r) (r + 0x020)
85#define SSP_DMACR(r) (r + 0x024)
86#define SSP_ITCR(r) (r + 0x080)
87#define SSP_ITIP(r) (r + 0x084)
88#define SSP_ITOP(r) (r + 0x088)
89#define SSP_TDR(r) (r + 0x08C)
90
91#define SSP_PID0(r) (r + 0xFE0)
92#define SSP_PID1(r) (r + 0xFE4)
93#define SSP_PID2(r) (r + 0xFE8)
94#define SSP_PID3(r) (r + 0xFEC)
95
96#define SSP_CID0(r) (r + 0xFF0)
97#define SSP_CID1(r) (r + 0xFF4)
98#define SSP_CID2(r) (r + 0xFF8)
99#define SSP_CID3(r) (r + 0xFFC)
100
101
102
103
104#define SSP_CR0_MASK_DSS (0x1FUL << 0)
105#define SSP_CR0_MASK_HALFDUP (0x1UL << 5)
106#define SSP_CR0_MASK_SPO (0x1UL << 6)
107#define SSP_CR0_MASK_SPH (0x1UL << 7)
108#define SSP_CR0_MASK_SCR (0xFFUL << 8)
109#define SSP_CR0_MASK_CSS (0x1FUL << 16)
110#define SSP_CR0_MASK_FRF (0x3UL << 21)
111
112
113
114
115#define SSP_CR1_MASK_LBM (0x1UL << 0)
116#define SSP_CR1_MASK_SSE (0x1UL << 1)
117#define SSP_CR1_MASK_MS (0x1UL << 2)
118#define SSP_CR1_MASK_SOD (0x1UL << 3)
119#define SSP_CR1_MASK_RENDN (0x1UL << 4)
120#define SSP_CR1_MASK_TENDN (0x1UL << 5)
121#define SSP_CR1_MASK_MWAIT (0x1UL << 6)
122#define SSP_CR1_MASK_RXIFLSEL (0x7UL << 7)
123#define SSP_CR1_MASK_TXIFLSEL (0x7UL << 10)
124
125
126
127
128#define SSP_DR_MASK_DATA 0xFFFFFFFF
129
130
131
132
133#define SSP_SR_MASK_TFE (0x1UL << 0)
134#define SSP_SR_MASK_TNF (0x1UL << 1)
135#define SSP_SR_MASK_RNE (0x1UL << 2)
136#define SSP_SR_MASK_RFF (0x1UL << 3)
137#define SSP_SR_MASK_BSY (0x1UL << 4)
138
139
140
141
142#define SSP_CPSR_MASK_CPSDVSR (0xFFUL << 0)
143
144
145
146
147#define SSP_IMSC_MASK_RORIM (0x1UL << 0)
148#define SSP_IMSC_MASK_RTIM (0x1UL << 1)
149#define SSP_IMSC_MASK_RXIM (0x1UL << 2)
150#define SSP_IMSC_MASK_TXIM (0x1UL << 3)
151
152
153
154
155
156#define SSP_RIS_MASK_RORRIS (0x1UL << 0)
157
158#define SSP_RIS_MASK_RTRIS (0x1UL << 1)
159
160#define SSP_RIS_MASK_RXRIS (0x1UL << 2)
161
162#define SSP_RIS_MASK_TXRIS (0x1UL << 3)
163
164
165
166
167
168#define SSP_MIS_MASK_RORMIS (0x1UL << 0)
169
170#define SSP_MIS_MASK_RTMIS (0x1UL << 1)
171
172#define SSP_MIS_MASK_RXMIS (0x1UL << 2)
173
174#define SSP_MIS_MASK_TXMIS (0x1UL << 3)
175
176
177
178
179
180#define SSP_ICR_MASK_RORIC (0x1UL << 0)
181
182#define SSP_ICR_MASK_RTIC (0x1UL << 1)
183
184
185
186
187
188#define SSP_DMACR_MASK_RXDMAE (0x1UL << 0)
189
190#define SSP_DMACR_MASK_TXDMAE (0x1UL << 1)
191
192
193
194
195#define SSP_ITCR_MASK_ITEN (0x1UL << 0)
196#define SSP_ITCR_MASK_TESTFIFO (0x1UL << 1)
197
198
199
200
201#define ITIP_MASK_SSPRXD (0x1UL << 0)
202#define ITIP_MASK_SSPFSSIN (0x1UL << 1)
203#define ITIP_MASK_SSPCLKIN (0x1UL << 2)
204#define ITIP_MASK_RXDMAC (0x1UL << 3)
205#define ITIP_MASK_TXDMAC (0x1UL << 4)
206#define ITIP_MASK_SSPTXDIN (0x1UL << 5)
207
208
209
210
211#define ITOP_MASK_SSPTXD (0x1UL << 0)
212#define ITOP_MASK_SSPFSSOUT (0x1UL << 1)
213#define ITOP_MASK_SSPCLKOUT (0x1UL << 2)
214#define ITOP_MASK_SSPOEn (0x1UL << 3)
215#define ITOP_MASK_SSPCTLOEn (0x1UL << 4)
216#define ITOP_MASK_RORINTR (0x1UL << 5)
217#define ITOP_MASK_RTINTR (0x1UL << 6)
218#define ITOP_MASK_RXINTR (0x1UL << 7)
219#define ITOP_MASK_TXINTR (0x1UL << 8)
220#define ITOP_MASK_INTR (0x1UL << 9)
221#define ITOP_MASK_RXDMABREQ (0x1UL << 10)
222#define ITOP_MASK_RXDMASREQ (0x1UL << 11)
223#define ITOP_MASK_TXDMABREQ (0x1UL << 12)
224#define ITOP_MASK_TXDMASREQ (0x1UL << 13)
225
226
227
228
229#define TDR_MASK_TESTDATA (0xFFFFFFFF)
230
231
232
233
234
235
236
237#define STATE_START ((void *) 0)
238#define STATE_RUNNING ((void *) 1)
239#define STATE_DONE ((void *) 2)
240#define STATE_ERROR ((void *) -1)
241
242
243
244
245#define QUEUE_RUNNING (0)
246#define QUEUE_STOPPED (1)
247
248
249
250#define SSP_DISABLED (0)
251#define SSP_ENABLED (1)
252
253
254
255
256#define SSP_DMA_DISABLED (0)
257#define SSP_DMA_ENABLED (1)
258
259
260
261
262#define NMDK_SSP_DEFAULT_CLKRATE 0x2
263#define NMDK_SSP_DEFAULT_PRESCALE 0x40
264
265
266
267
268#define CPSDVR_MIN 0x02
269#define CPSDVR_MAX 0xFE
270#define SCR_MIN 0x00
271#define SCR_MAX 0xFF
272
273
274
275
276#define DEFAULT_SSP_REG_IMSC 0x0UL
277#define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC
278#define ENABLE_ALL_INTERRUPTS (~DEFAULT_SSP_REG_IMSC)
279
280#define CLEAR_ALL_INTERRUPTS 0x3
281
282
283
284
285
286enum ssp_reading {
287 READING_NULL,
288 READING_U8,
289 READING_U16,
290 READING_U32
291};
292
293
294
295
296enum ssp_writing {
297 WRITING_NULL,
298 WRITING_U8,
299 WRITING_U16,
300 WRITING_U32
301};
302
303
304
305
306
307
308
309
/**
 * struct vendor_data - vendor-specific capabilities of a PL022 variant
 * @fifodepth: depth of the TX/RX FIFOs in entries
 * @max_bpw: maximum supported bits per word
 * @unidir: true if the hardware supports unidirectional mode
 *	(checked in verify_controller_parameters())
 */
struct vendor_data {
	int fifodepth;
	int max_bpw;
	bool unidir;
};
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
/**
 * struct pl022 - runtime state for one SSP controller instance
 * @adev: the AMBA device this driver instance is bound to
 * @vendor: vendor-specific capability data for this silicon variant
 * @phybase: physical base address of the controller registers
 * @virtbase: ioremapped virtual base of the controller registers
 * @clk: the controller clock; enabled in pump_messages(), disabled
 *	again in giveback()
 * @master: the SPI framework master structure
 * @master_info: board/platform data for this controller
 * @workqueue: single-threaded workqueue running pump_messages()
 * @pump_messages: work item that dequeues and starts the next message
 * @queue_lock: protects @queue, @busy, @run and the cur_* handover
 * @queue: list of spi_message structures waiting to be processed
 * @busy: nonzero while a message is being processed
 * @run: QUEUE_RUNNING or QUEUE_STOPPED
 * @pump_transfers: tasklet advancing through the current message's transfers
 * @cur_msg: message currently being processed, or NULL
 * @cur_transfer: transfer currently in flight, or NULL
 * @cur_chip: per-chip state for the device addressed by @cur_msg
 * @tx: cursor into the current transfer's TX buffer
 * @tx_end: one past the end of the current TX window
 * @rx: cursor into the current transfer's RX buffer
 * @rx_end: one past the end of the current RX window
 * @read: FIFO access width used when draining RX for this transfer
 * @write: FIFO access width used when filling TX for this transfer
 */
struct pl022 {
	struct amba_device *adev;
	struct vendor_data *vendor;
	resource_size_t phybase;
	void __iomem *virtbase;
	struct clk *clk;
	struct spi_master *master;
	struct pl022_ssp_controller *master_info;
	/* Driver message queue */
	struct workqueue_struct *workqueue;
	struct work_struct pump_messages;
	spinlock_t queue_lock;
	struct list_head queue;
	int busy;
	int run;
	/* Message transfer pump */
	struct tasklet_struct pump_transfers;
	struct spi_message *cur_msg;
	struct spi_transfer *cur_transfer;
	struct chip_data *cur_chip;
	void *tx;
	void *tx_end;
	void *rx;
	void *rx_end;
	enum ssp_reading read;
	enum ssp_writing write;
};
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
/**
 * struct chip_data - precomputed per-SPI-device configuration
 * @cr0: image for the SSP Control Register 0 (restored per message)
 * @cr1: image for the SSP Control Register 1
 * @dmacr: image for the DMA control register
 * @cpsr: image for the clock prescale register
 * @n_bytes: bytes per bus word (cursor stride through the buffers)
 * @enable_dma: set if DMA should be used for this chip
 * @read: RX FIFO access width for this chip
 * @write: TX FIFO access width for this chip
 * @cs_control: board-specific chip select hook; defaults to
 *	null_cs_control() when none is supplied
 * @xfer_type: POLLING_TRANSFER, INTERRUPT_TRANSFER or DMA_TRANSFER
 */
struct chip_data {
	u16 cr0;
	u16 cr1;
	u16 dmacr;
	u16 cpsr;
	u8 n_bytes;
	u8 enable_dma:1;
	enum ssp_reading read;
	enum ssp_writing write;
	void (*cs_control) (u32 command);
	int xfer_type;
};
396
397
398
399
400
401
402
403
404static void null_cs_control(u32 command)
405{
406 pr_debug("pl022: dummy chip select control, CS=0x%x\n", command);
407}
408
409
410
411
412
413
414
/**
 * giveback - finish the current message and hand the controller back
 * @pl022: driver state
 *
 * Detaches the current message/transfer/chip from the driver state under
 * the queue lock, re-arms pump_messages() to fetch the next message,
 * applies any trailing delay, handles the final chip select state, calls
 * the message's completion callback and gates the controller clock off.
 */
static void giveback(struct pl022 *pl022)
{
	struct spi_transfer *last_transfer;
	unsigned long flags;
	struct spi_message *msg;
	void (*curr_cs_control) (u32 command);

	/*
	 * Grab the chip select hook before cur_chip is cleared below;
	 * we may still need to deselect the chip after completion.
	 */
	curr_cs_control = pl022->cur_chip->cs_control;
	spin_lock_irqsave(&pl022->queue_lock, flags);
	msg = pl022->cur_msg;
	pl022->cur_msg = NULL;
	pl022->cur_transfer = NULL;
	pl022->cur_chip = NULL;
	queue_work(pl022->workqueue, &pl022->pump_messages);
	spin_unlock_irqrestore(&pl022->queue_lock, flags);

	last_transfer = list_entry(msg->transfers.prev,
				   struct spi_transfer,
				   transfer_list);

	/* Delay if requested before any change in chip select */
	if (last_transfer->delay_usecs)
		/*
		 * NOTE(review): this can run from tasklet context on the
		 * interrupt-driven path, so the busy-wait delay holds up
		 * softirq processing — confirm acceptable for the boards
		 * using this driver.
		 */
		udelay(last_transfer->delay_usecs);

	/*
	 * Drop chip select UNLESS cs_change is true; in that case keep
	 * it asserted only if the next queued message is for the same
	 * chip and this message completed without error.
	 */
	if (!last_transfer->cs_change)
		curr_cs_control(SSP_CHIP_DESELECT);
	else {
		struct spi_message *next_msg;

		/* Peek at the head of the queue (may race with new
		 * submissions, hence the lock). */
		spin_lock_irqsave(&pl022->queue_lock, flags);
		if (list_empty(&pl022->queue))
			next_msg = NULL;
		else
			next_msg = list_entry(pl022->queue.next,
					      struct spi_message, queue);
		spin_unlock_irqrestore(&pl022->queue_lock, flags);

		/* Only hold chip select for a follow-on message to the
		 * same device */
		if (next_msg && next_msg->spi != msg->spi)
			next_msg = NULL;
		if (!next_msg || msg->state == STATE_ERROR)
			curr_cs_control(SSP_CHIP_DESELECT);
	}
	msg->state = NULL;
	if (msg->complete)
		msg->complete(msg->context);
	/* This message is completed, so gate the controller clock off */
	clk_disable(pl022->clk);
}
490
491
492
493
494
/**
 * flush - drain the RX FIFO and wait for the controller to go idle
 * @pl022: driver state
 *
 * Repeatedly empties the receive FIFO while the controller reports busy,
 * bounded by a loop budget of two jiffies' worth of iterations.
 *
 * Returns the remaining loop budget. NOTE(review): on budget exhaustion
 * the post-decrement wraps the unsigned counter, so a timeout cannot be
 * distinguished from success via the return value; all callers in this
 * file ignore it anyway.
 */
static int flush(struct pl022 *pl022)
{
	unsigned long limit = loops_per_jiffy << 1;

	dev_dbg(&pl022->adev->dev, "flush\n");
	do {
		/* Discard everything currently in the RX FIFO */
		while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
			readw(SSP_DR(pl022->virtbase));
	} while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--);
	return limit;
}
506
507
508
509
510
/**
 * restore_state - program the current chip's configuration into hardware
 * @pl022: driver state (cur_chip must be valid)
 *
 * Loads the precomputed CR0/CR1/DMACR/CPSR images for the chip addressed
 * by the current message, then masks and clears all interrupts so the
 * controller starts from a quiescent state.
 */
static void restore_state(struct pl022 *pl022)
{
	struct chip_data *chip = pl022->cur_chip;

	writew(chip->cr0, SSP_CR0(pl022->virtbase));
	writew(chip->cr1, SSP_CR1(pl022->virtbase));
	writew(chip->dmacr, SSP_DMACR(pl022->virtbase));
	writew(chip->cpsr, SSP_CPSR(pl022->virtbase));
	writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
	writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
}
522
523
524
525
526
527
528
529
530
/*
 * Default SSP Register Values - register images used by
 * load_ssp_default_config() to put the controller into a known state:
 * Motorola SPI mode, 12 data bits, master, SSP disabled, all DMA off.
 */

/* Control Register 0: frame format, word size, clock rate/polarity/phase */
#define DEFAULT_SSP_REG_CR0 ( \
	GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0)	| \
	GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP, 5) | \
	GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
	GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
	GEN_MASK_BITS(NMDK_SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
	GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS, 16)	| \
	GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 21) \
)

/* Control Register 1: loopback off, SSP disabled, master, FIFO triggers */
#define DEFAULT_SSP_REG_CR1 ( \
	GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \
	GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
	GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
	GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \
	GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN, 4) | \
	GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN, 5) | \
	GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT, 6) |\
	GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL, 7) | \
	GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL, 10) \
)

/* Clock prescale register: default prescale divisor */
#define DEFAULT_SSP_REG_CPSR ( \
	GEN_MASK_BITS(NMDK_SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
)

/* DMA control register: both RX and TX DMA disabled */
#define DEFAULT_SSP_REG_DMACR (\
	GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \
	GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \
)
561
562
/**
 * load_ssp_default_config - reset the controller to the driver defaults
 * @pl022: driver state
 *
 * Writes the DEFAULT_SSP_REG_* images (SSP disabled, Motorola SPI mode,
 * DMA off) and masks/clears all interrupts.
 */
static void load_ssp_default_config(struct pl022 *pl022)
{
	writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
	writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
	writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase));
	writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase));
	writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
	writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
}
572
573
574
575
576
/**
 * readwriter - FIFO fill/drain engine shared by IRQ and polled paths
 * @pl022: driver state
 *
 * First drains the RX FIFO as far as the current RX window allows, then
 * fills the TX FIFO while it has room, draining RX again after every
 * word written so the RX FIFO cannot overrun while TX is being stuffed.
 * The rx/tx cursors advance by cur_chip->n_bytes per FIFO word; the
 * access width (8/16/32-bit, or discard/zero-fill for NULL buffers) is
 * selected by pl022->read / pl022->write.
 */
static void readwriter(struct pl022 *pl022)
{
	dev_dbg(&pl022->adev->dev,
		"%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
		__func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end);

	/* Drain whatever is already waiting in the RX FIFO */
	while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
	       && (pl022->rx < pl022->rx_end)) {
		switch (pl022->read) {
		case READING_NULL:
			/* Discard: no RX buffer for this transfer */
			readw(SSP_DR(pl022->virtbase));
			break;
		case READING_U8:
			*(u8 *) (pl022->rx) =
				readw(SSP_DR(pl022->virtbase)) & 0xFFU;
			break;
		case READING_U16:
			*(u16 *) (pl022->rx) =
				(u16) readw(SSP_DR(pl022->virtbase));
			break;
		case READING_U32:
			*(u32 *) (pl022->rx) =
				readl(SSP_DR(pl022->virtbase));
			break;
		}
		pl022->rx += (pl022->cur_chip->n_bytes);
	}

	/* Fill the TX FIFO while there is room and data left to send */
	while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF)
	       && (pl022->tx < pl022->tx_end)) {
		switch (pl022->write) {
		case WRITING_NULL:
			/* Drive zeroes: no TX buffer for this transfer */
			writew(0x0, SSP_DR(pl022->virtbase));
			break;
		case WRITING_U8:
			writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase));
			break;
		case WRITING_U16:
			writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase));
			break;
		case WRITING_U32:
			writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase));
			break;
		}
		pl022->tx += (pl022->cur_chip->n_bytes);

		/*
		 * Drain RX after every TX word so the receive FIFO cannot
		 * overrun while we keep pushing transmit data.
		 */
		while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
		       && (pl022->rx < pl022->rx_end)) {
			switch (pl022->read) {
			case READING_NULL:
				readw(SSP_DR(pl022->virtbase));
				break;
			case READING_U8:
				*(u8 *) (pl022->rx) =
					readw(SSP_DR(pl022->virtbase)) & 0xFFU;
				break;
			case READING_U16:
				*(u16 *) (pl022->rx) =
					(u16) readw(SSP_DR(pl022->virtbase));
				break;
			case READING_U32:
				*(u32 *) (pl022->rx) =
					readl(SSP_DR(pl022->virtbase));
				break;
			}
			pl022->rx += (pl022->cur_chip->n_bytes);
		}
	}
	/*
	 * When this function returns there may still be words in flight;
	 * the caller (IRQ handler or polling loop) invokes us again until
	 * the rx cursor reaches rx_end.
	 */
}
670
671
672
673
674
675
676
677
678
679
680
681static void *next_transfer(struct pl022 *pl022)
682{
683 struct spi_message *msg = pl022->cur_msg;
684 struct spi_transfer *trans = pl022->cur_transfer;
685
686
687 if (trans->transfer_list.next != &msg->transfers) {
688 pl022->cur_transfer =
689 list_entry(trans->transfer_list.next,
690 struct spi_transfer, transfer_list);
691 return STATE_RUNNING;
692 }
693 return STATE_DONE;
694}
695
696
697
698
699
700
701
702
703
704
705
706static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
707{
708 struct pl022 *pl022 = dev_id;
709 struct spi_message *msg = pl022->cur_msg;
710 u16 irq_status = 0;
711 u16 flag = 0;
712
713 if (unlikely(!msg)) {
714 dev_err(&pl022->adev->dev,
715 "bad message state in interrupt handler");
716
717 return IRQ_HANDLED;
718 }
719
720
721 irq_status = readw(SSP_MIS(pl022->virtbase));
722
723 if (unlikely(!irq_status))
724 return IRQ_NONE;
725
726
727 if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) {
728
729
730
731
732 dev_err(&pl022->adev->dev,
733 "FIFO overrun\n");
734 if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF)
735 dev_err(&pl022->adev->dev,
736 "RXFIFO is full\n");
737 if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF)
738 dev_err(&pl022->adev->dev,
739 "TXFIFO is full\n");
740
741
742
743
744
745
746 writew(DISABLE_ALL_INTERRUPTS,
747 SSP_IMSC(pl022->virtbase));
748 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
749 writew((readw(SSP_CR1(pl022->virtbase)) &
750 (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
751 msg->state = STATE_ERROR;
752
753
754 tasklet_schedule(&pl022->pump_transfers);
755 return IRQ_HANDLED;
756 }
757
758 readwriter(pl022);
759
760 if ((pl022->tx == pl022->tx_end) && (flag == 0)) {
761 flag = 1;
762
763 writew(readw(SSP_IMSC(pl022->virtbase)) &
764 (~SSP_IMSC_MASK_TXIM),
765 SSP_IMSC(pl022->virtbase));
766 }
767
768
769
770
771
772
773 if (pl022->rx >= pl022->rx_end) {
774 writew(DISABLE_ALL_INTERRUPTS,
775 SSP_IMSC(pl022->virtbase));
776 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
777 if (unlikely(pl022->rx > pl022->rx_end)) {
778 dev_warn(&pl022->adev->dev, "read %u surplus "
779 "bytes (did you request an odd "
780 "number of bytes on a 16bit bus?)\n",
781 (u32) (pl022->rx - pl022->rx_end));
782 }
783
784 msg->actual_length += pl022->cur_transfer->len;
785 if (pl022->cur_transfer->cs_change)
786 pl022->cur_chip->
787 cs_control(SSP_CHIP_DESELECT);
788
789 msg->state = next_transfer(pl022);
790 tasklet_schedule(&pl022->pump_transfers);
791 return IRQ_HANDLED;
792 }
793
794 return IRQ_HANDLED;
795}
796
797
798
799
800
801static int set_up_next_transfer(struct pl022 *pl022,
802 struct spi_transfer *transfer)
803{
804 int residue;
805
806
807 residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes;
808 if (unlikely(residue != 0)) {
809 dev_err(&pl022->adev->dev,
810 "message of %u bytes to transmit but the current "
811 "chip bus has a data width of %u bytes!\n",
812 pl022->cur_transfer->len,
813 pl022->cur_chip->n_bytes);
814 dev_err(&pl022->adev->dev, "skipping this message\n");
815 return -EIO;
816 }
817 pl022->tx = (void *)transfer->tx_buf;
818 pl022->tx_end = pl022->tx + pl022->cur_transfer->len;
819 pl022->rx = (void *)transfer->rx_buf;
820 pl022->rx_end = pl022->rx + pl022->cur_transfer->len;
821 pl022->write =
822 pl022->tx ? pl022->cur_chip->write : WRITING_NULL;
823 pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL;
824 return 0;
825}
826
827
828
829
830
831
832
/**
 * pump_transfers - tasklet body advancing through the current message
 * @data: the struct pl022 for this controller, cast to unsigned long
 *
 * Terminal states (STATE_ERROR/STATE_DONE) complete the message via
 * giveback(). Otherwise: applies the previous transfer's delay and
 * cs_change handling when mid-message, sets up the next transfer's FIFO
 * window, flushes the FIFOs and unmasks all interrupts so the IRQ
 * handler drives the transfer (interrupt-driven path only; the polling
 * and DMA paths do not use this tasklet).
 */
static void pump_transfers(unsigned long data)
{
	struct pl022 *pl022 = (struct pl022 *) data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;

	/* Get current state information */
	message = pl022->cur_msg;
	transfer = pl022->cur_transfer;

	/* Handle for abort */
	if (message->state == STATE_ERROR) {
		message->status = -EIO;
		giveback(pl022);
		return;
	}

	/* Handle end of message */
	if (message->state == STATE_DONE) {
		message->status = 0;
		giveback(pl022);
		return;
	}

	/* Delay and possibly reselect chip if this is not the first
	 * transfer of the message */
	if (message->state == STATE_RUNNING) {
		previous = list_entry(transfer->transfer_list.prev,
				      struct spi_transfer,
				      transfer_list);
		if (previous->delay_usecs)
			/*
			 * NOTE(review): busy-wait in tasklet (softirq)
			 * context — confirm acceptable latency-wise.
			 */
			udelay(previous->delay_usecs);

		/* Reselect chip select only if cs_change was requested */
		if (previous->cs_change)
			pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
	} else {
		/* STATE_START: first transfer of the message */
		message->state = STATE_RUNNING;
	}

	if (set_up_next_transfer(pl022, transfer)) {
		message->state = STATE_ERROR;
		message->status = -EIO;
		giveback(pl022);
		return;
	}
	/* Flush the FIFOs and let the interrupt handler take over */
	flush(pl022);
	writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
}
888
889
890
891
892
893
894
895static int configure_dma(void *data)
896{
897 struct pl022 *pl022 = data;
898 dev_dbg(&pl022->adev->dev, "configure DMA\n");
899 return -ENOTSUPP;
900}
901
902
903
904
905
906
907
/**
 * do_dma_transfer - start the current message using DMA
 * @data: the struct pl022 for this controller
 *
 * Currently always fails: configure_dma() returns -ENOTSUPP, so the
 * message is completed with -EIO. The rest of the body (chip select,
 * transfer setup, enabling the SSP) is the intended flow once DMA is
 * implemented.
 */
static void do_dma_transfer(void *data)
{
	struct pl022 *pl022 = data;

	if (configure_dma(data)) {
		dev_dbg(&pl022->adev->dev, "configuration of DMA Failed!\n");
		goto err_config_dma;
	}

	/* Assert chip select and map the first transfer */
	pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
	if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
		/* Error path */
		pl022->cur_msg->state = STATE_ERROR;
		pl022->cur_msg->status = -EIO;
		giveback(pl022);
		return;
	}
	/* Enable SSP */
	writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
	       SSP_CR1(pl022->virtbase));

	return;

 err_config_dma:
	pl022->cur_msg->state = STATE_ERROR;
	pl022->cur_msg->status = -EIO;
	giveback(pl022);
	return;
}
941
/**
 * do_interrupt_transfer - start the current message, interrupt-driven
 * @data: the struct pl022 for this controller
 *
 * Asserts chip select, maps the first transfer into the FIFO cursors,
 * enables the SSP and unmasks all interrupts; from here the transfer is
 * driven by pl022_interrupt_handler() and the pump_transfers tasklet.
 */
static void do_interrupt_transfer(void *data)
{
	struct pl022 *pl022 = data;

	/* Assert chip select */
	pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
	if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
		/* Error path */
		pl022->cur_msg->state = STATE_ERROR;
		pl022->cur_msg->status = -EIO;
		giveback(pl022);
		return;
	}
	/* Enable SSP, turn on interrupts */
	writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
	       SSP_CR1(pl022->virtbase));
	writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
}
960
/**
 * do_polling_transfer - run the whole current message by busy-waiting
 * @data: the struct pl022 for this controller
 *
 * Walks the message's transfer list, handling delay/cs_change between
 * transfers, and drives each transfer to completion by calling
 * readwriter() in a loop. Completes the message (status 0 or -EIO) via
 * giveback().
 *
 * NOTE(review): the inner polling loop has no timeout — a wedged
 * controller would spin here forever.
 */
static void do_polling_transfer(void *data)
{
	struct pl022 *pl022 = data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	struct chip_data *chip;

	chip = pl022->cur_chip;
	message = pl022->cur_msg;

	while (message->state != STATE_DONE) {
		/* Handle for abort */
		if (message->state == STATE_ERROR)
			break;
		transfer = pl022->cur_transfer;

		/* Delay and possibly reselect chip if this is not the
		 * first transfer of the message */
		if (message->state == STATE_RUNNING) {
			previous =
			    list_entry(transfer->transfer_list.prev,
				       struct spi_transfer, transfer_list);
			if (previous->delay_usecs)
				udelay(previous->delay_usecs);
			if (previous->cs_change)
				pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
		} else {
			/* STATE_START: first transfer, assert chip select */
			message->state = STATE_RUNNING;
			pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
		}

		/* Configuration Changing Per Transfer */
		if (set_up_next_transfer(pl022, transfer)) {
			/* Error path */
			message->state = STATE_ERROR;
			break;
		}
		/* Flush FIFOs and enable SSP */
		flush(pl022);
		writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
		       SSP_CR1(pl022->virtbase));

		dev_dbg(&pl022->adev->dev, "POLLING TRANSFER ONGOING ... \n");
		/* FIXME: insert a timeout so we do not hang here forever */
		while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end)
			readwriter(pl022);

		/* Update total byte transferred */
		message->actual_length += pl022->cur_transfer->len;
		if (pl022->cur_transfer->cs_change)
			pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);
		/* Move to next transfer */
		message->state = next_transfer(pl022);
	}

	/* Handle end of message */
	if (message->state == STATE_DONE)
		message->status = 0;
	else
		message->status = -EIO;

	giveback(pl022);
	return;
}
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
/**
 * pump_messages - workqueue function fetching the next queued message
 * @work: the pump_messages work item embedded in struct pl022
 *
 * Under the queue lock: bails out if the queue is empty/stopped or a
 * message is already in flight, otherwise dequeues the head message and
 * marks the driver busy. Then enables the controller clock, programs the
 * addressed chip's register configuration, flushes the FIFOs and
 * dispatches to the polling, interrupt or DMA transfer engine according
 * to the chip's xfer_type.
 */
static void pump_messages(struct work_struct *work)
{
	struct pl022 *pl022 =
		container_of(work, struct pl022, pump_messages);
	unsigned long flags;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&pl022->queue_lock, flags);
	if (list_empty(&pl022->queue) || pl022->run == QUEUE_STOPPED) {
		pl022->busy = 0;
		spin_unlock_irqrestore(&pl022->queue_lock, flags);
		return;
	}
	/* Make sure we are not already running a message */
	if (pl022->cur_msg) {
		spin_unlock_irqrestore(&pl022->queue_lock, flags);
		return;
	}
	/* Extract head of queue */
	pl022->cur_msg =
		list_entry(pl022->queue.next, struct spi_message, queue);

	list_del_init(&pl022->cur_msg->queue);
	pl022->busy = 1;
	spin_unlock_irqrestore(&pl022->queue_lock, flags);

	/* Initial message state */
	pl022->cur_msg->state = STATE_START;
	pl022->cur_transfer = list_entry(pl022->cur_msg->transfers.next,
					 struct spi_transfer,
					 transfer_list);

	/* Setup the SPI using the per chip configuration */
	pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
	/*
	 * We enable the clock here, then the clock will be disabled when
	 * giveback() is called in each method (poll/interrupt/DMA)
	 */
	clk_enable(pl022->clk);
	restore_state(pl022);
	flush(pl022);

	if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
		do_polling_transfer(pl022);
	else if (pl022->cur_chip->xfer_type == INTERRUPT_TRANSFER)
		do_interrupt_transfer(pl022);
	else
		do_dma_transfer(pl022);
}
1086
1087
1088static int __init init_queue(struct pl022 *pl022)
1089{
1090 INIT_LIST_HEAD(&pl022->queue);
1091 spin_lock_init(&pl022->queue_lock);
1092
1093 pl022->run = QUEUE_STOPPED;
1094 pl022->busy = 0;
1095
1096 tasklet_init(&pl022->pump_transfers,
1097 pump_transfers, (unsigned long)pl022);
1098
1099 INIT_WORK(&pl022->pump_messages, pump_messages);
1100 pl022->workqueue = create_singlethread_workqueue(
1101 dev_name(pl022->master->dev.parent));
1102 if (pl022->workqueue == NULL)
1103 return -EBUSY;
1104
1105 return 0;
1106}
1107
1108
1109static int start_queue(struct pl022 *pl022)
1110{
1111 unsigned long flags;
1112
1113 spin_lock_irqsave(&pl022->queue_lock, flags);
1114
1115 if (pl022->run == QUEUE_RUNNING || pl022->busy) {
1116 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1117 return -EBUSY;
1118 }
1119
1120 pl022->run = QUEUE_RUNNING;
1121 pl022->cur_msg = NULL;
1122 pl022->cur_transfer = NULL;
1123 pl022->cur_chip = NULL;
1124 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1125
1126 queue_work(pl022->workqueue, &pl022->pump_messages);
1127
1128 return 0;
1129}
1130
1131
1132static int stop_queue(struct pl022 *pl022)
1133{
1134 unsigned long flags;
1135 unsigned limit = 500;
1136 int status = 0;
1137
1138 spin_lock_irqsave(&pl022->queue_lock, flags);
1139
1140
1141
1142
1143
1144 pl022->run = QUEUE_STOPPED;
1145 while (!list_empty(&pl022->queue) && pl022->busy && limit--) {
1146 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1147 msleep(10);
1148 spin_lock_irqsave(&pl022->queue_lock, flags);
1149 }
1150
1151 if (!list_empty(&pl022->queue) || pl022->busy)
1152 status = -EBUSY;
1153
1154 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1155
1156 return status;
1157}
1158
1159static int destroy_queue(struct pl022 *pl022)
1160{
1161 int status;
1162
1163 status = stop_queue(pl022);
1164
1165
1166
1167
1168
1169
1170 if (status != 0)
1171 return status;
1172
1173 destroy_workqueue(pl022->workqueue);
1174
1175 return 0;
1176}
1177
/**
 * verify_controller_parameters - validate a chip configuration
 * @pl022: driver state (used to check vendor capabilities)
 * @chip_info: the configuration supplied by the board or defaulted in
 *	pl022_setup()
 *
 * Range-checks every field of the configuration against the legal enum
 * values, plus interface-specific constraints (clock phase/polarity for
 * Motorola SPI; control length, wait state and duplex for National
 * Microwire) and the vendor's unidirectional-mode capability. A NULL
 * cs_control is not an error: it is replaced by null_cs_control() with
 * a warning.
 *
 * Returns 0 if the configuration is acceptable, -EINVAL otherwise.
 */
static int verify_controller_parameters(struct pl022 *pl022,
					struct pl022_config_chip *chip_info)
{
	if ((chip_info->lbm != LOOPBACK_ENABLED)
	    && (chip_info->lbm != LOOPBACK_DISABLED)) {
		dev_err(chip_info->dev,
			"loopback Mode is configured incorrectly\n");
		return -EINVAL;
	}
	if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI)
	    || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) {
		dev_err(chip_info->dev,
			"interface is configured incorrectly\n");
		return -EINVAL;
	}
	/* Unidirectional mode exists only on some silicon revisions */
	if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) &&
	    (!pl022->vendor->unidir)) {
		dev_err(chip_info->dev,
			"unidirectional mode not supported in this "
			"hardware version\n");
		return -EINVAL;
	}
	if ((chip_info->hierarchy != SSP_MASTER)
	    && (chip_info->hierarchy != SSP_SLAVE)) {
		dev_err(chip_info->dev,
			"hierarchy is configured incorrectly\n");
		return -EINVAL;
	}
	if (((chip_info->clk_freq).cpsdvsr < CPSDVR_MIN)
	    || ((chip_info->clk_freq).cpsdvsr > CPSDVR_MAX)) {
		dev_err(chip_info->dev,
			"cpsdvsr is configured incorrectly\n");
		return -EINVAL;
	}
	if ((chip_info->endian_rx != SSP_RX_MSB)
	    && (chip_info->endian_rx != SSP_RX_LSB)) {
		dev_err(chip_info->dev,
			"RX FIFO endianess is configured incorrectly\n");
		return -EINVAL;
	}
	if ((chip_info->endian_tx != SSP_TX_MSB)
	    && (chip_info->endian_tx != SSP_TX_LSB)) {
		dev_err(chip_info->dev,
			"TX FIFO endianess is configured incorrectly\n");
		return -EINVAL;
	}
	if ((chip_info->data_size < SSP_DATA_BITS_4)
	    || (chip_info->data_size > SSP_DATA_BITS_32)) {
		dev_err(chip_info->dev,
			"DATA Size is configured incorrectly\n");
		return -EINVAL;
	}
	if ((chip_info->com_mode != INTERRUPT_TRANSFER)
	    && (chip_info->com_mode != DMA_TRANSFER)
	    && (chip_info->com_mode != POLLING_TRANSFER)) {
		dev_err(chip_info->dev,
			"Communication mode is configured incorrectly\n");
		return -EINVAL;
	}
	if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM)
	    || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) {
		dev_err(chip_info->dev,
			"RX FIFO Trigger Level is configured incorrectly\n");
		return -EINVAL;
	}
	if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC)
	    || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) {
		dev_err(chip_info->dev,
			"TX FIFO Trigger Level is configured incorrectly\n");
		return -EINVAL;
	}
	/* Clock phase/polarity only apply to Motorola SPI frames */
	if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) {
		if ((chip_info->clk_phase != SSP_CLK_FIRST_EDGE)
		    && (chip_info->clk_phase != SSP_CLK_SECOND_EDGE)) {
			dev_err(chip_info->dev,
				"Clock Phase is configured incorrectly\n");
			return -EINVAL;
		}
		if ((chip_info->clk_pol != SSP_CLK_POL_IDLE_LOW)
		    && (chip_info->clk_pol != SSP_CLK_POL_IDLE_HIGH)) {
			dev_err(chip_info->dev,
				"Clock Polarity is configured incorrectly\n");
			return -EINVAL;
		}
	}
	/* These fields only apply to National Microwire frames */
	if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
		if ((chip_info->ctrl_len < SSP_BITS_4)
		    || (chip_info->ctrl_len > SSP_BITS_32)) {
			dev_err(chip_info->dev,
				"CTRL LEN is configured incorrectly\n");
			return -EINVAL;
		}
		if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO)
		    && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) {
			dev_err(chip_info->dev,
				"Wait State is configured incorrectly\n");
			return -EINVAL;
		}
		if ((chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
		    && (chip_info->duplex !=
			SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
			dev_err(chip_info->dev,
				"DUPLEX is configured incorrectly\n");
			return -EINVAL;
		}
	}
	if (chip_info->cs_control == NULL) {
		dev_warn(chip_info->dev,
			 "Chip Select Function is NULL for this chip\n");
		chip_info->cs_control = null_cs_control;
	}
	return 0;
}
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301static int pl022_transfer(struct spi_device *spi, struct spi_message *msg)
1302{
1303 struct pl022 *pl022 = spi_master_get_devdata(spi->master);
1304 unsigned long flags;
1305
1306 spin_lock_irqsave(&pl022->queue_lock, flags);
1307
1308 if (pl022->run == QUEUE_STOPPED) {
1309 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1310 return -ESHUTDOWN;
1311 }
1312 msg->actual_length = 0;
1313 msg->status = -EINPROGRESS;
1314 msg->state = STATE_START;
1315
1316 list_add_tail(&msg->queue, &pl022->queue);
1317 if (pl022->run == QUEUE_RUNNING && !pl022->busy)
1318 queue_work(pl022->workqueue, &pl022->pump_messages);
1319
1320 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1321 return 0;
1322}
1323
/**
 * calculate_effective_freq - derive prescaler values for a bus frequency
 * @pl022: driver state (for the controller clock rate)
 * @freq: desired SPI clock frequency in Hz
 * @clk_freq: output: the cpsdvsr/scr pair that best approximates @freq
 *
 * The effective frequency is rate / (cpsdvsr * (1 + scr)) with cpsdvsr
 * an even value in [2, 254] and scr in [0, 255]. Searches for the first
 * divisor pair whose effective frequency does not exceed @freq, then
 * steps back one scr (or wraps to the previous cpsdvsr) when an exact
 * match is impossible, preferring a slightly higher frequency.
 *
 * Returns 0 on success, or -EINVAL if @freq lies outside the range the
 * clock can produce.
 */
static int calculate_effective_freq(struct pl022 *pl022,
				    int freq,
				    struct ssp_clock_params *clk_freq)
{
	/* Lets calculate the frequency parameters */
	u16 cpsdvsr = 2;
	u16 scr = 0;
	bool freq_found = false;
	u32 rate;
	u32 max_tclk;
	u32 min_tclk;

	rate = clk_get_rate(pl022->clk);
	/* cpsdvsr = 2 & scr 0 */
	max_tclk = (rate / (CPSDVR_MIN * (1 + SCR_MIN)));
	/* cpsdvsr = 254 & scr = 255 */
	min_tclk = (rate / (CPSDVR_MAX * (1 + SCR_MAX)));

	if ((freq <= max_tclk) && (freq >= min_tclk)) {
		while (cpsdvsr <= CPSDVR_MAX && !freq_found) {
			while (scr <= SCR_MAX && !freq_found) {
				if ((rate /
				     (cpsdvsr * (1 + scr))) > freq)
					scr += 1;
				else {
					/*
					 * This bool is made true when
					 * effective frequency >=
					 * target frequency is found
					 */
					freq_found = true;
					if ((rate /
					     (cpsdvsr * (1 + scr))) != freq) {
						/*
						 * No exact match: back off
						 * one step to the nearest
						 * frequency above target
						 */
						if (scr == SCR_MIN) {
							cpsdvsr -= 2;
							scr = SCR_MAX;
						} else
							scr -= 1;
					}
				}
			}
			if (!freq_found) {
				cpsdvsr += 2;
				scr = SCR_MIN;
			}
		}
		if (cpsdvsr != 0) {
			dev_dbg(&pl022->adev->dev,
				"SSP Effective Frequency is %u\n",
				(rate / (cpsdvsr * (1 + scr))));
			clk_freq->cpsdvsr = (u8) (cpsdvsr & 0xFF);
			clk_freq->scr = (u8) (scr & 0xFF);
			dev_dbg(&pl022->adev->dev,
				"SSP cpsdvsr = %d, scr = %d\n",
				clk_freq->cpsdvsr, clk_freq->scr);
		}
	} else {
		dev_err(&pl022->adev->dev,
			"controller data is incorrect: out of range frequency");
		return -EINVAL;
	}
	return 0;
}
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397static int process_dma_info(struct pl022_config_chip *chip_info,
1398 struct chip_data *chip)
1399{
1400 dev_err(chip_info->dev,
1401 "cannot process DMA info, DMA not implemented!\n");
1402 return -ENOTSUPP;
1403}
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
1420 | SPI_LSB_FIRST | SPI_LOOP)
1421
1422static int pl022_setup(struct spi_device *spi)
1423{
1424 struct pl022_config_chip *chip_info;
1425 struct chip_data *chip;
1426 int status = 0;
1427 struct pl022 *pl022 = spi_master_get_devdata(spi->master);
1428
1429 if (spi->mode & ~MODEBITS) {
1430 dev_dbg(&spi->dev, "unsupported mode bits %x\n",
1431 spi->mode & ~MODEBITS);
1432 return -EINVAL;
1433 }
1434
1435 if (!spi->max_speed_hz)
1436 return -EINVAL;
1437
1438
1439 chip = spi_get_ctldata(spi);
1440
1441 if (chip == NULL) {
1442 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
1443 if (!chip) {
1444 dev_err(&spi->dev,
1445 "cannot allocate controller state\n");
1446 return -ENOMEM;
1447 }
1448 dev_dbg(&spi->dev,
1449 "allocated memory for controller's runtime state\n");
1450 }
1451
1452
1453 chip_info = spi->controller_data;
1454
1455 if (chip_info == NULL) {
1456
1457 dev_dbg(&spi->dev,
1458 "using default controller_data settings\n");
1459
1460 chip_info =
1461 kzalloc(sizeof(struct pl022_config_chip), GFP_KERNEL);
1462
1463 if (!chip_info) {
1464 dev_err(&spi->dev,
1465 "cannot allocate controller data\n");
1466 status = -ENOMEM;
1467 goto err_first_setup;
1468 }
1469
1470 dev_dbg(&spi->dev, "allocated memory for controller data\n");
1471
1472
1473 chip_info->dev = &spi->dev;
1474
1475
1476
1477
1478 chip_info->lbm = LOOPBACK_DISABLED;
1479 chip_info->com_mode = POLLING_TRANSFER;
1480 chip_info->iface = SSP_INTERFACE_MOTOROLA_SPI;
1481 chip_info->hierarchy = SSP_SLAVE;
1482 chip_info->slave_tx_disable = DO_NOT_DRIVE_TX;
1483 chip_info->endian_tx = SSP_TX_LSB;
1484 chip_info->endian_rx = SSP_RX_LSB;
1485 chip_info->data_size = SSP_DATA_BITS_12;
1486 chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM;
1487 chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC;
1488 chip_info->clk_phase = SSP_CLK_SECOND_EDGE;
1489 chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW;
1490 chip_info->ctrl_len = SSP_BITS_8;
1491 chip_info->wait_state = SSP_MWIRE_WAIT_ZERO;
1492 chip_info->duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX;
1493 chip_info->cs_control = null_cs_control;
1494 } else {
1495 dev_dbg(&spi->dev,
1496 "using user supplied controller_data settings\n");
1497 }
1498
1499
1500
1501
1502
1503 if ((0 == chip_info->clk_freq.cpsdvsr)
1504 && (0 == chip_info->clk_freq.scr)) {
1505 status = calculate_effective_freq(pl022,
1506 spi->max_speed_hz,
1507 &chip_info->clk_freq);
1508 if (status < 0)
1509 goto err_config_params;
1510 } else {
1511 if ((chip_info->clk_freq.cpsdvsr % 2) != 0)
1512 chip_info->clk_freq.cpsdvsr =
1513 chip_info->clk_freq.cpsdvsr - 1;
1514 }
1515 status = verify_controller_parameters(pl022, chip_info);
1516 if (status) {
1517 dev_err(&spi->dev, "controller data is incorrect");
1518 goto err_config_params;
1519 }
1520
1521 chip->xfer_type = chip_info->com_mode;
1522 chip->cs_control = chip_info->cs_control;
1523
1524 if (chip_info->data_size <= 8) {
1525 dev_dbg(&spi->dev, "1 <= n <=8 bits per word\n");
1526 chip->n_bytes = 1;
1527 chip->read = READING_U8;
1528 chip->write = WRITING_U8;
1529 } else if (chip_info->data_size <= 16) {
1530 dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
1531 chip->n_bytes = 2;
1532 chip->read = READING_U16;
1533 chip->write = WRITING_U16;
1534 } else {
1535 if (pl022->vendor->max_bpw >= 32) {
1536 dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
1537 chip->n_bytes = 4;
1538 chip->read = READING_U32;
1539 chip->write = WRITING_U32;
1540 } else {
1541 dev_err(&spi->dev,
1542 "illegal data size for this controller!\n");
1543 dev_err(&spi->dev,
1544 "a standard pl022 can only handle "
1545 "1 <= n <= 16 bit words\n");
1546 goto err_config_params;
1547 }
1548 }
1549
1550
1551 chip->cr0 = 0;
1552 chip->cr1 = 0;
1553 chip->dmacr = 0;
1554 chip->cpsr = 0;
1555 if ((chip_info->com_mode == DMA_TRANSFER)
1556 && ((pl022->master_info)->enable_dma)) {
1557 chip->enable_dma = 1;
1558 dev_dbg(&spi->dev, "DMA mode set in controller state\n");
1559 status = process_dma_info(chip_info, chip);
1560 if (status < 0)
1561 goto err_config_params;
1562 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
1563 SSP_DMACR_MASK_RXDMAE, 0);
1564 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
1565 SSP_DMACR_MASK_TXDMAE, 1);
1566 } else {
1567 chip->enable_dma = 0;
1568 dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
1569 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
1570 SSP_DMACR_MASK_RXDMAE, 0);
1571 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
1572 SSP_DMACR_MASK_TXDMAE, 1);
1573 }
1574
1575 chip->cpsr = chip_info->clk_freq.cpsdvsr;
1576
1577 SSP_WRITE_BITS(chip->cr0, chip_info->data_size, SSP_CR0_MASK_DSS, 0);
1578 SSP_WRITE_BITS(chip->cr0, chip_info->duplex, SSP_CR0_MASK_HALFDUP, 5);
1579 SSP_WRITE_BITS(chip->cr0, chip_info->clk_pol, SSP_CR0_MASK_SPO, 6);
1580 SSP_WRITE_BITS(chip->cr0, chip_info->clk_phase, SSP_CR0_MASK_SPH, 7);
1581 SSP_WRITE_BITS(chip->cr0, chip_info->clk_freq.scr, SSP_CR0_MASK_SCR, 8);
1582 SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len, SSP_CR0_MASK_CSS, 16);
1583 SSP_WRITE_BITS(chip->cr0, chip_info->iface, SSP_CR0_MASK_FRF, 21);
1584 SSP_WRITE_BITS(chip->cr1, chip_info->lbm, SSP_CR1_MASK_LBM, 0);
1585 SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
1586 SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
1587 SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3);
1588 SSP_WRITE_BITS(chip->cr1, chip_info->endian_rx, SSP_CR1_MASK_RENDN, 4);
1589 SSP_WRITE_BITS(chip->cr1, chip_info->endian_tx, SSP_CR1_MASK_TENDN, 5);
1590 SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, SSP_CR1_MASK_MWAIT, 6);
1591 SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, SSP_CR1_MASK_RXIFLSEL, 7);
1592 SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, SSP_CR1_MASK_TXIFLSEL, 10);
1593
1594
1595 spi_set_ctldata(spi, chip);
1596 return status;
1597 err_config_params:
1598 err_first_setup:
1599 kfree(chip);
1600 return status;
1601}
1602
1603
1604
1605
1606
1607
1608
1609
1610static void pl022_cleanup(struct spi_device *spi)
1611{
1612 struct chip_data *chip = spi_get_ctldata(spi);
1613
1614 spi_set_ctldata(spi, NULL);
1615 kfree(chip);
1616}
1617
1618
1619static int __init
1620pl022_probe(struct amba_device *adev, struct amba_id *id)
1621{
1622 struct device *dev = &adev->dev;
1623 struct pl022_ssp_controller *platform_info = adev->dev.platform_data;
1624 struct spi_master *master;
1625 struct pl022 *pl022 = NULL;
1626 int status = 0;
1627
1628 dev_info(&adev->dev,
1629 "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid);
1630 if (platform_info == NULL) {
1631 dev_err(&adev->dev, "probe - no platform data supplied\n");
1632 status = -ENODEV;
1633 goto err_no_pdata;
1634 }
1635
1636
1637 master = spi_alloc_master(dev, sizeof(struct pl022));
1638 if (master == NULL) {
1639 dev_err(&adev->dev, "probe - cannot alloc SPI master\n");
1640 status = -ENOMEM;
1641 goto err_no_master;
1642 }
1643
1644 pl022 = spi_master_get_devdata(master);
1645 pl022->master = master;
1646 pl022->master_info = platform_info;
1647 pl022->adev = adev;
1648 pl022->vendor = id->data;
1649
1650
1651
1652
1653
1654 master->bus_num = platform_info->bus_id;
1655 master->num_chipselect = platform_info->num_chipselect;
1656 master->cleanup = pl022_cleanup;
1657 master->setup = pl022_setup;
1658 master->transfer = pl022_transfer;
1659
1660 dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num);
1661
1662 status = amba_request_regions(adev, NULL);
1663 if (status)
1664 goto err_no_ioregion;
1665
1666 pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res));
1667 if (pl022->virtbase == NULL) {
1668 status = -ENOMEM;
1669 goto err_no_ioremap;
1670 }
1671 printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
1672 adev->res.start, pl022->virtbase);
1673
1674 pl022->clk = clk_get(&adev->dev, NULL);
1675 if (IS_ERR(pl022->clk)) {
1676 status = PTR_ERR(pl022->clk);
1677 dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n");
1678 goto err_no_clk;
1679 }
1680
1681
1682 clk_enable(pl022->clk);
1683 writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
1684 SSP_CR1(pl022->virtbase));
1685 load_ssp_default_config(pl022);
1686 clk_disable(pl022->clk);
1687
1688 status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022",
1689 pl022);
1690 if (status < 0) {
1691 dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status);
1692 goto err_no_irq;
1693 }
1694
1695 status = init_queue(pl022);
1696 if (status != 0) {
1697 dev_err(&adev->dev, "probe - problem initializing queue\n");
1698 goto err_init_queue;
1699 }
1700 status = start_queue(pl022);
1701 if (status != 0) {
1702 dev_err(&adev->dev, "probe - problem starting queue\n");
1703 goto err_start_queue;
1704 }
1705
1706 amba_set_drvdata(adev, pl022);
1707 status = spi_register_master(master);
1708 if (status != 0) {
1709 dev_err(&adev->dev,
1710 "probe - problem registering spi master\n");
1711 goto err_spi_register;
1712 }
1713 dev_dbg(dev, "probe succeded\n");
1714 return 0;
1715
1716 err_spi_register:
1717 err_start_queue:
1718 err_init_queue:
1719 destroy_queue(pl022);
1720 free_irq(adev->irq[0], pl022);
1721 err_no_irq:
1722 clk_put(pl022->clk);
1723 err_no_clk:
1724 iounmap(pl022->virtbase);
1725 err_no_ioremap:
1726 amba_release_regions(adev);
1727 err_no_ioregion:
1728 spi_master_put(master);
1729 err_no_master:
1730 err_no_pdata:
1731 return status;
1732}
1733
/*
 * pl022_remove - unbind the driver from an AMBA primecell
 * @adev: the device being removed
 *
 * Unwinds everything set up in pl022_probe(). The teardown order is
 * deliberate: the message queue is destroyed first so no transfer can
 * be in flight when the IRQ, clock and register mapping are released.
 * Returns 0, or the destroy_queue() error with the device left
 * partially active (queue could not be drained).
 */
static int __exit
pl022_remove(struct amba_device *adev)
{
	struct pl022 *pl022 = amba_get_drvdata(adev);
	int status = 0;
	if (!pl022)
		return 0;

	/* Remove the queue; bail out if messages are still pending */
	status = destroy_queue(pl022);
	if (status != 0) {
		dev_err(&adev->dev,
			"queue remove failed (%d)\n", status);
		return status;
	}
	/* Put the block back into its power-on default state */
	load_ssp_default_config(pl022);
	free_irq(adev->irq[0], pl022);
	clk_disable(pl022->clk);
	clk_put(pl022->clk);
	iounmap(pl022->virtbase);
	amba_release_regions(adev);
	/* Stop the transfer pump before the master goes away */
	tasklet_disable(&pl022->pump_transfers);
	spi_unregister_master(pl022->master);
	spi_master_put(pl022->master);
	amba_set_drvdata(adev, NULL);
	dev_dbg(&adev->dev, "remove succeded\n");
	return 0;
}
1762
1763#ifdef CONFIG_PM
1764static int pl022_suspend(struct amba_device *adev, pm_message_t state)
1765{
1766 struct pl022 *pl022 = amba_get_drvdata(adev);
1767 int status = 0;
1768
1769 status = stop_queue(pl022);
1770 if (status) {
1771 dev_warn(&adev->dev, "suspend cannot stop queue\n");
1772 return status;
1773 }
1774
1775 clk_enable(pl022->clk);
1776 load_ssp_default_config(pl022);
1777 clk_disable(pl022->clk);
1778 dev_dbg(&adev->dev, "suspended\n");
1779 return 0;
1780}
1781
1782static int pl022_resume(struct amba_device *adev)
1783{
1784 struct pl022 *pl022 = amba_get_drvdata(adev);
1785 int status = 0;
1786
1787
1788 status = start_queue(pl022);
1789 if (status)
1790 dev_err(&adev->dev, "problem starting queue (%d)\n", status);
1791 else
1792 dev_dbg(&adev->dev, "resumed\n");
1793
1794 return status;
1795}
1796#else
1797#define pl022_suspend NULL
1798#define pl022_resume NULL
1799#endif
1800
/* Standard ARM PL022: 8-entry FIFO, words up to 16 bits wide */
static struct vendor_data vendor_arm = {
	.fifodepth = 8,
	.max_bpw = 16,
	.unidir = false,
};
1806
1807
/* ST variant: deeper 32-entry FIFO and words up to 32 bits wide */
static struct vendor_data vendor_st = {
	.fifodepth = 32,
	.max_bpw = 32,
	.unidir = false,
};
1813
/* AMBA peripheral IDs this driver binds to */
static struct amba_id pl022_ids[] = {
	{
		/*
		 * ARM PL022 variant: the mask ignores the revision
		 * nibble so any ARM revision matches. Capabilities per
		 * vendor_arm (8-deep FIFO, max 16 bits per word).
		 */
		.id	= 0x00041022,
		.mask	= 0x000fffff,
		.data	= &vendor_arm,
	},
	{
		/*
		 * ST-modified cell: matched exactly (full mask).
		 * Capabilities per vendor_st (32-deep FIFO, max 32
		 * bits per word).
		 */
		.id	= 0x01080022,
		.mask	= 0xffffffff,
		.data	= &vendor_st,
	},
	{ 0, 0 },	/* sentinel - end of table */
};
1835
/*
 * AMBA bus driver glue. remove is wrapped in __exit_p() because
 * pl022_remove is marked __exit; suspend/resume are NULL when
 * CONFIG_PM is off (see the #ifdef above).
 */
static struct amba_driver pl022_driver = {
	.drv = {
		.name	= "ssp-pl022",
	},
	.id_table	= pl022_ids,
	.probe		= pl022_probe,
	.remove		= __exit_p(pl022_remove),
	.suspend	= pl022_suspend,
	.resume		= pl022_resume,
};
1846
1847
/* Module entry point: register the driver with the AMBA bus */
static int __init pl022_init(void)
{
	return amba_driver_register(&pl022_driver);
}

module_init(pl022_init);
1854
/* Module exit point: unregister the driver from the AMBA bus */
static void __exit pl022_exit(void)
{
	amba_driver_unregister(&pl022_driver);
}

module_exit(pl022_exit);
1861
1862MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
1863MODULE_DESCRIPTION("PL022 SSP Controller Driver");
1864MODULE_LICENSE("GPL");
1865