// SPDX-License-Identifier: GPL-2.0-only
/*
 * SPI bus driver for the Topcliff PCH used by Intel SoCs
 *
 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
 */
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/spi/spidev.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>

#include <linux/dmaengine.h>
#include <linux/pch_dma.h>

/* Register offsets */
#define PCH_SPCR		0x00	/* SPI control register */
#define PCH_SPBRR		0x04	/* SPI baud rate register */
#define PCH_SPSR		0x08	/* SPI status register */
#define PCH_SPDWR		0x0C	/* SPI write data register */
#define PCH_SPDRR		0x10	/* SPI read data register */
#define PCH_SSNXCR		0x18	/* SSN expand control register */
#define PCH_SRST		0x1C	/* SPI reset register */
#define PCH_ADDRESS_SIZE	0x20

#define PCH_SPSR_TFD		0x000007C0
#define PCH_SPSR_RFD		0x0000F800

#define PCH_READABLE(x)		(((x) & PCH_SPSR_RFD) >> 11)
#define PCH_WRITABLE(x)		(((x) & PCH_SPSR_TFD) >> 6)

#define PCH_RX_THOLD		7
#define PCH_RX_THOLD_MAX	15

#define PCH_TX_THOLD		2

#define PCH_MAX_BAUDRATE	5000000
#define PCH_MAX_FIFO_DEPTH	16

#define STATUS_RUNNING		1
#define STATUS_EXITING		2
#define PCH_SLEEP_TIME		10

#define SSN_LOW			0x02U
#define SSN_HIGH		0x03U
#define SSN_NO_CONTROL		0x00U
#define PCH_MAX_CS		0xFF
#define PCI_DEVICE_ID_GE_SPI	0x8816

#define SPCR_SPE_BIT		(1 << 0)
#define SPCR_MSTR_BIT		(1 << 1)
#define SPCR_LSBF_BIT		(1 << 4)
#define SPCR_CPHA_BIT		(1 << 5)
#define SPCR_CPOL_BIT		(1 << 6)
#define SPCR_TFIE_BIT		(1 << 8)
#define SPCR_RFIE_BIT		(1 << 9)
#define SPCR_FIE_BIT		(1 << 10)
#define SPCR_ORIE_BIT		(1 << 11)
#define SPCR_MDFIE_BIT		(1 << 12)
#define SPCR_FICLR_BIT		(1 << 24)
#define SPSR_TFI_BIT		(1 << 0)
#define SPSR_RFI_BIT		(1 << 1)
#define SPSR_FI_BIT		(1 << 2)
#define SPSR_ORF_BIT		(1 << 3)
#define SPBRR_SIZE_BIT		(1 << 10)

#define PCH_ALL			(SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|\
				SPCR_ORIE_BIT|SPCR_MDFIE_BIT)

#define SPCR_RFIC_FIELD		20
#define SPCR_TFIC_FIELD		16

#define MASK_SPBRR_SPBR_BITS	((1 << 10) - 1)
#define MASK_RFIC_SPCR_BITS	(0xf << SPCR_RFIC_FIELD)
#define MASK_TFIC_SPCR_BITS	(0xf << SPCR_TFIC_FIELD)

#define PCH_CLOCK_HZ		50000000
#define PCH_MAX_SPBR		1023

/* Device IDs for the LAPIS Semiconductor ML7xxx IOHs */
#define PCI_DEVICE_ID_ML7213_SPI	0x802c
#define PCI_DEVICE_ID_ML7223_SPI	0x800F
#define PCI_DEVICE_ID_ML7831_SPI	0x8816

/*
 * Maximum number of SPI channel instances per device:
 * Intel EG20T PCH:			1ch
 * LAPIS Semiconductor ML7213 IOH:	2ch
 * LAPIS Semiconductor ML7223 IOH:	1ch
 * LAPIS Semiconductor ML7831 IOH:	1ch
 */
#define PCH_SPI_MAX_DEV		2

#define PCH_BUF_SIZE		4096
#define PCH_DMA_TRANS_SIZE	12

static int use_dma = 1;

struct pch_spi_dma_ctrl {
	struct dma_async_tx_descriptor *desc_tx;
	struct dma_async_tx_descriptor *desc_rx;
	struct pch_dma_slave param_tx;
	struct pch_dma_slave param_rx;
	struct dma_chan *chan_tx;
	struct dma_chan *chan_rx;
	struct scatterlist *sg_tx_p;
	struct scatterlist *sg_rx_p;
	struct scatterlist sg_tx;
	struct scatterlist sg_rx;
	int nent;
	void *tx_buf_virt;
	void *rx_buf_virt;
	dma_addr_t tx_buf_dma;
	dma_addr_t rx_buf_dma;
};

/**
 * struct pch_spi_data - Holds the SPI channel specific details
 * @io_remap_addr:		The remapped PCI base address
 * @io_base_addr:		Base address
 * @master:			Pointer to the SPI master structure
 * @work:			Reference to work queue handler
 * @wait:			Wait queue for waking up upon receiving an
 *				interrupt
 * @transfer_complete:		Status of SPI transfer
 * @bcurrent_msg_processing:	Status flag for message processing
 * @lock:			Lock for protecting this structure
 * @queue:			SPI message queue
 * @status:			Status of the SPI driver
 * @bpw_len:			Length of data to be transferred in words
 * @transfer_active:		Flag showing active transfer
 * @tx_index:			Transmit data count; for bookkeeping during
 *				transfer
 * @rx_index:			Receive data count; for bookkeeping during
 *				transfer
 * @pkt_tx_buff:		Buffer for data to be transmitted
 * @pkt_rx_buff:		Buffer for received data
 * @n_curnt_chip:		The chip number that this SPI driver currently
 *				operates on
 * @current_chip:		Reference to the current chip that this SPI
 *				driver currently operates on
 * @current_msg:		The current message that this SPI driver is
 *				handling
 * @cur_trans:			The current transfer that this SPI driver is
 *				handling
 * @board_dat:			Reference to the SPI device data structure
 * @plat_dev:			platform_device structure
 * @ch:				SPI channel number
 * @dma:			DMA control data
 * @use_dma:			To use DMA or not
 * @irq_reg_sts:		Status of IRQ registration
 * @save_total_len:		Save length while data is being transferred
 */
struct pch_spi_data {
	void __iomem *io_remap_addr;
	unsigned long io_base_addr;
	struct spi_master *master;
	struct work_struct work;
	wait_queue_head_t wait;
	u8 transfer_complete;
	u8 bcurrent_msg_processing;
	spinlock_t lock;
	struct list_head queue;
	u8 status;
	u32 bpw_len;
	u8 transfer_active;
	u32 tx_index;
	u32 rx_index;
	u16 *pkt_tx_buff;
	u16 *pkt_rx_buff;
	u8 n_curnt_chip;
	struct spi_device *current_chip;
	struct spi_message *current_msg;
	struct spi_transfer *cur_trans;
	struct pch_spi_board_data *board_dat;
	struct platform_device *plat_dev;
	int ch;
	struct pch_spi_dma_ctrl dma;
	int use_dma;
	u8 irq_reg_sts;
	int save_total_len;
};

/**
 * struct pch_spi_board_data - Holds the SPI device specific details
 * @pdev:		Pointer to the PCI device
 * @suspend_sts:	Status of suspend
 * @num:		The number of SPI device instances
 */
struct pch_spi_board_data {
	struct pci_dev *pdev;
	u8 suspend_sts;
	int num;
};

struct pch_pd_dev_save {
	int num;
	struct platform_device *pd_save[PCH_SPI_MAX_DEV];
	struct pch_spi_board_data *board_dat;
};

static const struct pci_device_id pch_spi_pcidev_id[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI),    1, },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_SPI), 1, },
	{ }
};

/**
 * pch_spi_writereg() - Performs register writes
 * @master:	Pointer to struct spi_master
 * @idx:	Register offset
 * @val:	Value to be written to register
 */
static inline void pch_spi_writereg(struct spi_master *master, int idx, u32 val)
{
	struct pch_spi_data *data = spi_master_get_devdata(master);
	iowrite32(val, (data->io_remap_addr + idx));
}

/**
 * pch_spi_readreg() - Performs register reads
 * @master:	Pointer to struct spi_master
 * @idx:	Register offset
 */
static inline u32 pch_spi_readreg(struct spi_master *master, int idx)
{
	struct pch_spi_data *data = spi_master_get_devdata(master);
	return ioread32(data->io_remap_addr + idx);
}

static inline void pch_spi_setclr_reg(struct spi_master *master, int idx,
				      u32 set, u32 clr)
{
	u32 tmp = pch_spi_readreg(master, idx);
	tmp = (tmp & ~clr) | set;
	pch_spi_writereg(master, idx, tmp);
}

static void pch_spi_set_master_mode(struct spi_master *master)
{
	pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0);
}

/**
 * pch_spi_clear_fifo() - Clears the Transmit and Receive FIFOs
 * @master:	Pointer to struct spi_master
 */
static void pch_spi_clear_fifo(struct spi_master *master)
{
	pch_spi_setclr_reg(master, PCH_SPCR, SPCR_FICLR_BIT, 0);
	pch_spi_setclr_reg(master, PCH_SPCR, 0, SPCR_FICLR_BIT);
}
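
/*
 * pch_spi_handler_sub() - Interrupt handler helper for PIO transfers:
 * acknowledges the status bits, drains the RX FIFO while refilling the
 * TX FIFO, and signals completion once all words have been exchanged.
 */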
static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
				void __iomem *io_remap_addr)
{
	u32 n_read, tx_index, rx_index, bpw_len;
	u16 *pkt_rx_buffer, *pkt_tx_buff;
	int read_cnt;
	u32 reg_spcr_val;
	void __iomem *spsr;
	void __iomem *spdrr;
	void __iomem *spdwr;

	/* clear status register */
	spsr = io_remap_addr + PCH_SPSR;
	iowrite32(reg_spsr_val, spsr);

	if (data->transfer_active) {
		rx_index = data->rx_index;
		tx_index = data->tx_index;
		bpw_len = data->bpw_len;
		pkt_rx_buffer = data->pkt_rx_buff;
		pkt_tx_buff = data->pkt_tx_buff;

		spdrr = io_remap_addr + PCH_SPDRR;
		spdwr = io_remap_addr + PCH_SPDWR;

		n_read = PCH_READABLE(reg_spsr_val);

		for (read_cnt = 0; (read_cnt < n_read); read_cnt++) {
			pkt_rx_buffer[rx_index++] = ioread32(spdrr);
			if (tx_index < bpw_len)
				iowrite32(pkt_tx_buff[tx_index++], spdwr);
		}

		/* disable RFI if not needed */
		if ((bpw_len - rx_index) <= PCH_MAX_FIFO_DEPTH) {
			reg_spcr_val = ioread32(io_remap_addr + PCH_SPCR);
			reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */

			/* reset rx threshold */
			reg_spcr_val &= ~MASK_RFIC_SPCR_BITS;
			reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD);

			iowrite32(reg_spcr_val, (io_remap_addr + PCH_SPCR));
		}

		/* update counts */
		data->tx_index = tx_index;
		data->rx_index = rx_index;

		/* if transfer complete interrupt */
		if (reg_spsr_val & SPSR_FI_BIT) {
			if ((tx_index == bpw_len) && (rx_index == tx_index)) {
				/* disable interrupts */
				pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
						   PCH_ALL);

				/* transfer is completed;
				   inform pch_spi_process_messages */
				data->transfer_complete = true;
				data->transfer_active = false;
				wake_up(&data->wait);
			} else {
				dev_vdbg(&data->master->dev,
					"%s : Transfer is not completed",
					__func__);
			}
		}
	}
}

/**
 * pch_spi_handler() - Interrupt handler
 * @irq:	The interrupt number
 * @dev_id:	Pointer to struct pch_spi_data
 */
static irqreturn_t pch_spi_handler(int irq, void *dev_id)
{
	u32 reg_spsr_val;
	void __iomem *spsr;
	void __iomem *io_remap_addr;
	irqreturn_t ret = IRQ_NONE;
	struct pch_spi_data *data = dev_id;
	struct pch_spi_board_data *board_dat = data->board_dat;

	if (board_dat->suspend_sts) {
		dev_dbg(&board_dat->pdev->dev,
			"%s returning due to suspend\n", __func__);
		return IRQ_NONE;
	}

	io_remap_addr = data->io_remap_addr;
	spsr = io_remap_addr + PCH_SPSR;

	reg_spsr_val = ioread32(spsr);

	if (reg_spsr_val & SPSR_ORF_BIT) {
		dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__);
		if (data->current_msg->complete) {
			data->transfer_complete = true;
			data->current_msg->status = -EIO;
			data->current_msg->complete(data->current_msg->context);
			data->bcurrent_msg_processing = false;
			data->current_msg = NULL;
			data->cur_trans = NULL;
		}
	}

	if (data->use_dma)
		return IRQ_NONE;

	/* Check if the interrupt is for SPI device */
	if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
		pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr);
		ret = IRQ_HANDLED;
	}

	dev_dbg(&board_dat->pdev->dev, "%s EXIT return value=%d\n",
		__func__, ret);

	return ret;
}

/**
 * pch_spi_set_baud_rate() - Sets SPBR field in SPBRR
 * @master:	Pointer to struct spi_master
 * @speed_hz:	Baud rate
 */
static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz)
{
	u32 n_spbr = PCH_CLOCK_HZ / (speed_hz * 2);

	/* if the requested baud rate is below what we can support, limit it */
	if (n_spbr > PCH_MAX_SPBR)
		n_spbr = PCH_MAX_SPBR;

	pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, MASK_SPBRR_SPBR_BITS);
}

/**
 * pch_spi_set_bits_per_word() - Sets SIZE field in SPBRR
 * @master:		Pointer to struct spi_master
 * @bits_per_word:	Bits per word for SPI transfer
 */
static void pch_spi_set_bits_per_word(struct spi_master *master,
				      u8 bits_per_word)
{
	if (bits_per_word == 8)
		pch_spi_setclr_reg(master, PCH_SPBRR, 0, SPBRR_SIZE_BIT);
	else
		pch_spi_setclr_reg(master, PCH_SPBRR, SPBRR_SIZE_BIT, 0);
}

/**
 * pch_spi_setup_transfer() - Configures the PCH SPI hardware for transfer
 * @spi:	Pointer to struct spi_device
 */
static void pch_spi_setup_transfer(struct spi_device *spi)
{
	u32 flags = 0;

	dev_dbg(&spi->dev, "%s SPBRR content =%x setting baud rate=%d\n",
		__func__, pch_spi_readreg(spi->master, PCH_SPBRR),
		spi->max_speed_hz);
	pch_spi_set_baud_rate(spi->master, spi->max_speed_hz);

	/* set bits per word */
	pch_spi_set_bits_per_word(spi->master, spi->bits_per_word);

	if (!(spi->mode & SPI_LSB_FIRST))
		flags |= SPCR_LSBF_BIT;
	if (spi->mode & SPI_CPOL)
		flags |= SPCR_CPOL_BIT;
	if (spi->mode & SPI_CPHA)
		flags |= SPCR_CPHA_BIT;
	pch_spi_setclr_reg(spi->master, PCH_SPCR, flags,
			   (SPCR_LSBF_BIT | SPCR_CPOL_BIT | SPCR_CPHA_BIT));

	/* Clear the FIFO by toggling FICLR to 1 and back to 0 */
	pch_spi_clear_fifo(spi->master);
}

/**
 * pch_spi_reset() - Clears SPI registers
 * @master:	Pointer to struct spi_master
 */
static void pch_spi_reset(struct spi_master *master)
{
	/* write 1 to reset SPI */
	pch_spi_writereg(master, PCH_SRST, 0x1);

	/* clear reset */
	pch_spi_writereg(master, PCH_SRST, 0x0);
}
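
/*
 * pch_spi_transfer() - Validates every transfer in an incoming message,
 * queues the message on the driver's list, and schedules the worker that
 * performs the actual transfer.
 */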
static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
{
	struct spi_transfer *transfer;
	struct pch_spi_data *data = spi_master_get_devdata(pspi->master);
	int retval;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	/* validate SPI message */
	list_for_each_entry(transfer, &pmsg->transfers, transfer_list) {
		if (!transfer->tx_buf && !transfer->rx_buf) {
			dev_err(&pspi->dev,
				"%s Tx and Rx buffer NULL\n", __func__);
			retval = -EINVAL;
			goto err_return_spinlock;
		}

		if (!transfer->len) {
			dev_err(&pspi->dev, "%s Transfer length invalid\n",
				__func__);
			retval = -EINVAL;
			goto err_return_spinlock;
		}

		dev_dbg(&pspi->dev,
			"%s Tx/Rx buffer valid. Transfer length valid\n",
			__func__);
	}
	spin_unlock_irqrestore(&data->lock, flags);

	/* We won't process any messages if we have been asked to terminate */
	if (data->status == STATUS_EXITING) {
		dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__);
		retval = -ESHUTDOWN;
		goto err_out;
	}

	/* If suspended, return -EINVAL */
	if (data->board_dat->suspend_sts) {
		dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__);
		retval = -EINVAL;
		goto err_out;
	}

	/* set status of message */
	pmsg->actual_length = 0;
	dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status);

	pmsg->status = -EINPROGRESS;
	spin_lock_irqsave(&data->lock, flags);
	/* add message to queue */
	list_add_tail(&pmsg->queue, &data->queue);
	spin_unlock_irqrestore(&data->lock, flags);

	dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__);

	schedule_work(&data->work);
	dev_dbg(&pspi->dev, "%s - Invoked queue work\n", __func__);

	retval = 0;

err_out:
	dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
	return retval;
err_return_spinlock:
	dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
	spin_unlock_irqrestore(&data->lock, flags);
	return retval;
}

static inline void pch_spi_select_chip(struct pch_spi_data *data,
				       struct spi_device *pspi)
{
	if (data->current_chip != NULL) {
		if (pspi->chip_select != data->n_curnt_chip) {
			dev_dbg(&pspi->dev, "%s : different slave\n", __func__);
			data->current_chip = NULL;
		}
	}

	data->current_chip = pspi;

	data->n_curnt_chip = data->current_chip->chip_select;

	dev_dbg(&pspi->dev, "%s :Invoking pch_spi_setup_transfer\n", __func__);
	pch_spi_setup_transfer(pspi);
}
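
/*
 * pch_spi_set_tx() - Prepares a PIO transfer: applies per-transfer speed
 * and word size, allocates the packet buffers, copies the caller's TX
 * data, and primes the hardware FIFO with the first batch of words.
 */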
static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
{
	int size;
	u32 n_writes;
	int j;
	struct spi_message *pmsg, *tmp;
	const u8 *tx_buf;
	const u16 *tx_sbuf;

	/* set baud rate if needed */
	if (data->cur_trans->speed_hz) {
		dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
		pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
	}

	/* set bits per word if needed */
	if (data->cur_trans->bits_per_word &&
	    (data->current_msg->spi->bits_per_word != data->cur_trans->bits_per_word)) {
		dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
		pch_spi_set_bits_per_word(data->master,
					  data->cur_trans->bits_per_word);
		*bpw = data->cur_trans->bits_per_word;
	} else {
		*bpw = data->current_msg->spi->bits_per_word;
	}

	/* reset Tx/Rx index */
	data->tx_index = 0;
	data->rx_index = 0;

	data->bpw_len = data->cur_trans->len / (*bpw / 8);

	/* find alloc size */
	size = data->cur_trans->len * sizeof(*data->pkt_tx_buff);

	/* allocate memory for pkt_tx_buff & pkt_rx_buffer */
	data->pkt_tx_buff = kzalloc(size, GFP_KERNEL);
	if (data->pkt_tx_buff != NULL) {
		data->pkt_rx_buff = kzalloc(size, GFP_KERNEL);
		if (!data->pkt_rx_buff)
			kfree(data->pkt_tx_buff);
	}

	if (!data->pkt_rx_buff) {
		/* flush queue and set status of all transfers to -ENOMEM */
		list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
			pmsg->status = -ENOMEM;

			if (pmsg->complete)
				pmsg->complete(pmsg->context);

			/* delete from queue */
			list_del_init(&pmsg->queue);
		}
		return;
	}

	/* copy Tx Data */
	if (data->cur_trans->tx_buf != NULL) {
		if (*bpw == 8) {
			tx_buf = data->cur_trans->tx_buf;
			for (j = 0; j < data->bpw_len; j++)
				data->pkt_tx_buff[j] = *tx_buf++;
		} else {
			tx_sbuf = data->cur_trans->tx_buf;
			for (j = 0; j < data->bpw_len; j++)
				data->pkt_tx_buff[j] = *tx_sbuf++;
		}
	}

	/* if len > PCH_MAX_FIFO_DEPTH, write only PCH_MAX_FIFO_DEPTH words */
	n_writes = data->bpw_len;
	if (n_writes > PCH_MAX_FIFO_DEPTH)
		n_writes = PCH_MAX_FIFO_DEPTH;

	dev_dbg(&data->master->dev,
		"\n%s:Pulling down SSN low - writing 0x2 to SSNXCR\n",
		__func__);
	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);

	for (j = 0; j < n_writes; j++)
		pch_spi_writereg(data->master, PCH_SPDWR, data->pkt_tx_buff[j]);

	/* update tx_index */
	data->tx_index = j;

	/* reset transfer complete flag */
	data->transfer_complete = false;
	data->transfer_active = true;
}
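
/*
 * pch_spi_nomore_transfer() - Called after the last transfer of a message
 * has finished: completes the message, then either reschedules the worker
 * for the next queued message or flushes the queue on suspend/exit.
 */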
static void pch_spi_nomore_transfer(struct pch_spi_data *data)
{
	struct spi_message *pmsg, *tmp;
	dev_dbg(&data->master->dev, "%s called\n", __func__);

	/* Invoke complete callback
	 * [To the spi core..indicating end of transfer] */
	data->current_msg->status = 0;

	if (data->current_msg->complete) {
		dev_dbg(&data->master->dev,
			"%s:Invoking callback of SPI core\n", __func__);
		data->current_msg->complete(data->current_msg->context);
	}

	/* update status in global variable */
	data->bcurrent_msg_processing = false;

	dev_dbg(&data->master->dev,
		"%s:data->bcurrent_msg_processing = false\n", __func__);

	data->current_msg = NULL;
	data->cur_trans = NULL;

	/* check if we have items in list and not suspending;
	 * list_empty() returns 1 if the list is empty */
	if ((list_empty(&data->queue) == 0) &&
	    (!data->board_dat->suspend_sts) &&
	    (data->status != STATUS_EXITING)) {
		/* more messages are queued; schedule the worker again */
		dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__);
		schedule_work(&data->work);
	} else if (data->board_dat->suspend_sts ||
		   data->status == STATUS_EXITING) {
		dev_dbg(&data->master->dev,
			"%s suspend/remove initiated, flushing queue\n",
			__func__);
		list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
			pmsg->status = -EIO;

			if (pmsg->complete)
				pmsg->complete(pmsg->context);

			/* delete from queue */
			list_del_init(&pmsg->queue);
		}
	}
}
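
/*
 * pch_spi_set_ir() - Arms the RX threshold and error interrupts, enables
 * the controller, then sleeps until the interrupt handler reports that
 * the transfer is complete, and finally quiesces the hardware.
 */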
static void pch_spi_set_ir(struct pch_spi_data *data)
{
	/* enable interrupts, set threshold, enable SPI */
	if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH)
		/* set receive threshold to PCH_RX_THOLD */
		pch_spi_setclr_reg(data->master, PCH_SPCR,
				   PCH_RX_THOLD << SPCR_RFIC_FIELD |
				   SPCR_FIE_BIT | SPCR_RFIE_BIT |
				   SPCR_ORIE_BIT | SPCR_SPE_BIT,
				   MASK_RFIC_SPCR_BITS | PCH_ALL);
	else
		/* set receive threshold to maximum */
		pch_spi_setclr_reg(data->master, PCH_SPCR,
				   PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD |
				   SPCR_FIE_BIT | SPCR_ORIE_BIT |
				   SPCR_SPE_BIT,
				   MASK_RFIC_SPCR_BITS | PCH_ALL);

	/* Wait until the transfer completes; go to sleep after
	   initiating the transfer. */
	dev_dbg(&data->master->dev,
		"%s:waiting for transfer to get over\n", __func__);

	wait_event_interruptible(data->wait, data->transfer_complete);

	/* clear all interrupts */
	pch_spi_writereg(data->master, PCH_SPSR,
			 pch_spi_readreg(data->master, PCH_SPSR));
	/* Disable interrupts and SPI transfer */
	pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL | SPCR_SPE_BIT);
	/* clear FIFO */
	pch_spi_clear_fifo(data->master);
}
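
/*
 * pch_spi_copy_rx_data() - Copies received words from the driver's packet
 * buffer back into the caller's rx_buf, narrowing to bytes for 8-bit
 * transfers.
 */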
static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw)
{
	int j;
	u8 *rx_buf;
	u16 *rx_sbuf;

	/* copy Rx Data */
	if (!data->cur_trans->rx_buf)
		return;

	if (bpw == 8) {
		rx_buf = data->cur_trans->rx_buf;
		for (j = 0; j < data->bpw_len; j++)
			*rx_buf++ = data->pkt_rx_buff[j] & 0xFF;
	} else {
		rx_sbuf = data->cur_trans->rx_buf;
		for (j = 0; j < data->bpw_len; j++)
			*rx_sbuf++ = data->pkt_rx_buff[j];
	}
}
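
/*
 * pch_spi_copy_rx_data_for_dma() - DMA variant of the RX copy: drains the
 * coherent DMA bounce buffer into the caller's rx_buf, and advances
 * rx_buf so that successive chunks of a split transfer append correctly.
 */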
static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
{
	int j;
	u8 *rx_buf;
	u16 *rx_sbuf;
	const u8 *rx_dma_buf;
	const u16 *rx_dma_sbuf;

	/* copy Rx Data */
	if (!data->cur_trans->rx_buf)
		return;

	if (bpw == 8) {
		rx_buf = data->cur_trans->rx_buf;
		rx_dma_buf = data->dma.rx_buf_virt;
		for (j = 0; j < data->bpw_len; j++)
			*rx_buf++ = *rx_dma_buf++ & 0xFF;
		data->cur_trans->rx_buf = rx_buf;
	} else {
		rx_sbuf = data->cur_trans->rx_buf;
		rx_dma_sbuf = data->dma.rx_buf_virt;
		for (j = 0; j < data->bpw_len; j++)
			*rx_sbuf++ = *rx_dma_sbuf++;
		data->cur_trans->rx_buf = rx_sbuf;
	}
}
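
/*
 * pch_spi_start_transfer() - Enables the controller to start a prepared
 * DMA transfer, waits (with a timeout) for completion, then syncs and
 * frees the scatterlists and quiesces the hardware.
 */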
static int pch_spi_start_transfer(struct pch_spi_data *data)
{
	struct pch_spi_dma_ctrl *dma;
	unsigned long flags;
	int rtn;

	dma = &data->dma;

	spin_lock_irqsave(&data->lock, flags);

	/* disable interrupts, SPI set enable */
	pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL);

	spin_unlock_irqrestore(&data->lock, flags);

	/* Wait until the transfer completes; go to sleep after
	   initiating the transfer. */
	dev_dbg(&data->master->dev,
		"%s:waiting for transfer to get over\n", __func__);
	rtn = wait_event_interruptible_timeout(data->wait,
					       data->transfer_complete,
					       msecs_to_jiffies(2 * HZ));
	if (!rtn)
		dev_err(&data->master->dev,
			"%s wait-event timeout\n", __func__);

	dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
			    DMA_FROM_DEVICE);

	dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,
			    DMA_FROM_DEVICE);
	memset(data->dma.tx_buf_virt, 0, PAGE_SIZE);

	async_tx_ack(dma->desc_rx);
	async_tx_ack(dma->desc_tx);
	kfree(dma->sg_tx_p);
	kfree(dma->sg_rx_p);

	spin_lock_irqsave(&data->lock, flags);

	/* clear fifo threshold, disable interrupts, disable SPI transfer */
	pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
			   MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS | PCH_ALL |
			   SPCR_SPE_BIT);
	/* clear all interrupts */
	pch_spi_writereg(data->master, PCH_SPSR,
			 pch_spi_readreg(data->master, PCH_SPSR));
	/* clear FIFO */
	pch_spi_clear_fifo(data->master);

	spin_unlock_irqrestore(&data->lock, flags);

	return rtn;
}

static void pch_dma_rx_complete(void *arg)
{
	struct pch_spi_data *data = arg;

	/* transfer is completed; inform pch_spi_process_messages */
	data->transfer_complete = true;
	wake_up_interruptible(&data->wait);
}

static bool pch_spi_filter(struct dma_chan *chan, void *slave)
{
	struct pch_dma_slave *param = slave;

	if ((chan->chan_id == param->chan_id) &&
	    (param->dma_dev == chan->device->dev)) {
		chan->private = param;
		return true;
	} else {
		return false;
	}
}
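
/*
 * pch_spi_request_dma() - Requests a TX and an RX channel from the PCH
 * DMA controller (even channel number for TX, the following odd one for
 * RX); on failure the driver falls back to PIO by clearing use_dma.
 */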
static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct pci_dev *dma_dev;
	struct pch_dma_slave *param;
	struct pch_spi_dma_ctrl *dma;
	unsigned int width;

	if (bpw == 8)
		width = PCH_DMA_WIDTH_1_BYTE;
	else
		width = PCH_DMA_WIDTH_2_BYTES;

	dma = &data->dma;
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get DMA's dev information */
	dma_dev = pci_get_slot(data->board_dat->pdev->bus,
			PCI_DEVFN(PCI_SLOT(data->board_dat->pdev->devfn), 0));

	/* Set Tx DMA */
	param = &dma->param_tx;
	param->dma_dev = &dma_dev->dev;
	param->chan_id = data->ch * 2; /* Tx = even */
	param->tx_reg = data->io_base_addr + PCH_SPDWR;
	param->width = width;
	chan = dma_request_channel(mask, pch_spi_filter, param);
	if (!chan) {
		dev_err(&data->master->dev,
			"ERROR: dma_request_channel FAILS(Tx)\n");
		data->use_dma = 0;
		return;
	}
	dma->chan_tx = chan;

	/* Set Rx DMA */
	param = &dma->param_rx;
	param->dma_dev = &dma_dev->dev;
	param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */
	param->rx_reg = data->io_base_addr + PCH_SPDRR;
	param->width = width;
	chan = dma_request_channel(mask, pch_spi_filter, param);
	if (!chan) {
		dev_err(&data->master->dev,
			"ERROR: dma_request_channel FAILS(Rx)\n");
		dma_release_channel(dma->chan_tx);
		dma->chan_tx = NULL;
		data->use_dma = 0;
		return;
	}
	dma->chan_rx = chan;
}

static void pch_spi_release_dma(struct pch_spi_data *data)
{
	struct pch_spi_dma_ctrl *dma;

	dma = &data->dma;
	if (dma->chan_tx) {
		dma_release_channel(dma->chan_tx);
		dma->chan_tx = NULL;
	}
	if (dma->chan_rx) {
		dma_release_channel(dma->chan_rx);
		dma->chan_rx = NULL;
	}
}
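
/*
 * pch_spi_handle_dma() - Prepares one DMA chunk: applies per-transfer
 * settings, stages TX data in the coherent bounce buffer, builds RX and
 * TX scatterlists sized around the FIFO thresholds, and submits both
 * descriptors with SSN pulled low.
 */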
static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
{
	const u8 *tx_buf;
	const u16 *tx_sbuf;
	u8 *tx_dma_buf;
	u16 *tx_dma_sbuf;
	struct scatterlist *sg;
	struct dma_async_tx_descriptor *desc_tx;
	struct dma_async_tx_descriptor *desc_rx;
	int num;
	int i;
	int size;
	int rem;
	int head;
	unsigned long flags;
	struct pch_spi_dma_ctrl *dma;

	dma = &data->dma;

	/* set baud rate if needed */
	if (data->cur_trans->speed_hz) {
		dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
		spin_lock_irqsave(&data->lock, flags);
		pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
		spin_unlock_irqrestore(&data->lock, flags);
	}

	/* set bits per word if needed */
	if (data->cur_trans->bits_per_word &&
	    (data->current_msg->spi->bits_per_word !=
	     data->cur_trans->bits_per_word)) {
		dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
		spin_lock_irqsave(&data->lock, flags);
		pch_spi_set_bits_per_word(data->master,
					  data->cur_trans->bits_per_word);
		spin_unlock_irqrestore(&data->lock, flags);
		*bpw = data->cur_trans->bits_per_word;
	} else {
		*bpw = data->current_msg->spi->bits_per_word;
	}
	data->bpw_len = data->cur_trans->len / (*bpw / 8);

	if (data->bpw_len > PCH_BUF_SIZE) {
		data->bpw_len = PCH_BUF_SIZE;
		data->cur_trans->len -= PCH_BUF_SIZE;
	}

	/* copy Tx Data */
	if (data->cur_trans->tx_buf != NULL) {
		if (*bpw == 8) {
			tx_buf = data->cur_trans->tx_buf;
			tx_dma_buf = dma->tx_buf_virt;
			for (i = 0; i < data->bpw_len; i++)
				*tx_dma_buf++ = *tx_buf++;
		} else {
			tx_sbuf = data->cur_trans->tx_buf;
			tx_dma_sbuf = dma->tx_buf_virt;
			for (i = 0; i < data->bpw_len; i++)
				*tx_dma_sbuf++ = *tx_sbuf++;
		}
	}

	/* Split the transfer into PCH_DMA_TRANS_SIZE sized chunks */
	if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
		if (data->bpw_len % PCH_DMA_TRANS_SIZE) {
			num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
			rem = data->bpw_len % PCH_DMA_TRANS_SIZE;
		} else {
			num = data->bpw_len / PCH_DMA_TRANS_SIZE;
			rem = PCH_DMA_TRANS_SIZE;
		}
		size = PCH_DMA_TRANS_SIZE;
	} else {
		num = 1;
		size = data->bpw_len;
		rem = data->bpw_len;
	}
	dev_dbg(&data->master->dev, "%s num=%d size=%d rem=%d\n",
		__func__, num, size, rem);
	spin_lock_irqsave(&data->lock, flags);

	/* set receive fifo threshold and transmit fifo threshold */
	pch_spi_setclr_reg(data->master, PCH_SPCR,
			   ((size - 1) << SPCR_RFIC_FIELD) |
			   (PCH_TX_THOLD << SPCR_TFIC_FIELD),
			   MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);

	spin_unlock_irqrestore(&data->lock, flags);

	/* RX */
	dma->sg_rx_p = kcalloc(num, sizeof(*dma->sg_rx_p), GFP_ATOMIC);
	if (!dma->sg_rx_p)
		return;

	sg_init_table(dma->sg_rx_p, num); /* Initialize SG table */
	/* offset, length setting */
	sg = dma->sg_rx_p;
	for (i = 0; i < num; i++, sg++) {
		if (i == (num - 2)) {
			sg->offset = size * i;
			sg->offset = sg->offset * (*bpw / 8);
			sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem,
				    sg->offset);
			sg_dma_len(sg) = rem;
		} else if (i == (num - 1)) {
			sg->offset = size * (i - 1) + rem;
			sg->offset = sg->offset * (*bpw / 8);
			sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
				    sg->offset);
			sg_dma_len(sg) = size;
		} else {
			sg->offset = size * i;
			sg->offset = sg->offset * (*bpw / 8);
			sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
				    sg->offset);
			sg_dma_len(sg) = size;
		}
		sg_dma_address(sg) = dma->rx_buf_dma + sg->offset;
	}
	sg = dma->sg_rx_p;
	desc_rx = dmaengine_prep_slave_sg(dma->chan_rx, sg,
					  num, DMA_DEV_TO_MEM,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_rx) {
		dev_err(&data->master->dev,
			"%s:dmaengine_prep_slave_sg Failed\n", __func__);
		return;
	}
	dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_FROM_DEVICE);
	desc_rx->callback = pch_dma_rx_complete;
	desc_rx->callback_param = data;
	dma->nent = num;
	dma->desc_rx = desc_rx;

	/* TX */
	if (data->bpw_len > PCH_MAX_FIFO_DEPTH) {
		head = PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE;
		if (data->bpw_len % PCH_DMA_TRANS_SIZE > 4) {
			num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
			rem = data->bpw_len % PCH_DMA_TRANS_SIZE - head;
		} else {
			num = data->bpw_len / PCH_DMA_TRANS_SIZE;
			rem = data->bpw_len % PCH_DMA_TRANS_SIZE +
			      PCH_DMA_TRANS_SIZE - head;
		}
		size = PCH_DMA_TRANS_SIZE;
	} else {
		num = 1;
		size = data->bpw_len;
		rem = data->bpw_len;
		head = 0;
	}

	dma->sg_tx_p = kcalloc(num, sizeof(*dma->sg_tx_p), GFP_ATOMIC);
	if (!dma->sg_tx_p)
		return;

	sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
	/* offset, length setting */
	sg = dma->sg_tx_p;
	for (i = 0; i < num; i++, sg++) {
		if (i == 0) {
			sg->offset = 0;
			sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size + head,
				    sg->offset);
			sg_dma_len(sg) = size + head;
		} else if (i == (num - 1)) {
			sg->offset = head + size * i;
			sg->offset = sg->offset * (*bpw / 8);
			sg_set_page(sg, virt_to_page(dma->tx_buf_virt), rem,
				    sg->offset);
			sg_dma_len(sg) = rem;
		} else {
			sg->offset = head + size * i;
			sg->offset = sg->offset * (*bpw / 8);
			sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size,
				    sg->offset);
			sg_dma_len(sg) = size;
		}
		sg_dma_address(sg) = dma->tx_buf_dma + sg->offset;
	}
	sg = dma->sg_tx_p;
	desc_tx = dmaengine_prep_slave_sg(dma->chan_tx,
					  sg, num, DMA_MEM_TO_DEV,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_tx) {
		dev_err(&data->master->dev,
			"%s:dmaengine_prep_slave_sg Failed\n", __func__);
		return;
	}
	dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_TO_DEVICE);
	desc_tx->callback = NULL;
	desc_tx->callback_param = data;
	dma->nent = num;
	dma->desc_tx = desc_tx;

	dev_dbg(&data->master->dev, "%s:Pulling down SSN low - writing 0x2 to SSNXCR\n", __func__);

	spin_lock_irqsave(&data->lock, flags);
	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
	desc_rx->tx_submit(desc_rx);
	desc_tx->tx_submit(desc_tx);
	spin_unlock_irqrestore(&data->lock, flags);

	/* reset transfer complete flag */
	data->transfer_complete = false;
}
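
/*
 * pch_spi_process_messages() - Work queue handler: pops messages off the
 * queue and walks each transfer, using either the DMA path (splitting the
 * data into PCH_BUF_SIZE chunks) or the PIO path, then completes the
 * message and re-arms itself while work remains.
 */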
static void pch_spi_process_messages(struct work_struct *pwork)
{
	struct spi_message *pmsg, *tmp;
	struct pch_spi_data *data;
	int bpw;

	data = container_of(pwork, struct pch_spi_data, work);
	dev_dbg(&data->master->dev, "%s data initialized\n", __func__);

	spin_lock(&data->lock);
	/* check if suspend has been initiated; if yes flush queue */
	if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) {
		dev_dbg(&data->master->dev,
			"%s suspend/remove initiated, flushing queue\n", __func__);
		list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
			pmsg->status = -EIO;

			if (pmsg->complete) {
				spin_unlock(&data->lock);
				pmsg->complete(pmsg->context);
				spin_lock(&data->lock);
			}

			/* delete from queue */
			list_del_init(&pmsg->queue);
		}

		spin_unlock(&data->lock);
		return;
	}

	data->bcurrent_msg_processing = true;
	dev_dbg(&data->master->dev,
		"%s Set data->bcurrent_msg_processing= true\n", __func__);

	/* Get the message from the queue and delete it from there. */
	data->current_msg = list_entry(data->queue.next, struct spi_message,
				       queue);

	list_del_init(&data->current_msg->queue);

	data->current_msg->status = 0;

	pch_spi_select_chip(data, data->current_msg->spi);

	spin_unlock(&data->lock);

	if (data->use_dma)
		pch_spi_request_dma(data,
				    data->current_msg->spi->bits_per_word);
	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
	do {
		int cnt;

		/* If we are already processing a message get the next
		   transfer structure from the message; otherwise retrieve
		   the 1st transfer request from the message. */
		spin_lock(&data->lock);
		if (data->cur_trans == NULL) {
			data->cur_trans =
				list_entry(data->current_msg->transfers.next,
					   struct spi_transfer, transfer_list);
			dev_dbg(&data->master->dev,
				"%s :Getting 1st transfer message\n",
				__func__);
		} else {
			data->cur_trans =
				list_entry(data->cur_trans->transfer_list.next,
					   struct spi_transfer, transfer_list);
			dev_dbg(&data->master->dev,
				"%s :Getting next transfer message\n",
				__func__);
		}
		spin_unlock(&data->lock);

		if (!data->cur_trans->len)
			goto out;
		cnt = (data->cur_trans->len - 1) / PCH_BUF_SIZE + 1;
		data->save_total_len = data->cur_trans->len;
		if (data->use_dma) {
			int i;
			char *save_rx_buf = data->cur_trans->rx_buf;

			for (i = 0; i < cnt; i++) {
				pch_spi_handle_dma(data, &bpw);
				if (!pch_spi_start_transfer(data)) {
					data->transfer_complete = true;
					data->current_msg->status = -EIO;
					data->current_msg->complete
						(data->current_msg->context);
					data->bcurrent_msg_processing = false;
					data->current_msg = NULL;
					data->cur_trans = NULL;
					goto out;
				}
				pch_spi_copy_rx_data_for_dma(data, bpw);
			}
			data->cur_trans->rx_buf = save_rx_buf;
		} else {
			pch_spi_set_tx(data, &bpw);
			pch_spi_set_ir(data);
			pch_spi_copy_rx_data(data, bpw);
			kfree(data->pkt_rx_buff);
			data->pkt_rx_buff = NULL;
			kfree(data->pkt_tx_buff);
			data->pkt_tx_buff = NULL;
		}
		/* increment message count */
		data->cur_trans->len = data->save_total_len;
		data->current_msg->actual_length += data->cur_trans->len;

		dev_dbg(&data->master->dev,
			"%s:data->current_msg->actual_length=%d\n",
			__func__, data->current_msg->actual_length);

		/* check for delay */
		if (data->cur_trans->delay_usecs) {
			dev_dbg(&data->master->dev, "%s:delay in usec=%d\n",
				__func__, data->cur_trans->delay_usecs);
			udelay(data->cur_trans->delay_usecs);
		}

		spin_lock(&data->lock);

		/* No more transfers in this message. */
		if ((data->cur_trans->transfer_list.next) ==
		    &(data->current_msg->transfers)) {
			pch_spi_nomore_transfer(data);
		}

		spin_unlock(&data->lock);

	} while (data->cur_trans != NULL);

out:
	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH);
	if (data->use_dma)
		pch_spi_release_dma(data);
}

static void pch_spi_free_resources(struct pch_spi_board_data *board_dat,
				   struct pch_spi_data *data)
{
	dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);

	flush_work(&data->work);
}

static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
				 struct pch_spi_data *data)
{
	dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);

	/* reset PCH SPI h/w */
	pch_spi_reset(data->master);
	dev_dbg(&board_dat->pdev->dev,
		"%s pch_spi_reset invoked successfully\n", __func__);

	dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__);

	return 0;
}

static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
			     struct pch_spi_data *data)
{
	struct pch_spi_dma_ctrl *dma;

	dma = &data->dma;
	if (dma->tx_buf_dma)
		dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
				  dma->tx_buf_virt, dma->tx_buf_dma);
	if (dma->rx_buf_dma)
		dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
				  dma->rx_buf_virt, dma->rx_buf_dma);
}

static int pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
			     struct pch_spi_data *data)
{
	struct pch_spi_dma_ctrl *dma;
	int ret;

	dma = &data->dma;
	ret = 0;
	/* Get consistent memory for Tx DMA */
	dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
				PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL);
	if (!dma->tx_buf_virt)
		ret = -ENOMEM;

	/* Get consistent memory for Rx DMA */
	dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
				PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
	if (!dma->rx_buf_virt)
		ret = -ENOMEM;

	return ret;
}

static int pch_spi_pd_probe(struct platform_device *plat_dev)
{
	int ret;
	struct spi_master *master;
	struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
	struct pch_spi_data *data;

	dev_dbg(&plat_dev->dev, "%s:debug\n", __func__);

	master = spi_alloc_master(&board_dat->pdev->dev,
				  sizeof(struct pch_spi_data));
	if (!master) {
		dev_err(&plat_dev->dev, "spi_alloc_master[%d] failed.\n",
			plat_dev->id);
		return -ENOMEM;
	}

	data = spi_master_get_devdata(master);
	data->master = master;

	platform_set_drvdata(plat_dev, data);

	/* base address + per-channel address offset */
	data->io_base_addr = pci_resource_start(board_dat->pdev, 1) +
			     PCH_ADDRESS_SIZE * plat_dev->id;
	data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0);
	if (!data->io_remap_addr) {
		dev_err(&plat_dev->dev, "%s pci_iomap failed\n", __func__);
		ret = -ENOMEM;
		goto err_pci_iomap;
	}
	data->io_remap_addr += PCH_ADDRESS_SIZE * plat_dev->id;

	dev_dbg(&plat_dev->dev, "[ch%d] remap_addr=%p\n",
		plat_dev->id, data->io_remap_addr);

	/* initialize members of SPI master */
	master->num_chipselect = PCH_MAX_CS;
	master->transfer = pch_spi_transfer;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
	master->max_speed_hz = PCH_MAX_BAUDRATE;

	data->board_dat = board_dat;
	data->plat_dev = plat_dev;
	data->n_curnt_chip = 255;
	data->status = STATUS_RUNNING;
	data->ch = plat_dev->id;
	data->use_dma = use_dma;

	INIT_LIST_HEAD(&data->queue);
	spin_lock_init(&data->lock);
	INIT_WORK(&data->work, pch_spi_process_messages);
	init_waitqueue_head(&data->wait);

	ret = pch_spi_get_resources(board_dat, data);
	if (ret) {
		dev_err(&plat_dev->dev, "%s fail(retval=%d)\n", __func__, ret);
		goto err_spi_get_resources;
	}

	ret = request_irq(board_dat->pdev->irq, pch_spi_handler,
			  IRQF_SHARED, KBUILD_MODNAME, data);
	if (ret) {
		dev_err(&plat_dev->dev,
			"%s request_irq failed\n", __func__);
		goto err_request_irq;
	}
	data->irq_reg_sts = true;

	pch_spi_set_master_mode(master);

	if (use_dma) {
		dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
		ret = pch_alloc_dma_buf(board_dat, data);
		if (ret)
			goto err_spi_register_master;
	}

	ret = spi_register_master(master);
	if (ret != 0) {
		dev_err(&plat_dev->dev,
			"%s spi_register_master FAILED\n", __func__);
		goto err_spi_register_master;
	}

	return 0;

err_spi_register_master:
	pch_free_dma_buf(board_dat, data);
	free_irq(board_dat->pdev->irq, data);
err_request_irq:
	pch_spi_free_resources(board_dat, data);
err_spi_get_resources:
	pci_iounmap(board_dat->pdev, data->io_remap_addr);
err_pci_iomap:
	spi_master_put(master);

	return ret;
}

static int pch_spi_pd_remove(struct platform_device *plat_dev)
{
	struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
	struct pch_spi_data *data = platform_get_drvdata(plat_dev);
	int count;
	unsigned long flags;

	dev_dbg(&plat_dev->dev, "%s:[ch%d] irq=%d\n",
		__func__, plat_dev->id, board_dat->pdev->irq);

	if (use_dma)
		pch_free_dma_buf(board_dat, data);

	/* check for any pending messages; no action is taken if the queue
	 * is still full; but at least we tried.  Unload anyway */
	count = 500;
	spin_lock_irqsave(&data->lock, flags);
	data->status = STATUS_EXITING;
	while ((list_empty(&data->queue) == 0) && --count) {
		dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n",
			__func__);
		spin_unlock_irqrestore(&data->lock, flags);
		msleep(PCH_SLEEP_TIME);
		spin_lock_irqsave(&data->lock, flags);
	}
	spin_unlock_irqrestore(&data->lock, flags);

	pch_spi_free_resources(board_dat, data);
	/* disable interrupts & free IRQ */
	if (data->irq_reg_sts) {
		/* disable interrupts */
		pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
		data->irq_reg_sts = false;
		free_irq(board_dat->pdev->irq, data);
	}

	pci_iounmap(board_dat->pdev, data->io_remap_addr);
	spi_unregister_master(data->master);

	return 0;
}

#ifdef CONFIG_PM
static int pch_spi_pd_suspend(struct platform_device *pd_dev,
			      pm_message_t state)
{
	u8 count;
	struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
	struct pch_spi_data *data = platform_get_drvdata(pd_dev);

	dev_dbg(&pd_dev->dev, "%s ENTRY\n", __func__);

	if (!board_dat) {
		dev_err(&pd_dev->dev,
			"%s pci_get_drvdata returned NULL\n", __func__);
		return -EFAULT;
	}

	/* check if the current message is processed:
	   only after that is done will the transfer be suspended */
	count = 255;
	while ((--count) > 0) {
		if (!(data->bcurrent_msg_processing))
			break;
		msleep(PCH_SLEEP_TIME);
	}

	/* Free IRQ */
	if (data->irq_reg_sts) {
		/* disable all interrupts */
		pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
		pch_spi_reset(data->master);
		free_irq(board_dat->pdev->irq, data);

		data->irq_reg_sts = false;
		dev_dbg(&pd_dev->dev,
			"%s free_irq invoked successfully.\n", __func__);
	}

	return 0;
}

static int pch_spi_pd_resume(struct platform_device *pd_dev)
{
	struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
	struct pch_spi_data *data = platform_get_drvdata(pd_dev);
	int retval;

	if (!board_dat) {
		dev_err(&pd_dev->dev,
			"%s pci_get_drvdata returned NULL\n", __func__);
		return -EFAULT;
	}

	if (!data->irq_reg_sts) {
		/* register IRQ */
		retval = request_irq(board_dat->pdev->irq, pch_spi_handler,
				     IRQF_SHARED, KBUILD_MODNAME, data);
		if (retval < 0) {
			dev_err(&pd_dev->dev,
				"%s request_irq failed\n", __func__);
			return retval;
		}

		/* reset PCH SPI h/w */
		pch_spi_reset(data->master);
		pch_spi_set_master_mode(data->master);
		data->irq_reg_sts = true;
	}
	return 0;
}
#else
#define pch_spi_pd_suspend NULL
#define pch_spi_pd_resume NULL
#endif

static struct platform_driver pch_spi_pd_driver = {
	.driver = {
		.name = "pch-spi",
	},
	.probe = pch_spi_pd_probe,
	.remove = pch_spi_pd_remove,
	.suspend = pch_spi_pd_suspend,
	.resume = pch_spi_pd_resume
};

static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct pch_spi_board_data *board_dat;
	struct platform_device *pd_dev = NULL;
	int retval;
	int i;
	struct pch_pd_dev_save *pd_dev_save;

	pd_dev_save = kzalloc(sizeof(*pd_dev_save), GFP_KERNEL);
	if (!pd_dev_save)
		return -ENOMEM;

	board_dat = kzalloc(sizeof(*board_dat), GFP_KERNEL);
	if (!board_dat) {
		retval = -ENOMEM;
		goto err_no_mem;
	}

	retval = pci_request_regions(pdev, KBUILD_MODNAME);
	if (retval) {
		dev_err(&pdev->dev, "%s request_region failed\n", __func__);
		goto pci_request_regions;
	}

	board_dat->pdev = pdev;
	board_dat->num = id->driver_data;
	pd_dev_save->num = id->driver_data;
	pd_dev_save->board_dat = board_dat;

	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "%s pci_enable_device failed\n", __func__);
		goto pci_enable_device;
	}

	for (i = 0; i < board_dat->num; i++) {
		pd_dev = platform_device_alloc("pch-spi", i);
		if (!pd_dev) {
			dev_err(&pdev->dev, "platform_device_alloc failed\n");
			retval = -ENOMEM;
			goto err_platform_device;
		}
		pd_dev_save->pd_save[i] = pd_dev;
		pd_dev->dev.parent = &pdev->dev;

		retval = platform_device_add_data(pd_dev, board_dat,
						  sizeof(*board_dat));
		if (retval) {
			dev_err(&pdev->dev,
				"platform_device_add_data failed\n");
			platform_device_put(pd_dev);
			goto err_platform_device;
		}

		retval = platform_device_add(pd_dev);
		if (retval) {
			dev_err(&pdev->dev, "platform_device_add failed\n");
			platform_device_put(pd_dev);
			goto err_platform_device;
		}
	}

	pci_set_drvdata(pdev, pd_dev_save);

	return 0;

err_platform_device:
	while (--i >= 0)
		platform_device_unregister(pd_dev_save->pd_save[i]);
	pci_disable_device(pdev);
pci_enable_device:
	pci_release_regions(pdev);
pci_request_regions:
	kfree(board_dat);
err_no_mem:
	kfree(pd_dev_save);

	return retval;
}

static void pch_spi_remove(struct pci_dev *pdev)
{
	int i;
	struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s ENTRY:pdev=%p\n", __func__, pdev);

	for (i = 0; i < pd_dev_save->num; i++)
		platform_device_unregister(pd_dev_save->pd_save[i]);

	pci_disable_device(pdev);
	pci_release_regions(pdev);
	kfree(pd_dev_save->board_dat);
	kfree(pd_dev_save);
}

#ifdef CONFIG_PM
static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);

	pd_dev_save->board_dat->suspend_sts = true;

	/* save config space */
	retval = pci_save_state(pdev);
	if (retval == 0) {
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3hot);
	} else {
		dev_err(&pdev->dev, "%s pci_save_state failed\n", __func__);
	}

	return retval;
}

static int pch_spi_resume(struct pci_dev *pdev)
{
	int retval;
	struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	retval = pci_enable_device(pdev);
	if (retval < 0) {
		dev_err(&pdev->dev,
			"%s pci_enable_device failed\n", __func__);
	} else {
		pci_enable_wake(pdev, PCI_D3hot, 0);

		/* set suspend status to false */
		pd_dev_save->board_dat->suspend_sts = false;
	}

	return retval;
}
#else
#define pch_spi_suspend NULL
#define pch_spi_resume NULL
#endif

static struct pci_driver pch_spi_pcidev_driver = {
	.name = "pch_spi",
	.id_table = pch_spi_pcidev_id,
	.probe = pch_spi_probe,
	.remove = pch_spi_remove,
	.suspend = pch_spi_suspend,
	.resume = pch_spi_resume,
};

static int __init pch_spi_init(void)
{
	int ret;

	ret = platform_driver_register(&pch_spi_pd_driver);
	if (ret)
		return ret;

	ret = pci_register_driver(&pch_spi_pcidev_driver);
	if (ret) {
		platform_driver_unregister(&pch_spi_pd_driver);
		return ret;
	}

	return 0;
}
module_init(pch_spi_init);

static void __exit pch_spi_exit(void)
{
	pci_unregister_driver(&pch_spi_pcidev_driver);
	platform_driver_unregister(&pch_spi_pd_driver);
}
module_exit(pch_spi_exit);

module_param(use_dma, int, 0644);
MODULE_PARM_DESC(use_dma,
		 "1 to use DMA for data transfers (default), 0 to disable");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor ML7xxx IOH SPI Driver");
MODULE_DEVICE_TABLE(pci, pch_spi_pcidev_id);