1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#include <linux/delay.h>
17#include <linux/pci.h>
18#include <linux/wait.h>
19#include <linux/spi/spi.h>
20#include <linux/interrupt.h>
21#include <linux/sched.h>
22#include <linux/spi/spidev.h>
23#include <linux/module.h>
24#include <linux/device.h>
25#include <linux/platform_device.h>
26
27#include <linux/dmaengine.h>
28#include <linux/pch_dma.h>
29
30
31#define PCH_SPCR 0x00
32#define PCH_SPBRR 0x04
33#define PCH_SPSR 0x08
34#define PCH_SPDWR 0x0C
35#define PCH_SPDRR 0x10
36#define PCH_SSNXCR 0x18
37#define PCH_SRST 0x1C
38#define PCH_ADDRESS_SIZE 0x20
39
40#define PCH_SPSR_TFD 0x000007C0
41#define PCH_SPSR_RFD 0x0000F800
42
43#define PCH_READABLE(x) (((x) & PCH_SPSR_RFD)>>11)
44#define PCH_WRITABLE(x) (((x) & PCH_SPSR_TFD)>>6)
45
46#define PCH_RX_THOLD 7
47#define PCH_RX_THOLD_MAX 15
48
49#define PCH_TX_THOLD 2
50
51#define PCH_MAX_BAUDRATE 5000000
52#define PCH_MAX_FIFO_DEPTH 16
53
54#define STATUS_RUNNING 1
55#define STATUS_EXITING 2
56#define PCH_SLEEP_TIME 10
57
58#define SSN_LOW 0x02U
59#define SSN_HIGH 0x03U
60#define SSN_NO_CONTROL 0x00U
61#define PCH_MAX_CS 0xFF
62#define PCI_DEVICE_ID_GE_SPI 0x8816
63
64#define SPCR_SPE_BIT (1 << 0)
65#define SPCR_MSTR_BIT (1 << 1)
66#define SPCR_LSBF_BIT (1 << 4)
67#define SPCR_CPHA_BIT (1 << 5)
68#define SPCR_CPOL_BIT (1 << 6)
69#define SPCR_TFIE_BIT (1 << 8)
70#define SPCR_RFIE_BIT (1 << 9)
71#define SPCR_FIE_BIT (1 << 10)
72#define SPCR_ORIE_BIT (1 << 11)
73#define SPCR_MDFIE_BIT (1 << 12)
74#define SPCR_FICLR_BIT (1 << 24)
75#define SPSR_TFI_BIT (1 << 0)
76#define SPSR_RFI_BIT (1 << 1)
77#define SPSR_FI_BIT (1 << 2)
78#define SPSR_ORF_BIT (1 << 3)
79#define SPBRR_SIZE_BIT (1 << 10)
80
81#define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|\
82 SPCR_ORIE_BIT|SPCR_MDFIE_BIT)
83
84#define SPCR_RFIC_FIELD 20
85#define SPCR_TFIC_FIELD 16
86
87#define MASK_SPBRR_SPBR_BITS ((1 << 10) - 1)
88#define MASK_RFIC_SPCR_BITS (0xf << SPCR_RFIC_FIELD)
89#define MASK_TFIC_SPCR_BITS (0xf << SPCR_TFIC_FIELD)
90
91#define PCH_CLOCK_HZ 50000000
92#define PCH_MAX_SPBR 1023
93
94
95#define PCI_VENDOR_ID_ROHM 0x10DB
96#define PCI_DEVICE_ID_ML7213_SPI 0x802c
97#define PCI_DEVICE_ID_ML7223_SPI 0x800F
98#define PCI_DEVICE_ID_ML7831_SPI 0x8816
99
100
101
102
103
104
105
106
107#define PCH_SPI_MAX_DEV 2
108
109#define PCH_BUF_SIZE 4096
110#define PCH_DMA_TRANS_SIZE 12
111
/* Module-wide switch: non-zero means DMA transfers are attempted when a
 * channel can be obtained; zero forces PIO.  Copied into each device's
 * pch_spi_data.use_dma.
 */
static int use_dma = 1;
113
/*
 * struct pch_spi_dma_ctrl - per-channel DMA state for one SPI controller
 *
 * Holds the dmaengine channels/descriptors plus the coherent bounce
 * buffers (PCH_BUF_SIZE bytes each) that transfers are staged through.
 */
struct pch_spi_dma_ctrl {
	struct dma_async_tx_descriptor *desc_tx;	/* in-flight Tx descriptor */
	struct dma_async_tx_descriptor *desc_rx;	/* in-flight Rx descriptor */
	struct pch_dma_slave param_tx;			/* slave config for Tx channel */
	struct pch_dma_slave param_rx;			/* slave config for Rx channel */
	struct dma_chan *chan_tx;			/* Tx channel, NULL if not requested */
	struct dma_chan *chan_rx;			/* Rx channel, NULL if not requested */
	struct scatterlist *sg_tx_p;			/* per-transfer Tx sg list (kcalloc'd) */
	struct scatterlist *sg_rx_p;			/* per-transfer Rx sg list (kcalloc'd) */
	struct scatterlist sg_tx;
	struct scatterlist sg_rx;
	int nent;					/* number of sg entries in use */
	void *tx_buf_virt;				/* CPU address of Tx bounce buffer */
	void *rx_buf_virt;				/* CPU address of Rx bounce buffer */
	dma_addr_t tx_buf_dma;				/* bus address of Tx bounce buffer */
	dma_addr_t rx_buf_dma;				/* bus address of Rx bounce buffer */
};
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
/*
 * struct pch_spi_data - per-controller driver state
 *
 * One instance per SPI channel; lives in the spi_master's devdata.
 * @lock protects @queue and register read-modify-write sequences;
 * @wait is woken when @transfer_complete becomes true.
 */
struct pch_spi_data {
	void __iomem *io_remap_addr;		/* mapped register base for this channel */
	unsigned long io_base_addr;		/* bus address of registers (used for DMA regs) */
	struct spi_master *master;		/* owning SPI master */
	struct work_struct work;		/* runs pch_spi_process_messages() */
	wait_queue_head_t wait;			/* waited on until transfer_complete */
	u8 transfer_complete;			/* set by ISR/DMA callback when done */
	u8 bcurrent_msg_processing;		/* a message is currently being handled */
	spinlock_t lock;			/* guards queue and SPCR/SPBRR updates */
	struct list_head queue;			/* pending spi_message list */
	u8 status;				/* STATUS_RUNNING / STATUS_EXITING */
	u32 bpw_len;				/* current transfer length in words */
	u8 transfer_active;			/* PIO transfer in progress (checked by ISR) */
	u32 tx_index;				/* next word to write in PIO mode */
	u32 rx_index;				/* next word to read in PIO mode */
	u16 *pkt_tx_buff;			/* PIO staging buffer (one u16 per word) */
	u16 *pkt_rx_buff;			/* PIO receive buffer (one u16 per word) */
	u8 n_curnt_chip;			/* chip_select of the cached slave */
	struct spi_device *current_chip;	/* slave last configured via setup_transfer */
	struct spi_message *current_msg;	/* message being processed, or NULL */
	struct spi_transfer *cur_trans;		/* transfer being processed, or NULL */
	struct pch_spi_board_data *board_dat;	/* shared PCI-level data */
	struct platform_device *plat_dev;	/* child platform device for this channel */
	int ch;					/* channel index (selects DMA chan_id pair) */
	struct pch_spi_dma_ctrl dma;		/* DMA state, valid when use_dma */
	int use_dma;				/* non-zero: use DMA for transfers */
	u8 irq_reg_sts;				/* IRQ handler registered flag */
	int save_total_len;			/* original cur_trans->len across DMA chunking */
};
194
195
196
197
198
199
200
/*
 * struct pch_spi_board_data - state shared by all channels of one PCI device
 */
struct pch_spi_board_data {
	struct pci_dev *pdev;	/* underlying PCI function */
	u8 suspend_sts;		/* non-zero while suspended; ISR and queueing bail out */
	int num;		/* number of SPI channels on this device */
};
206
/*
 * struct pch_pd_dev_save - bookkeeping for the child platform devices,
 * kept so they can be torn down on remove/suspend.
 */
struct pch_pd_dev_save {
	int num;						/* how many entries of pd_save are valid */
	struct platform_device *pd_save[PCH_SPI_MAX_DEV];	/* one per SPI channel */
	struct pch_spi_board_data *board_dat;			/* shared PCI-level data */
};
212
/*
 * Supported PCI IDs.  driver_data is the number of SPI channels the
 * device exposes (ML7213 has two, the others one).
 */
static const struct pci_device_id pch_spi_pcidev_id[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI), 1, },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_SPI), 1, },
	{ }
};
220
221
222
223
224
225
226
227static inline void pch_spi_writereg(struct spi_master *master, int idx, u32 val)
228{
229 struct pch_spi_data *data = spi_master_get_devdata(master);
230 iowrite32(val, (data->io_remap_addr + idx));
231}
232
233
234
235
236
237
238static inline u32 pch_spi_readreg(struct spi_master *master, int idx)
239{
240 struct pch_spi_data *data = spi_master_get_devdata(master);
241 return ioread32(data->io_remap_addr + idx);
242}
243
244static inline void pch_spi_setclr_reg(struct spi_master *master, int idx,
245 u32 set, u32 clr)
246{
247 u32 tmp = pch_spi_readreg(master, idx);
248 tmp = (tmp & ~clr) | set;
249 pch_spi_writereg(master, idx, tmp);
250}
251
252static void pch_spi_set_master_mode(struct spi_master *master)
253{
254 pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0);
255}
256
257
258
259
260
261static void pch_spi_clear_fifo(struct spi_master *master)
262{
263 pch_spi_setclr_reg(master, PCH_SPCR, SPCR_FICLR_BIT, 0);
264 pch_spi_setclr_reg(master, PCH_SPCR, 0, SPCR_FICLR_BIT);
265}
266
/*
 * pch_spi_handler_sub() - PIO interrupt service: drain Rx FIFO, refill Tx
 * @data: controller state
 * @reg_spsr_val: SPSR snapshot taken by the top-level handler
 * @io_remap_addr: mapped register base for this channel
 *
 * Called from pch_spi_handler() in interrupt context.  Acknowledges the
 * status bits, pulls every readable word from SPDRR into pkt_rx_buff and
 * writes a replacement word into SPDWR while Tx data remains.  Near the
 * end of the transfer it raises the Rx threshold and disables RFIE so
 * only the final FI interrupt completes the transfer.
 */
static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
				void __iomem *io_remap_addr)
{
	u32 n_read, tx_index, rx_index, bpw_len;
	u16 *pkt_rx_buffer, *pkt_tx_buff;
	int read_cnt;
	u32 reg_spcr_val;
	void __iomem *spsr;
	void __iomem *spdrr;
	void __iomem *spdwr;

	/* Acknowledge the interrupt by writing the status bits back. */
	spsr = io_remap_addr + PCH_SPSR;
	iowrite32(reg_spsr_val, spsr);

	if (data->transfer_active) {
		/* Work on local copies of the indices inside the loop. */
		rx_index = data->rx_index;
		tx_index = data->tx_index;
		bpw_len = data->bpw_len;
		pkt_rx_buffer = data->pkt_rx_buff;
		pkt_tx_buff = data->pkt_tx_buff;

		spdrr = io_remap_addr + PCH_SPDRR;
		spdwr = io_remap_addr + PCH_SPDWR;

		/* Words currently available in the Rx FIFO. */
		n_read = PCH_READABLE(reg_spsr_val);

		for (read_cnt = 0; (read_cnt < n_read); read_cnt++) {
			pkt_rx_buffer[rx_index++] = ioread32(spdrr);
			/* Keep the Tx FIFO fed while data remains. */
			if (tx_index < bpw_len)
				iowrite32(pkt_tx_buff[tx_index++], spdwr);
		}

		/*
		 * Tail of the transfer: fewer words than one FIFO remain.
		 * Disable the Rx-threshold interrupt and raise the Rx
		 * threshold to its maximum so only the final FI fires.
		 */
		if ((bpw_len - rx_index) <= PCH_MAX_FIFO_DEPTH) {
			reg_spcr_val = ioread32(io_remap_addr + PCH_SPCR);
			reg_spcr_val &= ~SPCR_RFIE_BIT;

			/* Reset Rx threshold field to PCH_RX_THOLD_MAX. */
			reg_spcr_val &= ~MASK_RFIC_SPCR_BITS;
			reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD);

			iowrite32(reg_spcr_val, (io_remap_addr + PCH_SPCR));
		}

		/* Publish the updated indices. */
		data->tx_index = tx_index;
		data->rx_index = rx_index;

		/* On FI with everything moved, finish the transfer. */
		if (reg_spsr_val & SPSR_FI_BIT) {
			if ((tx_index == bpw_len) && (rx_index == tx_index)) {
				/* Mask all transfer interrupts. */
				pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
						   PCH_ALL);

				/* Wake the thread sleeping in
				 * pch_spi_set_ir(). */
				data->transfer_complete = true;
				data->transfer_active = false;
				wake_up(&data->wait);
			} else {
				dev_vdbg(&data->master->dev,
					"%s : Transfer is not completed",
					__func__);
			}
		}
	}
}
335
336
337
338
339
340
341static irqreturn_t pch_spi_handler(int irq, void *dev_id)
342{
343 u32 reg_spsr_val;
344 void __iomem *spsr;
345 void __iomem *io_remap_addr;
346 irqreturn_t ret = IRQ_NONE;
347 struct pch_spi_data *data = dev_id;
348 struct pch_spi_board_data *board_dat = data->board_dat;
349
350 if (board_dat->suspend_sts) {
351 dev_dbg(&board_dat->pdev->dev,
352 "%s returning due to suspend\n", __func__);
353 return IRQ_NONE;
354 }
355
356 io_remap_addr = data->io_remap_addr;
357 spsr = io_remap_addr + PCH_SPSR;
358
359 reg_spsr_val = ioread32(spsr);
360
361 if (reg_spsr_val & SPSR_ORF_BIT) {
362 dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__);
363 if (data->current_msg->complete) {
364 data->transfer_complete = true;
365 data->current_msg->status = -EIO;
366 data->current_msg->complete(data->current_msg->context);
367 data->bcurrent_msg_processing = false;
368 data->current_msg = NULL;
369 data->cur_trans = NULL;
370 }
371 }
372
373 if (data->use_dma)
374 return IRQ_NONE;
375
376
377 if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
378 pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr);
379 ret = IRQ_HANDLED;
380 }
381
382 dev_dbg(&board_dat->pdev->dev, "%s EXIT return value=%d\n",
383 __func__, ret);
384
385 return ret;
386}
387
388
389
390
391
392
393static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz)
394{
395 u32 n_spbr = PCH_CLOCK_HZ / (speed_hz * 2);
396
397
398 if (n_spbr > PCH_MAX_SPBR)
399 n_spbr = PCH_MAX_SPBR;
400
401 pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, MASK_SPBRR_SPBR_BITS);
402}
403
404
405
406
407
408
409static void pch_spi_set_bits_per_word(struct spi_master *master,
410 u8 bits_per_word)
411{
412 if (bits_per_word == 8)
413 pch_spi_setclr_reg(master, PCH_SPBRR, 0, SPBRR_SIZE_BIT);
414 else
415 pch_spi_setclr_reg(master, PCH_SPBRR, SPBRR_SIZE_BIT, 0);
416}
417
418
419
420
421
/*
 * pch_spi_setup_transfer() - apply a slave's speed, word size and mode bits
 * @spi: slave whose settings are written to the controller
 *
 * Programs baud rate, bits-per-word and the LSBF/CPOL/CPHA control bits,
 * then clears the FIFOs so the next transfer starts clean.
 */
static void pch_spi_setup_transfer(struct spi_device *spi)
{
	u32 flags = 0;

	dev_dbg(&spi->dev, "%s SPBRR content =%x setting baud rate=%d\n",
		__func__, pch_spi_readreg(spi->master, PCH_SPBRR),
		spi->max_speed_hz);
	pch_spi_set_baud_rate(spi->master, spi->max_speed_hz);

	/* set bits per word */
	pch_spi_set_bits_per_word(spi->master, spi->bits_per_word);

	/*
	 * NOTE(review): LSBF is set when SPI_LSB_FIRST is NOT requested —
	 * the hardware bit's polarity appears inverted relative to its
	 * name; preserved as-is, confirm against the datasheet.
	 */
	if (!(spi->mode & SPI_LSB_FIRST))
		flags |= SPCR_LSBF_BIT;
	if (spi->mode & SPI_CPOL)
		flags |= SPCR_CPOL_BIT;
	if (spi->mode & SPI_CPHA)
		flags |= SPCR_CPHA_BIT;
	pch_spi_setclr_reg(spi->master, PCH_SPCR, flags,
			   (SPCR_LSBF_BIT | SPCR_CPOL_BIT | SPCR_CPHA_BIT));

	/* Clear the FIFO by toggling  FICLR to 1 and back to 0 */
	pch_spi_clear_fifo(spi->master);
}
446
447
448
449
450
451static void pch_spi_reset(struct spi_master *master)
452{
453
454 pch_spi_writereg(master, PCH_SRST, 0x1);
455
456
457 pch_spi_writereg(master, PCH_SRST, 0x0);
458}
459
/*
 * pch_spi_transfer() - spi_master->transfer hook: validate and queue a message
 * @pspi: slave the message targets
 * @pmsg: message to queue
 *
 * Validates every transfer (non-NULL buffer, non-zero length), rejects
 * the message while exiting or suspended, then appends it to the
 * driver queue under the lock and kicks the processing work item.
 *
 * Return: 0 on success, -EINVAL on bad transfer or while suspended,
 * -ESHUTDOWN while the driver is being removed.
 */
static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
{

	struct spi_transfer *transfer;
	struct pch_spi_data *data = spi_master_get_devdata(pspi->master);
	int retval;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	/* validate spi message and baud rate */
	list_for_each_entry(transfer, &pmsg->transfers, transfer_list) {
		if (!transfer->tx_buf && !transfer->rx_buf) {
			dev_err(&pspi->dev,
				"%s Tx and Rx buffer NULL\n", __func__);
			retval = -EINVAL;
			goto err_return_spinlock;
		}

		if (!transfer->len) {
			dev_err(&pspi->dev, "%s Transfer length invalid\n",
				__func__);
			retval = -EINVAL;
			goto err_return_spinlock;
		}

		dev_dbg(&pspi->dev,
			"%s Tx/Rx buffer valid. Transfer length valid\n",
			__func__);
	}
	spin_unlock_irqrestore(&data->lock, flags);

	/* We won't process any messages if we have been asked to terminate */
	if (data->status == STATUS_EXITING) {
		dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__);
		retval = -ESHUTDOWN;
		goto err_out;
	}

	/* If suspended ,return -EINVAL */
	if (data->board_dat->suspend_sts) {
		dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__);
		retval = -EINVAL;
		goto err_out;
	}

	/* set status of message */
	pmsg->actual_length = 0;
	dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status);

	pmsg->status = -EINPROGRESS;
	spin_lock_irqsave(&data->lock, flags);
	/* add message to queue */
	list_add_tail(&pmsg->queue, &data->queue);
	spin_unlock_irqrestore(&data->lock, flags);

	dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__);

	/* schedule work queue to run */
	schedule_work(&data->work);
	dev_dbg(&pspi->dev, "%s - Invoked queue work\n", __func__);

	retval = 0;

err_out:
	dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
	return retval;
err_return_spinlock:
	dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
	/* Error path while the validation lock is still held. */
	spin_unlock_irqrestore(&data->lock, flags);
	return retval;
}
530
531static inline void pch_spi_select_chip(struct pch_spi_data *data,
532 struct spi_device *pspi)
533{
534 if (data->current_chip != NULL) {
535 if (pspi->chip_select != data->n_curnt_chip) {
536 dev_dbg(&pspi->dev, "%s : different slave\n", __func__);
537 data->current_chip = NULL;
538 }
539 }
540
541 data->current_chip = pspi;
542
543 data->n_curnt_chip = data->current_chip->chip_select;
544
545 dev_dbg(&pspi->dev, "%s :Invoking pch_spi_setup_transfer\n", __func__);
546 pch_spi_setup_transfer(pspi);
547}
548
/*
 * pch_spi_set_tx() - prepare a PIO transfer and prime the Tx FIFO
 * @data: controller state (cur_trans/current_msg must be set)
 * @bpw: out parameter, receives the effective bits-per-word
 *
 * Applies per-transfer speed/word-size overrides, allocates the u16
 * staging buffers, copies the caller's Tx data into pkt_tx_buff, pulls
 * SSN low and writes up to one FIFO's worth of words.  On allocation
 * failure every queued message is failed with -ENOMEM and the function
 * returns with transfer_active still false.
 */
static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
{
	int size;
	u32 n_writes;
	int j;
	struct spi_message *pmsg, *tmp;
	const u8 *tx_buf;
	const u16 *tx_sbuf;

	/* Per-transfer speed override, if any. */
	if (data->cur_trans->speed_hz) {
		dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
		pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
	}

	/* Per-transfer bits-per-word override, if it differs. */
	if (data->cur_trans->bits_per_word &&
	    (data->current_msg->spi->bits_per_word != data->cur_trans->bits_per_word)) {
		dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
		pch_spi_set_bits_per_word(data->master,
					  data->cur_trans->bits_per_word);
		*bpw = data->cur_trans->bits_per_word;
	} else {
		*bpw = data->current_msg->spi->bits_per_word;
	}

	/* reset Tx/Rx index */
	data->tx_index = 0;
	data->rx_index = 0;

	/* Transfer length in words (len is bytes). */
	data->bpw_len = data->cur_trans->len / (*bpw / 8);

	/* find alloc size (in bytes; one u16 slot per byte of len) */
	size = data->cur_trans->len * sizeof(*data->pkt_tx_buff);

	/* allocate memory for pkt_tx_buff & pkt_rx_buffer */
	data->pkt_tx_buff = kzalloc(size, GFP_KERNEL);
	if (data->pkt_tx_buff != NULL) {
		data->pkt_rx_buff = kzalloc(size, GFP_KERNEL);
		if (!data->pkt_rx_buff)
			kfree(data->pkt_tx_buff);
	}

	if (!data->pkt_rx_buff) {
		/* Out of memory: fail every queued message. */
		list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
			pmsg->status = -ENOMEM;

			if (pmsg->complete)
				pmsg->complete(pmsg->context);

			/* delete from queue */
			list_del_init(&pmsg->queue);
		}
		return;
	}

	/* Stage caller Tx bytes/shorts as u16 words for the FIFO. */
	if (data->cur_trans->tx_buf != NULL) {
		if (*bpw == 8) {
			tx_buf = data->cur_trans->tx_buf;
			for (j = 0; j < data->bpw_len; j++)
				data->pkt_tx_buff[j] = *tx_buf++;
		} else {
			tx_sbuf = data->cur_trans->tx_buf;
			for (j = 0; j < data->bpw_len; j++)
				data->pkt_tx_buff[j] = *tx_sbuf++;
		}
	}

	/* Prime at most one FIFO depth; the ISR feeds the rest. */
	n_writes = data->bpw_len;
	if (n_writes > PCH_MAX_FIFO_DEPTH)
		n_writes = PCH_MAX_FIFO_DEPTH;

	dev_dbg(&data->master->dev,
		"\n%s:Pulling down SSN low - writing 0x2 to SSNXCR\n",
		__func__);
	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);

	for (j = 0; j < n_writes; j++)
		pch_spi_writereg(data->master, PCH_SPDWR, data->pkt_tx_buff[j]);

	/* update tx_index */
	data->tx_index = j;

	/* Mark the transfer in-flight for the ISR. */
	data->transfer_complete = false;
	data->transfer_active = true;
}
639
/*
 * pch_spi_nomore_transfer() - finish the current message
 * @data: controller state
 *
 * Completes current_msg with status 0, clears the processing state,
 * then either reschedules the work item if more messages are queued or
 * flushes the queue with -EIO when suspending/exiting.
 * Called with data->lock held by pch_spi_process_messages().
 */
static void pch_spi_nomore_transfer(struct pch_spi_data *data)
{
	struct spi_message *pmsg, *tmp;
	dev_dbg(&data->master->dev, "%s called\n", __func__);

	/* Invoke spi core completion callback */
	data->current_msg->status = 0;

	if (data->current_msg->complete) {
		dev_dbg(&data->master->dev,
			"%s:Invoking callback of SPI core\n", __func__);
		data->current_msg->complete(data->current_msg->context);
	}

	/* update status in global variable */
	data->bcurrent_msg_processing = false;

	dev_dbg(&data->master->dev,
		"%s:data->bcurrent_msg_processing = false\n", __func__);

	data->current_msg = NULL;
	data->cur_trans = NULL;

	/* check if we have items in list and not suspending
	 * return 1 if list empty */
	if ((list_empty(&data->queue) == 0) &&
	    (!data->board_dat->suspend_sts) &&
	    (data->status != STATUS_EXITING)) {

		/* We have some more work to do (either there is more tranint
		 * bpw;sfer requests in the current message or there are
		 * more messages) */
		dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__);
		schedule_work(&data->work);
	} else if (data->board_dat->suspend_sts ||
		   data->status == STATUS_EXITING) {
		dev_dbg(&data->master->dev,
			"%s suspend/remove initiated, flushing queue\n",
			__func__);
		/* Fail everything still queued. */
		list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
			pmsg->status = -EIO;

			if (pmsg->complete)
				pmsg->complete(pmsg->context);

			/* delete from queue */
			list_del_init(&pmsg->queue);
		}
	}
}
690
/*
 * pch_spi_set_ir() - enable interrupts, start the PIO transfer, wait for it
 * @data: controller state (pch_spi_set_tx() must have run first)
 *
 * Programs the Rx threshold and interrupt enables (plus SPE to start
 * the transfer), sleeps until the ISR sets transfer_complete, then
 * acknowledges status, masks the interrupts and clears the FIFOs.
 */
static void pch_spi_set_ir(struct pch_spi_data *data)
{
	/* enable interrupts, set threshold, enable SPI */
	if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH)
		/* Long transfer: interrupt at the normal Rx threshold. */
		pch_spi_setclr_reg(data->master, PCH_SPCR,
				   PCH_RX_THOLD << SPCR_RFIC_FIELD |
				   SPCR_FIE_BIT | SPCR_RFIE_BIT |
				   SPCR_ORIE_BIT | SPCR_SPE_BIT,
				   MASK_RFIC_SPCR_BITS | PCH_ALL);
	else
		/* Short transfer: no RFIE, rely on the final FI only. */
		pch_spi_setclr_reg(data->master, PCH_SPCR,
				   PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD |
				   SPCR_FIE_BIT | SPCR_ORIE_BIT |
				   SPCR_SPE_BIT,
				   MASK_RFIC_SPCR_BITS | PCH_ALL);

	/* Wait until the transfer completes; display debug data to show
	 * the transfer complete event occurred */
	dev_dbg(&data->master->dev,
		"%s:waiting for transfer to get over\n", __func__);

	wait_event_interruptible(data->wait, data->transfer_complete);

	/* Acknowledge any pending status bits by writing them back. */
	pch_spi_writereg(data->master, PCH_SPSR,
			 pch_spi_readreg(data->master, PCH_SPSR));
	/* Disable interrupts and SPI transfer */
	pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL | SPCR_SPE_BIT);
	/* clear FIFO */
	pch_spi_clear_fifo(data->master);
}
724
725static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw)
726{
727 int j;
728 u8 *rx_buf;
729 u16 *rx_sbuf;
730
731
732 if (!data->cur_trans->rx_buf)
733 return;
734
735 if (bpw == 8) {
736 rx_buf = data->cur_trans->rx_buf;
737 for (j = 0; j < data->bpw_len; j++)
738 *rx_buf++ = data->pkt_rx_buff[j] & 0xFF;
739 } else {
740 rx_sbuf = data->cur_trans->rx_buf;
741 for (j = 0; j < data->bpw_len; j++)
742 *rx_sbuf++ = data->pkt_rx_buff[j];
743 }
744}
745
/*
 * pch_spi_copy_rx_data_for_dma() - copy one DMA chunk to the caller's rx_buf
 * @data: controller state holding the DMA Rx bounce buffer
 * @bpw: effective bits per word
 *
 * Copies bpw_len words from the bounce buffer and then ADVANCES
 * cur_trans->rx_buf past the copied region, so consecutive chunks of a
 * long transfer land back-to-back.  The caller
 * (pch_spi_process_messages) saves and restores the original pointer.
 */
static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
{
	int j;
	u8 *rx_buf;
	u16 *rx_sbuf;
	const u8 *rx_dma_buf;
	const u16 *rx_dma_sbuf;

	/* copy Rx Data */
	if (!data->cur_trans->rx_buf)
		return;

	if (bpw == 8) {
		rx_buf = data->cur_trans->rx_buf;
		rx_dma_buf = data->dma.rx_buf_virt;
		for (j = 0; j < data->bpw_len; j++)
			*rx_buf++ = *rx_dma_buf++ & 0xFF;
		/* Advance for the next chunk (restored by the caller). */
		data->cur_trans->rx_buf = rx_buf;
	} else {
		rx_sbuf = data->cur_trans->rx_buf;
		rx_dma_sbuf = data->dma.rx_buf_virt;
		for (j = 0; j < data->bpw_len; j++)
			*rx_sbuf++ = *rx_dma_sbuf++;
		/* Advance for the next chunk (restored by the caller). */
		data->cur_trans->rx_buf = rx_sbuf;
	}
}
772
/*
 * pch_spi_start_transfer() - kick a prepared DMA transfer and wait for it
 * @data: controller state (pch_spi_handle_dma() must have run first)
 *
 * Enables SPE to start the transfer, waits (with timeout) for the DMA
 * Rx completion callback, syncs and releases the scatterlists, then
 * disables the transfer and clears status/FIFOs under the lock.
 *
 * Return: non-zero on completion, 0 if the wait timed out.
 */
static int pch_spi_start_transfer(struct pch_spi_data *data)
{
	struct pch_spi_dma_ctrl *dma;
	unsigned long flags;
	int rtn;

	dma = &data->dma;

	spin_lock_irqsave(&data->lock, flags);

	/* Start the transfer: set SPE, keep transfer interrupts masked. */
	pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL);

	spin_unlock_irqrestore(&data->lock, flags);

	/* Wait until the transfer completes; display debug data to show
	 * the transfer complete event occurred.
	 * NOTE(review): the timeout is msecs_to_jiffies(2 * HZ), which is
	 * HZ-dependent; a fixed 2000 ms was probably intended — confirm. */
	dev_dbg(&data->master->dev,
		"%s:waiting for transfer to get over\n", __func__);
	rtn = wait_event_interruptible_timeout(data->wait,
					       data->transfer_complete,
					       msecs_to_jiffies(2 * HZ));
	if (!rtn)
		dev_err(&data->master->dev,
			"%s wait-event timeout\n", __func__);

	dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
			    DMA_FROM_DEVICE);

	/* NOTE(review): the Tx scatterlist is synced with DMA_FROM_DEVICE;
	 * DMA_TO_DEVICE looks intended — preserved as-is, confirm. */
	dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,
			    DMA_FROM_DEVICE);
	memset(data->dma.tx_buf_virt, 0, PAGE_SIZE);

	/* Acknowledge the descriptors and free the scatterlists. */
	async_tx_ack(dma->desc_rx);
	async_tx_ack(dma->desc_tx);
	kfree(dma->sg_tx_p);
	kfree(dma->sg_rx_p);

	spin_lock_irqsave(&data->lock, flags);

	/* clear fifo threshold, disable interrupts, disable SPI transfer */
	pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
			   MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS | PCH_ALL |
			   SPCR_SPE_BIT);
	/* clear status register by writing the pending bits back */
	pch_spi_writereg(data->master, PCH_SPSR,
			 pch_spi_readreg(data->master, PCH_SPSR));
	/* clear FIFO */
	pch_spi_clear_fifo(data->master);

	spin_unlock_irqrestore(&data->lock, flags);

	return rtn;
}
827
828static void pch_dma_rx_complete(void *arg)
829{
830 struct pch_spi_data *data = arg;
831
832
833 data->transfer_complete = true;
834 wake_up_interruptible(&data->wait);
835}
836
837static bool pch_spi_filter(struct dma_chan *chan, void *slave)
838{
839 struct pch_dma_slave *param = slave;
840
841 if ((chan->chan_id == param->chan_id) &&
842 (param->dma_dev == chan->device->dev)) {
843 chan->private = param;
844 return true;
845 } else {
846 return false;
847 }
848}
849
850static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
851{
852 dma_cap_mask_t mask;
853 struct dma_chan *chan;
854 struct pci_dev *dma_dev;
855 struct pch_dma_slave *param;
856 struct pch_spi_dma_ctrl *dma;
857 unsigned int width;
858
859 if (bpw == 8)
860 width = PCH_DMA_WIDTH_1_BYTE;
861 else
862 width = PCH_DMA_WIDTH_2_BYTES;
863
864 dma = &data->dma;
865 dma_cap_zero(mask);
866 dma_cap_set(DMA_SLAVE, mask);
867
868
869 dma_dev = pci_get_slot(data->board_dat->pdev->bus,
870 PCI_DEVFN(PCI_SLOT(data->board_dat->pdev->devfn), 0));
871
872
873 param = &dma->param_tx;
874 param->dma_dev = &dma_dev->dev;
875 param->chan_id = data->ch * 2; ;
876 param->tx_reg = data->io_base_addr + PCH_SPDWR;
877 param->width = width;
878 chan = dma_request_channel(mask, pch_spi_filter, param);
879 if (!chan) {
880 dev_err(&data->master->dev,
881 "ERROR: dma_request_channel FAILS(Tx)\n");
882 data->use_dma = 0;
883 return;
884 }
885 dma->chan_tx = chan;
886
887
888 param = &dma->param_rx;
889 param->dma_dev = &dma_dev->dev;
890 param->chan_id = data->ch * 2 + 1; ;
891 param->rx_reg = data->io_base_addr + PCH_SPDRR;
892 param->width = width;
893 chan = dma_request_channel(mask, pch_spi_filter, param);
894 if (!chan) {
895 dev_err(&data->master->dev,
896 "ERROR: dma_request_channel FAILS(Rx)\n");
897 dma_release_channel(dma->chan_tx);
898 dma->chan_tx = NULL;
899 data->use_dma = 0;
900 return;
901 }
902 dma->chan_rx = chan;
903}
904
905static void pch_spi_release_dma(struct pch_spi_data *data)
906{
907 struct pch_spi_dma_ctrl *dma;
908
909 dma = &data->dma;
910 if (dma->chan_tx) {
911 dma_release_channel(dma->chan_tx);
912 dma->chan_tx = NULL;
913 }
914 if (dma->chan_rx) {
915 dma_release_channel(dma->chan_rx);
916 dma->chan_rx = NULL;
917 }
918}
919
920static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
921{
922 const u8 *tx_buf;
923 const u16 *tx_sbuf;
924 u8 *tx_dma_buf;
925 u16 *tx_dma_sbuf;
926 struct scatterlist *sg;
927 struct dma_async_tx_descriptor *desc_tx;
928 struct dma_async_tx_descriptor *desc_rx;
929 int num;
930 int i;
931 int size;
932 int rem;
933 int head;
934 unsigned long flags;
935 struct pch_spi_dma_ctrl *dma;
936
937 dma = &data->dma;
938
939
940 if (data->cur_trans->speed_hz) {
941 dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
942 spin_lock_irqsave(&data->lock, flags);
943 pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
944 spin_unlock_irqrestore(&data->lock, flags);
945 }
946
947
948 if (data->cur_trans->bits_per_word &&
949 (data->current_msg->spi->bits_per_word !=
950 data->cur_trans->bits_per_word)) {
951 dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
952 spin_lock_irqsave(&data->lock, flags);
953 pch_spi_set_bits_per_word(data->master,
954 data->cur_trans->bits_per_word);
955 spin_unlock_irqrestore(&data->lock, flags);
956 *bpw = data->cur_trans->bits_per_word;
957 } else {
958 *bpw = data->current_msg->spi->bits_per_word;
959 }
960 data->bpw_len = data->cur_trans->len / (*bpw / 8);
961
962 if (data->bpw_len > PCH_BUF_SIZE) {
963 data->bpw_len = PCH_BUF_SIZE;
964 data->cur_trans->len -= PCH_BUF_SIZE;
965 }
966
967
968 if (data->cur_trans->tx_buf != NULL) {
969 if (*bpw == 8) {
970 tx_buf = data->cur_trans->tx_buf;
971 tx_dma_buf = dma->tx_buf_virt;
972 for (i = 0; i < data->bpw_len; i++)
973 *tx_dma_buf++ = *tx_buf++;
974 } else {
975 tx_sbuf = data->cur_trans->tx_buf;
976 tx_dma_sbuf = dma->tx_buf_virt;
977 for (i = 0; i < data->bpw_len; i++)
978 *tx_dma_sbuf++ = *tx_sbuf++;
979 }
980 }
981
982
983 if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
984 if (data->bpw_len % PCH_DMA_TRANS_SIZE) {
985 num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
986 rem = data->bpw_len % PCH_DMA_TRANS_SIZE;
987 } else {
988 num = data->bpw_len / PCH_DMA_TRANS_SIZE;
989 rem = PCH_DMA_TRANS_SIZE;
990 }
991 size = PCH_DMA_TRANS_SIZE;
992 } else {
993 num = 1;
994 size = data->bpw_len;
995 rem = data->bpw_len;
996 }
997 dev_dbg(&data->master->dev, "%s num=%d size=%d rem=%d\n",
998 __func__, num, size, rem);
999 spin_lock_irqsave(&data->lock, flags);
1000
1001
1002 pch_spi_setclr_reg(data->master, PCH_SPCR,
1003 ((size - 1) << SPCR_RFIC_FIELD) |
1004 (PCH_TX_THOLD << SPCR_TFIC_FIELD),
1005 MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);
1006
1007 spin_unlock_irqrestore(&data->lock, flags);
1008
1009
1010 dma->sg_rx_p = kcalloc(num, sizeof(*dma->sg_rx_p), GFP_ATOMIC);
1011 sg_init_table(dma->sg_rx_p, num);
1012
1013 sg = dma->sg_rx_p;
1014 for (i = 0; i < num; i++, sg++) {
1015 if (i == (num - 2)) {
1016 sg->offset = size * i;
1017 sg->offset = sg->offset * (*bpw / 8);
1018 sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem,
1019 sg->offset);
1020 sg_dma_len(sg) = rem;
1021 } else if (i == (num - 1)) {
1022 sg->offset = size * (i - 1) + rem;
1023 sg->offset = sg->offset * (*bpw / 8);
1024 sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
1025 sg->offset);
1026 sg_dma_len(sg) = size;
1027 } else {
1028 sg->offset = size * i;
1029 sg->offset = sg->offset * (*bpw / 8);
1030 sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
1031 sg->offset);
1032 sg_dma_len(sg) = size;
1033 }
1034 sg_dma_address(sg) = dma->rx_buf_dma + sg->offset;
1035 }
1036 sg = dma->sg_rx_p;
1037 desc_rx = dmaengine_prep_slave_sg(dma->chan_rx, sg,
1038 num, DMA_DEV_TO_MEM,
1039 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1040 if (!desc_rx) {
1041 dev_err(&data->master->dev,
1042 "%s:dmaengine_prep_slave_sg Failed\n", __func__);
1043 return;
1044 }
1045 dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_FROM_DEVICE);
1046 desc_rx->callback = pch_dma_rx_complete;
1047 desc_rx->callback_param = data;
1048 dma->nent = num;
1049 dma->desc_rx = desc_rx;
1050
1051
1052 if (data->bpw_len > PCH_MAX_FIFO_DEPTH) {
1053 head = PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE;
1054 if (data->bpw_len % PCH_DMA_TRANS_SIZE > 4) {
1055 num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
1056 rem = data->bpw_len % PCH_DMA_TRANS_SIZE - head;
1057 } else {
1058 num = data->bpw_len / PCH_DMA_TRANS_SIZE;
1059 rem = data->bpw_len % PCH_DMA_TRANS_SIZE +
1060 PCH_DMA_TRANS_SIZE - head;
1061 }
1062 size = PCH_DMA_TRANS_SIZE;
1063 } else {
1064 num = 1;
1065 size = data->bpw_len;
1066 rem = data->bpw_len;
1067 head = 0;
1068 }
1069
1070 dma->sg_tx_p = kcalloc(num, sizeof(*dma->sg_tx_p), GFP_ATOMIC);
1071 sg_init_table(dma->sg_tx_p, num);
1072
1073 sg = dma->sg_tx_p;
1074 for (i = 0; i < num; i++, sg++) {
1075 if (i == 0) {
1076 sg->offset = 0;
1077 sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size + head,
1078 sg->offset);
1079 sg_dma_len(sg) = size + head;
1080 } else if (i == (num - 1)) {
1081 sg->offset = head + size * i;
1082 sg->offset = sg->offset * (*bpw / 8);
1083 sg_set_page(sg, virt_to_page(dma->tx_buf_virt), rem,
1084 sg->offset);
1085 sg_dma_len(sg) = rem;
1086 } else {
1087 sg->offset = head + size * i;
1088 sg->offset = sg->offset * (*bpw / 8);
1089 sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size,
1090 sg->offset);
1091 sg_dma_len(sg) = size;
1092 }
1093 sg_dma_address(sg) = dma->tx_buf_dma + sg->offset;
1094 }
1095 sg = dma->sg_tx_p;
1096 desc_tx = dmaengine_prep_slave_sg(dma->chan_tx,
1097 sg, num, DMA_MEM_TO_DEV,
1098 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1099 if (!desc_tx) {
1100 dev_err(&data->master->dev,
1101 "%s:dmaengine_prep_slave_sg Failed\n", __func__);
1102 return;
1103 }
1104 dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_TO_DEVICE);
1105 desc_tx->callback = NULL;
1106 desc_tx->callback_param = data;
1107 dma->nent = num;
1108 dma->desc_tx = desc_tx;
1109
1110 dev_dbg(&data->master->dev, "%s:Pulling down SSN low - writing 0x2 to SSNXCR\n", __func__);
1111
1112 spin_lock_irqsave(&data->lock, flags);
1113 pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
1114 desc_rx->tx_submit(desc_rx);
1115 desc_tx->tx_submit(desc_tx);
1116 spin_unlock_irqrestore(&data->lock, flags);
1117
1118
1119 data->transfer_complete = false;
1120}
1121
/*
 * pch_spi_process_messages() - work item that drains the message queue
 * @pwork: embedded work_struct inside pch_spi_data
 *
 * Dequeues one message, selects its slave, then walks its transfers.
 * Each transfer goes through either the DMA path (chunked into
 * PCH_BUF_SIZE pieces, bounce-buffered) or the PIO path (set_tx /
 * set_ir / copy_rx).  pch_spi_nomore_transfer() completes the message
 * and reschedules this work if more messages are queued.
 */
static void pch_spi_process_messages(struct work_struct *pwork)
{
	struct spi_message *pmsg, *tmp;
	struct pch_spi_data *data;
	int bpw;

	data = container_of(pwork, struct pch_spi_data, work);
	dev_dbg(&data->master->dev, "%s data initialized\n", __func__);

	spin_lock(&data->lock);
	/* check if suspend has been initiated;if yes flush queue */
	if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) {
		dev_dbg(&data->master->dev,
			"%s suspend/remove initiated, flushing queue\n", __func__);
		list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
			pmsg->status = -EIO;

			/* Drop the lock around the completion callback. */
			if (pmsg->complete) {
				spin_unlock(&data->lock);
				pmsg->complete(pmsg->context);
				spin_lock(&data->lock);
			}

			/* delete from queue */
			list_del_init(&pmsg->queue);
		}

		spin_unlock(&data->lock);
		return;
	}

	data->bcurrent_msg_processing = true;
	dev_dbg(&data->master->dev,
		"%s Set data->bcurrent_msg_processing= true\n", __func__);

	/* Get the message from the queue and delete it from there. */
	data->current_msg = list_entry(data->queue.next, struct spi_message,
				       queue);

	list_del_init(&data->current_msg->queue);

	data->current_msg->status = 0;

	pch_spi_select_chip(data, data->current_msg->spi);

	spin_unlock(&data->lock);

	if (data->use_dma)
		pch_spi_request_dma(data,
				    data->current_msg->spi->bits_per_word);
	/* Let the hardware drive SSN during the transfers. */
	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
	do {
		int cnt;
		/* If we are already processing a message get the next
		transfer structure from the message otherwise retrieve
		the 1st transfer request from the message. */
		spin_lock(&data->lock);
		if (data->cur_trans == NULL) {
			data->cur_trans =
				list_entry(data->current_msg->transfers.next,
					   struct spi_transfer, transfer_list);
			dev_dbg(&data->master->dev,
				"%s :Getting 1st transfer message\n",
				__func__);
		} else {
			data->cur_trans =
				list_entry(data->cur_trans->transfer_list.next,
					   struct spi_transfer, transfer_list);
			dev_dbg(&data->master->dev,
				"%s :Getting next transfer message\n",
				__func__);
		}
		spin_unlock(&data->lock);

		if (!data->cur_trans->len)
			goto out;
		/* Number of PCH_BUF_SIZE chunks for the DMA path. */
		cnt = (data->cur_trans->len - 1) / PCH_BUF_SIZE + 1;
		data->save_total_len = data->cur_trans->len;
		if (data->use_dma) {
			int i;
			/* rx_buf is advanced per chunk by
			 * pch_spi_copy_rx_data_for_dma(); restore after. */
			char *save_rx_buf = data->cur_trans->rx_buf;
			for (i = 0; i < cnt; i ++) {
				pch_spi_handle_dma(data, &bpw);
				if (!pch_spi_start_transfer(data)) {
					/* Timeout: fail the message. */
					data->transfer_complete = true;
					data->current_msg->status = -EIO;
					data->current_msg->complete
						(data->current_msg->context);
					data->bcurrent_msg_processing = false;
					data->current_msg = NULL;
					data->cur_trans = NULL;
					goto out;
				}
				pch_spi_copy_rx_data_for_dma(data, bpw);
			}
			data->cur_trans->rx_buf = save_rx_buf;
		} else {
			pch_spi_set_tx(data, &bpw);
			pch_spi_set_ir(data);
			pch_spi_copy_rx_data(data, bpw);
			kfree(data->pkt_rx_buff);
			data->pkt_rx_buff = NULL;
			kfree(data->pkt_tx_buff);
			data->pkt_tx_buff = NULL;
		}
		/* increment message count (len was consumed by chunking) */
		data->cur_trans->len = data->save_total_len;
		data->current_msg->actual_length += data->cur_trans->len;

		dev_dbg(&data->master->dev,
			"%s:data->current_msg->actual_length=%d\n",
			__func__, data->current_msg->actual_length);

		/* check for delay */
		if (data->cur_trans->delay_usecs) {
			dev_dbg(&data->master->dev, "%s:delay in usec=%d\n",
				__func__, data->cur_trans->delay_usecs);
			udelay(data->cur_trans->delay_usecs);
		}

		spin_lock(&data->lock);

		/* No more transfer in this message: complete it. */
		if ((data->cur_trans->transfer_list.next) ==
		    &(data->current_msg->transfers)) {
			pch_spi_nomore_transfer(data);
		}

		spin_unlock(&data->lock);

	} while (data->cur_trans != NULL);

out:
	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH);
	if (data->use_dma)
		pch_spi_release_dma(data);
}
1259
1260static void pch_spi_free_resources(struct pch_spi_board_data *board_dat,
1261 struct pch_spi_data *data)
1262{
1263 dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
1264
1265 flush_work(&data->work);
1266}
1267
/*
 * pch_spi_get_resources() - per-channel hardware initialization.
 * @board_dat: board-level (PCI) context; used only for debug prints
 * @data:      per-channel driver state (provides the spi_master handle)
 *
 * Resets the channel's SPI block to a known state. Always returns 0.
 */
static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
				 struct pch_spi_data *data)
{
	dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);

	/* Put the SPI block into its power-on default state. */
	pch_spi_reset(data->master);
	dev_dbg(&board_dat->pdev->dev,
		"%s pch_spi_reset invoked successfully\n", __func__);

	/* NOTE(review): stale debug message — irq_reg_sts is actually set in
	 * pch_spi_pd_probe() after request_irq(), not here. */
	dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__);

	return 0;
}
1282
1283static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
1284 struct pch_spi_data *data)
1285{
1286 struct pch_spi_dma_ctrl *dma;
1287
1288 dma = &data->dma;
1289 if (dma->tx_buf_dma)
1290 dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
1291 dma->tx_buf_virt, dma->tx_buf_dma);
1292 if (dma->rx_buf_dma)
1293 dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
1294 dma->rx_buf_virt, dma->rx_buf_dma);
1295}
1296
1297static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
1298 struct pch_spi_data *data)
1299{
1300 struct pch_spi_dma_ctrl *dma;
1301
1302 dma = &data->dma;
1303
1304 dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
1305 PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL);
1306
1307 dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
1308 PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
1309}
1310
/*
 * pch_spi_pd_probe() - probe one SPI channel platform device.
 * @plat_dev: channel platform device created by pch_spi_probe(); its id
 *            selects which channel of the PCI function this instance drives.
 *
 * Allocates an spi_master, maps this channel's register window (BAR 1,
 * offset id * PCH_ADDRESS_SIZE), initializes the message queue and worker,
 * requests the (shared) PCI IRQ, optionally allocates DMA bounce buffers,
 * and registers the master with the SPI core.
 *
 * Returns 0 on success or a negative errno; the goto ladder unwinds each
 * acquired resource in reverse order.
 */
static int pch_spi_pd_probe(struct platform_device *plat_dev)
{
	int ret;
	struct spi_master *master;
	struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
	struct pch_spi_data *data;

	dev_dbg(&plat_dev->dev, "%s:debug\n", __func__);

	/* Per-channel state lives in the master's devdata (zeroed). */
	master = spi_alloc_master(&board_dat->pdev->dev,
				  sizeof(struct pch_spi_data));
	if (!master) {
		dev_err(&plat_dev->dev, "spi_alloc_master[%d] failed.\n",
			plat_dev->id);
		return -ENOMEM;
	}

	data = spi_master_get_devdata(master);
	data->master = master;

	platform_set_drvdata(plat_dev, data);

	/* Each channel occupies a PCH_ADDRESS_SIZE slice of BAR 1. */
	data->io_base_addr = pci_resource_start(board_dat->pdev, 1) +
					PCH_ADDRESS_SIZE * plat_dev->id;
	data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0);
	if (!data->io_remap_addr) {
		dev_err(&plat_dev->dev, "%s pci_iomap failed\n", __func__);
		ret = -ENOMEM;
		goto err_pci_iomap;
	}
	/* Advance the mapping to this channel's register slice. */
	data->io_remap_addr += PCH_ADDRESS_SIZE * plat_dev->id;

	dev_dbg(&plat_dev->dev, "[ch%d] remap_addr=%p\n",
		plat_dev->id, data->io_remap_addr);

	/* Controller capabilities advertised to the SPI core. */
	master->num_chipselect = PCH_MAX_CS;
	master->transfer = pch_spi_transfer;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
	master->max_speed_hz = PCH_MAX_BAUDRATE;

	data->board_dat = board_dat;
	data->plat_dev = plat_dev;
	/* 255 = "no chip selected yet"; forces setup on first transfer. */
	data->n_curnt_chip = 255;
	data->status = STATUS_RUNNING;
	data->ch = plat_dev->id;
	data->use_dma = use_dma;

	INIT_LIST_HEAD(&data->queue);
	spin_lock_init(&data->lock);
	INIT_WORK(&data->work, pch_spi_process_messages);
	init_waitqueue_head(&data->wait);

	ret = pch_spi_get_resources(board_dat, data);
	if (ret) {
		dev_err(&plat_dev->dev, "%s fail(retval=%d)\n", __func__, ret);
		goto err_spi_get_resources;
	}

	/* IRQ is shared by all channels of the PCI function. */
	ret = request_irq(board_dat->pdev->irq, pch_spi_handler,
			  IRQF_SHARED, KBUILD_MODNAME, data);
	if (ret) {
		dev_err(&plat_dev->dev,
			"%s request_irq failed\n", __func__);
		goto err_request_irq;
	}
	data->irq_reg_sts = true;

	pch_spi_set_master_mode(master);

	if (use_dma) {
		dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
		pch_alloc_dma_buf(board_dat, data);
	}

	ret = spi_register_master(master);
	if (ret != 0) {
		dev_err(&plat_dev->dev,
			"%s spi_register_master FAILED\n", __func__);
		goto err_spi_register_master;
	}

	return 0;

err_spi_register_master:
	/* Safe even without DMA: pch_free_dma_buf() checks the handles. */
	pch_free_dma_buf(board_dat, data);
	free_irq(board_dat->pdev->irq, data);
err_request_irq:
	pch_spi_free_resources(board_dat, data);
err_spi_get_resources:
	pci_iounmap(board_dat->pdev, data->io_remap_addr);
err_pci_iomap:
	spi_master_put(master);

	return ret;
}
1409
/*
 * pch_spi_pd_remove() - tear down one SPI channel platform device.
 * @plat_dev: channel platform device being removed
 *
 * Marks the channel as exiting, waits (up to 500 * PCH_SLEEP_TIME ms) for
 * the message queue to drain, flushes the worker, masks interrupts, frees
 * the IRQ, unmaps the registers and unregisters the master. Returns 0.
 */
static int pch_spi_pd_remove(struct platform_device *plat_dev)
{
	struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
	struct pch_spi_data *data = platform_get_drvdata(plat_dev);
	int count;
	unsigned long flags;

	dev_dbg(&plat_dev->dev, "%s:[ch%d] irq=%d\n",
		__func__, plat_dev->id, board_dat->pdev->irq);

	if (use_dma)
		pch_free_dma_buf(board_dat, data);

	/* Stop accepting new messages, then poll until the queue drains
	 * or the retry budget is exhausted; the lock is dropped around
	 * each sleep so the worker can make progress. */
	count = 500;
	spin_lock_irqsave(&data->lock, flags);
	data->status = STATUS_EXITING;
	while ((list_empty(&data->queue) == 0) && --count) {
		dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n",
			__func__);
		spin_unlock_irqrestore(&data->lock, flags);
		msleep(PCH_SLEEP_TIME);
		spin_lock_irqsave(&data->lock, flags);
	}
	spin_unlock_irqrestore(&data->lock, flags);

	pch_spi_free_resources(board_dat, data);

	if (data->irq_reg_sts) {
		/* Mask every SPI interrupt source before dropping the IRQ. */
		pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
		data->irq_reg_sts = false;
		free_irq(board_dat->pdev->irq, data);
	}

	pci_iounmap(board_dat->pdev, data->io_remap_addr);
	spi_unregister_master(data->master);

	return 0;
}
1451#ifdef CONFIG_PM
1452static int pch_spi_pd_suspend(struct platform_device *pd_dev,
1453 pm_message_t state)
1454{
1455 u8 count;
1456 struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
1457 struct pch_spi_data *data = platform_get_drvdata(pd_dev);
1458
1459 dev_dbg(&pd_dev->dev, "%s ENTRY\n", __func__);
1460
1461 if (!board_dat) {
1462 dev_err(&pd_dev->dev,
1463 "%s pci_get_drvdata returned NULL\n", __func__);
1464 return -EFAULT;
1465 }
1466
1467
1468
1469 count = 255;
1470 while ((--count) > 0) {
1471 if (!(data->bcurrent_msg_processing))
1472 break;
1473 msleep(PCH_SLEEP_TIME);
1474 }
1475
1476
1477 if (data->irq_reg_sts) {
1478
1479 pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
1480 pch_spi_reset(data->master);
1481 free_irq(board_dat->pdev->irq, data);
1482
1483 data->irq_reg_sts = false;
1484 dev_dbg(&pd_dev->dev,
1485 "%s free_irq invoked successfully.\n", __func__);
1486 }
1487
1488 return 0;
1489}
1490
1491static int pch_spi_pd_resume(struct platform_device *pd_dev)
1492{
1493 struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
1494 struct pch_spi_data *data = platform_get_drvdata(pd_dev);
1495 int retval;
1496
1497 if (!board_dat) {
1498 dev_err(&pd_dev->dev,
1499 "%s pci_get_drvdata returned NULL\n", __func__);
1500 return -EFAULT;
1501 }
1502
1503 if (!data->irq_reg_sts) {
1504
1505 retval = request_irq(board_dat->pdev->irq, pch_spi_handler,
1506 IRQF_SHARED, KBUILD_MODNAME, data);
1507 if (retval < 0) {
1508 dev_err(&pd_dev->dev,
1509 "%s request_irq failed\n", __func__);
1510 return retval;
1511 }
1512
1513
1514 pch_spi_reset(data->master);
1515 pch_spi_set_master_mode(data->master);
1516 data->irq_reg_sts = true;
1517 }
1518 return 0;
1519}
1520#else
1521#define pch_spi_pd_suspend NULL
1522#define pch_spi_pd_resume NULL
1523#endif
1524
/* Per-channel platform driver; pch_spi_probe() creates one "pch-spi"
 * platform device per SPI channel of the PCI function, each bound here. */
static struct platform_driver pch_spi_pd_driver = {
	.driver = {
		.name = "pch-spi",
	},
	.probe = pch_spi_pd_probe,
	.remove = pch_spi_pd_remove,
	.suspend = pch_spi_pd_suspend,
	.resume = pch_spi_pd_resume
};
1534
/*
 * pch_spi_probe() - PCI probe: enable the device and spawn one platform
 * device per SPI channel.
 * @pdev: the PCI function
 * @id:   matched id; driver_data holds the channel count for this part
 *
 * Returns 0 on success or a negative errno; on failure every platform
 * device registered so far is unregistered and PCI resources released.
 */
static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct pch_spi_board_data *board_dat;
	struct platform_device *pd_dev = NULL;
	int retval;
	int i;
	struct pch_pd_dev_save *pd_dev_save;

	pd_dev_save = kzalloc(sizeof(*pd_dev_save), GFP_KERNEL);
	if (!pd_dev_save)
		return -ENOMEM;

	board_dat = kzalloc(sizeof(*board_dat), GFP_KERNEL);
	if (!board_dat) {
		retval = -ENOMEM;
		goto err_no_mem;
	}

	retval = pci_request_regions(pdev, KBUILD_MODNAME);
	if (retval) {
		dev_err(&pdev->dev, "%s request_region failed\n", __func__);
		goto pci_request_regions;
	}

	board_dat->pdev = pdev;
	/* driver_data encodes how many SPI channels this device has. */
	board_dat->num = id->driver_data;
	pd_dev_save->num = id->driver_data;
	pd_dev_save->board_dat = board_dat;

	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "%s pci_enable_device failed\n", __func__);
		goto pci_enable_device;
	}

	/* One platform device per channel; id = channel index. */
	for (i = 0; i < board_dat->num; i++) {
		pd_dev = platform_device_alloc("pch-spi", i);
		if (!pd_dev) {
			dev_err(&pdev->dev, "platform_device_alloc failed\n");
			retval = -ENOMEM;
			goto err_platform_device;
		}
		pd_dev_save->pd_save[i] = pd_dev;
		pd_dev->dev.parent = &pdev->dev;

		/* Hand each channel a copy of the board data as platdata. */
		retval = platform_device_add_data(pd_dev, board_dat,
						  sizeof(*board_dat));
		if (retval) {
			dev_err(&pdev->dev,
				"platform_device_add_data failed\n");
			platform_device_put(pd_dev);
			goto err_platform_device;
		}

		retval = platform_device_add(pd_dev);
		if (retval) {
			dev_err(&pdev->dev, "platform_device_add failed\n");
			platform_device_put(pd_dev);
			goto err_platform_device;
		}
	}

	pci_set_drvdata(pdev, pd_dev_save);

	return 0;

err_platform_device:
	/* Unregister only the channels that were fully added (indices < i). */
	while (--i >= 0)
		platform_device_unregister(pd_dev_save->pd_save[i]);
	pci_disable_device(pdev);
pci_enable_device:
	pci_release_regions(pdev);
pci_request_regions:
	kfree(board_dat);
err_no_mem:
	kfree(pd_dev_save);

	return retval;
}
1614
1615static void pch_spi_remove(struct pci_dev *pdev)
1616{
1617 int i;
1618 struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
1619
1620 dev_dbg(&pdev->dev, "%s ENTRY:pdev=%p\n", __func__, pdev);
1621
1622 for (i = 0; i < pd_dev_save->num; i++)
1623 platform_device_unregister(pd_dev_save->pd_save[i]);
1624
1625 pci_disable_device(pdev);
1626 pci_release_regions(pdev);
1627 kfree(pd_dev_save->board_dat);
1628 kfree(pd_dev_save);
1629}
1630
1631#ifdef CONFIG_PM
1632static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state)
1633{
1634 int retval;
1635 struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
1636
1637 dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
1638
1639 pd_dev_save->board_dat->suspend_sts = true;
1640
1641
1642 retval = pci_save_state(pdev);
1643 if (retval == 0) {
1644 pci_enable_wake(pdev, PCI_D3hot, 0);
1645 pci_disable_device(pdev);
1646 pci_set_power_state(pdev, PCI_D3hot);
1647 } else {
1648 dev_err(&pdev->dev, "%s pci_save_state failed\n", __func__);
1649 }
1650
1651 return retval;
1652}
1653
1654static int pch_spi_resume(struct pci_dev *pdev)
1655{
1656 int retval;
1657 struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
1658 dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
1659
1660 pci_set_power_state(pdev, PCI_D0);
1661 pci_restore_state(pdev);
1662
1663 retval = pci_enable_device(pdev);
1664 if (retval < 0) {
1665 dev_err(&pdev->dev,
1666 "%s pci_enable_device failed\n", __func__);
1667 } else {
1668 pci_enable_wake(pdev, PCI_D3hot, 0);
1669
1670
1671 pd_dev_save->board_dat->suspend_sts = false;
1672 }
1673
1674 return retval;
1675}
1676#else
1677#define pch_spi_suspend NULL
1678#define pch_spi_resume NULL
1679
1680#endif
1681
/* PCI driver for the EG20T/ML7xxx SPI function; probe fans out into the
 * per-channel "pch-spi" platform devices handled by pch_spi_pd_driver. */
static struct pci_driver pch_spi_pcidev_driver = {
	.name = "pch_spi",
	.id_table = pch_spi_pcidev_id,
	.probe = pch_spi_probe,
	.remove = pch_spi_remove,
	.suspend = pch_spi_suspend,
	.resume = pch_spi_resume,
};
1690
1691static int __init pch_spi_init(void)
1692{
1693 int ret;
1694 ret = platform_driver_register(&pch_spi_pd_driver);
1695 if (ret)
1696 return ret;
1697
1698 ret = pci_register_driver(&pch_spi_pcidev_driver);
1699 if (ret) {
1700 platform_driver_unregister(&pch_spi_pd_driver);
1701 return ret;
1702 }
1703
1704 return 0;
1705}
1706module_init(pch_spi_init);
1707
/* Module exit: unregister drivers in reverse order of pch_spi_init(). */
static void __exit pch_spi_exit(void)
{
	pci_unregister_driver(&pch_spi_pcidev_driver);
	platform_driver_unregister(&pch_spi_pd_driver);
}
module_exit(pch_spi_exit);
1714
/* use_dma=1 (default) selects DMA transfers; 0 forces PIO. Writable at
 * runtime via sysfs (0644), read by each channel at probe time. */
module_param(use_dma, int, 0644);
MODULE_PARM_DESC(use_dma,
		 "to use DMA for data transfers pass 1 else 0; default 1");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor ML7xxx IOH SPI Driver");
MODULE_DEVICE_TABLE(pci, pch_spi_pcidev_id);
1722
1723