/*
 * SPI bus driver for the Intel EG20T PCH and the LAPIS Semiconductor
 * ML7213/ML7223/ML7831 IOH SPI controllers.
 */
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/spi/spidev.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>		/* kzalloc()/kfree() */
#include <linux/io.h>		/* ioread32()/iowrite32() */

#include <linux/dmaengine.h>
#include <linux/pch_dma.h>

/* Register offsets (from each channel's base address) */
#define PCH_SPCR		0x00	/* SPI control register */
#define PCH_SPBRR		0x04	/* SPI baud rate register */
#define PCH_SPSR		0x08	/* SPI status register */
#define PCH_SPDWR		0x0C	/* SPI write data register */
#define PCH_SPDRR		0x10	/* SPI read data register */
#define PCH_SSNXCR		0x18	/* SSN control register */
#define PCH_SRST		0x1C	/* SPI reset register */
#define PCH_ADDRESS_SIZE	0x20	/* Register space per channel */

#define PCH_SPSR_TFD		0x000007C0	/* Tx FIFO depth field */
#define PCH_SPSR_RFD		0x0000F800	/* Rx FIFO depth field */

#define PCH_READABLE(x)		(((x) & PCH_SPSR_RFD) >> 11)
#define PCH_WRITABLE(x)		(((x) & PCH_SPSR_TFD) >> 6)

#define PCH_RX_THOLD		7
#define PCH_RX_THOLD_MAX	15

#define PCH_TX_THOLD		2

#define PCH_MAX_BAUDRATE	5000000
#define PCH_MAX_FIFO_DEPTH	16

#define STATUS_RUNNING		1
#define STATUS_EXITING		2
#define PCH_SLEEP_TIME		10

#define SSN_LOW			0x02U
#define SSN_HIGH		0x03U
#define SSN_NO_CONTROL		0x00U
#define PCH_MAX_CS		0xFF
#define PCI_DEVICE_ID_GE_SPI	0x8816

/* SPCR (control) and SPSR (status) register bits */
#define SPCR_SPE_BIT		(1 << 0)
#define SPCR_MSTR_BIT		(1 << 1)
#define SPCR_LSBF_BIT		(1 << 4)
#define SPCR_CPHA_BIT		(1 << 5)
#define SPCR_CPOL_BIT		(1 << 6)
#define SPCR_TFIE_BIT		(1 << 8)
#define SPCR_RFIE_BIT		(1 << 9)
#define SPCR_FIE_BIT		(1 << 10)
#define SPCR_ORIE_BIT		(1 << 11)
#define SPCR_MDFIE_BIT		(1 << 12)
#define SPCR_FICLR_BIT		(1 << 24)
#define SPSR_TFI_BIT		(1 << 0)
#define SPSR_RFI_BIT		(1 << 1)
#define SPSR_FI_BIT		(1 << 2)
#define SPSR_ORF_BIT		(1 << 3)
#define SPBRR_SIZE_BIT		(1 << 10)

#define PCH_ALL			(SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|\
				SPCR_ORIE_BIT|SPCR_MDFIE_BIT)

/* Rx/Tx FIFO interrupt threshold fields in SPCR */
#define SPCR_RFIC_FIELD		20
#define SPCR_TFIC_FIELD		16

#define MASK_SPBRR_SPBR_BITS	((1 << 10) - 1)
#define MASK_RFIC_SPCR_BITS	(0xf << SPCR_RFIC_FIELD)
#define MASK_TFIC_SPCR_BITS	(0xf << SPCR_TFIC_FIELD)

#define PCH_CLOCK_HZ		50000000
#define PCH_MAX_SPBR		1023

/* Definitions for the ML7213/ML7223/ML7831 IOHs by LAPIS Semiconductor */
#define PCI_VENDOR_ID_ROHM		0x10DB
#define PCI_DEVICE_ID_ML7213_SPI	0x802c
#define PCI_DEVICE_ID_ML7223_SPI	0x800F
#define PCI_DEVICE_ID_ML7831_SPI	0x8816

/*
 * Number of SPI channels per device (from pch_spi_pcidev_id driver_data):
 * Intel EG20T PCH:			1ch
 * LAPIS Semiconductor ML7213 IOH:	2ch
 * LAPIS Semiconductor ML7223 IOH:	1ch
 * LAPIS Semiconductor ML7831 IOH:	1ch
 */
#define PCH_SPI_MAX_DEV		2

#define PCH_BUF_SIZE		4096	/* DMA bounce buffer size */
#define PCH_DMA_TRANS_SIZE	12	/* Words per DMA descriptor chunk */

static int use_dma = 1;

struct pch_spi_dma_ctrl {
	struct dma_async_tx_descriptor	*desc_tx;
	struct dma_async_tx_descriptor	*desc_rx;
	struct pch_dma_slave		param_tx;
	struct pch_dma_slave		param_rx;
	struct dma_chan			*chan_tx;
	struct dma_chan			*chan_rx;
	struct scatterlist		*sg_tx_p;
	struct scatterlist		*sg_rx_p;
	struct scatterlist		sg_tx;
	struct scatterlist		sg_rx;
	int				nent;
	void				*tx_buf_virt;
	void				*rx_buf_virt;
	dma_addr_t			tx_buf_dma;
	dma_addr_t			rx_buf_dma;
};

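/**
 * struct pch_spi_data - Per-channel driver state
 * @io_remap_addr:		Remapped register base for this channel
 * @io_base_addr:		Physical register base for this channel
 * @master:			SPI master for this channel
 * @work:			Work item that processes the message queue
 * @wk:				Workqueue that runs @work
 * @wait:			Wait queue for transfer completion
 * @transfer_complete:		Set when the current transfer finishes
 * @bcurrent_msg_processing:	Set while a message is being processed
 * @lock:			Protects @queue and register updates
 * @queue:			List of queued spi_message structures
 * @status:			STATUS_RUNNING or STATUS_EXITING
 * @bpw_len:			Length of the current transfer in words
 * @transfer_active:		Set while the FIFOs are being serviced
 * @tx_index:			Words written so far
 * @rx_index:			Words read so far
 * @pkt_tx_buff:		Bounce buffer for transmit data (PIO path)
 * @pkt_rx_buff:		Bounce buffer for receive data (PIO path)
 * @n_curnt_chip:		Chip select of the active slave
 * @current_chip:		SPI device of the active slave
 * @current_msg:		Message currently being processed
 * @cur_trans:			Transfer currently being processed
 * @board_dat:			Per-PCI-device board data
 * @plat_dev:			Platform device for this channel
 * @ch:				Channel number
 * @dma:			DMA state for this channel
 * @use_dma:			Nonzero when DMA is used for transfers
 * @irq_reg_sts:		True while the IRQ handler is registered
 * @save_total_len:		Original transfer length across DMA chunks
 */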
struct pch_spi_data {
	void __iomem *io_remap_addr;
	unsigned long io_base_addr;
	struct spi_master *master;
	struct work_struct work;
	struct workqueue_struct *wk;
	wait_queue_head_t wait;
	u8 transfer_complete;
	u8 bcurrent_msg_processing;
	spinlock_t lock;
	struct list_head queue;
	u8 status;
	u32 bpw_len;
	u8 transfer_active;
	u32 tx_index;
	u32 rx_index;
	u16 *pkt_tx_buff;
	u16 *pkt_rx_buff;
	u8 n_curnt_chip;
	struct spi_device *current_chip;
	struct spi_message *current_msg;
	struct spi_transfer *cur_trans;
	struct pch_spi_board_data *board_dat;
	struct platform_device *plat_dev;
	int ch;
	struct pch_spi_dma_ctrl dma;
	int use_dma;
	u8 irq_reg_sts;
	int save_total_len;
};

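/**
 * struct pch_spi_board_data - Per-PCI-device data shared by all channels
 * @pdev:		The underlying PCI device
 * @suspend_sts:	True while the device is suspended
 * @num:		Number of SPI channels on this device
 */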
struct pch_spi_board_data {
	struct pci_dev *pdev;
	u8 suspend_sts;
	int num;
};

/* Bookkeeping for the per-channel platform devices created in probe */
struct pch_pd_dev_save {
	int num;
	struct platform_device *pd_save[PCH_SPI_MAX_DEV];
	struct pch_spi_board_data *board_dat;
};

static DEFINE_PCI_DEVICE_TABLE(pch_spi_pcidev_id) = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI),    1, },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_SPI), 1, },
	{ }
};

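/**
 * pch_spi_writereg() - Performs register writes
 * @master:	Pointer to struct spi_master
 * @idx:	Register offset
 * @val:	Value to be written to the register
 */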
static inline void pch_spi_writereg(struct spi_master *master, int idx, u32 val)
{
	struct pch_spi_data *data = spi_master_get_devdata(master);

	iowrite32(val, (data->io_remap_addr + idx));
}

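/**
 * pch_spi_readreg() - Performs register reads
 * @master:	Pointer to struct spi_master
 * @idx:	Register offset
 */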
static inline u32 pch_spi_readreg(struct spi_master *master, int idx)
{
	struct pch_spi_data *data = spi_master_get_devdata(master);

	return ioread32(data->io_remap_addr + idx);
}

/* Read-modify-write helper: sets @set and clears @clr in register @idx */
static inline void pch_spi_setclr_reg(struct spi_master *master, int idx,
				      u32 set, u32 clr)
{
	u32 tmp = pch_spi_readreg(master, idx);

	tmp = (tmp & ~clr) | set;
	pch_spi_writereg(master, idx, tmp);
}

/* Configure the controller as SPI master */
static void pch_spi_set_master_mode(struct spi_master *master)
{
	pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0);
}

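/**
 * pch_spi_clear_fifo() - Clears the Transmit and Receive FIFOs
 * @master:	Pointer to struct spi_master
 */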
static void pch_spi_clear_fifo(struct spi_master *master)
{
	/* toggle FICLR high then low to clear both FIFOs */
	pch_spi_setclr_reg(master, PCH_SPCR, SPCR_FICLR_BIT, 0);
	pch_spi_setclr_reg(master, PCH_SPCR, 0, SPCR_FICLR_BIT);
}

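/**
 * pch_spi_handler_sub() - Services the Rx/Tx FIFOs from interrupt context
 * @data:		Pointer to struct pch_spi_data
 * @reg_spsr_val:	SPSR snapshot taken by the interrupt handler
 * @io_remap_addr:	Remapped register base for this channel
 */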
static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
				void __iomem *io_remap_addr)
{
	u32 n_read, tx_index, rx_index, bpw_len;
	u16 *pkt_rx_buffer, *pkt_tx_buff;
	int read_cnt;
	u32 reg_spcr_val;
	void __iomem *spsr;
	void __iomem *spdrr;
	void __iomem *spdwr;

	spsr = io_remap_addr + PCH_SPSR;
	iowrite32(reg_spsr_val, spsr);

	if (data->transfer_active) {
		rx_index = data->rx_index;
		tx_index = data->tx_index;
		bpw_len = data->bpw_len;
		pkt_rx_buffer = data->pkt_rx_buff;
		pkt_tx_buff = data->pkt_tx_buff;

		spdrr = io_remap_addr + PCH_SPDRR;
		spdwr = io_remap_addr + PCH_SPDWR;

		n_read = PCH_READABLE(reg_spsr_val);

		for (read_cnt = 0; (read_cnt < n_read); read_cnt++) {
			pkt_rx_buffer[rx_index++] = ioread32(spdrr);
			if (tx_index < bpw_len)
				iowrite32(pkt_tx_buff[tx_index++], spdwr);
		}

		/* disable RFI if not needed */
		if ((bpw_len - rx_index) <= PCH_MAX_FIFO_DEPTH) {
			reg_spcr_val = ioread32(io_remap_addr + PCH_SPCR);
			reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */

			/* reset rx threshold */
			reg_spcr_val &= ~MASK_RFIC_SPCR_BITS;
			reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD);

			iowrite32(reg_spcr_val, (io_remap_addr + PCH_SPCR));
		}

		/* update counts */
		data->tx_index = tx_index;
		data->rx_index = rx_index;

		/* if transfer complete interrupt */
		if (reg_spsr_val & SPSR_FI_BIT) {
			if ((tx_index == bpw_len) && (rx_index == tx_index)) {
				/* disable interrupts */
				pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
						   PCH_ALL);

				/* transfer is completed; inform the
				   waiting thread */
				data->transfer_complete = true;
				data->transfer_active = false;
				wake_up(&data->wait);
			} else {
				dev_err(&data->master->dev,
					"%s : Transfer is not completed",
					__func__);
			}
		}
	}
}

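/**
 * pch_spi_handler() - Interrupt handler
 * @irq:	The interrupt number
 * @dev_id:	Pointer to struct pch_spi_data
 */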
static irqreturn_t pch_spi_handler(int irq, void *dev_id)
{
	u32 reg_spsr_val;
	void __iomem *spsr;
	void __iomem *io_remap_addr;
	irqreturn_t ret = IRQ_NONE;
	struct pch_spi_data *data = dev_id;
	struct pch_spi_board_data *board_dat = data->board_dat;

	if (board_dat->suspend_sts) {
		dev_dbg(&board_dat->pdev->dev,
			"%s returning due to suspend\n", __func__);
		return IRQ_NONE;
	}

	io_remap_addr = data->io_remap_addr;
	spsr = io_remap_addr + PCH_SPSR;

	reg_spsr_val = ioread32(spsr);

	if (reg_spsr_val & SPSR_ORF_BIT) {
		dev_err(&board_dat->pdev->dev, "%s Overrun error\n", __func__);
		if (data->current_msg->complete) {
			data->transfer_complete = true;
			data->current_msg->status = -EIO;
			data->current_msg->complete(data->current_msg->context);
			data->bcurrent_msg_processing = false;
			data->current_msg = NULL;
			data->cur_trans = NULL;
		}
	}

	/* the DMA path has its own completion callbacks */
	if (data->use_dma)
		return IRQ_NONE;

	if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
		pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr);
		ret = IRQ_HANDLED;
	}

	dev_dbg(&board_dat->pdev->dev, "%s EXIT return value=%d\n",
		__func__, ret);

	return ret;
}

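/**
 * pch_spi_set_baud_rate() - Sets the SPBR field in the SPBRR register
 * @master:	Pointer to struct spi_master
 * @speed_hz:	Baud rate
 */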
static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz)
{
	u32 n_spbr = PCH_CLOCK_HZ / (speed_hz * 2);

	/* SPBR is a 10-bit field; clamp the divisor to its maximum */
	if (n_spbr > PCH_MAX_SPBR)
		n_spbr = PCH_MAX_SPBR;

	pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, MASK_SPBRR_SPBR_BITS);
}

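/**
 * pch_spi_set_bits_per_word() - Sets the SIZE field in the SPBRR register
 * @master:		Pointer to struct spi_master
 * @bits_per_word:	Bits per word for the SPI transfer (8 or 16)
 */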
static void pch_spi_set_bits_per_word(struct spi_master *master,
				      u8 bits_per_word)
{
	if (bits_per_word == 8)
		pch_spi_setclr_reg(master, PCH_SPBRR, 0, SPBRR_SIZE_BIT);
	else
		pch_spi_setclr_reg(master, PCH_SPBRR, SPBRR_SIZE_BIT, 0);
}

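/**
 * pch_spi_setup_transfer() - Configures the PCH SPI hardware for a transfer
 * @spi:	Pointer to struct spi_device
 */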
static void pch_spi_setup_transfer(struct spi_device *spi)
{
	u32 flags = 0;

	dev_dbg(&spi->dev, "%s SPBRR content =%x setting baud rate=%d\n",
		__func__, pch_spi_readreg(spi->master, PCH_SPBRR),
		spi->max_speed_hz);
	pch_spi_set_baud_rate(spi->master, spi->max_speed_hz);

	/* set bits per word */
	pch_spi_set_bits_per_word(spi->master, spi->bits_per_word);

	/* note: LSBF is set for MSB-first modes */
	if (!(spi->mode & SPI_LSB_FIRST))
		flags |= SPCR_LSBF_BIT;
	if (spi->mode & SPI_CPOL)
		flags |= SPCR_CPOL_BIT;
	if (spi->mode & SPI_CPHA)
		flags |= SPCR_CPHA_BIT;
	pch_spi_setclr_reg(spi->master, PCH_SPCR, flags,
			   (SPCR_LSBF_BIT | SPCR_CPOL_BIT | SPCR_CPHA_BIT));

	/* clear the FIFOs before starting the transfer */
	pch_spi_clear_fifo(spi->master);
}

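/**
 * pch_spi_reset() - Clears SPI registers
 * @master:	Pointer to struct spi_master
 */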
static void pch_spi_reset(struct spi_master *master)
{
	/* write 1 to reset SPI */
	pch_spi_writereg(master, PCH_SRST, 0x1);

	/* clear reset */
	pch_spi_writereg(master, PCH_SRST, 0x0);
}

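/**
 * pch_spi_setup() - Validates and applies slave device settings
 * @pspi:	Pointer to struct spi_device
 */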
static int pch_spi_setup(struct spi_device *pspi)
{
	/* check bits per word */
	if (pspi->bits_per_word == 0) {
		pspi->bits_per_word = 8;
		dev_dbg(&pspi->dev, "%s 8 bits per word\n", __func__);
	}

	if ((pspi->bits_per_word != 8) && (pspi->bits_per_word != 16)) {
		dev_err(&pspi->dev, "%s Invalid bits per word\n", __func__);
		return -EINVAL;
	}

	/* if baud rate is higher than we can support, limit it */
	if ((pspi->max_speed_hz) > PCH_MAX_BAUDRATE)
		pspi->max_speed_hz = PCH_MAX_BAUDRATE;

	dev_dbg(&pspi->dev, "%s MODE = %x\n", __func__,
		(pspi->mode) & (SPI_CPOL | SPI_CPHA));

	return 0;
}

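/**
 * pch_spi_transfer() - Validates a message and queues it for processing
 * @pspi:	Pointer to struct spi_device
 * @pmsg:	Pointer to the struct spi_message to be queued
 */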
static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
{
	struct spi_transfer *transfer;
	struct pch_spi_data *data = spi_master_get_devdata(pspi->master);
	int retval;
	unsigned long flags;

	/* validate spi message and baud rate */
	if (unlikely(list_empty(&pmsg->transfers))) {
		dev_err(&pspi->dev, "%s list empty\n", __func__);
		retval = -EINVAL;
		goto err_out;
	}

	if (unlikely(pspi->max_speed_hz == 0)) {
		dev_err(&pspi->dev, "%s pch_spi_transfer maxspeed=%d\n",
			__func__, pspi->max_speed_hz);
		retval = -EINVAL;
		goto err_out;
	}

	dev_dbg(&pspi->dev,
		"%s Transfer List not empty. Transfer Speed is set.\n",
		__func__);

	spin_lock_irqsave(&data->lock, flags);

	/* validate Tx/Rx buffers and transfer length */
	list_for_each_entry(transfer, &pmsg->transfers, transfer_list) {
		if (!transfer->tx_buf && !transfer->rx_buf) {
			dev_err(&pspi->dev,
				"%s Tx and Rx buffer NULL\n", __func__);
			retval = -EINVAL;
			goto err_return_spinlock;
		}

		if (!transfer->len) {
			dev_err(&pspi->dev, "%s Transfer length invalid\n",
				__func__);
			retval = -EINVAL;
			goto err_return_spinlock;
		}

		dev_dbg(&pspi->dev,
			"%s Tx/Rx buffer valid. Transfer length valid\n",
			__func__);

		/* if baud rate is higher than we can support, limit it */
		if (transfer->speed_hz > PCH_MAX_BAUDRATE)
			transfer->speed_hz = PCH_MAX_BAUDRATE;

		if (transfer->bits_per_word) {
			if ((transfer->bits_per_word != 8)
			    && (transfer->bits_per_word != 16)) {
				retval = -EINVAL;
				dev_err(&pspi->dev,
					"%s Invalid bits per word\n", __func__);
				goto err_return_spinlock;
			}
		}
	}
	spin_unlock_irqrestore(&data->lock, flags);

	/* We won't process any messages if we have been asked to terminate */
	if (data->status == STATUS_EXITING) {
		dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__);
		retval = -ESHUTDOWN;
		goto err_out;
	}

	/* If suspended, reject the message */
	if (data->board_dat->suspend_sts) {
		dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__);
		retval = -EINVAL;
		goto err_out;
	}

	/* set status of message */
	pmsg->actual_length = 0;
	dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status);

	pmsg->status = -EINPROGRESS;
	spin_lock_irqsave(&data->lock, flags);

	/* add message to queue */
	list_add_tail(&pmsg->queue, &data->queue);
	spin_unlock_irqrestore(&data->lock, flags);

	dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__);

	/* schedule work to process this message */
	queue_work(data->wk, &data->work);
	dev_dbg(&pspi->dev, "%s - Invoked queue work\n", __func__);

	retval = 0;

err_out:
	dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
	return retval;
err_return_spinlock:
	dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
	spin_unlock_irqrestore(&data->lock, flags);
	return retval;
}

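/**
 * pch_spi_select_chip() - Selects the slave and configures the hardware
 * @data:	Pointer to struct pch_spi_data
 * @pspi:	Pointer to the struct spi_device to select
 */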
static inline void pch_spi_select_chip(struct pch_spi_data *data,
				       struct spi_device *pspi)
{
	if (data->current_chip != NULL) {
		if (pspi->chip_select != data->n_curnt_chip) {
			dev_dbg(&pspi->dev, "%s : different slave\n", __func__);
			data->current_chip = NULL;
		}
	}

	data->current_chip = pspi;

	data->n_curnt_chip = data->current_chip->chip_select;

	dev_dbg(&pspi->dev, "%s :Invoking pch_spi_setup_transfer\n", __func__);
	pch_spi_setup_transfer(pspi);
}

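/**
 * pch_spi_set_tx() - Prepares bounce buffers and primes the Tx FIFO
 * @data:	Pointer to struct pch_spi_data
 * @bpw:	Returns the bits per word used for the current transfer
 */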
static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
{
	int size;
	u32 n_writes;
	int j;
	struct spi_message *pmsg, *tmp;
	const u8 *tx_buf;
	const u16 *tx_sbuf;

	/* set baud rate if needed */
	if (data->cur_trans->speed_hz) {
		dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
		pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
	}

	/* set bits per word if needed */
	if (data->cur_trans->bits_per_word &&
	    (data->current_msg->spi->bits_per_word !=
	     data->cur_trans->bits_per_word)) {
		dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
		pch_spi_set_bits_per_word(data->master,
					  data->cur_trans->bits_per_word);
		*bpw = data->cur_trans->bits_per_word;
	} else {
		*bpw = data->current_msg->spi->bits_per_word;
	}

	/* reset Tx/Rx indices */
	data->tx_index = 0;
	data->rx_index = 0;

	data->bpw_len = data->cur_trans->len / (*bpw / 8);

	/* find allocation size */
	size = data->cur_trans->len * sizeof(*data->pkt_tx_buff);

	/* allocate memory for pkt_tx_buff & pkt_rx_buffer */
	data->pkt_tx_buff = kzalloc(size, GFP_KERNEL);
	if (data->pkt_tx_buff != NULL) {
		data->pkt_rx_buff = kzalloc(size, GFP_KERNEL);
		if (!data->pkt_rx_buff)
			kfree(data->pkt_tx_buff);
	}

	if (!data->pkt_rx_buff) {
		/* flush the queue and set the status of all transfers */
		dev_err(&data->master->dev, "%s :kzalloc failed\n", __func__);
		list_for_each_entry_safe(pmsg, tmp, &data->queue, queue) {
			pmsg->status = -ENOMEM;

			if (pmsg->complete)
				pmsg->complete(pmsg->context);

			/* delete from queue */
			list_del_init(&pmsg->queue);
		}
		return;
	}

	/* copy Tx data into the bounce buffer */
	if (data->cur_trans->tx_buf != NULL) {
		if (*bpw == 8) {
			tx_buf = data->cur_trans->tx_buf;
			for (j = 0; j < data->bpw_len; j++)
				data->pkt_tx_buff[j] = *tx_buf++;
		} else {
			tx_sbuf = data->cur_trans->tx_buf;
			for (j = 0; j < data->bpw_len; j++)
				data->pkt_tx_buff[j] = *tx_sbuf++;
		}
	}

	/* prime at most PCH_MAX_FIFO_DEPTH words into the FIFO */
	n_writes = data->bpw_len;
	if (n_writes > PCH_MAX_FIFO_DEPTH)
		n_writes = PCH_MAX_FIFO_DEPTH;

	dev_dbg(&data->master->dev,
		"%s:Pulling down SSN low - writing 0x2 to SSNXCR\n", __func__);
	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);

	for (j = 0; j < n_writes; j++)
		pch_spi_writereg(data->master, PCH_SPDWR, data->pkt_tx_buff[j]);

	/* update tx_index */
	data->tx_index = j;

	/* reset transfer complete flag */
	data->transfer_complete = false;
	data->transfer_active = true;
}

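/**
 * pch_spi_nomore_transfer() - Completes the current message and re-arms work
 * @data:	Pointer to struct pch_spi_data
 */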
static void pch_spi_nomore_transfer(struct pch_spi_data *data)
{
	struct spi_message *pmsg, *tmp;

	dev_dbg(&data->master->dev, "%s called\n", __func__);

	/* Invoke the completion callback: informs the SPI core that this
	   message is done */
	data->current_msg->status = 0;

	if (data->current_msg->complete) {
		dev_dbg(&data->master->dev,
			"%s:Invoking callback of SPI core\n", __func__);
		data->current_msg->complete(data->current_msg->context);
	}

	/* update status in global variable */
	data->bcurrent_msg_processing = false;

	dev_dbg(&data->master->dev,
		"%s:data->bcurrent_msg_processing = false\n", __func__);

	data->current_msg = NULL;
	data->cur_trans = NULL;

	/* check if we have items in the list and are not suspending;
	   re-schedule the worker if so, otherwise flush the queue */
	if ((list_empty(&data->queue) == 0) &&
	    (!data->board_dat->suspend_sts) &&
	    (data->status != STATUS_EXITING)) {
		dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__);
		queue_work(data->wk, &data->work);
	} else if (data->board_dat->suspend_sts ||
		   data->status == STATUS_EXITING) {
		dev_dbg(&data->master->dev,
			"%s suspend/remove initiated, flushing queue\n",
			__func__);
		list_for_each_entry_safe(pmsg, tmp, &data->queue, queue) {
			pmsg->status = -EIO;

			if (pmsg->complete)
				pmsg->complete(pmsg->context);

			/* delete from queue */
			list_del_init(&pmsg->queue);
		}
	}
}

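/**
 * pch_spi_set_ir() - Enables interrupts, runs a PIO transfer, and cleans up
 * @data:	Pointer to struct pch_spi_data
 */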
static void pch_spi_set_ir(struct pch_spi_data *data)
{
	/* enable interrupts, set threshold, enable SPI */
	if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH)
		/* set receive threshold to PCH_RX_THOLD */
		pch_spi_setclr_reg(data->master, PCH_SPCR,
				   PCH_RX_THOLD << SPCR_RFIC_FIELD |
				   SPCR_FIE_BIT | SPCR_RFIE_BIT |
				   SPCR_ORIE_BIT | SPCR_SPE_BIT,
				   MASK_RFIC_SPCR_BITS | PCH_ALL);
	else
		/* set receive threshold to maximum */
		pch_spi_setclr_reg(data->master, PCH_SPCR,
				   PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD |
				   SPCR_FIE_BIT | SPCR_ORIE_BIT |
				   SPCR_SPE_BIT,
				   MASK_RFIC_SPCR_BITS | PCH_ALL);

	/* Wait until the transfer completes; go to sleep after
	   initiating the transfer. */
	dev_dbg(&data->master->dev,
		"%s:waiting for transfer to get over\n", __func__);

	wait_event_interruptible(data->wait, data->transfer_complete);

	/* clear all interrupts */
	pch_spi_writereg(data->master, PCH_SPSR,
			 pch_spi_readreg(data->master, PCH_SPSR));
	/* disable interrupts and SPI transfer */
	pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL | SPCR_SPE_BIT);
	/* clear FIFO */
	pch_spi_clear_fifo(data->master);
}

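/**
 * pch_spi_copy_rx_data() - Copies received data to the caller's Rx buffer
 * @data:	Pointer to struct pch_spi_data
 * @bpw:	Bits per word used for the transfer
 */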
static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw)
{
	int j;
	u8 *rx_buf;
	u16 *rx_sbuf;

	/* copy Rx Data */
	if (!data->cur_trans->rx_buf)
		return;

	if (bpw == 8) {
		rx_buf = data->cur_trans->rx_buf;
		for (j = 0; j < data->bpw_len; j++)
			*rx_buf++ = data->pkt_rx_buff[j] & 0xFF;
	} else {
		rx_sbuf = data->cur_trans->rx_buf;
		for (j = 0; j < data->bpw_len; j++)
			*rx_sbuf++ = data->pkt_rx_buff[j];
	}
}

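/**
 * pch_spi_copy_rx_data_for_dma() - Copies DMA-received data to the Rx buffer
 * @data:	Pointer to struct pch_spi_data
 * @bpw:	Bits per word used for the transfer
 *
 * Advances cur_trans->rx_buf so that successive DMA chunks append; the
 * caller restores the original pointer when the transfer is done.
 */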
static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
{
	int j;
	u8 *rx_buf;
	u16 *rx_sbuf;
	const u8 *rx_dma_buf;
	const u16 *rx_dma_sbuf;

	/* copy Rx Data */
	if (!data->cur_trans->rx_buf)
		return;

	if (bpw == 8) {
		rx_buf = data->cur_trans->rx_buf;
		rx_dma_buf = data->dma.rx_buf_virt;
		for (j = 0; j < data->bpw_len; j++)
			*rx_buf++ = *rx_dma_buf++ & 0xFF;
		data->cur_trans->rx_buf = rx_buf;
	} else {
		rx_sbuf = data->cur_trans->rx_buf;
		rx_dma_sbuf = data->dma.rx_buf_virt;
		for (j = 0; j < data->bpw_len; j++)
			*rx_sbuf++ = *rx_dma_sbuf++;
		data->cur_trans->rx_buf = rx_sbuf;
	}
}

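/**
 * pch_spi_start_transfer() - Enables SPI, waits for DMA completion, cleans up
 * @data:	Pointer to struct pch_spi_data
 *
 * Returns the wait_event result: 0 on timeout, which the caller treats as
 * failure.
 */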
static int pch_spi_start_transfer(struct pch_spi_data *data)
{
	struct pch_spi_dma_ctrl *dma;
	unsigned long flags;
	int rtn;

	dma = &data->dma;

	spin_lock_irqsave(&data->lock, flags);

	/* disable interrupts, SPI set enable */
	pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL);

	spin_unlock_irqrestore(&data->lock, flags);

	/* Wait until the transfer completes; go to sleep after
	   initiating the transfer. */
	dev_dbg(&data->master->dev,
		"%s:waiting for transfer to get over\n", __func__);
	rtn = wait_event_interruptible_timeout(data->wait,
					       data->transfer_complete,
					       msecs_to_jiffies(2000));
	if (!rtn)
		dev_err(&data->master->dev,
			"%s wait-event timeout\n", __func__);

	dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
			    DMA_FROM_DEVICE);

	dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,
			    DMA_TO_DEVICE);
	memset(data->dma.tx_buf_virt, 0, PCH_BUF_SIZE);

	async_tx_ack(dma->desc_rx);
	async_tx_ack(dma->desc_tx);
	kfree(dma->sg_tx_p);
	kfree(dma->sg_rx_p);

	spin_lock_irqsave(&data->lock, flags);

	/* clear fifo threshold, disable interrupts, disable SPI transfer */
	pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
			   MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS | PCH_ALL |
			   SPCR_SPE_BIT);
	/* clear all interrupts */
	pch_spi_writereg(data->master, PCH_SPSR,
			 pch_spi_readreg(data->master, PCH_SPSR));
	/* clear FIFO */
	pch_spi_clear_fifo(data->master);

	spin_unlock_irqrestore(&data->lock, flags);

	return rtn;
}

static void pch_dma_rx_complete(void *arg)
{
	struct pch_spi_data *data = arg;

	/* transfer is completed; inform pch_spi_start_transfer */
	data->transfer_complete = true;
	wake_up_interruptible(&data->wait);
}

static bool pch_spi_filter(struct dma_chan *chan, void *slave)
{
	struct pch_dma_slave *param = slave;

	if ((chan->chan_id == param->chan_id) &&
	    (param->dma_dev == chan->device->dev)) {
		chan->private = param;
		return true;
	} else {
		return false;
	}
}

static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct pci_dev *dma_dev;
	struct pch_dma_slave *param;
	struct pch_spi_dma_ctrl *dma;
	unsigned int width;

	if (bpw == 8)
		width = PCH_DMA_WIDTH_1_BYTE;
	else
		width = PCH_DMA_WIDTH_2_BYTES;

	dma = &data->dma;
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get DMA's dev information */
	dma_dev = pci_get_bus_and_slot(data->board_dat->pdev->bus->number,
				       PCI_DEVFN(12, 0));

	/* Set Tx DMA */
	param = &dma->param_tx;
	param->dma_dev = &dma_dev->dev;
	param->chan_id = data->master->bus_num * 2; /* Tx = even channel */
	param->tx_reg = data->io_base_addr + PCH_SPDWR;
	param->width = width;
	chan = dma_request_channel(mask, pch_spi_filter, param);
	if (!chan) {
		dev_err(&data->master->dev,
			"ERROR: dma_request_channel FAILS(Tx)\n");
		data->use_dma = 0;
		return;
	}
	dma->chan_tx = chan;

	/* Set Rx DMA */
	param = &dma->param_rx;
	param->dma_dev = &dma_dev->dev;
	param->chan_id = data->master->bus_num * 2 + 1; /* Rx = Tx + 1 */
	param->rx_reg = data->io_base_addr + PCH_SPDRR;
	param->width = width;
	chan = dma_request_channel(mask, pch_spi_filter, param);
	if (!chan) {
		dev_err(&data->master->dev,
			"ERROR: dma_request_channel FAILS(Rx)\n");
		dma_release_channel(dma->chan_tx);
		dma->chan_tx = NULL;
		data->use_dma = 0;
		return;
	}
	dma->chan_rx = chan;
}

static void pch_spi_release_dma(struct pch_spi_data *data)
{
	struct pch_spi_dma_ctrl *dma;

	dma = &data->dma;
	if (dma->chan_tx) {
		dma_release_channel(dma->chan_tx);
		dma->chan_tx = NULL;
	}
	if (dma->chan_rx) {
		dma_release_channel(dma->chan_rx);
		dma->chan_rx = NULL;
	}
}

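/**
 * pch_spi_handle_dma() - Prepares scatter-gather lists and submits DMA
 * @data:	Pointer to struct pch_spi_data
 * @bpw:	Returns the bits per word used for the current transfer
 */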
static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
{
	const u8 *tx_buf;
	const u16 *tx_sbuf;
	u8 *tx_dma_buf;
	u16 *tx_dma_sbuf;
	struct scatterlist *sg;
	struct dma_async_tx_descriptor *desc_tx;
	struct dma_async_tx_descriptor *desc_rx;
	int num;
	int i;
	int size;
	int rem;
	int head;
	unsigned long flags;
	struct pch_spi_dma_ctrl *dma;

	dma = &data->dma;

	/* set baud rate if needed */
	if (data->cur_trans->speed_hz) {
		dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
		spin_lock_irqsave(&data->lock, flags);
		pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
		spin_unlock_irqrestore(&data->lock, flags);
	}

	/* set bits per word if needed */
	if (data->cur_trans->bits_per_word &&
	    (data->current_msg->spi->bits_per_word !=
	     data->cur_trans->bits_per_word)) {
		dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
		spin_lock_irqsave(&data->lock, flags);
		pch_spi_set_bits_per_word(data->master,
					  data->cur_trans->bits_per_word);
		spin_unlock_irqrestore(&data->lock, flags);
		*bpw = data->cur_trans->bits_per_word;
	} else {
		*bpw = data->current_msg->spi->bits_per_word;
	}
	data->bpw_len = data->cur_trans->len / (*bpw / 8);

	/* process at most PCH_BUF_SIZE words per call */
	if (data->bpw_len > PCH_BUF_SIZE) {
		data->bpw_len = PCH_BUF_SIZE;
		data->cur_trans->len -= PCH_BUF_SIZE;
	}

	/* copy Tx Data into the DMA bounce buffer */
	if (data->cur_trans->tx_buf != NULL) {
		if (*bpw == 8) {
			tx_buf = data->cur_trans->tx_buf;
			tx_dma_buf = dma->tx_buf_virt;
			for (i = 0; i < data->bpw_len; i++)
				*tx_dma_buf++ = *tx_buf++;
		} else {
			tx_sbuf = data->cur_trans->tx_buf;
			tx_dma_sbuf = dma->tx_buf_virt;
			for (i = 0; i < data->bpw_len; i++)
				*tx_dma_sbuf++ = *tx_sbuf++;
		}
	}

	/* Calculate Rx parameters for DMA: split into chunks of
	   PCH_DMA_TRANS_SIZE words, the last chunk taking the remainder */
	if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
		if (data->bpw_len % PCH_DMA_TRANS_SIZE) {
			num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
			rem = data->bpw_len % PCH_DMA_TRANS_SIZE;
		} else {
			num = data->bpw_len / PCH_DMA_TRANS_SIZE;
			rem = PCH_DMA_TRANS_SIZE;
		}
		size = PCH_DMA_TRANS_SIZE;
	} else {
		num = 1;
		size = data->bpw_len;
		rem = data->bpw_len;
	}
	dev_dbg(&data->master->dev, "%s num=%d size=%d rem=%d\n",
		__func__, num, size, rem);
	spin_lock_irqsave(&data->lock, flags);

	/* set receive fifo threshold and transmit fifo threshold */
	pch_spi_setclr_reg(data->master, PCH_SPCR,
			   ((size - 1) << SPCR_RFIC_FIELD) |
			   (PCH_TX_THOLD << SPCR_TFIC_FIELD),
			   MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);

	spin_unlock_irqrestore(&data->lock, flags);

	/* RX */
	dma->sg_rx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
	if (!dma->sg_rx_p)
		return;
	sg_init_table(dma->sg_rx_p, num); /* Initialize SG table */
	/* offset, length setting */
	sg = dma->sg_rx_p;
	for (i = 0; i < num; i++, sg++) {
		if (i == (num - 2)) {
			sg->offset = size * i;
			sg->offset = sg->offset * (*bpw / 8);
			sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem,
				    sg->offset);
			sg_dma_len(sg) = rem;
		} else if (i == (num - 1)) {
			sg->offset = size * (i - 1) + rem;
			sg->offset = sg->offset * (*bpw / 8);
			sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
				    sg->offset);
			sg_dma_len(sg) = size;
		} else {
			sg->offset = size * i;
			sg->offset = sg->offset * (*bpw / 8);
			sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
				    sg->offset);
			sg_dma_len(sg) = size;
		}
		sg_dma_address(sg) = dma->rx_buf_dma + sg->offset;
	}
	sg = dma->sg_rx_p;
	desc_rx = dmaengine_prep_slave_sg(dma->chan_rx, sg,
					  num, DMA_DEV_TO_MEM,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_rx) {
		dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
			__func__);
		return;
	}
	dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_FROM_DEVICE);
	desc_rx->callback = pch_dma_rx_complete;
	desc_rx->callback_param = data;
	dma->nent = num;
	dma->desc_rx = desc_rx;

	/* Calculate Tx parameters: the first chunk also covers the FIFO
	   head that was used to prime the transfer */
	if (data->bpw_len > PCH_MAX_FIFO_DEPTH) {
		head = PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE;
		if (data->bpw_len % PCH_DMA_TRANS_SIZE > 4) {
			num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
			rem = data->bpw_len % PCH_DMA_TRANS_SIZE - head;
		} else {
			num = data->bpw_len / PCH_DMA_TRANS_SIZE;
			rem = data->bpw_len % PCH_DMA_TRANS_SIZE +
			      PCH_DMA_TRANS_SIZE - head;
		}
		size = PCH_DMA_TRANS_SIZE;
	} else {
		num = 1;
		size = data->bpw_len;
		rem = data->bpw_len;
		head = 0;
	}

	dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
	if (!dma->sg_tx_p)
		return;
	sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
	/* offset, length setting */
	sg = dma->sg_tx_p;
	for (i = 0; i < num; i++, sg++) {
		if (i == 0) {
			sg->offset = 0;
			sg_set_page(sg, virt_to_page(dma->tx_buf_virt),
				    size + head, sg->offset);
			sg_dma_len(sg) = size + head;
		} else if (i == (num - 1)) {
			sg->offset = head + size * i;
			sg->offset = sg->offset * (*bpw / 8);
			sg_set_page(sg, virt_to_page(dma->tx_buf_virt), rem,
				    sg->offset);
			sg_dma_len(sg) = rem;
		} else {
			sg->offset = head + size * i;
			sg->offset = sg->offset * (*bpw / 8);
			sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size,
				    sg->offset);
			sg_dma_len(sg) = size;
		}
		sg_dma_address(sg) = dma->tx_buf_dma + sg->offset;
	}
	sg = dma->sg_tx_p;
	desc_tx = dmaengine_prep_slave_sg(dma->chan_tx,
					  sg, num, DMA_MEM_TO_DEV,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_tx) {
		dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
			__func__);
		return;
	}
	dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_TO_DEVICE);
	desc_tx->callback = NULL;
	desc_tx->callback_param = data;
	dma->nent = num;
	dma->desc_tx = desc_tx;

	dev_dbg(&data->master->dev,
		"%s:Pulling down SSN low - writing 0x2 to SSNXCR\n", __func__);

	spin_lock_irqsave(&data->lock, flags);
	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
	desc_rx->tx_submit(desc_rx);
	desc_tx->tx_submit(desc_tx);
	spin_unlock_irqrestore(&data->lock, flags);

	/* reset transfer complete flag */
	data->transfer_complete = false;
}

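/**
 * pch_spi_process_messages() - Work function that processes the message queue
 * @pwork:	Pointer to the embedded struct work_struct
 */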
static void pch_spi_process_messages(struct work_struct *pwork)
{
	struct spi_message *pmsg, *tmp;
	struct pch_spi_data *data;
	int bpw;

	data = container_of(pwork, struct pch_spi_data, work);
	dev_dbg(&data->master->dev, "%s data initialized\n", __func__);

	spin_lock(&data->lock);

	/* check if suspend has been initiated; if yes, flush the queue */
	if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) {
		dev_dbg(&data->master->dev,
			"%s suspend/remove initiated, flushing queue\n",
			__func__);
		list_for_each_entry_safe(pmsg, tmp, &data->queue, queue) {
			pmsg->status = -EIO;

			if (pmsg->complete) {
				spin_unlock(&data->lock);
				pmsg->complete(pmsg->context);
				spin_lock(&data->lock);
			}

			/* delete from queue */
			list_del_init(&pmsg->queue);
		}

		spin_unlock(&data->lock);
		return;
	}

	data->bcurrent_msg_processing = true;
	dev_dbg(&data->master->dev,
		"%s Set data->bcurrent_msg_processing= true\n", __func__);

	/* Get the message from the queue and delete it from there. */
	data->current_msg = list_entry(data->queue.next, struct spi_message,
				       queue);

	list_del_init(&data->current_msg->queue);

	data->current_msg->status = 0;

	pch_spi_select_chip(data, data->current_msg->spi);

	spin_unlock(&data->lock);

	if (data->use_dma)
		pch_spi_request_dma(data,
				    data->current_msg->spi->bits_per_word);
	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
	do {
		int cnt;

		/* If we are already processing a message, get the next
		   transfer structure from it; otherwise retrieve the 1st
		   transfer request from the message. */
		spin_lock(&data->lock);
		if (data->cur_trans == NULL) {
			data->cur_trans =
				list_entry(data->current_msg->transfers.next,
					   struct spi_transfer, transfer_list);
			dev_dbg(&data->master->dev,
				"%s :Getting 1st transfer message\n",
				__func__);
		} else {
			data->cur_trans =
				list_entry(data->cur_trans->transfer_list.next,
					   struct spi_transfer, transfer_list);
			dev_dbg(&data->master->dev,
				"%s :Getting next transfer message\n",
				__func__);
		}
		spin_unlock(&data->lock);

		if (!data->cur_trans->len)
			goto out;
		cnt = (data->cur_trans->len - 1) / PCH_BUF_SIZE + 1;
		data->save_total_len = data->cur_trans->len;
		if (data->use_dma) {
			int i;
			char *save_rx_buf = data->cur_trans->rx_buf;

			for (i = 0; i < cnt; i++) {
				pch_spi_handle_dma(data, &bpw);
				if (!pch_spi_start_transfer(data)) {
					data->transfer_complete = true;
					data->current_msg->status = -EIO;
					data->current_msg->complete
						(data->current_msg->context);
					data->bcurrent_msg_processing = false;
					data->current_msg = NULL;
					data->cur_trans = NULL;
					goto out;
				}
				pch_spi_copy_rx_data_for_dma(data, bpw);
			}
			data->cur_trans->rx_buf = save_rx_buf;
		} else {
			pch_spi_set_tx(data, &bpw);
			pch_spi_set_ir(data);
			pch_spi_copy_rx_data(data, bpw);
			kfree(data->pkt_rx_buff);
			data->pkt_rx_buff = NULL;
			kfree(data->pkt_tx_buff);
			data->pkt_tx_buff = NULL;
		}

		/* increment message count */
		data->cur_trans->len = data->save_total_len;
		data->current_msg->actual_length += data->cur_trans->len;

		dev_dbg(&data->master->dev,
			"%s:data->current_msg->actual_length=%d\n",
			__func__, data->current_msg->actual_length);

		/* check for delay after this transfer */
		if (data->cur_trans->delay_usecs) {
			dev_dbg(&data->master->dev, "%s:delay in usec=%d\n",
				__func__, data->cur_trans->delay_usecs);
			udelay(data->cur_trans->delay_usecs);
		}

		spin_lock(&data->lock);

		/* No more transfers in this message? */
		if ((data->cur_trans->transfer_list.next) ==
		    &(data->current_msg->transfers)) {
			pch_spi_nomore_transfer(data);
		}

		spin_unlock(&data->lock);

	} while (data->cur_trans != NULL);

out:
	pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH);
	if (data->use_dma)
		pch_spi_release_dma(data);
}

static void pch_spi_free_resources(struct pch_spi_board_data *board_dat,
				   struct pch_spi_data *data)
{
	dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);

	/* destroy the workqueue, if it exists */
	if (data->wk != NULL) {
		destroy_workqueue(data->wk);
		data->wk = NULL;
		dev_dbg(&board_dat->pdev->dev,
			"%s destroy_workqueue invoked successfully\n",
			__func__);
	}
}

static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
				 struct pch_spi_data *data)
{
	int retval = 0;

	dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);

	/* create workqueue */
	data->wk = create_singlethread_workqueue(KBUILD_MODNAME);
	if (!data->wk) {
		dev_err(&board_dat->pdev->dev,
			"%s create_singlethread_workqueue failed\n", __func__);
		retval = -EBUSY;
		goto err_return;
	}

	/* reset PCH SPI h/w */
	pch_spi_reset(data->master);
	dev_dbg(&board_dat->pdev->dev,
		"%s pch_spi_reset invoked successfully\n", __func__);

err_return:
	if (retval != 0) {
		dev_err(&board_dat->pdev->dev,
			"%s FAIL:invoking pch_spi_free_resources\n", __func__);
		pch_spi_free_resources(board_dat, data);
	}

	dev_dbg(&board_dat->pdev->dev, "%s Return=%d\n", __func__, retval);

	return retval;
}

static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
			     struct pch_spi_data *data)
{
	struct pch_spi_dma_ctrl *dma;

	dma = &data->dma;
	if (dma->tx_buf_dma)
		dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
				  dma->tx_buf_virt, dma->tx_buf_dma);
	if (dma->rx_buf_dma)
		dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
				  dma->rx_buf_virt, dma->rx_buf_dma);
}

static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
			      struct pch_spi_data *data)
{
	struct pch_spi_dma_ctrl *dma;

	dma = &data->dma;
	/* coherent bounce buffers shared with the DMA engine */
	dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
					      PCH_BUF_SIZE, &dma->tx_buf_dma,
					      GFP_KERNEL);
	dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
					      PCH_BUF_SIZE, &dma->rx_buf_dma,
					      GFP_KERNEL);
}


static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev)
{
	int ret;
	struct spi_master *master;
	struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
	struct pch_spi_data *data;

	dev_dbg(&plat_dev->dev, "%s:debug\n", __func__);

	master = spi_alloc_master(&board_dat->pdev->dev,
				  sizeof(struct pch_spi_data));
	if (!master) {
		dev_err(&plat_dev->dev, "spi_alloc_master[%d] failed.\n",
			plat_dev->id);
		return -ENOMEM;
	}

	data = spi_master_get_devdata(master);
	data->master = master;

	platform_set_drvdata(plat_dev, data);

	/* base address + offset of this channel's registers */
	data->io_base_addr = pci_resource_start(board_dat->pdev, 1) +
			     PCH_ADDRESS_SIZE * plat_dev->id;
	data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0);
	if (!data->io_remap_addr) {
		dev_err(&plat_dev->dev, "%s pci_iomap failed\n", __func__);
		ret = -ENOMEM;
		goto err_pci_iomap;
	}
	data->io_remap_addr += PCH_ADDRESS_SIZE * plat_dev->id;

	dev_dbg(&plat_dev->dev, "[ch%d] remap_addr=%p\n",
		plat_dev->id, data->io_remap_addr);

	/* initialize members of SPI master */
	master->num_chipselect = PCH_MAX_CS;
	master->setup = pch_spi_setup;
	master->transfer = pch_spi_transfer;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;

	data->board_dat = board_dat;
	data->plat_dev = plat_dev;
	data->n_curnt_chip = 255;
	data->status = STATUS_RUNNING;
	data->ch = plat_dev->id;
	data->use_dma = use_dma;

	INIT_LIST_HEAD(&data->queue);
	spin_lock_init(&data->lock);
	INIT_WORK(&data->work, pch_spi_process_messages);
	init_waitqueue_head(&data->wait);

	ret = pch_spi_get_resources(board_dat, data);
	if (ret) {
		dev_err(&plat_dev->dev, "%s fail(retval=%d)\n", __func__, ret);
		goto err_spi_get_resources;
	}

	ret = request_irq(board_dat->pdev->irq, pch_spi_handler,
			  IRQF_SHARED, KBUILD_MODNAME, data);
	if (ret) {
		dev_err(&plat_dev->dev,
			"%s request_irq failed\n", __func__);
		goto err_request_irq;
	}
	data->irq_reg_sts = true;

	pch_spi_set_master_mode(master);

	ret = spi_register_master(master);
	if (ret != 0) {
		dev_err(&plat_dev->dev,
			"%s spi_register_master FAILED\n", __func__);
		goto err_spi_register_master;
	}

	if (use_dma) {
		dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
		pch_alloc_dma_buf(board_dat, data);
	}

	return 0;

err_spi_register_master:
	free_irq(board_dat->pdev->irq, data);
err_request_irq:
	pch_spi_free_resources(board_dat, data);
err_spi_get_resources:
	pci_iounmap(board_dat->pdev, data->io_remap_addr);
err_pci_iomap:
	spi_master_put(master);

	return ret;
}

static int __devexit pch_spi_pd_remove(struct platform_device *plat_dev)
{
	struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
	struct pch_spi_data *data = platform_get_drvdata(plat_dev);
	int count;
	unsigned long flags;

	dev_dbg(&plat_dev->dev, "%s:[ch%d] irq=%d\n",
		__func__, plat_dev->id, board_dat->pdev->irq);

	if (use_dma)
		pch_free_dma_buf(board_dat, data);

	/* check for any pending messages; if the queue is still full after
	   the timeout, proceed with the unload anyway */
	count = 500;
	spin_lock_irqsave(&data->lock, flags);
	data->status = STATUS_EXITING;
	while ((list_empty(&data->queue) == 0) && --count) {
		dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n",
			__func__);
		spin_unlock_irqrestore(&data->lock, flags);
		msleep(PCH_SLEEP_TIME);
		spin_lock_irqsave(&data->lock, flags);
	}
	spin_unlock_irqrestore(&data->lock, flags);

	pch_spi_free_resources(board_dat, data);

	/* disable interrupts & free IRQ */
	if (data->irq_reg_sts) {
		/* disable interrupts */
		pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
		data->irq_reg_sts = false;
		free_irq(board_dat->pdev->irq, data);
	}

	pci_iounmap(board_dat->pdev, data->io_remap_addr);
	spi_unregister_master(data->master);

	return 0;
}
#ifdef CONFIG_PM
static int pch_spi_pd_suspend(struct platform_device *pd_dev,
			      pm_message_t state)
{
	u8 count;
	struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
	struct pch_spi_data *data = platform_get_drvdata(pd_dev);

	dev_dbg(&pd_dev->dev, "%s ENTRY\n", __func__);

	if (!board_dat) {
		dev_err(&pd_dev->dev,
			"%s dev_get_platdata returned NULL\n", __func__);
		return -EFAULT;
	}

	/* check if the current message has been processed:
	   only then will the transfer be suspended */
	count = 255;
	while ((--count) > 0) {
		if (!(data->bcurrent_msg_processing))
			break;
		msleep(PCH_SLEEP_TIME);
	}

	/* free IRQ */
	if (data->irq_reg_sts) {
		/* disable all interrupts */
		pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
		pch_spi_reset(data->master);
		free_irq(board_dat->pdev->irq, data);

		data->irq_reg_sts = false;
		dev_dbg(&pd_dev->dev,
			"%s free_irq invoked successfully.\n", __func__);
	}

	return 0;
}

static int pch_spi_pd_resume(struct platform_device *pd_dev)
{
	struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
	struct pch_spi_data *data = platform_get_drvdata(pd_dev);
	int retval;

	if (!board_dat) {
		dev_err(&pd_dev->dev,
			"%s dev_get_platdata returned NULL\n", __func__);
		return -EFAULT;
	}

	if (!data->irq_reg_sts) {
		/* register IRQ */
		retval = request_irq(board_dat->pdev->irq, pch_spi_handler,
				     IRQF_SHARED, KBUILD_MODNAME, data);
		if (retval < 0) {
			dev_err(&pd_dev->dev,
				"%s request_irq failed\n", __func__);
			return retval;
		}

		/* reset PCH SPI h/w */
		pch_spi_reset(data->master);
		pch_spi_set_master_mode(data->master);
		data->irq_reg_sts = true;
	}
	return 0;
}
#else
#define pch_spi_pd_suspend NULL
#define pch_spi_pd_resume NULL
#endif

static struct platform_driver pch_spi_pd_driver = {
	.driver = {
		.name = "pch-spi",
		.owner = THIS_MODULE,
	},
	.probe = pch_spi_pd_probe,
	.remove = __devexit_p(pch_spi_pd_remove),
	.suspend = pch_spi_pd_suspend,
	.resume = pch_spi_pd_resume
};

static int __devinit pch_spi_probe(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct pch_spi_board_data *board_dat;
	struct platform_device *pd_dev = NULL;
	int retval;
	int i;
	struct pch_pd_dev_save *pd_dev_save;

	pd_dev_save = kzalloc(sizeof(struct pch_pd_dev_save), GFP_KERNEL);
	if (!pd_dev_save) {
		dev_err(&pdev->dev, "%s Can't allocate pd_dev_save\n",
			__func__);
		return -ENOMEM;
	}

	board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL);
	if (!board_dat) {
		dev_err(&pdev->dev, "%s Can't allocate board_dat\n", __func__);
		retval = -ENOMEM;
		goto err_no_mem;
	}

	retval = pci_request_regions(pdev, KBUILD_MODNAME);
	if (retval) {
		dev_err(&pdev->dev, "%s request_region failed\n", __func__);
		goto pci_request_regions;
	}

	board_dat->pdev = pdev;
	board_dat->num = id->driver_data;
	pd_dev_save->num = id->driver_data;
	pd_dev_save->board_dat = board_dat;

	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "%s pci_enable_device failed\n", __func__);
		goto pci_enable_device;
	}

	/* create one platform device per SPI channel */
	for (i = 0; i < board_dat->num; i++) {
		pd_dev = platform_device_alloc("pch-spi", i);
		if (!pd_dev) {
			dev_err(&pdev->dev, "platform_device_alloc failed\n");
			retval = -ENOMEM;
			goto err_platform_device;
		}
		pd_dev_save->pd_save[i] = pd_dev;
		pd_dev->dev.parent = &pdev->dev;

		retval = platform_device_add_data(pd_dev, board_dat,
						  sizeof(*board_dat));
		if (retval) {
			dev_err(&pdev->dev,
				"platform_device_add_data failed\n");
			platform_device_put(pd_dev);
			goto err_platform_device;
		}

		retval = platform_device_add(pd_dev);
		if (retval) {
			dev_err(&pdev->dev, "platform_device_add failed\n");
			platform_device_put(pd_dev);
			goto err_platform_device;
		}
	}

	pci_set_drvdata(pdev, pd_dev_save);

	return 0;

err_platform_device:
	/* unregister the channels that were successfully added */
	while (--i >= 0)
		platform_device_unregister(pd_dev_save->pd_save[i]);
	pci_disable_device(pdev);
pci_enable_device:
	pci_release_regions(pdev);
pci_request_regions:
	kfree(board_dat);
err_no_mem:
	kfree(pd_dev_save);

	return retval;
}

static void __devexit pch_spi_remove(struct pci_dev *pdev)
{
	int i;
	struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s ENTRY:pdev=%p\n", __func__, pdev);

	for (i = 0; i < pd_dev_save->num; i++)
		platform_device_unregister(pd_dev_save->pd_save[i]);

	pci_disable_device(pdev);
	pci_release_regions(pdev);
	kfree(pd_dev_save->board_dat);
	kfree(pd_dev_save);
}

#ifdef CONFIG_PM
static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);

	pd_dev_save->board_dat->suspend_sts = true;

	/* save config space */
	retval = pci_save_state(pdev);
	if (retval == 0) {
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3hot);
	} else {
		dev_err(&pdev->dev, "%s pci_save_state failed\n", __func__);
	}

	return retval;
}

static int pch_spi_resume(struct pci_dev *pdev)
{
	int retval;
	struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	retval = pci_enable_device(pdev);
	if (retval < 0) {
		dev_err(&pdev->dev,
			"%s pci_enable_device failed\n", __func__);
	} else {
		pci_enable_wake(pdev, PCI_D3hot, 0);

		/* mark the device as resumed */
		pd_dev_save->board_dat->suspend_sts = false;
	}

	return retval;
}
#else
#define pch_spi_suspend NULL
#define pch_spi_resume NULL
#endif

static struct pci_driver pch_spi_pcidev_driver = {
	.name = "pch_spi",
	.id_table = pch_spi_pcidev_id,
	.probe = pch_spi_probe,
	.remove = __devexit_p(pch_spi_remove),
	.suspend = pch_spi_suspend,
	.resume = pch_spi_resume,
};

static int __init pch_spi_init(void)
{
	int ret;

	ret = platform_driver_register(&pch_spi_pd_driver);
	if (ret)
		return ret;

	ret = pci_register_driver(&pch_spi_pcidev_driver);
	if (ret) {
		/* don't leave the platform driver registered on failure */
		platform_driver_unregister(&pch_spi_pd_driver);
		return ret;
	}

	return 0;
}
module_init(pch_spi_init);

static void __exit pch_spi_exit(void)
{
	pci_unregister_driver(&pch_spi_pcidev_driver);
	platform_driver_unregister(&pch_spi_pd_driver);
}
module_exit(pch_spi_exit);

module_param(use_dma, int, 0644);
MODULE_PARM_DESC(use_dma,
		 "Use DMA for data transfers: 1 to enable (default), 0 to disable");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor ML7xxx IOH SPI Driver");