/*
 * pxaficp_ir.c - Intel PXA2xx FICP/STUART IrDA driver.
 *
 * Provides SIR (9600-115200 baud, via the STUART) and 4 Mb/s FIR
 * (via the FICP) operation for the Linux/IrDA stack.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma/pxa-dma.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>

#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#include <linux/platform_data/irda-pxaficp.h>
/*
 * <mach/regs-uart.h> defines the STUART registers as absolute addresses;
 * mask them down to offsets so they can be used with the stuart_readl()
 * and stuart_writel() accessors below.
 */
#undef __REG
#define __REG(x) ((x) & 0xffff)
#include <mach/regs-uart.h>

#define ICCR0 0x0000
#define ICCR1 0x0004
#define ICCR2 0x0008
#define ICDR 0x000c
#define ICSR0 0x0014
#define ICSR1 0x0018

#define ICCR0_AME (1 << 7)
#define ICCR0_TIE (1 << 6)
#define ICCR0_RIE (1 << 5)
#define ICCR0_RXE (1 << 4)
#define ICCR0_TXE (1 << 3)
#define ICCR0_TUS (1 << 2)
#define ICCR0_LBM (1 << 1)
#define ICCR0_ITR (1 << 0)

#define ICCR2_RXP (1 << 3)
#define ICCR2_TXP (1 << 2)
#define ICCR2_TRIG (3 << 0)
#define ICCR2_TRIG_8 (0 << 0)
#define ICCR2_TRIG_16 (1 << 0)
#define ICCR2_TRIG_32 (2 << 0)

#define ICSR0_EOC (1 << 6)
#define ICSR0_FRE (1 << 5)
#define ICSR0_RFS (1 << 4)
#define ICSR0_TFS (1 << 3)
#define ICSR0_RAB (1 << 2)
#define ICSR0_TUR (1 << 1)
#define ICSR0_EIF (1 << 0)

#define ICSR1_ROR (1 << 6)
#define ICSR1_CRE (1 << 5)
#define ICSR1_EOF (1 << 4)
#define ICSR1_TNF (1 << 3)
#define ICSR1_RNE (1 << 2)
#define ICSR1_TBY (1 << 1)
#define ICSR1_RSY (1 << 0)

#define IrSR_RXPL_NEG_IS_ZERO (1 << 4)
#define IrSR_RXPL_POS_IS_ZERO 0x0
#define IrSR_TXPL_NEG_IS_ZERO (1 << 3)
#define IrSR_TXPL_POS_IS_ZERO 0x0
#define IrSR_XMODE_PULSE_1_6 (1 << 2)
#define IrSR_XMODE_PULSE_3_16 0x0
#define IrSR_RCVEIR_IR_MODE (1 << 1)
#define IrSR_RCVEIR_UART_MODE 0x0
#define IrSR_XMITIR_IR_MODE (1 << 0)
#define IrSR_XMITIR_UART_MODE 0x0

#define IrSR_IR_RECEIVE_ON (\
		IrSR_RXPL_NEG_IS_ZERO | \
		IrSR_TXPL_POS_IS_ZERO | \
		IrSR_XMODE_PULSE_3_16 | \
		IrSR_RCVEIR_IR_MODE | \
		IrSR_XMITIR_UART_MODE)

#define IrSR_IR_TRANSMIT_ON (\
		IrSR_RXPL_NEG_IS_ZERO | \
		IrSR_TXPL_POS_IS_ZERO | \
		IrSR_XMODE_PULSE_3_16 | \
		IrSR_RCVEIR_UART_MODE | \
		IrSR_XMITIR_IR_MODE)

#define ficp_writel(irda, val, off) \
	do { \
		dev_vdbg((irda)->dev, \
			 "%s():%d ficp_writel(0x%x, %s)\n", \
			 __func__, __LINE__, (val), #off); \
		writel_relaxed((val), (irda)->irda_base + (off)); \
	} while (0)

#define ficp_readl(irda, off) \
	({ \
		unsigned int _v; \
		_v = readl_relaxed((irda)->irda_base + (off)); \
		dev_vdbg((irda)->dev, \
			 "%s():%d ficp_readl(%s): 0x%x\n", \
			 __func__, __LINE__, #off, _v); \
		_v; \
	})

#define stuart_writel(irda, val, off) \
	do { \
		dev_vdbg((irda)->dev, \
			 "%s():%d stuart_writel(0x%x, %s)\n", \
			 __func__, __LINE__, (val), #off); \
		writel_relaxed((val), (irda)->stuart_base + (off)); \
	} while (0)

#define stuart_readl(irda, off) \
	({ \
		unsigned int _v; \
		_v = readl_relaxed((irda)->stuart_base + (off)); \
		dev_vdbg((irda)->dev, \
			 "%s():%d stuart_readl(%s): 0x%x\n", \
			 __func__, __LINE__, #off, _v); \
		_v; \
	})

struct pxa_irda {
	int speed;
	int newspeed;
	unsigned long long last_clk;

	void __iomem *stuart_base;
	void __iomem *irda_base;
	unsigned char *dma_rx_buff;
	unsigned char *dma_tx_buff;
	dma_addr_t dma_rx_buff_phy;
	dma_addr_t dma_tx_buff_phy;
	unsigned int dma_tx_buff_len;
	struct dma_chan *txdma;
	struct dma_chan *rxdma;
	dma_cookie_t rx_cookie;
	dma_cookie_t tx_cookie;
	int drcmr_rx;
	int drcmr_tx;
	dma_addr_t icdr_dma; /* bus address of the ICDR FIFO, for DMA */

	int uart_irq;
	int icp_irq;

	struct irlap_cb *irlap;
	struct qos_info qos;

	iobuff_t tx_buff;
	iobuff_t rx_buff;

	struct net_device *netdev; /* back-pointer for the DMA callbacks */
	struct device *dev;
	struct pxaficp_platform_data *pdata;
	struct clk *fir_clk;
	struct clk *sir_clk;
	struct clk *cur_clk;
};

static int pxa_irda_set_speed(struct pxa_irda *si, int speed);

static inline void pxa_irda_disable_clk(struct pxa_irda *si)
{
	if (si->cur_clk)
		clk_disable_unprepare(si->cur_clk);
	si->cur_clk = NULL;
}

static inline void pxa_irda_enable_firclk(struct pxa_irda *si)
{
	si->cur_clk = si->fir_clk;
	clk_prepare_enable(si->fir_clk);
}

static inline void pxa_irda_enable_sirclk(struct pxa_irda *si)
{
	si->cur_clk = si->sir_clk;
	clk_prepare_enable(si->sir_clk);
}

#define IS_FIR(si) ((si)->speed >= 4000000)
#define IRDA_FRAME_SIZE_LIMIT 2047

static void pxa_irda_fir_dma_rx_irq(void *data);
static void pxa_irda_fir_dma_tx_irq(void *data);

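/* Kick off a FIR receive DMA transfer into the coherent Rx bounce buffer. */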
static inline void pxa_irda_fir_dma_rx_start(struct pxa_irda *si)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_slave_single(si->rxdma, si->dma_rx_buff_phy,
					 IRDA_FRAME_SIZE_LIMIT, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(si->dev, "dmaengine_prep_slave_single() failed\n");
		return;
	}
	tx->callback = pxa_irda_fir_dma_rx_irq;
	tx->callback_param = si->netdev;	/* the callbacks expect the net_device */
	si->rx_cookie = dmaengine_submit(tx);
	dma_async_issue_pending(si->rxdma);
}

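/* Kick off a FIR transmit DMA transfer from the coherent Tx bounce buffer. */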
static inline void pxa_irda_fir_dma_tx_start(struct pxa_irda *si)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_slave_single(si->txdma, si->dma_tx_buff_phy,
					 si->dma_tx_buff_len, DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(si->dev, "dmaengine_prep_slave_single() failed\n");
		return;
	}
	tx->callback = pxa_irda_fir_dma_tx_irq;
	tx->callback_param = si->netdev;
	si->tx_cookie = dmaengine_submit(tx);
	dma_async_issue_pending(si->txdma);
}

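/* Set the IrDA transceiver mode (SIR/FIR/off) via the platform hooks. */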
static void pxa_irda_set_mode(struct pxa_irda *si, int mode)
{
	if (si->pdata->transceiver_mode)
		si->pdata->transceiver_mode(si->dev, mode);
	else {
		if (gpio_is_valid(si->pdata->gpio_pwdown))
			gpio_set_value(si->pdata->gpio_pwdown,
					!(mode & IR_OFF) ^
					!si->pdata->gpio_pwdown_inverted);
		pxa2xx_transceiver_mode(si->dev, mode);
	}
}

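/*
 * Set the IrDA communications speed: program the STUART divisor for the
 * SIR rates, or hand the port over to the FICP for 4 Mb/s FIR.
 */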
static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
{
	unsigned long flags;
	unsigned int divisor;

	switch (speed) {
	case 9600: case 19200: case 38400:
	case 57600: case 115200:

		/* BaudRate = 14.7456 MHz / (16 * Divisor) */
		divisor = 14745600 / (16 * speed);

		local_irq_save(flags);

		if (IS_FIR(si)) {
			/* stop RX DMA */
			dmaengine_terminate_all(si->rxdma);
			/* disable FICP */
			ficp_writel(si, 0, ICCR0);
			pxa_irda_disable_clk(si);

			/* set board transceiver to SIR mode */
			pxa_irda_set_mode(si, IR_SIRMODE);

			/* enable the STUART clock */
			pxa_irda_enable_sirclk(si);
		}

		/* disable STUART first */
		stuart_writel(si, 0, STIER);

		/* access DLL & DLH */
		stuart_writel(si, stuart_readl(si, STLCR) | LCR_DLAB, STLCR);
		stuart_writel(si, divisor & 0xff, STDLL);
		stuart_writel(si, divisor >> 8, STDLH);
		stuart_writel(si, stuart_readl(si, STLCR) & ~LCR_DLAB, STLCR);

		si->speed = speed;
		stuart_writel(si, IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6,
			      STISR);
		stuart_writel(si, IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE,
			      STIER);

		local_irq_restore(flags);
		break;

	case 4000000:
		local_irq_save(flags);

		/* disable STUART */
		stuart_writel(si, 0, STIER);
		stuart_writel(si, 0, STISR);
		pxa_irda_disable_clk(si);

		/* disable FICP first */
		ficp_writel(si, 0, ICCR0);

		/* set board transceiver to FIR mode */
		pxa_irda_set_mode(si, IR_FIRMODE);

		/* enable the FICP clock */
		pxa_irda_enable_firclk(si);

		si->speed = speed;
		pxa_irda_fir_dma_rx_start(si);
		ficp_writel(si, ICCR0_ITR | ICCR0_RXE, ICCR0);

		local_irq_restore(flags);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

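/* SIR interrupt service routine. */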
static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct pxa_irda *si = netdev_priv(dev);
	int iir, lsr, data;

	iir = stuart_readl(si, STIIR);

	switch (iir & 0x0F) {
	case 0x06: /* Receiver Line Status */
		lsr = stuart_readl(si, STLSR);
		while (lsr & LSR_FIFOE) {
			data = stuart_readl(si, STRBR);
			if (lsr & (LSR_OE | LSR_PE | LSR_FE | LSR_BI)) {
				printk(KERN_DEBUG "pxa_ir: sir receiving error\n");
				dev->stats.rx_errors++;
				if (lsr & LSR_FE)
					dev->stats.rx_frame_errors++;
				if (lsr & LSR_OE)
					dev->stats.rx_fifo_errors++;
			} else {
				dev->stats.rx_bytes++;
				async_unwrap_char(dev, &dev->stats,
						  &si->rx_buff, data);
			}
			lsr = stuart_readl(si, STLSR);
		}
		si->last_clk = sched_clock();
		break;

	case 0x04: /* Received Data Available */
		/* fall through */

	case 0x0C: /* Character Timeout Indication */
		do {
			dev->stats.rx_bytes++;
			async_unwrap_char(dev, &dev->stats, &si->rx_buff,
					  stuart_readl(si, STRBR));
		} while (stuart_readl(si, STLSR) & LSR_DR);
		si->last_clk = sched_clock();
		break;

	case 0x02: /* Transmit FIFO Data Request */
		while ((si->tx_buff.len) &&
		       (stuart_readl(si, STLSR) & LSR_TDRQ)) {
			stuart_writel(si, *si->tx_buff.data++, STTHR);
			si->tx_buff.len -= 1;
		}

		if (si->tx_buff.len == 0) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += si->tx_buff.data - si->tx_buff.head;

			/* We need to ensure that the transmitter is stopped
			 * by the time this handler returns.
			 */
			while ((stuart_readl(si, STLSR) & LSR_TEMT) == 0)
				cpu_relax();
			si->last_clk = sched_clock();

			/*
			 * Ok, we've finished transmitting.  Now enable
			 * the receiver.  Sometimes we get a receive IRQ
			 * immediately after a transmit...
			 */
			if (si->newspeed) {
				pxa_irda_set_speed(si, si->newspeed);
				si->newspeed = 0;
			} else {
				/* enable IR Receiver, disable IR Transmitter */
				stuart_writel(si, IrSR_IR_RECEIVE_ON |
					      IrSR_XMODE_PULSE_1_6, STISR);
				/* enable STUART and receive interrupts */
				stuart_writel(si, IER_UUE | IER_RLSE |
					      IER_RAVIE | IER_RTIOE, STIER);
			}

			netif_wake_queue(dev);
		}
		break;
	}

	return IRQ_HANDLED;
}

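/* FIR receive DMA interrupt handler. */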
static void pxa_irda_fir_dma_rx_irq(void *data)
{
	struct net_device *dev = data;
	struct pxa_irda *si = netdev_priv(dev);

	dmaengine_terminate_all(si->rxdma);
	netdev_dbg(dev, "pxa_ir: fir rx dma bus error\n");
}

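/* FIR transmit DMA interrupt handler. */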
static void pxa_irda_fir_dma_tx_irq(void *data)
{
	struct net_device *dev = data;
	struct pxa_irda *si = netdev_priv(dev);

	dmaengine_terminate_all(si->txdma);
	if (dmaengine_tx_status(si->txdma, si->tx_cookie, NULL) == DMA_ERROR) {
		dev->stats.tx_errors++;
	} else {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += si->dma_tx_buff_len;
	}

	while (ficp_readl(si, ICSR1) & ICSR1_TBY)
		cpu_relax();
	si->last_clk = sched_clock();

	/*
	 * Settling delay after the transmitter goes idle, before the
	 * FICP is reprogrammed below.
	 */
	udelay(120);

	if (si->newspeed) {
		pxa_irda_set_speed(si, si->newspeed);
		si->newspeed = 0;
	} else {
		int i = 64;

		/* restart receive DMA with a drained Rx FIFO */
		ficp_writel(si, 0, ICCR0);
		pxa_irda_fir_dma_rx_start(si);
		while ((ficp_readl(si, ICSR1) & ICSR1_RNE) && i--)
			ficp_readl(si, ICDR);
		ficp_writel(si, ICCR0_ITR | ICCR0_RXE, ICCR0);

		if (i < 0)
			printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");
	}
	netif_wake_queue(dev);
}

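/* EIF (Error In FIFO / End In Frame) handler for FIR reception. */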
static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, int icsr0)
{
	unsigned int len, stat, data;
	struct dma_tx_state state;

	/* Get the current data position. */
	dmaengine_tx_status(si->rxdma, si->rx_cookie, &state);
	len = IRDA_FRAME_SIZE_LIMIT - state.residue;

	do {
		/* Read Status, and then Data. */
		stat = ficp_readl(si, ICSR1);
		rmb();
		data = ficp_readl(si, ICDR);

		if (stat & (ICSR1_CRE | ICSR1_ROR)) {
			dev->stats.rx_errors++;
			if (stat & ICSR1_CRE) {
				printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
				dev->stats.rx_crc_errors++;
			}
			if (stat & ICSR1_ROR) {
				printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
				dev->stats.rx_over_errors++;
			}
		} else {
			si->dma_rx_buff[len++] = data;
		}

		/* If we hit the end of frame, there's no point in continuing. */
		if (stat & ICSR1_EOF)
			break;
	} while (ficp_readl(si, ICSR0) & ICSR0_EIF);

	if (stat & ICSR1_EOF) {
		/* a complete frame has been received */
		struct sk_buff *skb;

		if (icsr0 & ICSR0_FRE) {
			printk(KERN_ERR "pxa_ir: dropping erroneous frame\n");
			dev->stats.rx_dropped++;
			return;
		}

		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
			dev->stats.rx_dropped++;
			return;
		}

		/* Align IP header to 20 bytes */
		skb_reserve(skb, 1);
		skb_copy_to_linear_data(skb, si->dma_rx_buff, len);
		skb_put(skb, len);

		/* Feed it to IrLAP */
		skb->dev = dev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	}
}

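/* FIR interrupt handler. */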
static irqreturn_t pxa_irda_fir_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct pxa_irda *si = netdev_priv(dev);
	int icsr0, i = 64;

	/* stop RX DMA */
	dmaengine_terminate_all(si->rxdma);
	si->last_clk = sched_clock();
	icsr0 = ficp_readl(si, ICSR0);

	if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) {
		if (icsr0 & ICSR0_FRE) {
			printk(KERN_DEBUG "pxa_ir: fir receive frame error\n");
			dev->stats.rx_frame_errors++;
		} else {
			printk(KERN_DEBUG "pxa_ir: fir receive abort\n");
			dev->stats.rx_errors++;
		}
		ficp_writel(si, icsr0 & (ICSR0_FRE | ICSR0_RAB), ICSR0);
	}

	if (icsr0 & ICSR0_EIF) {
		/* An error in FIFO occurred, or there is an end of frame */
		pxa_irda_fir_irq_eif(si, dev, icsr0);
	}

	/* restart receive DMA with a drained Rx FIFO */
	ficp_writel(si, 0, ICCR0);
	pxa_irda_fir_dma_rx_start(si);
	while ((ficp_readl(si, ICSR1) & ICSR1_RNE) && i--)
		ficp_readl(si, ICDR);
	ficp_writel(si, ICCR0_ITR | ICCR0_RXE, ICCR0);

	if (i < 0)
		printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");

	return IRQ_HANDLED;
}

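/* hard_xmit interface of the irda device */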
static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);
	int speed = irda_get_next_speed(skb);

	/*
	 * Does this packet contain a request to change the interface
	 * speed?  If so, remember it until we complete the transmission
	 * of this frame.
	 */
	if (speed != si->speed && speed != -1)
		si->newspeed = speed;

	/* If this is an empty frame, we can bypass a lot. */
	if (skb->len == 0) {
		if (si->newspeed) {
			si->newspeed = 0;
			pxa_irda_set_speed(si, speed);
		}
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	netif_stop_queue(dev);

	if (!IS_FIR(si)) {
		si->tx_buff.data = si->tx_buff.head;
		si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize);

		/* Disable STUART interrupts and switch to transmit mode. */
		stuart_writel(si, 0, STIER);
		stuart_writel(si, IrSR_IR_TRANSMIT_ON | IrSR_XMODE_PULSE_1_6,
			      STISR);

		/* enable STUART and transmit interrupts */
		stuart_writel(si, IER_UUE | IER_TIE, STIER);
	} else {
		unsigned long mtt = irda_get_mtt(skb);

		si->dma_tx_buff_len = skb->len;
		skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len);

		/* mtt is in us; last_clk (from sched_clock()) is in ns */
		if (mtt)
			while ((sched_clock() - si->last_clk) < mtt * 1000)
				cpu_relax();

		/* stop RX DMA, disable FICP */
		dmaengine_terminate_all(si->rxdma);
		ficp_writel(si, 0, ICCR0);

		pxa_irda_fir_dma_tx_start(si);
		ficp_writel(si, ICCR0_ITR | ICCR0_TXE, ICCR0);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int pxa_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
{
	struct if_irda_req *rq = (struct if_irda_req *)ifreq;
	struct pxa_irda *si = netdev_priv(dev);
	int ret;

	switch (cmd) {
	case SIOCSBANDWIDTH:
		ret = -EPERM;
		if (capable(CAP_NET_ADMIN)) {
			/*
			 * We are unable to set the speed if the
			 * device is not running.
			 */
			if (netif_running(dev)) {
				ret = pxa_irda_set_speed(si,
						rq->ifr_baudrate);
			} else {
				printk(KERN_INFO "pxa_ir: SIOCSBANDWIDTH: !netif_running\n");
				ret = 0;
			}
		}
		break;

	case SIOCSMEDIABUSY:
		ret = -EPERM;
		if (capable(CAP_NET_ADMIN)) {
			irda_device_set_media_busy(dev, TRUE);
			ret = 0;
		}
		break;

	case SIOCGRECEIVING:
		ret = 0;
		rq->ifr_receiving = IS_FIR(si) ? 0
					: si->rx_buff.state != OUTSIDE_FRAME;
		break;

	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

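/* Hardware initialisation shared by ndo_open and resume. */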
static void pxa_irda_startup(struct pxa_irda *si)
{
	/* Disable STUART interrupts */
	stuart_writel(si, 0, STIER);
	/* enable STUART interrupt to the processor */
	stuart_writel(si, MCR_OUT2, STMCR);
	/* configure SIR frame format: StartBit - Data 7 ... 0 - Stop Bit */
	stuart_writel(si, LCR_WLS0 | LCR_WLS1, STLCR);
	/* enable FIFO, we use FIFO to improve performance */
	stuart_writel(si, FCR_TRFIFOE | FCR_ITL_32, STFCR);

	/* disable FICP */
	ficp_writel(si, 0, ICCR0);
	/* configure FICP ICCR2 */
	ficp_writel(si, ICCR2_TXP | ICCR2_TRIG_32, ICCR2);

	/* force SIR reinitialization */
	si->speed = 4000000;
	pxa_irda_set_speed(si, 9600);

	printk(KERN_DEBUG "pxa_ir: irda startup\n");
}

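/* Quiesce the STUART, FICP and DMA, and power the transceiver off. */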
static void pxa_irda_shutdown(struct pxa_irda *si)
{
	unsigned long flags;

	local_irq_save(flags);

	/* disable STUART and its interrupts */
	stuart_writel(si, 0, STIER);
	/* disable STUART SIR mode */
	stuart_writel(si, 0, STISR);

	/* stop DMA */
	dmaengine_terminate_all(si->rxdma);
	dmaengine_terminate_all(si->txdma);
	/* disable FICP */
	ficp_writel(si, 0, ICCR0);

	/* disable whichever clock is running */
	pxa_irda_disable_clk(si);

	local_irq_restore(flags);

	/* power off board transceiver */
	pxa_irda_set_mode(si, IR_OFF);

	printk(KERN_DEBUG "pxa_ir: irda shutdown\n");
}

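/* ndo_open: request IRQs, set up the DMA channels and buffers, start IrLAP. */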
static int pxa_irda_start(struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);
	dma_cap_mask_t mask;
	struct dma_slave_config config;
	struct pxad_param param;
	int err;

	si->speed = 9600;

	err = request_irq(si->uart_irq, pxa_irda_sir_irq, 0, dev->name, dev);
	if (err)
		goto err_irq1;

	err = request_irq(si->icp_irq, pxa_irda_fir_irq, 0, dev->name, dev);
	if (err)
		goto err_irq2;

	/*
	 * The interrupts must remain disabled for now.
	 */
	disable_irq(si->uart_irq);
	disable_irq(si->icp_irq);

	err = -EBUSY;
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	param.prio = PXAD_PRIO_LOWEST;

	/* Both directions target the ICDR FIFO; use its bus address. */
	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	config.src_addr = si->icdr_dma;
	config.dst_addr = si->icdr_dma;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;

	param.drcmr = si->drcmr_rx;
	si->rxdma = dma_request_slave_channel_compat(mask, pxad_filter_fn,
						     &param, &dev->dev, "rx");
	if (!si->rxdma)
		goto err_rx_dma;

	param.drcmr = si->drcmr_tx;
	si->txdma = dma_request_slave_channel_compat(mask, pxad_filter_fn,
						     &param, &dev->dev, "tx");
	if (!si->txdma)
		goto err_tx_dma;

	err = dmaengine_slave_config(si->rxdma, &config);
	if (err)
		goto err_dma_rx_buff;
	err = dmaengine_slave_config(si->txdma, &config);
	if (err)
		goto err_dma_rx_buff;

	err = -ENOMEM;
	si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
					     &si->dma_rx_buff_phy, GFP_KERNEL);
	if (!si->dma_rx_buff)
		goto err_dma_rx_buff;

	si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
					     &si->dma_tx_buff_phy, GFP_KERNEL);
	if (!si->dma_tx_buff)
		goto err_dma_tx_buff;

	/* Setup the serial port for the initial speed. */
	pxa_irda_startup(si);

	/*
	 * Open a new IrLAP layer instance.
	 */
	si->irlap = irlap_open(dev, &si->qos, "pxa");
	err = -ENOMEM;
	if (!si->irlap)
		goto err_irlap;

	/*
	 * Now enable the interrupts and start the queue.
	 */
	enable_irq(si->uart_irq);
	enable_irq(si->icp_irq);
	netif_start_queue(dev);

	printk(KERN_DEBUG "pxa_ir: irda driver opened\n");

	return 0;

err_irlap:
	pxa_irda_shutdown(si);
	dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
err_dma_tx_buff:
	dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
err_dma_rx_buff:
	dma_release_channel(si->txdma);
err_tx_dma:
	dma_release_channel(si->rxdma);
err_rx_dma:
	free_irq(si->icp_irq, dev);
err_irq2:
	free_irq(si->uart_irq, dev);
err_irq1:

	return err;
}

static int pxa_irda_stop(struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);

	netif_stop_queue(dev);

	pxa_irda_shutdown(si);

	/* Stop IrLAP */
	if (si->irlap) {
		irlap_close(si->irlap);
		si->irlap = NULL;
	}

	free_irq(si->uart_irq, dev);
	free_irq(si->icp_irq, dev);

	dmaengine_terminate_all(si->rxdma);
	dmaengine_terminate_all(si->txdma);
	dma_release_channel(si->rxdma);
	dma_release_channel(si->txdma);

	if (si->dma_rx_buff)
		dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
	if (si->dma_tx_buff)
		dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);

	printk(KERN_DEBUG "pxa_ir: irda driver closed\n");
	return 0;
}

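/* Legacy platform PM hooks: shut the port down across suspend/resume. */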
static int pxa_irda_suspend(struct platform_device *_dev, pm_message_t state)
{
	struct net_device *dev = platform_get_drvdata(_dev);
	struct pxa_irda *si;

	if (dev && netif_running(dev)) {
		si = netdev_priv(dev);
		netif_device_detach(dev);
		pxa_irda_shutdown(si);
	}

	return 0;
}

static int pxa_irda_resume(struct platform_device *_dev)
{
	struct net_device *dev = platform_get_drvdata(_dev);
	struct pxa_irda *si;

	if (dev && netif_running(dev)) {
		si = netdev_priv(dev);
		pxa_irda_startup(si);
		netif_device_attach(dev);
		netif_wake_queue(dev);
	}

	return 0;
}

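/* Allocate a DMA-capable buffer for one of the SIR iobuffs. */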
static int pxa_irda_init_iobuf(iobuff_t *io, int size)
{
	io->head = kmalloc(size, GFP_KERNEL | GFP_DMA);
	if (io->head != NULL) {
		io->truesize = size;
		io->in_frame = FALSE;
		io->state = OUTSIDE_FRAME;
		io->data = io->head;
	}
	return io->head ? 0 : -ENOMEM;
}

static const struct net_device_ops pxa_irda_netdev_ops = {
	.ndo_open		= pxa_irda_start,
	.ndo_stop		= pxa_irda_stop,
	.ndo_start_xmit		= pxa_irda_hard_xmit,
	.ndo_do_ioctl		= pxa_irda_ioctl,
};

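/* Bind to the platform device: map the FICP and STUART, get clocks and IRQs. */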
static int pxa_irda_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct resource *res;
	struct pxa_irda *si;
	void __iomem *ficp, *stuart;
	unsigned int baudrate_mask;
	int err;

	if (!pdev->dev.platform_data)
		return -ENODEV;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ficp = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ficp)) {
		dev_err(&pdev->dev, "resource ficp not defined\n");
		return PTR_ERR(ficp);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	stuart = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(stuart)) {
		dev_err(&pdev->dev, "resource stuart not defined\n");
		return PTR_ERR(stuart);
	}

	dev = alloc_irdadev(sizeof(struct pxa_irda));
	if (!dev) {
		err = -ENOMEM;
		goto err_mem_1;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	si = netdev_priv(dev);
	si->netdev = dev;
	si->dev = &pdev->dev;
	si->pdata = pdev->dev.platform_data;

	si->irda_base = ficp;
	si->stuart_base = stuart;
	si->uart_irq = platform_get_irq(pdev, 0);
	si->icp_irq = platform_get_irq(pdev, 1);

	si->sir_clk = devm_clk_get(&pdev->dev, "UARTCLK");
	si->fir_clk = devm_clk_get(&pdev->dev, "FICPCLK");
	if (IS_ERR(si->sir_clk) || IS_ERR(si->fir_clk)) {
		err = PTR_ERR(IS_ERR(si->sir_clk) ? si->sir_clk : si->fir_clk);
		goto err_mem_4;
	}

	/* remember the bus address of the ICDR FIFO for the DMA setup */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	si->icdr_dma = res->start + ICDR;

	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (res)
		si->drcmr_rx = res->start;
	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (res)
		si->drcmr_tx = res->start;

	/*
	 * Initialise the SIR buffers
	 */
	err = pxa_irda_init_iobuf(&si->rx_buff, 14384);
	if (err)
		goto err_mem_4;
	err = pxa_irda_init_iobuf(&si->tx_buff, 4000);
	if (err)
		goto err_mem_5;

	if (gpio_is_valid(si->pdata->gpio_pwdown)) {
		err = gpio_request(si->pdata->gpio_pwdown, "IrDA switch");
		if (err)
			goto err_startup;
		err = gpio_direction_output(si->pdata->gpio_pwdown,
					    !si->pdata->gpio_pwdown_inverted);
		if (err) {
			gpio_free(si->pdata->gpio_pwdown);
			goto err_startup;
		}
	}

	if (si->pdata->startup) {
		err = si->pdata->startup(si->dev);
		if (err)
			goto err_startup;
	}

	if (gpio_is_valid(si->pdata->gpio_pwdown) && si->pdata->startup)
		dev_warn(si->dev, "gpio_pwdown and startup() both defined!\n");

	dev->netdev_ops = &pxa_irda_netdev_ops;

	irda_init_max_qos_capabilies(&si->qos);

	baudrate_mask = 0;
	if (si->pdata->transceiver_cap & IR_SIRMODE)
		baudrate_mask |= IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
	if (si->pdata->transceiver_cap & IR_FIRMODE)
		baudrate_mask |= IR_4000000 << 8;

	si->qos.baud_rate.bits &= baudrate_mask;
	si->qos.min_turn_time.bits = 7;  /* 1ms or more */

	irda_qos_bits_to_value(&si->qos);

	err = register_netdev(dev);

	if (err == 0)
		platform_set_drvdata(pdev, dev);

	if (err) {
		if (si->pdata->shutdown)
			si->pdata->shutdown(si->dev);
err_startup:
		kfree(si->tx_buff.head);
err_mem_5:
		kfree(si->rx_buff.head);
err_mem_4:
		free_netdev(dev);
	}
err_mem_1:
	return err;
}

static int pxa_irda_remove(struct platform_device *_dev)
{
	struct net_device *dev = platform_get_drvdata(_dev);

	if (dev) {
		struct pxa_irda *si = netdev_priv(dev);
		unregister_netdev(dev);
		if (gpio_is_valid(si->pdata->gpio_pwdown))
			gpio_free(si->pdata->gpio_pwdown);
		if (si->pdata->shutdown)
			si->pdata->shutdown(si->dev);
		kfree(si->tx_buff.head);
		kfree(si->rx_buff.head);
		free_netdev(dev);
	}

	return 0;
}

static struct platform_driver pxa_ir_driver = {
	.driver = {
		.name	= "pxa2xx-ir",
	},
	.probe		= pxa_irda_probe,
	.remove		= pxa_irda_remove,
	.suspend	= pxa_irda_suspend,
	.resume		= pxa_irda_resume,
};

module_platform_driver(pxa_ir_driver);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-ir");