/*
 * w83977af_ir.c - Winbond W83977AF (IR) IrDA device driver.
 *
 * Author: Dag Brattli <dagb@cs.uit.no>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
#include "w83977af.h"
#include "w83977af_ir.h"

#ifdef CONFIG_ARCH_NETWINDER             /* Adjust to NetWinder differences */
#undef  CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
#endif
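/*
 * With CONFIG_NETWINDER_RX_DMA_PROBLEMS defined, w83977af_dma_receive()
 * programs the ISA DMA controller by hand (disable_dma(), clear_dma_ff(),
 * set_dma_mode(), ...) under the driver spinlock instead of calling
 * irda_setup_dma(), as a workaround for DMA setup problems seen on the
 * NetWinder.
 */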
#define CONFIG_USE_W977_PNP	/* Currently needed */
#define PIO_MAX_SPEED	115200

static char *driver_name = "w83977af_ir";
static int qos_mtt_bits = 0x07;	/* 1 ms or more */

#define CHIP_IO_EXTENT 8

static unsigned int io[] = { 0x180, ~0, ~0, ~0 };
#ifdef CONFIG_ARCH_NETWINDER	/* Adjust to NetWinder differences */
static unsigned int irq[] = { 6, 0, 0, 0 };
#else
static unsigned int irq[] = { 11, 0, 0, 0 };
#endif
static unsigned int dma[] = { 1, 0, 0, 0 };
static unsigned int efbase[] = { W977_EFIO_BASE, W977_EFIO2_BASE };
static unsigned int efio = W977_EFIO_BASE;

static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL };

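/*
 * Most of the chip's registers are multiplexed behind a bank-select
 * register (SSR). The recurring pattern below is: save SSR, switch to
 * the bank that holds the register of interest with switch_bank(), do
 * the access, and finally restore the saved SSR so that concurrent
 * paths (notably the interrupt handler) find the bank they expect.
 */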

/* Some prototypes */
static int  w83977af_open(int i, unsigned int iobase, unsigned int irq,
			  unsigned int dma);
static int  w83977af_close(struct w83977af_ir *self);
static int  w83977af_probe(int iobase, int irq, int dma);
static int  w83977af_dma_receive(struct w83977af_ir *self);
static int  w83977af_dma_receive_complete(struct w83977af_ir *self);
static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
				      struct net_device *dev);
static int  w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
static int  w83977af_is_receiving(struct w83977af_ir *self);

static int  w83977af_net_open(struct net_device *dev);
static int  w83977af_net_close(struct net_device *dev);
static int  w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

/*
 * Function w83977af_init ()
 *
 *    Module initialization; probe and open the configured chips.
 */
static int __init w83977af_init(void)
{
	int i;

	IRDA_DEBUG(0, "%s()\n", __func__);

	for (i = 0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
		if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
			return 0;
	}
	return -ENODEV;
}

/*
 * Function w83977af_cleanup ()
 *
 *    Close all configured chips
 */
static void __exit w83977af_cleanup(void)
{
	int i;

	IRDA_DEBUG(4, "%s()\n", __func__);

	for (i = 0; i < ARRAY_SIZE(dev_self); i++) {
		if (dev_self[i])
			w83977af_close(dev_self[i]);
	}
}

static const struct net_device_ops w83977_netdev_ops = {
	.ndo_open       = w83977af_net_open,
	.ndo_stop       = w83977af_net_close,
	.ndo_start_xmit = w83977af_hard_xmit,
	.ndo_do_ioctl   = w83977af_net_ioctl,
};

/*
 * Function w83977af_open (i, iobase, irq, dma)
 *
 *    Open driver instance
 */
static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
			 unsigned int dma)
{
	struct net_device *dev;
	struct w83977af_ir *self;
	int err;

	IRDA_DEBUG(0, "%s()\n", __func__);

	/* Lock the port that we need */
	if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
		IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
			   __func__, iobase);
		return -ENODEV;
	}

	if (w83977af_probe(iobase, irq, dma) == -1) {
		err = -ENODEV;
		goto err_out;
	}

	/* Allocate new instance of the driver */
	dev = alloc_irdadev(sizeof(struct w83977af_ir));
	if (dev == NULL) {
		printk(KERN_ERR "IrDA: Can't allocate memory for "
		       "IrDA control block!\n");
		err = -ENOMEM;
		goto err_out;
	}

	self = netdev_priv(dev);
	spin_lock_init(&self->lock);

	/* Initialize IO */
	self->io.fir_base  = iobase;
	self->io.irq       = irq;
	self->io.fir_ext   = CHIP_IO_EXTENT;
	self->io.dma       = dma;
	self->io.fifo_size = 32;

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);

	/* The only value we must override is the baudrate */
	self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
		IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);

	/* The HP HSDL-1100 needs 1 ms according to the specs */
	self->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&self->qos);

	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
	self->rx_buff.truesize = 14384;
	self->tx_buff.truesize = 4000;

	/* Allocate memory if needed */
	self->rx_buff.head =
		dma_alloc_coherent(NULL, self->rx_buff.truesize,
				   &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out1;
	}

	memset(self->rx_buff.head, 0, self->rx_buff.truesize);

	self->tx_buff.head =
		dma_alloc_coherent(NULL, self->tx_buff.truesize,
				   &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out2;
	}
	memset(self->tx_buff.head, 0, self->tx_buff.truesize);

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;
	self->netdev = dev;

	dev->netdev_ops = &w83977_netdev_ops;

	err = register_netdev(dev);
	if (err) {
		IRDA_ERROR("%s(), register_netdevice() failed!\n", __func__);
		goto err_out3;
	}
	IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);

	/* Need to store self somewhere */
	dev_self[i] = self;

	return 0;
err_out3:
	dma_free_coherent(NULL, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
err_out2:
	dma_free_coherent(NULL, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
err_out1:
	free_netdev(dev);
err_out:
	release_region(iobase, CHIP_IO_EXTENT);
	return err;
}

/*
 * Function w83977af_close (self)
 *
 *    Close driver instance
 */
static int w83977af_close(struct w83977af_ir *self)
{
	int iobase;

	IRDA_DEBUG(0, "%s()\n", __func__);

	iobase = self->io.fir_base;

#ifdef CONFIG_USE_W977_PNP
	/* Enter PnP configuration mode */
	w977_efm_enter(efio);

	w977_select_device(W977_DEVICE_IR, efio);

	/* Deactivate device */
	w977_write_reg(0x30, 0x00, efio);

	w977_efm_exit(efio);
#endif /* CONFIG_USE_W977_PNP */

	/* Remove netdevice */
	unregister_netdev(self->netdev);

	/* Release the PORT that this driver is using */
	IRDA_DEBUG(0, "%s(), Releasing Region %03x\n",
		   __func__, self->io.fir_base);
	release_region(self->io.fir_base, self->io.fir_ext);

	if (self->tx_buff.head)
		dma_free_coherent(NULL, self->tx_buff.truesize,
				  self->tx_buff.head, self->tx_buff_dma);

	if (self->rx_buff.head)
		dma_free_coherent(NULL, self->rx_buff.truesize,
				  self->rx_buff.head, self->rx_buff_dma);

	free_netdev(self->netdev);

	return 0;
}

static int w83977af_probe(int iobase, int irq, int dma)
{
	int version;
	int i;

	for (i = 0; i < 2; i++) {
		IRDA_DEBUG(0, "%s()\n", __func__);
#ifdef CONFIG_USE_W977_PNP
		/* Enter PnP configuration mode */
		w977_efm_enter(efbase[i]);

		w977_select_device(W977_DEVICE_IR, efbase[i]);

		/* Configure PnP port, IRQ, and DMA channel */
		w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
		w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);

		w977_write_reg(0x70, irq, efbase[i]);
#ifdef CONFIG_ARCH_NETWINDER
		/* Netwinder uses 1 higher than Linux */
		w977_write_reg(0x74, dma+1, efbase[i]);
#else
		w977_write_reg(0x74, dma, efbase[i]);
#endif /* CONFIG_ARCH_NETWINDER */
		w977_write_reg(0x75, 0x04, efbase[i]);	/* Disable Tx DMA */

		/* Set append hardware CRC, enable IR bank selection */
		w977_write_reg(0xf0, APEDCRC|ENBNKSEL, efbase[i]);

		/* Activate device */
		w977_write_reg(0x30, 0x01, efbase[i]);

		w977_efm_exit(efbase[i]);
#endif /* CONFIG_USE_W977_PNP */
		/* Disable Advanced mode */
		switch_bank(iobase, SET2);
		outb(0x00, iobase+2);

		/* Turn on UART (global) interrupts */
		switch_bank(iobase, SET0);
		outb(HCR_EN_IRQ, iobase+HCR);

		/* Switch to advanced mode */
		switch_bank(iobase, SET2);
		outb(inb(iobase+ADCR1) | ADCR1_ADV_SL, iobase+ADCR1);

		/* Set default IR-mode */
		switch_bank(iobase, SET0);
		outb(HCR_SIR, iobase+HCR);

		/* Read the Advanced IR ID */
		switch_bank(iobase, SET3);
		version = inb(iobase+AUID);

		/* Should be 0x1? */
		if (0x10 == (version & 0xf0)) {
			efio = efbase[i];

			/* Set FIFO size to 32 */
			switch_bank(iobase, SET2);
			outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);

			/* Set FIFO threshold to TX17, RX16, reset Tx/Rx FIFOs */
			switch_bank(iobase, SET0);
			outb(UFR_RXTL|UFR_TXTL|UFR_TXF_RST|UFR_RXF_RST|
			     UFR_EN_FIFO, iobase+UFR);

			/* Receiver frame length */
			switch_bank(iobase, SET4);
			outb(2048 & 0xff, iobase+6);
			outb((2048 >> 8) & 0x1f, iobase+7);

			/*
			 * Init HP HSDL-1100 transceiver.
			 *
			 * Set IRX_MSL since we have 2 * receive paths IRRX,
			 * and IRRXH. Clear IRSL0D since we want IRSL0 to
			 * be a input pin used for IRRXH
			 *
			 *   IRRX  pin 37 connected to receiver
			 *   IRTX  pin 38 connected to transmitter
			 *   FIRRX pin 39 connected to receiver      (IRSL0)
			 *   CIRRX pin 40 connected to consumer IR receiver
			 */
			switch_bank(iobase, SET7);
			outb(0x40, iobase+7);

			IRDA_MESSAGE("W83977AF (IR) driver loaded. "
				     "Version: 0x%02x\n", version);

			return 0;
		} else {
			/* Try next extended function register address */
			IRDA_DEBUG(0, "%s(), Wrong chip version\n", __func__);
		}
	}
	return -1;
}

/*
 * Function w83977af_change_speed (self, speed)
 *
 *    Change the speed of the device
 */
static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
{
	int ir_mode = HCR_SIR;
	int iobase;
	__u8 set;

	iobase = self->io.fir_base;

	/* Update accounting for new speed */
	self->io.speed = speed;

	/* Save current bank */
	set = inb(iobase+SSR);

	/* Disable interrupts */
	switch_bank(iobase, SET0);
	outb(0, iobase+ICR);

	/* Select Set 2 */
	switch_bank(iobase, SET2);
	outb(0x00, iobase+ABHL);
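	/*
	 * The divisors programmed into ABLL below correspond to
	 * 115200/speed: 12 (0x0c) for 9600, 6 for 19200, 3 for 38400,
	 * 2 for 57600 and 1 for 115200, i.e. the SIR baud-rate generator
	 * appears to divide a 115200 bit/s base rate.
	 */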

	switch (speed) {
	case 9600:   outb(0x0c, iobase+ABLL); break;
	case 19200:  outb(0x06, iobase+ABLL); break;
	case 38400:  outb(0x03, iobase+ABLL); break;
	case 57600:  outb(0x02, iobase+ABLL); break;
	case 115200: outb(0x01, iobase+ABLL); break;
	case 576000:
		ir_mode = HCR_MIR_576;
		IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__);
		break;
	case 1152000:
		ir_mode = HCR_MIR_1152;
		IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__);
		break;
	case 4000000:
		ir_mode = HCR_FIR;
		IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__);
		break;
	default:
		ir_mode = HCR_FIR;
		IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __func__, speed);
		break;
	}

	/* Set speed mode */
	switch_bank(iobase, SET0);
	outb(ir_mode, iobase+HCR);

	/* Set FIFO size to 32 */
	switch_bank(iobase, SET2);
	outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);

	/* Set FIFO threshold */
	switch_bank(iobase, SET0);
	outb(0x00, iobase+UFR);		/* Reset */
	outb(UFR_EN_FIFO, iobase+UFR);	/* First we must enable FIFO */
	outb(0xa7, iobase+UFR);

	netif_wake_queue(self->netdev);

	/* Enable some interrupts so we can receive frames */
	switch_bank(iobase, SET0);
	if (speed > PIO_MAX_SPEED) {
		outb(ICR_EFSFI, iobase+ICR);
		w83977af_dma_receive(self);
	} else
		outb(ICR_ERBRI, iobase+ICR);

	/* Restore SSR */
	outb(set, iobase+SSR);
}

/*
 * Function w83977af_hard_xmit (skb, dev)
 *
 *    Sets up a DMA transfer to send the current frame.
 */
static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct w83977af_ir *self;
	__s32 speed;
	int iobase;
	__u8 set;
	int mtt;

	self = netdev_priv(dev);

	iobase = self->io.fir_base;

	IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __func__, jiffies,
		   (int)skb->len);

	/* Lock transmit buffer */
	netif_stop_queue(dev);

	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame */
		if (!skb->len) {
			w83977af_change_speed(self, speed);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;
	}

	/* Save current set */
	set = inb(iobase+SSR);

	/* Decide if we should use PIO or DMA transfer */
	if (self->io.speed > PIO_MAX_SPEED) {
		self->tx_buff.data = self->tx_buff.head;
		skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
		self->tx_buff.len = skb->len;

		mtt = irda_get_mtt(skb);
		IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__, jiffies, mtt);
		if (mtt)
			udelay(mtt);

		/* Enable DMA interrupt */
		switch_bank(iobase, SET0);
		outb(ICR_EDMAI, iobase+ICR);
		w83977af_dma_write(self, iobase);
	} else {
		self->tx_buff.data = self->tx_buff.head;
		self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
						   self->tx_buff.truesize);

		/* Add interrupt on tx low level (will fire immediately) */
		switch_bank(iobase, SET0);
		outb(ICR_ETXTHI, iobase+ICR);
	}
	dev_kfree_skb(skb);

	/* Restore set register */
	outb(set, iobase+SSR);

	return NETDEV_TX_OK;
}

/*
 * Function w83977af_dma_write (self, iobase)
 *
 *    Send frame using DMA
 */
static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
{
	__u8 set;
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	unsigned long flags;
	__u8 hcr;
#endif
	IRDA_DEBUG(4, "%s(), len=%d\n", __func__, self->tx_buff.len);

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Choose transmit DMA channel */
	switch_bank(iobase, SET2);
	outb(ADCR1_D_CHSW|ADCR1_ADV_SL, iobase+ADCR1);
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	spin_lock_irqsave(&self->lock, flags);

	disable_dma(self->io.dma);
	clear_dma_ff(self->io.dma);
	set_dma_mode(self->io.dma, DMA_MODE_READ);
	set_dma_addr(self->io.dma, self->tx_buff_dma);
	set_dma_count(self->io.dma, self->tx_buff.len);
#else
	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
		       DMA_MODE_WRITE);
#endif
	self->io.direction = IO_XMIT;

	/* Enable DMA */
	switch_bank(iobase, SET0);
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	hcr = inb(iobase+HCR);
	outb(hcr | HCR_EN_DMA, iobase+HCR);
	enable_dma(self->io.dma);
	spin_unlock_irqrestore(&self->lock, flags);
#else
	outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
#endif

	/* Restore set register */
	outb(set, iobase+SSR);
}

/*
 * Function w83977af_pio_write (iobase, buf, len, fifo_size)
 *
 *    Transmit frame using PIO
 */
static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
{
	int actual = 0;
	__u8 set;

	IRDA_DEBUG(4, "%s()\n", __func__);

	/* Save current bank */
	set = inb(iobase+SSR);

	switch_bank(iobase, SET0);
	if (!(inb_p(iobase+USR) & USR_TSRE)) {
		IRDA_DEBUG(4,
			   "%s(), warning, FIFO not empty yet!\n", __func__);

		/* Larger than TX FIFO threshold */
		fifo_size -= 17;
		IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n",
			   __func__, fifo_size);
	}

	/* Fill FIFO with current frame */
	while ((fifo_size-- > 0) && (actual < len)) {
		/* Transmit next byte */
		outb(buf[actual++], iobase+TBR);
	}

	IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
		   __func__, fifo_size, actual, len);

	/* Restore bank */
	outb(set, iobase+SSR);

	return actual;
}

/*
 * Function w83977af_dma_xmit_complete (self)
 *
 *    The transfer of a frame is finished, so do the necessary things
 */
static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
{
	int iobase;
	__u8 set;

	IRDA_DEBUG(4, "%s(%ld)\n", __func__, jiffies);

	IRDA_ASSERT(self != NULL, return;);

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Check for underrun! */
	if (inb(iobase+AUDR) & AUDR_UNDR) {
		IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__);

		self->netdev->stats.tx_errors++;
		self->netdev->stats.tx_fifo_errors++;

		/* Clear bit, by writing 1 to it */
		outb(AUDR_UNDR, iobase+AUDR);
	} else
		self->netdev->stats.tx_packets++;

	if (self->new_speed) {
		w83977af_change_speed(self, self->new_speed);
		self->new_speed = 0;
	}

	/* Unlock tx_buff and request another frame */
	netif_wake_queue(self->netdev);

	/* Restore set */
	outb(set, iobase+SSR);
}

/*
 * Function w83977af_dma_receive (self)
 *
 *    Get ready for receiving a frame. The device will initiate a DMA
 *    transfer when it starts to receive a frame.
 */
static int w83977af_dma_receive(struct w83977af_ir *self)
{
	int iobase;
	__u8 set;
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	unsigned long flags;
	__u8 hcr;
#endif
	IRDA_ASSERT(self != NULL, return -1;);

	IRDA_DEBUG(4, "%s\n", __func__);

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Choose receive DMA channel */
	switch_bank(iobase, SET2);
	outb((inb(iobase+ADCR1) & ~ADCR1_D_CHSW)|ADCR1_ADV_SL,
	     iobase+ADCR1);

	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;

#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	spin_lock_irqsave(&self->lock, flags);

	disable_dma(self->io.dma);
	clear_dma_ff(self->io.dma);
	set_dma_mode(self->io.dma, DMA_MODE_READ);
	set_dma_addr(self->io.dma, self->rx_buff_dma);
	set_dma_count(self->io.dma, self->rx_buff.truesize);
#else
	irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
		       DMA_MODE_READ);
#endif
	/*
	 * Reset Rx FIFO. This will also flush the ST_FIFO. It is important
	 * that we do not reset the Tx FIFO here, since it might not have
	 * finished transmitting yet.
	 */
	switch_bank(iobase, SET0);
	outb(UFR_RXTL|UFR_TXTL|UFR_RXF_RST|UFR_EN_FIFO, iobase+UFR);
	self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;

	/* Enable DMA */
	switch_bank(iobase, SET0);
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	hcr = inb(iobase+HCR);
	outb(hcr | HCR_EN_DMA, iobase+HCR);
	enable_dma(self->io.dma);
	spin_unlock_irqrestore(&self->lock, flags);
#else
	outb(inb(iobase+HCR) | HCR_EN_DMA, iobase+HCR);
#endif
	/* Restore set */
	outb(set, iobase+SSR);

	return 0;
}

/*
 * Function w83977af_dma_receive_complete (self)
 *
 *    Finished with receiving a frame
 */
static int w83977af_dma_receive_complete(struct w83977af_ir *self)
{
	struct sk_buff *skb;
	struct st_fifo *st_fifo;
	int len;
	int iobase;
	__u8 set;
	__u8 status;

	IRDA_DEBUG(4, "%s\n", __func__);

	st_fifo = &self->st_fifo;

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	/* Read status FIFO */
	switch_bank(iobase, SET5);
	while ((status = inb(iobase+FS_FO)) & FS_FO_FSFDR) {
		st_fifo->entries[st_fifo->tail].status = status;

		st_fifo->entries[st_fifo->tail].len  = inb(iobase+RFLFL);
		st_fifo->entries[st_fifo->tail].len |= inb(iobase+RFLFH) << 8;

		st_fifo->tail++;
		st_fifo->len++;
	}

	while (st_fifo->len) {
		/* Get first entry */
		status = st_fifo->entries[st_fifo->head].status;
		len    = st_fifo->entries[st_fifo->head].len;
		st_fifo->head++;
		st_fifo->len--;

		/* Check for errors */
		if (status & FS_FO_ERR_MSK) {
			if (status & FS_FO_LST_FR) {
				/* Add number of lost frames to stats */
				self->netdev->stats.rx_errors += len;
			} else {
				/* Skip frame */
				self->netdev->stats.rx_errors++;

				self->rx_buff.data += len;

				if (status & FS_FO_MX_LEX)
					self->netdev->stats.rx_length_errors++;

				if (status & FS_FO_PHY_ERR)
					self->netdev->stats.rx_frame_errors++;

				if (status & FS_FO_CRC_ERR)
					self->netdev->stats.rx_crc_errors++;
			}
			/* The errors below can be reported in both cases */
			if (status & FS_FO_RX_OV)
				self->netdev->stats.rx_fifo_errors++;

			if (status & FS_FO_FSF_OV)
				self->netdev->stats.rx_fifo_errors++;

		} else {
			/* Check if we have transferred all data to memory */
			switch_bank(iobase, SET0);
			if (inb(iobase+USR) & USR_RDR) {
				udelay(80); /* Should be enough!? */
			}

			skb = dev_alloc_skb(len+1);
			if (skb == NULL) {
				printk(KERN_INFO
				       "%s(), memory squeeze, dropping frame.\n", __func__);
				/* Restore set register */
				outb(set, iobase+SSR);

				return FALSE;
			}

			/* Reserve one byte for alignment */
			skb_reserve(skb, 1);

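			/*
			 * Strip the trailing CRC that the hardware has
			 * already checked: IrDA frames below 4 Mb/s carry
			 * a 16-bit CRC, 4 Mb/s FIR frames a 32-bit CRC.
			 */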
			if (self->io.speed < 4000000) {
				skb_put(skb, len-2);
				skb_copy_to_linear_data(skb,
							self->rx_buff.data,
							len - 2);
			} else {
				skb_put(skb, len-4);
				skb_copy_to_linear_data(skb,
							self->rx_buff.data,
							len - 4);
			}

			/* Move to next frame */
			self->rx_buff.data += len;
			self->netdev->stats.rx_packets++;

			skb->dev = self->netdev;
			skb_reset_mac_header(skb);
			skb->protocol = htons(ETH_P_IRDA);
			netif_rx(skb);
		}
	}
	/* Restore set register */
	outb(set, iobase+SSR);

	return TRUE;
}

/*
 * Function w83977af_pio_receive (self)
 *
 *    Receive all data in receiver FIFO
 */
static void w83977af_pio_receive(struct w83977af_ir *self)
{
	__u8 byte = 0x00;
	int iobase;

	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);

	iobase = self->io.fir_base;

	/* Receive all characters in Rx FIFO */
	do {
		byte = inb(iobase+RBR);
		async_unwrap_char(self->netdev, &self->netdev->stats,
				  &self->rx_buff, byte);
	} while (inb(iobase+USR) & USR_RDR);
}

/*
 * Function w83977af_sir_interrupt (self, isr)
 *
 *    Handle SIR interrupt
 */
static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
{
	int actual;
	__u8 new_icr = 0;
	__u8 set;
	int iobase;

	IRDA_DEBUG(4, "%s(), isr=%#x\n", __func__, isr);

	iobase = self->io.fir_base;
	/* Transmit FIFO low on data */
	if (isr & ISR_TXTH_I) {
		/* Write data left in transmit buffer */
		actual = w83977af_pio_write(self->io.fir_base,
					    self->tx_buff.data,
					    self->tx_buff.len,
					    self->io.fifo_size);

		self->tx_buff.data += actual;
		self->tx_buff.len  -= actual;

		self->io.direction = IO_XMIT;

		/* Check if finished */
		if (self->tx_buff.len > 0) {
			new_icr |= ICR_ETXTHI;
		} else {
			set = inb(iobase+SSR);
			switch_bank(iobase, SET0);
			outb(AUDR_SFEND, iobase+AUDR);
			outb(set, iobase+SSR);

			self->netdev->stats.tx_packets++;

			/* Feed me more packets */
			netif_wake_queue(self->netdev);
			new_icr |= ICR_ETBREI;
		}
	}
	/* Check if transmission has completed */
	if (isr & ISR_TXEMP_I) {
		/* Check if we need to change the speed */
		if (self->new_speed) {
			IRDA_DEBUG(2,
				   "%s(), Changing speed!\n", __func__);
			w83977af_change_speed(self, self->new_speed);
			self->new_speed = 0;
		}

		/* Turn around and get ready to receive some data */
		self->io.direction = IO_RECV;
		new_icr |= ICR_ERBRI;
	}

	/* Rx FIFO threshold or timeout */
	if (isr & ISR_RXTH_I) {
		w83977af_pio_receive(self);

		/* Keep receiving */
		new_icr |= ICR_ERBRI;
	}
	return new_icr;
}

/*
 * Function w83977af_fir_interrupt (self, isr)
 *
 *    Handle MIR/FIR interrupt
 */
static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
{
	__u8 new_icr = 0;
	__u8 set;
	int iobase;

	iobase = self->io.fir_base;
	set = inb(iobase+SSR);

	/* End of frame detected in FIFO */
	if (isr & (ISR_FEND_I|ISR_FSF_I)) {
		if (w83977af_dma_receive_complete(self)) {
			/* Wait for next status FIFO interrupt */
			new_icr |= ICR_EFSFI;
		} else {
			/* DMA not finished yet */

			/* Set timer value, resolution 1 ms */
			switch_bank(iobase, SET4);
			outb(0x01, iobase+TMRL); /* 1 ms */
			outb(0x00, iobase+TMRH);

			/* Start timer */
			outb(IR_MSL_EN_TMR, iobase+IR_MSL);

			new_icr |= ICR_ETMRI;
		}
	}
	/* Timer finished */
	if (isr & ISR_TMR_I) {
		/* Disable timer */
		switch_bank(iobase, SET4);
		outb(0, iobase+IR_MSL);

		/* Check if this is a Tx timer interrupt */
		if (self->io.direction == IO_XMIT) {
			w83977af_dma_write(self, iobase);

			new_icr |= ICR_EDMAI;
		} else {
			/* Check if DMA has now finished */
			w83977af_dma_receive_complete(self);

			new_icr |= ICR_EFSFI;
		}
	}
	/* Finished with DMA */
	if (isr & ISR_DMA_I) {
		w83977af_dma_xmit_complete(self);

		/* Prepare for receive, so we are ready for the next frame */
		w83977af_dma_receive(self);
		new_icr = ICR_EFSFI;
	}

	/* Restore set */
	outb(set, iobase+SSR);

	return new_icr;
}

/*
 * Function w83977af_interrupt (irq, dev_id)
 *
 *    An interrupt from the chip has arrived. Time to do some work.
 */
static irqreturn_t w83977af_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct w83977af_ir *self;
	__u8 set, icr, isr;
	int iobase;

	self = netdev_priv(dev);

	iobase = self->io.fir_base;

	/* Save current bank */
	set = inb(iobase+SSR);
	switch_bank(iobase, SET0);

	icr = inb(iobase+ICR);
	isr = inb(iobase+ISR) & icr; /* Mask out the interesting ones */

	outb(0, iobase+ICR); /* Disable interrupts */

	if (isr) {
		/* Dispatch interrupt handler for the current speed */
		if (self->io.speed > PIO_MAX_SPEED)
			icr = w83977af_fir_interrupt(self, isr);
		else
			icr = w83977af_sir_interrupt(self, isr);
	}

	outb(icr, iobase+ICR);	/* Restore (new) interrupts */
	outb(set, iobase+SSR);	/* Restore bank register */
	return IRQ_RETVAL(isr);
}

/*
 * Function w83977af_is_receiving (self)
 *
 *    Return TRUE if we are currently receiving a frame
 */
static int w83977af_is_receiving(struct w83977af_ir *self)
{
	int status = FALSE;
	int iobase;
	__u8 set;

	IRDA_ASSERT(self != NULL, return FALSE;);

	if (self->io.speed > 115200) {
		iobase = self->io.fir_base;

		/* Check if Rx FIFO is not empty */
		set = inb(iobase+SSR);
		switch_bank(iobase, SET2);
		if ((inb(iobase+RXFDTH) & 0x3f) != 0) {
			/* We are receiving something */
			status = TRUE;
		}
		outb(set, iobase+SSR);
	} else
		status = (self->rx_buff.state != OUTSIDE_FRAME);

	return status;
}

/*
 * Function w83977af_net_open (dev)
 *
 *    Start the device
 */
static int w83977af_net_open(struct net_device *dev)
{
	struct w83977af_ir *self;
	int iobase;
	char hwname[32];
	__u8 set;

	IRDA_DEBUG(0, "%s()\n", __func__);

	IRDA_ASSERT(dev != NULL, return -1;);
	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

	if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
			(void *) dev)) {
		return -EAGAIN;
	}
	/*
	 * Always allocate the DMA channel after the IRQ,
	 * and clean up on failure.
	 */
	if (request_dma(self->io.dma, dev->name)) {
		free_irq(self->io.irq, dev);
		return -EAGAIN;
	}

	/* Save current set */
	set = inb(iobase+SSR);

	/* Enable some interrupts so we can receive frames again */
	switch_bank(iobase, SET0);
	if (self->io.speed > 115200) {
		outb(ICR_EFSFI, iobase+ICR);
		w83977af_dma_receive(self);
	} else
		outb(ICR_ERBRI, iobase+ICR);

	/* Restore bank register */
	outb(set, iobase+SSR);

	/* Ready to play! */
	netif_start_queue(dev);

	/* Give self a hardware name */
	sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);

	/*
	 * Open new IrLAP layer instance, now that everything should be
	 * initialized properly
	 */
	self->irlap = irlap_open(dev, &self->qos, hwname);

	return 0;
}

/*
 * Function w83977af_net_close (dev)
 *
 *    Stop the device
 */
static int w83977af_net_close(struct net_device *dev)
{
	struct w83977af_ir *self;
	int iobase;
	__u8 set;

	IRDA_DEBUG(0, "%s()\n", __func__);

	IRDA_ASSERT(dev != NULL, return -1;);

	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

	/* Stop device */
	netif_stop_queue(dev);

	/* Stop and remove instance of IrLAP */
	if (self->irlap)
		irlap_close(self->irlap);
	self->irlap = NULL;

	disable_dma(self->io.dma);

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable interrupts */
	switch_bank(iobase, SET0);
	outb(0, iobase+ICR);

	free_irq(self->io.irq, dev);
	free_dma(self->io.dma);

	/* Restore bank register */
	outb(set, iobase+SSR);

	return 0;
}

/*
 * Function w83977af_net_ioctl (dev, rq, cmd)
 *
 *    Process IOCTL commands for this device
 */
static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	struct w83977af_ir *self;
	unsigned long flags;
	int ret = 0;

	IRDA_ASSERT(dev != NULL, return -1;);

	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return -1;);

	IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);

	spin_lock_irqsave(&self->lock, flags);

	switch (cmd) {
	case SIOCSBANDWIDTH: /* Set bandwidth */
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		w83977af_change_speed(self, irq->ifr_baudrate);
		break;
	case SIOCSMEDIABUSY: /* Set media busy */
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		irda_device_set_media_busy(self->netdev, TRUE);
		break;
	case SIOCGRECEIVING: /* Check if we are receiving right now */
		irq->ifr_receiving = w83977af_is_receiving(self);
		break;
	default:
		ret = -EOPNOTSUPP;
	}
out:
	spin_unlock_irqrestore(&self->lock, flags);
	return ret;
}

MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
MODULE_LICENSE("GPL");

module_param(qos_mtt_bits, int, 0);
MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
module_param_array(io, int, NULL, 0);
MODULE_PARM_DESC(io, "Base I/O addresses");
module_param_array(irq, int, NULL, 0);
MODULE_PARM_DESC(irq, "IRQ lines");
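/*
 * Example (hypothetical values): the module parameters above allow the
 * probe resources to be overridden at load time, e.g.:
 *
 *   modprobe w83977af_ir io=0x180 irq=11 qos_mtt_bits=0x07
 */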

/*
 * Function init_module (void)
 *
 *    Initialize the W83977AF module in the kernel
 */
module_init(w83977af_init);

/*
 * Function cleanup_module (void)
 *
 *    Cleanup the W83977AF module
 */
module_exit(w83977af_cleanup);