1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41#include <linux/module.h>
42#include <linux/kernel.h>
43#include <linux/types.h>
44#include <linux/skbuff.h>
45#include <linux/netdevice.h>
46#include <linux/ioport.h>
47#include <linux/delay.h>
48#include <linux/slab.h>
49#include <linux/init.h>
50#include <linux/rtnetlink.h>
51#include <linux/pci.h>
52#include <linux/dma-mapping.h>
53
54#include <asm/io.h>
55#include <asm/dma.h>
56#include <asm/byteorder.h>
57
58#include <linux/pm.h>
59
60#include <net/irda/wrapper.h>
61#include <net/irda/irda.h>
62#include <net/irda/irda_device.h>
63
64#include "via-ircc.h"
65
#define VIA_MODULE_NAME "via-ircc"
#define CHIP_IO_EXTENT 0x40	/* size of the FIR register window we claim */

/* Name used when requesting the I/O region. */
static char *driver_name = VIA_MODULE_NAME;

/* Module parameters */
static int qos_mtt_bits = 0x07;	/* minimum turn time bits advertised in QoS */
static int dongle_id = 0;	/* 0 = try to auto-detect the dongle type */

/* The attached dongle cannot reliably be probed, so the user may force it. */
module_param(dongle_id, int, 0);

/* Per-instance driver state, max four adapters; slots are NULL when free. */
static struct via_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL };
83
84
/* ------------------------ Forward declarations ------------------------ */
static int via_ircc_open(int i, chipio_t * info, unsigned int id);
static int via_ircc_close(struct via_ircc_cb *self);
static int via_ircc_dma_receive(struct via_ircc_cb *self);
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
					 int iobase);
static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
					  struct net_device *dev);
static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
					  struct net_device *dev);
static void via_hw_init(struct via_ircc_cb *self);
static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 baud);
static irqreturn_t via_ircc_interrupt(int irq, void *dev_id);
static int via_ircc_is_receiving(struct via_ircc_cb *self);
static int via_ircc_read_dongle_id(int iobase);

static int via_ircc_net_open(struct net_device *dev);
static int via_ircc_net_close(struct net_device *dev);
static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
			      int cmd);
static void via_ircc_change_dongle_speed(int iobase, int speed,
					 int dongle_id);
static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
static void hwreset(struct via_ircc_cb *self);
static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
static int upload_rxdata(struct via_ircc_cb *self, int iobase);
static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_device_id *id);
static void __devexit via_remove_one (struct pci_dev *pdev);
112
113
/*
 * Crude bus-speed-calibrated delay: each read of legacy I/O port 0x80
 * takes roughly one microsecond.  Used on the 0x3076 chipset where a
 * plain udelay() is apparently not sufficient (see hard_xmit_fir).
 *
 * The original kept the inb() result in an unused local; the port read
 * itself is the delay, so the value is simply discarded.
 */
static void iodelay(int udelay)
{
	int i;

	for (i = 0; i < udelay; i++)
		inb(0x80);	/* the I/O cycle provides the delay */
}
123
/*
 * PCI IDs of the VIA south bridges containing this IrDA controller.
 * driver_data (last field) is just the table index.
 */
static struct pci_device_id via_pci_tbl[] = {
	{ PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 },
	{ PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 },
	{ PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 },
	{ PCI_VENDOR_ID_VIA, 0x3147, PCI_ANY_ID, PCI_ANY_ID,0,0,3 },
	{ PCI_VENDOR_ID_VIA, 0x3177, PCI_ANY_ID, PCI_ANY_ID,0,0,4 },
	{ 0, }	/* terminator */
};

MODULE_DEVICE_TABLE(pci,via_pci_tbl);
134
135
/* PCI driver glue: probe/remove entry points for the devices above. */
static struct pci_driver via_driver = {
	.name		= VIA_MODULE_NAME,
	.id_table	= via_pci_tbl,
	.probe		= via_init_one,
	.remove		= __devexit_p(via_remove_one),
};
142
143
144
145
146
147
148
149static int __init via_ircc_init(void)
150{
151 int rc;
152
153 IRDA_DEBUG(3, "%s()\n", __func__);
154
155 rc = pci_register_driver(&via_driver);
156 if (rc < 0) {
157 IRDA_DEBUG(0, "%s(): error rc = %d, returning -ENODEV...\n",
158 __func__, rc);
159 return -ENODEV;
160 }
161 return 0;
162}
163
164static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_device_id *id)
165{
166 int rc;
167 u8 temp,oldPCI_40,oldPCI_44,bTmp,bTmp1;
168 u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase;
169 chipio_t info;
170
171 IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __func__, id->device);
172
173 rc = pci_enable_device (pcidev);
174 if (rc) {
175 IRDA_DEBUG(0, "%s(): error rc = %d\n", __func__, rc);
176 return -ENODEV;
177 }
178
179
180 if ( ReadLPCReg(0x20) != 0x3C )
181 Chipset=0x3096;
182 else
183 Chipset=0x3076;
184
185 if (Chipset==0x3076) {
186 IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __func__);
187
188 WriteLPCReg(7,0x0c );
189 temp=ReadLPCReg(0x30);
190 if((temp&0x01)==1) {
191 WriteLPCReg(0x1d, 0x82 );
192 WriteLPCReg(0x23,0x18);
193 temp=ReadLPCReg(0xF0);
194 if((temp&0x01)==0) {
195 temp=(ReadLPCReg(0x74)&0x03);
196 FirDRQ0=temp + 4;
197 temp=(ReadLPCReg(0x74)&0x0C) >> 2;
198 FirDRQ1=temp + 4;
199 } else {
200 temp=(ReadLPCReg(0x74)&0x0C) >> 2;
201 FirDRQ0=temp + 4;
202 FirDRQ1=FirDRQ0;
203 }
204 FirIRQ=(ReadLPCReg(0x70)&0x0f);
205 FirIOBase=ReadLPCReg(0x60 ) << 8;
206 FirIOBase=FirIOBase| ReadLPCReg(0x61) ;
207 FirIOBase=FirIOBase ;
208 info.fir_base=FirIOBase;
209 info.irq=FirIRQ;
210 info.dma=FirDRQ1;
211 info.dma2=FirDRQ0;
212 pci_read_config_byte(pcidev,0x40,&bTmp);
213 pci_write_config_byte(pcidev,0x40,((bTmp | 0x08) & 0xfe));
214 pci_read_config_byte(pcidev,0x42,&bTmp);
215 pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
216 pci_write_config_byte(pcidev,0x5a,0xc0);
217 WriteLPCReg(0x28, 0x70 );
218 if (via_ircc_open(0, &info,0x3076) == 0)
219 rc=0;
220 } else
221 rc = -ENODEV;
222 } else {
223 IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __func__);
224
225 pci_read_config_byte(pcidev,0x67,&bTmp);
226 if((bTmp&0x01)==1) {
227
228 pci_read_config_byte(pcidev,0x42,&oldPCI_40);
229 pci_write_config_byte(pcidev,0x42,oldPCI_40 | 0x80);
230 pci_read_config_byte(pcidev,0x40,&oldPCI_40);
231 pci_write_config_byte(pcidev,0x40,oldPCI_40 & 0xf7);
232 pci_read_config_byte(pcidev,0x44,&oldPCI_44);
233 pci_write_config_byte(pcidev,0x44,0x4e);
234
235 if((bTmp&0x02)==0) {
236 pci_read_config_byte(pcidev,0x44,&bTmp1);
237 FirDRQ0 = (bTmp1 & 0x30) >> 4;
238 pci_read_config_byte(pcidev,0x44,&bTmp1);
239 FirDRQ1 = (bTmp1 & 0xc0) >> 6;
240 } else {
241 pci_read_config_byte(pcidev,0x44,&bTmp1);
242 FirDRQ0 = (bTmp1 & 0x30) >> 4 ;
243 FirDRQ1=0;
244 }
245 pci_read_config_byte(pcidev,0x47,&bTmp1);
246 FirIRQ = bTmp1 & 0x0f;
247
248 pci_read_config_byte(pcidev,0x69,&bTmp);
249 FirIOBase = bTmp << 8;
250 pci_read_config_byte(pcidev,0x68,&bTmp);
251 FirIOBase = (FirIOBase | bTmp ) & 0xfff0;
252
253 info.fir_base=FirIOBase;
254 info.irq=FirIRQ;
255 info.dma=FirDRQ1;
256 info.dma2=FirDRQ0;
257 if (via_ircc_open(0, &info,0x3096) == 0)
258 rc=0;
259 } else
260 rc = -ENODEV;
261 }
262
263 IRDA_DEBUG(2, "%s(): End - rc = %d\n", __func__, rc);
264 return rc;
265}
266
267
268
269
270
271
272
273static void via_ircc_clean(void)
274{
275 int i;
276
277 IRDA_DEBUG(3, "%s()\n", __func__);
278
279 for (i=0; i < ARRAY_SIZE(dev_self); i++) {
280 if (dev_self[i])
281 via_ircc_close(dev_self[i]);
282 }
283}
284
/*
 * PCI remove callback: tear down all driver instances and disable the
 * PCI device.  Note this closes every instance, not just the one bound
 * to @pdev — the driver only ever registers instance 0.
 */
static void __devexit via_remove_one (struct pci_dev *pdev)
{
	IRDA_DEBUG(3, "%s()\n", __func__);

	via_ircc_clean();

	pci_disable_device(pdev);
}
299
/*
 * Module exit: close any remaining instances, then unregister the PCI
 * driver (which triggers via_remove_one for still-bound devices).
 */
static void __exit via_ircc_cleanup(void)
{
	IRDA_DEBUG(3, "%s()\n", __func__);

	via_ircc_clean();

	pci_unregister_driver (&via_driver);
}
312
/* netdev ops used while running at SIR speeds (<= 115200 baud). */
static const struct net_device_ops via_ircc_sir_ops = {
	.ndo_start_xmit = via_ircc_hard_xmit_sir,
	.ndo_open = via_ircc_net_open,
	.ndo_stop = via_ircc_net_close,
	.ndo_do_ioctl = via_ircc_net_ioctl,
};
/* netdev ops used at MIR/FIR speeds (> 115200 baud); only xmit differs. */
static const struct net_device_ops via_ircc_fir_ops = {
	.ndo_start_xmit = via_ircc_hard_xmit_fir,
	.ndo_open = via_ircc_net_open,
	.ndo_stop = via_ircc_net_close,
	.ndo_do_ioctl = via_ircc_net_ioctl,
};
325
326
327
328
329
330
331
/*
 * Open driver instance @i with the chip resources in @info.
 * Allocates the net_device and coherent DMA buffers, claims the FIR
 * I/O region, sets up QoS and registers the IrDA network device.
 *
 * @i:    slot index into dev_self[] (caller always passes 0)
 * @info: discovered chip resources (I/O base, IRQ, DMA channels)
 * @id:   chipset id (0x3076 or 0x3096)
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is released via the goto chain at the bottom.
 */
static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
{
	struct net_device *dev;
	struct via_ircc_cb *self;
	int err;

	IRDA_DEBUG(3, "%s()\n", __func__);

	if (i >= ARRAY_SIZE(dev_self))
		return -ENOMEM;

	/* Allocate new instance of the driver */
	dev = alloc_irdadev(sizeof(struct via_ircc_cb));
	if (dev == NULL)
		return -ENOMEM;

	self = netdev_priv(dev);
	self->netdev = dev;
	spin_lock_init(&self->lock);

	/* Remember self so the interrupt handler and cleanup can find us. */
	dev_self[i] = self;
	self->index = i;

	/* Initialize I/O resource description from the probe results. */
	self->io.cfg_base = info->cfg_base;
	self->io.fir_base = info->fir_base;
	self->io.irq = info->irq;
	self->io.fir_ext = CHIP_IO_EXTENT;
	self->io.dma = info->dma;	/* TX DMA channel */
	self->io.dma2 = info->dma2;	/* RX DMA channel */
	self->io.fifo_size = 32;
	self->chip_id = id;
	self->st_fifo.len = 0;
	self->RxDataReady = 0;

	/* Reserve the FIR register window. */
	if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
		IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
			   __func__, self->io.fir_base);
		err = -ENODEV;
		goto err_out1;
	}

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);

	/* Detect (well, default) the dongle unless forced by module param. */
	if (!dongle_id)
		dongle_id = via_ircc_read_dongle_id(self->io.fir_base);
	self->io.dongle_id = dongle_id;

	/* Advertise FIR speeds only for dongle 0x0d; SIR-only otherwise. */
	switch( self->io.dongle_id ){
	case 0x0d:
		self->qos.baud_rate.bits =
		    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200 |
		    IR_576000 | IR_1152000 | (IR_4000000 << 8);
		break;
	default:
		self->qos.baud_rate.bits =
		    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
		break;
	}

	self->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&self->qos);

	/* Max Rx window (14 * 1023 bytes) plus slack for the async wrapper. */
	self->rx_buff.truesize = 14384 + 2048;
	self->tx_buff.truesize = 14384 + 2048;

	/* Allocate coherent DMA buffers for RX and TX.
	 * NOTE(review): passing NULL as the device to dma_alloc_coherent
	 * is the legacy "ISA-style" usage — confirm against the target
	 * kernel version. */
	self->rx_buff.head =
		dma_alloc_coherent(NULL, self->rx_buff.truesize,
				   &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out2;
	}
	memset(self->rx_buff.head, 0, self->rx_buff.truesize);

	self->tx_buff.head =
		dma_alloc_coherent(NULL, self->tx_buff.truesize,
				   &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out3;
	}
	memset(self->tx_buff.head, 0, self->tx_buff.truesize);

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;

	/* Reset TX queue info */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;

	/* Start in SIR mode; change_speed() swaps in the FIR ops later. */
	dev->netdev_ops = &via_ircc_sir_ops;

	err = register_netdev(dev);
	if (err)
		goto err_out4;

	IRDA_MESSAGE("IrDA: Registered device %s (via-ircc)\n", dev->name);

	/* Initialise the chip at the default 9600 baud. */
	self->io.speed = 9600;
	via_hw_init(self);
	return 0;
 err_out4:
	dma_free_coherent(NULL, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
 err_out3:
	dma_free_coherent(NULL, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
 err_out2:
	release_region(self->io.fir_base, self->io.fir_ext);
 err_out1:
	free_netdev(dev);
	dev_self[i] = NULL;
	return err;
}
470
471
472
473
474
475
476
/*
 * Close a driver instance: reset the chip, unregister the net device
 * and release the I/O region and DMA buffers.  Inverse of
 * via_ircc_open().  Always returns 0.
 */
static int via_ircc_close(struct via_ircc_cb *self)
{
	int iobase;

	IRDA_DEBUG(3, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return -1;);

	iobase = self->io.fir_base;

	ResetChip(iobase, 5);	/* hardware reset before tear-down */

	unregister_netdev(self->netdev);

	/* Release the PORT that this driver is using */
	IRDA_DEBUG(2, "%s(), Releasing Region %03x\n",
		   __func__, self->io.fir_base);
	release_region(self->io.fir_base, self->io.fir_ext);
	if (self->tx_buff.head)
		dma_free_coherent(NULL, self->tx_buff.truesize,
				  self->tx_buff.head, self->tx_buff_dma);
	if (self->rx_buff.head)
		dma_free_coherent(NULL, self->rx_buff.truesize,
				  self->rx_buff.head, self->rx_buff_dma);
	dev_self[self->index] = NULL;	/* free the slot for reuse */

	free_netdev(self->netdev);

	return 0;
}
507
508
509
510
511
512
513
514
/*
 * Bring the controller into a known state: mask unused interrupts,
 * reset the chip and program SIR mode at 9600 baud.  The register
 * write order follows the hardware init sequence and must not change.
 */
static void via_hw_init(struct via_ircc_cb *self)
{
	int iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	SetMaxRxPacketSize(iobase, 0x0fff);	/* max receive frame: 4095 bytes */

	/* Interrupt selection: only underrun/EOM on TX, special ints on RX. */
	EnRXFIFOReadyInt(iobase, OFF);
	EnRXFIFOHalfLevelInt(iobase, OFF);
	EnTXFIFOHalfLevelInt(iobase, OFF);
	EnTXFIFOUnderrunEOMInt(iobase, ON);
	EnTXFIFOReadyInt(iobase, OFF);
	InvertTX(iobase, OFF);
	InvertRX(iobase, OFF);

	/* 3076-specific quirk (LPC reg 0x20 == 0x3c identifies that chip). */
	if (ReadLPCReg(0x20) == 0x3c)
		WriteLPCReg(0xF0, 0);

	EnRXSpecInt(iobase, ON);

	/* Full reset, then stop everything before reprogramming. */
	ResetChip(iobase, 5);
	EnableDMA(iobase, OFF);
	EnableTX(iobase, OFF);
	EnableRX(iobase, OFF);
	EnRXDMA(iobase, OFF);
	EnTXDMA(iobase, OFF);
	RXStart(iobase, OFF);
	TXStart(iobase, OFF);
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);	/* start at SIR mode */
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x00);	/* disable status while configuring */
	SetBaudRate(iobase, 9600);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);

	self->io.speed = 9600;
	self->st_fifo.len = 0;

	via_ircc_change_dongle_speed(iobase, self->io.speed,
				     self->io.dongle_id);

	WriteReg(iobase, I_ST_CT_0, 0x80);	/* re-enable status */
}
565
566
567
568
569
/*
 * "Probe" the attached dongle.  Actual hardware probing is not
 * implemented; warn the user and return a fixed default type id.
 */
static int via_ircc_read_dongle_id(int iobase)
{
	IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n");
	return 9;	/* default dongle type used when none is specified */
}
577
578
579
580
581
582
583static void via_ircc_change_dongle_speed(int iobase, int speed,
584 int dongle_id)
585{
586 u8 mode = 0;
587
588
589 speed = speed;
590
591 IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n",
592 __func__, speed, iobase, dongle_id);
593
594 switch (dongle_id) {
595
596
597
598
599 case 0x08:
600 UseOneRX(iobase, ON);
601 InvertTX(iobase, OFF);
602 InvertRX(iobase, OFF);
603
604 EnRX2(iobase, ON);
605 EnGPIOtoRX2(iobase, OFF);
606
607 if (IsSIROn(iobase)) {
608
609 SlowIRRXLowActive(iobase, ON);
610 udelay(1000);
611 SlowIRRXLowActive(iobase, OFF);
612 } else {
613 if (IsMIROn(iobase)) {
614
615 SlowIRRXLowActive(iobase, OFF);
616 udelay(20);
617 } else {
618 if (IsFIROn(iobase)) {
619
620 SlowIRRXLowActive(iobase, OFF);
621 udelay(20);
622 }
623 }
624 }
625 break;
626
627 case 0x09:
628 UseOneRX(iobase, ON);
629 InvertTX(iobase, OFF);
630 InvertRX(iobase, OFF);
631
632 EnRX2(iobase, ON);
633 EnGPIOtoRX2(iobase, OFF);
634 if (IsSIROn(iobase)) {
635
636 SlowIRRXLowActive(iobase, ON);
637 udelay(20);
638
639 SlowIRRXLowActive(iobase, OFF);
640 }
641 if (IsMIROn(iobase)) {
642
643 SlowIRRXLowActive(iobase, OFF);
644 udelay(20);
645
646 SlowIRRXLowActive(iobase, ON);
647 } else {
648 if (IsFIROn(iobase)) {
649
650 SlowIRRXLowActive(iobase, OFF);
651
652 WriteTX(iobase, ON);
653 udelay(20);
654
655 SlowIRRXLowActive(iobase, ON);
656 udelay(20);
657
658 WriteTX(iobase, OFF);
659 }
660 }
661 break;
662
663 case 0x0d:
664 UseOneRX(iobase, OFF);
665 InvertTX(iobase, OFF);
666 InvertRX(iobase, OFF);
667 SlowIRRXLowActive(iobase, OFF);
668 if (IsSIROn(iobase)) {
669 EnGPIOtoRX2(iobase, OFF);
670 WriteGIO(iobase, OFF);
671 EnRX2(iobase, OFF);
672 } else {
673 EnGPIOtoRX2(iobase, OFF);
674 WriteGIO(iobase, OFF);
675 EnRX2(iobase, OFF);
676 }
677 break;
678
679 case 0x11:
680
681 IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __func__);
682
683 UseOneRX(iobase, ON);
684 InvertTX(iobase, OFF);
685 InvertRX(iobase, ON);
686
687 EnRX2(iobase, ON);
688 EnGPIOtoRX2(iobase, OFF);
689
690 if( IsSIROn(iobase) ){
691
692
693 SlowIRRXLowActive(iobase, ON);
694 udelay(20);
695
696 SlowIRRXLowActive(iobase, OFF);
697
698 } else{
699 IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __func__);
700 }
701 break;
702
703 case 0x0ff:
704 if (IsSIROn(iobase))
705 mode = 0;
706 else if (IsMIROn(iobase))
707 mode = 1;
708 else if (IsFIROn(iobase))
709 mode = 2;
710 else if (IsVFIROn(iobase))
711 mode = 5;
712 SI_SetMode(iobase, mode);
713 break;
714
715 default:
716 IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n",
717 __func__, dongle_id);
718 }
719}
720
721
722
723
724
725
726
/*
 * Switch the controller (and the dongle) to a new link speed and select
 * the matching netdev ops (SIR vs. FIR transmit path).
 *
 * Called with the hardware idle; re-enables the queue when done.
 */
static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
{
	struct net_device *dev = self->netdev;
	u16 iobase;
	u8 value = 0, bTmp;

	iobase = self->io.fir_base;

	self->io.speed = speed;
	IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __func__, speed);

	WriteReg(iobase, I_ST_CT_0, 0x0);	/* disable status while switching */

	/* Controller mode and baud-rate divisor per requested speed. */
	switch (speed) {
	case 2400:
	case 9600:
	case 19200:
	case 38400:
	case 57600:
	case 115200:
		value = (115200/speed)-1;	/* SIR baud divisor */
		SetSIR(iobase, ON);
		CRC16(iobase, ON);
		break;
	case 576000:
		/* FIXME: this can't be right, as it's the same as 115200,
		 * but the IrCC errata sheet used 0 here too. */
		value = 0;
		SetSIR(iobase, ON);
		CRC16(iobase, ON);
		break;
	case 1152000:
		value = 0;
		SetMIR(iobase, ON);	/* MIR: fixed rate, no divisor */
		break;
	case 4000000:
		value = 0;
		SetFIR(iobase, ON);
		SetPulseWidth(iobase, 0);
		SetSendPreambleCount(iobase, 14);
		CRC16(iobase, OFF);	/* FIR uses CRC32 */
		EnTXCRC(iobase, ON);
		break;
	case 16000000:
		value = 0;
		SetVFIR(iobase, ON);	/* VFIR */
		break;
	default:
		value = 0;
		break;
	}

	/* Set baudrate to 0x19[2..7]: divisor occupies the top 6 bits. */
	bTmp = (ReadReg(iobase, I_CF_H_1) & 0x03);
	bTmp |= value << 2;
	WriteReg(iobase, I_CF_H_1, bTmp);

	/* Tell the dongle about the new speed. */
	via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);

	/* Set FIFO size to 64 */
	SetFIFO(iobase, 64);

	WriteReg(iobase, I_ST_CT_0, 0x80);	/* re-enable status */

	/* SIR filtering only makes sense in SIR mode. */
	if (IsSIROn(iobase)) {
		SIRFilter(iobase, ON);
		SIRRecvAny(iobase, ON);
	} else {
		SIRFilter(iobase, OFF);
		SIRRecvAny(iobase, OFF);
	}

	if (speed > 115200) {
		/* FIR/MIR: use the DMA transmit path and prime the receiver */
		dev->netdev_ops = &via_ircc_fir_ops;
		via_ircc_dma_receive(self);
	} else {
		/* SIR: use the async-wrapped transmit path */
		dev->netdev_ops = &via_ircc_sir_ops;
	}
	netif_wake_queue(dev);
}
819
820
821
822
823
824
825
/*
 * SIR transmit path (ndo_start_xmit for speeds <= 115200).
 * Async-wraps the skb into tx_buff and kicks a DMA transmit; a pending
 * speed change with an empty skb is applied immediately, otherwise it
 * is deferred until this frame has gone out (self->new_speed).
 */
static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct via_ircc_cb *self;
	unsigned long flags;
	u16 iobase;
	__u32 speed;

	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
	iobase = self->io.fir_base;

	netif_stop_queue(dev);	/* one frame in flight at a time */
	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame */
		if (!skb->len) {
			via_ircc_change_speed(self, speed);
			dev->trans_start = jiffies;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;	/* change after this frame */
	}
	/* Re-init to SIR mode before every transmit. */
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x00);

	spin_lock_irqsave(&self->lock, flags);
	self->tx_buff.data = self->tx_buff.head;
	/* Async-wrap (start/stop flags, byte stuffing) into the DMA buffer. */
	self->tx_buff.len =
	    async_wrap_skb(skb, self->tx_buff.data,
			   self->tx_buff.truesize);

	dev->stats.tx_bytes += self->tx_buff.len;
	/* Send this frame with old speed */
	SetBaudRate(iobase, self->io.speed);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x80);

	EnableTX(iobase, ON);
	EnableRX(iobase, OFF);

	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);

	EnAllInt(iobase, ON);
	EnTXDMA(iobase, ON);
	EnRXDMA(iobase, OFF);

	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
		       DMA_TX_MODE);

	SetSendByte(iobase, self->tx_buff.len);
	RXStart(iobase, OFF);
	TXStart(iobase, ON);	/* fire — completion arrives via interrupt */

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&self->lock, flags);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
897
/*
 * FIR/MIR transmit path (ndo_start_xmit for speeds > 115200).
 * Copies the skb into the next tx_fifo slot and starts the DMA.
 */
static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct via_ircc_cb *self;
	u16 iobase;
	__u32 speed;
	unsigned long flags;

	self = netdev_priv(dev);
	iobase = self->io.fir_base;

	if (self->st_fifo.len)	/* receive upload still in progress */
		return NETDEV_TX_OK;
	/* Settle delay before transmitting; the 3076 needs the I/O-port
	 * based delay, presumably because udelay is unreliable there —
	 * TODO confirm. */
	if (self->chip_id == 0x3076)
		iodelay(1500);
	else
		udelay(1500);
	netif_stop_queue(dev);
	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		if (!skb->len) {	/* empty frame: change speed now */
			via_ircc_change_speed(self, speed);
			dev->trans_start = jiffies;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;	/* change after this frame */
	}
	spin_lock_irqsave(&self->lock, flags);
	/* Queue the frame in the next free tx_fifo slot. */
	self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
	self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;

	self->tx_fifo.tail += skb->len;
	dev->stats.tx_bytes += skb->len;
	skb_copy_from_linear_data(skb,
		      self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
	self->tx_fifo.len++;
	self->tx_fifo.free++;

	via_ircc_dma_xmit(self, iobase);

	dev->trans_start = jiffies;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&self->lock, flags);
	return NETDEV_TX_OK;

}
946
/*
 * Start a DMA transmit of the tx_fifo entry at tx_fifo.ptr.
 * Resets the chip, programs the DMA controller with the entry's offset
 * inside tx_buff and triggers the transmitter.  Always returns 0.
 */
static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
{
	EnTXDMA(iobase, OFF);
	self->io.direction = IO_XMIT;
	EnPhys(iobase, ON);
	EnableTX(iobase, ON);
	EnableRX(iobase, OFF);
	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);
	EnAllInt(iobase, ON);
	EnTXDMA(iobase, ON);
	EnRXDMA(iobase, OFF);
	/* DMA address = buffer bus address + entry offset within tx_buff. */
	irda_setup_dma(self->io.dma,
		       ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
			self->tx_buff.head) + self->tx_buff_dma,
		       self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
	IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
		   __func__, self->tx_fifo.ptr,
		   self->tx_fifo.queue[self->tx_fifo.ptr].len,
		   self->tx_fifo.len);

	SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);
	RXStart(iobase, OFF);
	TXStart(iobase, ON);	/* completion signalled via interrupt */
	return 0;

}
977
978
979
980
981
982
983
984
/*
 * Finish a DMA transmit: account errors or success, apply a deferred
 * speed change, advance/reset the tx_fifo and wake the queue.
 * Called from the interrupt handler on TX EOM.  Returns TRUE.
 */
static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
{
	int iobase;
	int ret = TRUE;
	u8 Tx_status;

	IRDA_DEBUG(3, "%s()\n", __func__);

	iobase = self->io.fir_base;
	/* Disable DMA */
//	DisableDma(self->io.dma);
	/* Check for underrun! */
	/* Clear bit, by writing 1 into it */
	Tx_status = GetTXStatus(iobase);
	if (Tx_status & 0x08) {	/* TX FIFO underrun */
		self->netdev->stats.tx_errors++;
		self->netdev->stats.tx_fifo_errors++;
		hwreset(self);	/* full re-init after underrun */
	} else {
		self->netdev->stats.tx_packets++;
		ResetChip(iobase, 3);
		ResetChip(iobase, 4);
	}
	/* Check if we need to change the speed (deferred from hard_xmit). */
	if (self->new_speed) {
		via_ircc_change_speed(self, self->new_speed);
		self->new_speed = 0;
	}

	/* Advance past the transmitted tx_fifo entry (FIR mode only). */
	if (IsFIROn(iobase)) {
		if (self->tx_fifo.len) {
			self->tx_fifo.len--;
			self->tx_fifo.ptr++;
		}
	}
	IRDA_DEBUG(1,
		   "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
		   __func__,
		   self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);

	/* Reset the whole queue unconditionally — only one frame is ever
	 * outstanding with the current xmit path. */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;

	/* Make sure we have room for more frames */
	netif_wake_queue(self->netdev);

	return ret;
}
1047
1048
1049
1050
1051
1052
1053
/*
 * Arm the receiver: reset TX/RX queue state, reset the chip, program
 * the RX DMA channel with the whole rx_buff and start reception.
 * Always returns 0.
 */
static int via_ircc_dma_receive(struct via_ircc_cb *self)
{
	int iobase;

	iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	/* Reset transmit and receive bookkeeping. */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;
	self->RxDataReady = 0;
	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;
	self->st_fifo.len = self->st_fifo.pending_bytes = 0;
	self->st_fifo.tail = self->st_fifo.head = 0;

	EnPhys(iobase, ON);
	EnableTX(iobase, OFF);
	EnableRX(iobase, ON);

	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);

	EnAllInt(iobase, ON);
	EnTXDMA(iobase, OFF);
	EnRXDMA(iobase, ON);
	irda_setup_dma(self->io.dma2, self->rx_buff_dma,
		  self->rx_buff.truesize, DMA_RX_MODE);
	TXStart(iobase, OFF);
	RXStart(iobase, ON);	/* frames arrive via the interrupt handler */

	return 0;
}
1090
1091
1092
1093
1094
1095
1096
1097
/*
 * Finish a DMA receive: extract the received frame from rx_buff and
 * deliver it to the IrDA stack via netif_rx().  Called from the
 * interrupt handler on RX EOF.  Returns TRUE when the caller may
 * restart the receiver, FALSE on allocation failure or after a reset.
 */
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
					 int iobase)
{
	struct st_fifo *st_fifo;
	struct sk_buff *skb;
	int len, i;
	u8 status = 0;

	iobase = self->io.fir_base;	/* parameter is ignored and re-read */
	st_fifo = &self->st_fifo;

	if (self->io.speed < 4000000) {	/* SIR/MIR speed path */
		len = GetRecvByte(iobase, self);
		skb = dev_alloc_skb(len + 1);
		if (skb == NULL)
			return FALSE;
		/* Make sure IP header gets aligned */
		skb_reserve(skb, 1);
		/* NOTE(review): len is not validated here; len < 2 would
		 * wrap skb_put(skb, len - 2) — confirm GetRecvByte()
		 * cannot return 0 or 1 on this path. */
		skb_put(skb, len - 2);
		if (self->chip_id == 0x3076) {
			/* the 3076 stores one payload byte per 16-bit slot */
			for (i = 0; i < len - 2; i++)
				skb->data[i] = self->rx_buff.data[i * 2];
		} else {
			if (self->chip_id == 0x3096) {
				for (i = 0; i < len - 2; i++)
					skb->data[i] =
					    self->rx_buff.data[i];
			}
		}
		/* Move to next frame */
		self->rx_buff.data += len;
		self->netdev->stats.rx_bytes += len;
		self->netdev->stats.rx_packets++;
		skb->dev = self->netdev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);
		return TRUE;
	}

	else {			/* FIR mode */
		len = GetRecvByte(iobase, self);
		if (len == 0)
			return TRUE;	/* interrupt only, data not ready yet */
		/* Sanity-check frame size: payload must be 2..2048 bytes. */
		if (((len - 4) < 2) || ((len - 4) > 2048)) {
			IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n",
				   __func__, len, RxCurCount(iobase, self),
				   self->RxLastCount);
			hwreset(self);
			return FALSE;
		}
		IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
			   __func__,
			   st_fifo->len, len - 4, RxCurCount(iobase, self));

		/* Queue the frame descriptor in the status FIFO. */
		st_fifo->entries[st_fifo->tail].status = status;
		st_fifo->entries[st_fifo->tail].len = len;
		st_fifo->pending_bytes += len;
		st_fifo->tail++;
		st_fifo->len++;
		if (st_fifo->tail > MAX_RX_WINDOW)
			st_fifo->tail = 0;
		self->RxDataReady = 0;

		/* Stop the receiver while the frame is uploaded. */
		EnableRX(iobase, OFF);
		EnRXDMA(iobase, OFF);
		RXStart(iobase, OFF);

		/* Dequeue the oldest descriptor. */
		if (st_fifo->head > MAX_RX_WINDOW)
			st_fifo->head = 0;
		status = st_fifo->entries[st_fifo->head].status;
		len = st_fifo->entries[st_fifo->head].len;
		st_fifo->head++;
		st_fifo->len--;

		skb = dev_alloc_skb(len + 1 - 4);
		/*
		 * if frame size, data ptr, or skb ptr are wrong, then get next
		 * entry.
		 */
		if ((skb == NULL) || (skb->data == NULL)
		    || (self->rx_buff.data == NULL) || (len < 6)) {
			self->netdev->stats.rx_dropped++;
			return TRUE;
		}
		skb_reserve(skb, 1);
		skb_put(skb, len - 4);	/* strip the 4 trailing CRC/flag bytes */

		skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
		IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __func__,
			   len - 4, self->rx_buff.data);

		/* Move to next frame */
		self->rx_buff.data += len;
		self->netdev->stats.rx_bytes += len;
		self->netdev->stats.rx_packets++;
		skb->dev = self->netdev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);
	}
	return TRUE;

}
1214
1215
1216
1217
/*
 * Upload a partially received FIR frame directly from rx_buff (used by
 * the timer path when data arrived but no EOF interrupt fired).
 * Returns TRUE on delivery, FALSE when the frame was dropped.
 */
static int upload_rxdata(struct via_ircc_cb *self, int iobase)
{
	struct sk_buff *skb;
	int len;
	struct st_fifo *st_fifo;
	st_fifo = &self->st_fifo;

	len = GetRecvByte(iobase, self);

	IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);

	if ((len - 4) < 2) {	/* too short to be a valid frame */
		self->netdev->stats.rx_dropped++;
		return FALSE;
	}

	skb = dev_alloc_skb(len + 1);
	if (skb == NULL) {
		self->netdev->stats.rx_dropped++;
		return FALSE;
	}
	skb_reserve(skb, 1);	/* align the IP header */
	skb_put(skb, len - 4 + 1);
	skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1);
	/* Account a slot in the status FIFO for this frame. */
	st_fifo->tail++;
	st_fifo->len++;
	if (st_fifo->tail > MAX_RX_WINDOW)
		st_fifo->tail = 0;
	/* Move to next frame */
	self->rx_buff.data += len;
	self->netdev->stats.rx_bytes += len;
	self->netdev->stats.rx_packets++;
	skb->dev = self->netdev;
	skb_reset_mac_header(skb);
	skb->protocol = htons(ETH_P_IRDA);
	netif_rx(skb);
	/* Keep receiving while the window is not exhausted. */
	if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
		RXStart(iobase, ON);
	} else {
		EnableRX(iobase, OFF);
		EnRXDMA(iobase, OFF);
		RXStart(iobase, OFF);
	}
	return TRUE;
}
1263
1264
1265
1266
1267
/*
 * Timer-driven receive handler: while data keeps trickling in, re-arm a
 * short timer; once reception pauses (or the window/buffer fills up),
 * flush every frame queued in the status FIFO up to the stack.
 * Returns TRUE when the FIFO was flushed, FALSE while still waiting.
 */
static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
{
	struct st_fifo *st_fifo;
	struct sk_buff *skb;
	int len;
	u8 status;

	st_fifo = &self->st_fifo;

	if (CkRxRecv(iobase, self)) {
		/* data still arriving — wait another 20 ticks */
		self->RetryCount = 0;
		SetTimer(iobase, 20);
		self->RxDataReady++;
		return FALSE;
	} else
		self->RetryCount++;

	/* Flush when reception paused, the buffer is nearly full, or the
	 * receive window is exhausted. */
	if ((self->RetryCount >= 1) ||
	    ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize)
	    || (st_fifo->len >= (MAX_RX_WINDOW))) {
		while (st_fifo->len > 0) {
			/* dequeue the oldest descriptor */
			if (st_fifo->head > MAX_RX_WINDOW)
				st_fifo->head = 0;
			status = st_fifo->entries[st_fifo->head].status;
			len = st_fifo->entries[st_fifo->head].len;
			st_fifo->head++;
			st_fifo->len--;

			skb = dev_alloc_skb(len + 1 - 4);
			/*
			 * if frame size, data ptr, or skb ptr are wrong,
			 * then get next entry.
			 */
			if ((skb == NULL) || (skb->data == NULL)
			    || (self->rx_buff.data == NULL) || (len < 6)) {
				self->netdev->stats.rx_dropped++;
				continue;
			}
			skb_reserve(skb, 1);
			skb_put(skb, len - 4);	/* strip CRC/flag bytes */
			skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);

			IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __func__,
				   len - 4, st_fifo->head);

			/* Move to next frame */
			self->rx_buff.data += len;
			self->netdev->stats.rx_bytes += len;
			self->netdev->stats.rx_packets++;
			skb->dev = self->netdev;
			skb_reset_mac_header(skb);
			skb->protocol = htons(ETH_P_IRDA);
			netif_rx(skb);
		}
		self->RetryCount = 0;

		IRDA_DEBUG(2,
			   "%s(): End of upload HostStatus=%x,RxStatus=%x\n",
			   __func__,
			   GetHostStatus(iobase), GetRXStatus(iobase));

		/* If an EOF is pending and new data arrived since the last
		 * pass, upload it and re-arm the receiver. */
		if ((GetRXStatus(iobase) & 0x10)
		    && (RxCurCount(iobase, self) != self->RxLastCount)) {
			upload_rxdata(self, iobase);
			if (irda_device_txqueue_empty(self->netdev))
				via_ircc_dma_receive(self);
		}
	}
	else
		SetTimer(iobase, 4);	/* short re-check interval */
	return TRUE;

}
1347
1348
1349
1350
1351
1352
1353
1354
1355
/*
 * Interrupt handler.  Host status bits: 0x40 timer, 0x20 TX, 0x10 RX.
 * Dispatches to transmit/receive completion and recovers from error
 * conditions with a hardware reset where needed.
 */
static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct via_ircc_cb *self = netdev_priv(dev);
	int iobase;
	u8 iHostIntType, iRxIntType, iTxIntType;

	iobase = self->io.fir_base;
	spin_lock(&self->lock);
	iHostIntType = GetHostStatus(iobase);

	IRDA_DEBUG(4, "%s(): iHostIntType %02x: %s %s %s %02x\n",
		   __func__, iHostIntType,
		   (iHostIntType & 0x40) ? "Timer" : "",
		   (iHostIntType & 0x20) ? "Tx" : "",
		   (iHostIntType & 0x10) ? "Rx" : "",
		   (iHostIntType & 0x0e) >> 1);

	if ((iHostIntType & 0x40) != 0) {	/* Timer interrupt */
		self->EventFlag.TimeOut++;
		ClearTimerInt(iobase, 1);
		if (self->io.direction == IO_XMIT) {
			via_ircc_dma_xmit(self, iobase);	/* retry the send */
		}
		if (self->io.direction == IO_RECV) {
			/* Reset the receiver if data has been ready for
			 * too long without completing, else let the
			 * timer handler flush what arrived. */
			if (self->RxDataReady > 30) {
				hwreset(self);
				if (irda_device_txqueue_empty(self->netdev)) {
					via_ircc_dma_receive(self);
				}
			} else {	/* TX not finished */
				RxTimerHandler(self, iobase);
			}
		}
	}
	if ((iHostIntType & 0x20) != 0) {	/* Tx interrupt */
		iTxIntType = GetTXStatus(iobase);

		IRDA_DEBUG(4, "%s(): iTxIntType %02x: %s %s %s %s\n",
			   __func__, iTxIntType,
			   (iTxIntType & 0x08) ? "FIFO underr." : "",
			   (iTxIntType & 0x04) ? "EOM" : "",
			   (iTxIntType & 0x02) ? "FIFO ready" : "",
			   (iTxIntType & 0x01) ? "Early EOM" : "");

		if (iTxIntType & 0x4) {	/* end of message */
			self->EventFlag.EOMessage++;
			if (via_ircc_dma_xmit_complete(self)) {
				/* nothing left to send: switch to receive */
				if (irda_device_txqueue_empty
				    (self->netdev)) {
					via_ircc_dma_receive(self);
				}
			} else {
				self->EventFlag.Unknown++;
			}
		}
	}
	//----------------------------------------
	if ((iHostIntType & 0x10) != 0) {	/* Rx interrupt */
		/* Check if DMA has finished */
		iRxIntType = GetRXStatus(iobase);

		IRDA_DEBUG(4, "%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n",
			   __func__, iRxIntType,
			   (iRxIntType & 0x80) ? "PHY err." : "",
			   (iRxIntType & 0x40) ? "CRC err" : "",
			   (iRxIntType & 0x20) ? "FIFO overr." : "",
			   (iRxIntType & 0x10) ? "EOF" : "",
			   (iRxIntType & 0x08) ? "RxData" : "",
			   (iRxIntType & 0x02) ? "RxMaxLen" : "",
			   (iRxIntType & 0x01) ? "SIR bad" : "");
		if (!iRxIntType)
			IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __func__);

		if (iRxIntType & 0x10) {	/* EOF: frame complete */
			if (via_ircc_dma_receive_complete(self, iobase)) {
				/* re-arm for the next frame */
				via_ircc_dma_receive(self);
			}
		} else {	/* RxIRQ error: recover and restart */
			IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
				   __func__, iRxIntType, iHostIntType,
				   RxCurCount(iobase, self),
				   self->RxLastCount);

			if (iRxIntType & 0x20) {	/* FIFO overrun */
				ResetChip(iobase, 0);
				ResetChip(iobase, 1);
			} else {	/* PHY, CRC or SIR error */
				if (iRxIntType != 0x08)
					hwreset(self);	/* except plain RxData */
			}
			via_ircc_dma_receive(self);
		}

	}
	spin_unlock(&self->lock);
	return IRQ_RETVAL(iHostIntType);
}
1460
/*
 * hwreset - reset the FIR controller to a sane, known state
 *
 * Used to recover from stuck receive/transmit conditions (called from the
 * interrupt path and the receive timeout handling).  It halts all DMA and
 * TX/RX activity, re-runs the basic card init sequence, restores SIR-mode
 * defaults (9600 baud, CRC16), and finally re-applies the speed that was
 * in use before the reset.  Runs in atomic context - must not sleep.
 *
 * NOTE(review): the statement order below mirrors the chip's documented
 * init sequence; do not reorder without checking the VIA datasheet.
 */
static void hwreset(struct via_ircc_cb *self)
{
	int iobase;
	iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	/* Quiesce the chip: reset, then stop every DMA/TX/RX engine. */
	ResetChip(iobase, 5);
	EnableDMA(iobase, OFF);
	EnableTX(iobase, OFF);
	EnableRX(iobase, OFF);
	EnRXDMA(iobase, OFF);
	EnTXDMA(iobase, OFF);
	RXStart(iobase, OFF);
	TXStart(iobase, OFF);
	/* Re-run the base initialisation, then program SIR defaults. */
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	/* NOTE(review): the 0x00/0x80 writes to I_ST_CT_0 appear to gate the
	 * block while it is reprogrammed - confirm against the datasheet. */
	WriteReg(iobase, I_ST_CT_0, 0x00);
	SetBaudRate(iobase, 9600);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x80);

	/* Restore the speed we were running at before the reset. */
	via_ircc_change_speed(self, self->io.speed);

	/* Any partially-received frames in the status FIFO are now stale. */
	self->st_fifo.len = 0;
}
1493
1494
1495
1496
1497
1498
1499
1500static int via_ircc_is_receiving(struct via_ircc_cb *self)
1501{
1502 int status = FALSE;
1503 int iobase;
1504
1505 IRDA_ASSERT(self != NULL, return FALSE;);
1506
1507 iobase = self->io.fir_base;
1508 if (CkRxRecv(iobase, self))
1509 status = TRUE;
1510
1511 IRDA_DEBUG(2, "%s(): status=%x....\n", __func__, status);
1512
1513 return status;
1514}
1515
1516
1517
1518
1519
1520
1521
1522
1523static int via_ircc_net_open(struct net_device *dev)
1524{
1525 struct via_ircc_cb *self;
1526 int iobase;
1527 char hwname[32];
1528
1529 IRDA_DEBUG(3, "%s()\n", __func__);
1530
1531 IRDA_ASSERT(dev != NULL, return -1;);
1532 self = netdev_priv(dev);
1533 dev->stats.rx_packets = 0;
1534 IRDA_ASSERT(self != NULL, return 0;);
1535 iobase = self->io.fir_base;
1536 if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
1537 IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name,
1538 self->io.irq);
1539 return -EAGAIN;
1540 }
1541
1542
1543
1544
1545 if (request_dma(self->io.dma, dev->name)) {
1546 IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name,
1547 self->io.dma);
1548 free_irq(self->io.irq, self);
1549 return -EAGAIN;
1550 }
1551 if (self->io.dma2 != self->io.dma) {
1552 if (request_dma(self->io.dma2, dev->name)) {
1553 IRDA_WARNING("%s, unable to allocate dma2=%d\n",
1554 driver_name, self->io.dma2);
1555 free_irq(self->io.irq, self);
1556 free_dma(self->io.dma);
1557 return -EAGAIN;
1558 }
1559 }
1560
1561
1562
1563 EnAllInt(iobase, ON);
1564 EnInternalLoop(iobase, OFF);
1565 EnExternalLoop(iobase, OFF);
1566
1567
1568 via_ircc_dma_receive(self);
1569
1570
1571 netif_start_queue(dev);
1572
1573
1574
1575
1576
1577 sprintf(hwname, "VIA @ 0x%x", iobase);
1578 self->irlap = irlap_open(dev, &self->qos, hwname);
1579
1580 self->RxLastCount = 0;
1581
1582 return 0;
1583}
1584
1585
1586
1587
1588
1589
1590
/*
 * via_ircc_net_close - stop the device
 * @dev: network device being brought down
 *
 * Reverses via_ircc_net_open(): stops the TX queue, tears down the IrLAP
 * instance, halts the DMA engines, masks chip interrupts and releases the
 * IRQ and DMA channel(s).  The teardown order matters - DMA and interrupts
 * must be quiesced before the IRQ/DMA resources are freed.
 *
 * Always returns 0.
 */
static int via_ircc_net_close(struct net_device *dev)
{
	struct via_ircc_cb *self;
	int iobase;

	IRDA_DEBUG(3, "%s()\n", __func__);

	IRDA_ASSERT(dev != NULL, return -1;);
	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return 0;);

	/* Stop the network layer from handing us more frames. */
	netif_stop_queue(dev);
	/* Stop and remove the IrLAP instance before touching the hardware. */
	if (self->irlap)
		irlap_close(self->irlap);
	self->irlap = NULL;
	iobase = self->io.fir_base;
	/* Halt both DMA directions and the DMA channel itself. */
	EnTXDMA(iobase, OFF);
	EnRXDMA(iobase, OFF);
	DisableDmaChannel(self->io.dma);

	/* Mask all chip interrupts, then release IRQ and DMA resources.
	 * Note: dev is the dev_id cookie passed to request_irq(). */
	EnAllInt(iobase, OFF);
	free_irq(self->io.irq, dev);
	free_dma(self->io.dma);
	if (self->io.dma2 != self->io.dma)
		free_dma(self->io.dma2);

	return 0;
}
1622
1623
1624
1625
1626
1627
1628
1629static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
1630 int cmd)
1631{
1632 struct if_irda_req *irq = (struct if_irda_req *) rq;
1633 struct via_ircc_cb *self;
1634 unsigned long flags;
1635 int ret = 0;
1636
1637 IRDA_ASSERT(dev != NULL, return -1;);
1638 self = netdev_priv(dev);
1639 IRDA_ASSERT(self != NULL, return -1;);
1640 IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
1641 cmd);
1642
1643 spin_lock_irqsave(&self->lock, flags);
1644 switch (cmd) {
1645 case SIOCSBANDWIDTH:
1646 if (!capable(CAP_NET_ADMIN)) {
1647 ret = -EPERM;
1648 goto out;
1649 }
1650 via_ircc_change_speed(self, irq->ifr_baudrate);
1651 break;
1652 case SIOCSMEDIABUSY:
1653 if (!capable(CAP_NET_ADMIN)) {
1654 ret = -EPERM;
1655 goto out;
1656 }
1657 irda_device_set_media_busy(self->netdev, TRUE);
1658 break;
1659 case SIOCGRECEIVING:
1660 irq->ifr_receiving = via_ircc_is_receiving(self);
1661 break;
1662 default:
1663 ret = -EOPNOTSUPP;
1664 }
1665 out:
1666 spin_unlock_irqrestore(&self->lock, flags);
1667 return ret;
1668}
1669
/* Module metadata and init/exit entry points (the init and cleanup
 * functions are defined earlier in this file). */
MODULE_AUTHOR("VIA Technologies,inc");
MODULE_DESCRIPTION("VIA IrDA Device Driver");
MODULE_LICENSE("GPL");

module_init(via_ircc_init);
module_exit(via_ircc_cleanup);
1676